diff --git a/.github/workflows/quic-organization-repolinter.yml b/.github/workflows/quic-organization-repolinter.yml deleted file mode 100644 index a9dd91c4..00000000 --- a/.github/workflows/quic-organization-repolinter.yml +++ /dev/null @@ -1,30 +0,0 @@ -name: QuIC Organization Repolinter - -on: - push: - branches: [ "main" ] - pull_request: - branches: [ "main" ] - -jobs: - repolinter: - runs-on: ubuntu-latest - steps: - - name: Checkout Repo - uses: actions/checkout@v2 - - name: Verify repolinter config file is present - id: check_files - uses: andstor/file-existence-action@v1 - with: - files: "repolint.json" - - name: Run Repolinter with local repolint.json - if: steps.check_files.outputs.files_exists == 'true' - uses: todogroup/repolinter-action@v1 - with: - config_file: "repolint.json" - - name: Run Repolinter with default ruleset - if: steps.check_files.outputs.files_exists == 'false' - uses: todogroup/repolinter-action@v1 - with: - config_url: "https://raw.githubusercontent.com/quic/.github/main/repolint.json" - diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..318eb6a6 --- /dev/null +++ b/LICENSE @@ -0,0 +1,11 @@ +Copyright 2024 Qualcomm Innovation Center, Inc. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/LICENSE.pdf b/LICENSE.pdf deleted file mode 100644 index 705dfc9c..00000000 Binary files a/LICENSE.pdf and /dev/null differ diff --git a/README.md b/README.md index 4b1699a7..04135cb6 100644 --- a/README.md +++ b/README.md @@ -1,30 +1,373 @@ -# Qualcomm AI Stack Models - -## Introduction - -

- - -Qualcomm AI Stack Models contains following parts. - -1. models-for-accuracy - Models verified for accuracy on target. Developers can directly use these models. -2. models-for-solutions - Models verified for functionality - will be merged with models-accuracy in future. -3. AI-Solutions - End-to-End AI solutions using (2) above, across Qualcomm Platforms. - -## Workflow for AI Solutions - -1. Use notebooks in 'models-for-solutions' to prepare models in compatible format -2. Use source code in 'ai-solutions' and models from step-1 to create end-to-end solutions - -## Report Issues - -Please report issues by raising an _issue_ in the GitHub respository. - -## Team - -Qualcomm AI Stack Model is a project maintained by Qualcomm Innovation Center, Inc. - -## License - -Please see the [LICENSE](LICENSE.pdf) for more details. +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](https://aihub.qualcomm.com) + +# Qualcomm® AI Hub Models + +The [Qualcomm® AI Hub Models](https://aihub.qualcomm.com/) are a collection of +state-of-the-art machine learning models optimized for performance (latency, +memory, etc.) and ready to deploy on Qualcomm® devices. + +* Explore models optimized for on-device deployment of vision, speech, text, and generative AI. +* View open-source recipes to quantize, optimize, and deploy these models on-device. +* Browse through [performance metrics](https://aihub.qualcomm.com/models) captured for these models on several devices. +* Access the models through [Hugging Face](https://huggingface.co/qualcomm). +* [Sign up](https://aihub.qualcomm.com/) to run these models on hosted Qualcomm® devices. + +Supported runtimes: +* [TensorFlow Lite](https://www.tensorflow.org/lite) +* [Qualcomm AI Engine Direct](https://www.qualcomm.com/developer/artificial-intelligence#overview) + +Supported operating systems: +* Android 11+ + +Supported compute units: +* CPU, GPU, NPU (includes [Hexagon DSP](https://developer.qualcomm.com/software/hexagon-dsp-sdk/dsp-processor), [HTP](https://developer.qualcomm.com/hardware/qualcomm-innovators-development-kit/ai-resources-overview/ai-hardware-cores-accelerators)) + +Supported precision: +* Floating point: FP16 +* Integer: INT8 (8-bit weight and activation on select models), INT4 (4-bit weight, 16-bit activation on select models) + +Supported chipsets: +* [Snapdragon 845](https://www.qualcomm.com/products/mobile/snapdragon/smartphones/snapdragon-8-series-mobile-platforms/snapdragon-845-mobile-platform), [Snapdragon 855/855+](https://www.qualcomm.com/products/mobile/snapdragon/smartphones/snapdragon-8-series-mobile-platforms/snapdragon-855-mobile-platform), [Snapdragon 865/865+](https://www.qualcomm.com/products/mobile/snapdragon/smartphones/snapdragon-8-series-mobile-platforms/snapdragon-865-plus-5g-mobile-platform), [Snapdragon 888/888+](https://www.qualcomm.com/products/mobile/snapdragon/smartphones/snapdragon-8-series-mobile-platforms/snapdragon-888-5g-mobile-platform) +* [Snapdragon 8 Gen 1](https://www.qualcomm.com/products/mobile/snapdragon/smartphones/snapdragon-8-series-mobile-platforms/snapdragon-8-gen-1-mobile-platform), [Snapdragon 8 Gen 2](https://www.qualcomm.com/products/mobile/snapdragon/smartphones/snapdragon-8-series-mobile-platforms/snapdragon-8-gen-2-mobile-platform), [Snapdragon 8 Gen 3](https://www.qualcomm.com/products/mobile/snapdragon/smartphones/snapdragon-8-series-mobile-platforms/snapdragon-8-gen-3-mobile-platform) + +Select supported devices: +* Samsung Galaxy S21 
Series, Galaxy S22 Series, Galaxy S23 Series, Galaxy S24 Series +* Xiaomi 12, 13 +* Google Pixel 3, 4, 5 + +and many more. + +## Installation + +We currently support **Python >= 3.8 and <= 3.10.** We recommend using a Python +virtual environment +([miniconda](https://docs.anaconda.com/free/miniconda/miniconda-install/) or +[virtualenv](https://virtualenv.pypa.io/en/latest/)). + +You can set up a virtual environment using: +``` +python -m venv qai_hub_models_env && source qai_hub_models_env/bin/activate +``` + +Once the environment is set up, you can install the base package using: + +```shell +pip install qai_hub_models +``` + +Some models (e.g. [YOLOv7](https://github.com/WongKinYiu/yolov7)) require +additional dependencies. You can install those dependencies automatically +using: + +```shell +pip install "qai_hub_models[yolov7]" +``` + +## Getting Started + +Each model comes with the following set of CLI demos: +* Locally runnable PyTorch-based CLI demo to validate the model off device. +* On-device CLI demo that produces a model ready for on-device deployment and runs the model on a hosted Qualcomm® device (needs [sign up](https://aihub.qualcomm.com/)). + +All the models produced by these demos are freely available on [Hugging +Face](https://huggingface.co/qualcomm) or through our +[website](https://aihub.qualcomm.com/models). See the individual model readme +files (e.g. [YOLOv7](qai_hub_models/models/yolov7/README.md)) for more +details. + +### Local CLI Demo with PyTorch + +[All models](#model-directory) contain CLI demos that run the model in +**PyTorch** locally with sample input. Demos are optimized for code clarity +rather than latency, and run exclusively in PyTorch. Optimal model latency can +be achieved with model export via [Qualcomm® AI +Hub](https://www.aihub.qualcomm.com). + +```shell +python -m qai_hub_models.models.yolov7.demo +``` + +For additional details on how to use the demo CLI, use the `--help` option: +```shell +python -m qai_hub_models.models.yolov7.demo --help +``` + +See the [model directory](#model-directory) below to explore all other models. + +--- + +Note that most ML use cases require some pre- and post-processing that is not +part of the model itself. A Python reference implementation of this is provided +for each model in `app.py`. Apps load & pre-process model input, run model +inference, and post-process model output before returning it to you. + +Here is an example of how the PyTorch CLI works for [YOLOv7](https://github.com/WongKinYiu/yolov7): + +```python +from PIL import Image +from qai_hub_models.models.yolov7 import Model as YOLOv7Model +from qai_hub_models.models.yolov7 import App as YOLOv7App +from qai_hub_models.utils.asset_loaders import load_image +from qai_hub_models.models.yolov7.demo import IMAGE_ADDRESS + +# Load pre-trained model +torch_model = YOLOv7Model.from_pretrained() + +# Load a simple PyTorch-based application +app = YOLOv7App(torch_model) +image = load_image(IMAGE_ADDRESS, "yolov7") + +# Perform prediction on a sample image +pred_image = app.predict(image)[0] +Image.fromarray(pred_image).show() + +``` + +### CLI demo to run on hosted Qualcomm® devices + +[Some models](#model-directory) contain CLI demos that run the model on a hosted +Qualcomm® device using [Qualcomm® AI Hub](https://aihub.qualcomm.com). + +To run the model on a hosted device, [sign up for access to Qualcomm® AI +Hub](https://aihub.qualcomm.com). Sign in to Qualcomm® AI Hub with your +Qualcomm® ID. Once signed in, navigate to Account -> Settings -> API Token. 
+ +With this API token, you can configure your client to run models on the +cloud-hosted devices. + +```shell +qai-hub configure --api_token API_TOKEN +``` +Navigate to [docs](https://app.aihub.qualcomm.com/docs/) for more information. + +The on-device CLI demo performs the following: +* Exports the model for on-device execution. +* Profiles the model on-device on a cloud-hosted Qualcomm® device. +* Runs the model on-device on a cloud-hosted Qualcomm® device and compares accuracy between a local CPU-based PyTorch run and the on-device run. +* Downloads models (and other required assets) that can be deployed on-device in an Android application. + +```shell +python -m qai_hub_models.models.yolov7.export +``` + +Many models may have initialization parameters that allow loading custom +weights and checkpoints. See `--help` for more details. + +```shell +python -m qai_hub_models.models.yolov7.export --help +``` + +#### How does this export script work? + +As described above, the export script compiles, optimizes, and runs the model on +a cloud-hosted Qualcomm® device. The demo uses [Qualcomm® AI Hub's Python +APIs](https://app.aihub.qualcomm.com/docs/). + +Qualcomm® AI Hub explained + +Here is a simplified example of code that can be used to run the entire model +on a cloud-hosted device: + +```python +import torch +import qai_hub as hub +from qai_hub_models.models.yolov7 import Model as YOLOv7Model + +# Load YOLOv7 in PyTorch +torch_model = YOLOv7Model.from_pretrained() +torch_model.eval() + +# Trace the PyTorch model using one data point from the provided sample inputs. +example_input = [torch.tensor(data[0]) for name, data in torch_model.sample_inputs().items()] +pt_model = torch.jit.trace(torch_model, example_input) + +# Select a device +device = hub.Device("Samsung Galaxy S23") + +# Compile model on a specific device +compile_job = hub.submit_compile_job( + model=pt_model, + device=device, + input_specs=torch_model.get_input_spec(), +) + +# Get target model to run on a cloud-hosted device +target_model = compile_job.get_target_model() + +# Profile the previously compiled model +profile_job = hub.submit_profile_job( + model=target_model, + device=device, +) + +# Perform on-device inference on the cloud-hosted device +input_data = torch_model.sample_inputs() +inference_job = hub.submit_inference_job( + model=target_model, + device=device, + inputs=input_data, +) + +# Returns the output as a dict of {name: numpy array} +on_device_output = inference_job.download_output_data() +``` + +--- + +### Working with source code + +You can clone the repository using: + +```shell +git clone https://github.com/quic/ai-hub-models.git +cd ai-hub-models +pip install -e . +``` + +Install the additional dependencies needed by a specific model before working with it: +```shell +cd ai-hub-models +pip install -e ".[yolov7]" +``` + +All models have accuracy and end-to-end tests when applicable. These tests are +designed to be run locally and verify that the PyTorch code produces correct +results. To run the tests for a model: +```shell +python -m pytest --pyargs qai_hub_models.models.yolov7.test +``` +--- + +For any issues, please contact us at ai-hub-support@qti.qualcomm.com. 
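Building on the compile/profile/inference sketch above, the compiled asset and the profiling report can also be pulled down locally for deployment. This is a minimal, unofficial sketch: it reuses the `compile_job` and `profile_job` handles from that example and assumes the Qualcomm® AI Hub Python client exposes `Model.download` and `ProfileJob.download_profile` as documented; the output filename is illustrative only.

```python
# Continuing from the example above: download deployable assets and metrics.

# Pull the compiled, device-ready model so it can be bundled into an
# Android application ("yolov7_s23.tflite" is a hypothetical filename).
target_model = compile_job.get_target_model()
target_model.download("yolov7_s23.tflite")

# Fetch the on-device profiling results (latency, memory, compute units),
# assuming download_profile() returns them as a Python dict.
profile_results = profile_job.download_profile()
print(profile_results)
```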
+ +--- + +## Model Directory + +### Computer Vision + +| Model | README | Torch App | Device Export | CLI Demo +| -- | -- | -- | -- | -- +| | | | | +| **Image Classification** +| [ResNet50](https://aihub.qualcomm.com/models/resnet50) | [qai_hub_models.models.resnet50](qai_hub_models/models/resnet50/README.md) | ✔️ | ✔️ | ✔️ +| [SqueezeNet-1_1Quantized](https://aihub.qualcomm.com/models/squeezenet1_1_quantized) | [qai_hub_models.models.squeezenet1_1_quantized](qai_hub_models/models/squeezenet1_1_quantized/README.md) | ✔️ | ✔️ | ✔️ +| [MNASNet05](https://aihub.qualcomm.com/models/mnasnet05) | [qai_hub_models.models.mnasnet05](qai_hub_models/models/mnasnet05/README.md) | ✔️ | ✔️ | ✔️ +| [Swin-Small](https://aihub.qualcomm.com/models/swin_small) | [qai_hub_models.models.swin_small](qai_hub_models/models/swin_small/README.md) | ✔️ | ✔️ | ✔️ +| [Swin-Base](https://aihub.qualcomm.com/models/swin_base) | [qai_hub_models.models.swin_base](qai_hub_models/models/swin_base/README.md) | ✔️ | ✔️ | ✔️ +| [MobileNet-v3-Small](https://aihub.qualcomm.com/models/mobilenet_v3_small) | [qai_hub_models.models.mobilenet_v3_small](qai_hub_models/models/mobilenet_v3_small/README.md) | ✔️ | ✔️ | ✔️ +| [RegNet](https://aihub.qualcomm.com/models/regnet) | [qai_hub_models.models.regnet](qai_hub_models/models/regnet/README.md) | ✔️ | ✔️ | ✔️ +| [GoogLeNetQuantized](https://aihub.qualcomm.com/models/googlenet_quantized) | [qai_hub_models.models.googlenet_quantized](qai_hub_models/models/googlenet_quantized/README.md) | ✔️ | ✔️ | ✔️ +| [WideResNet50-Quantized](https://aihub.qualcomm.com/models/wideresnet50_quantized) | [qai_hub_models.models.wideresnet50_quantized](qai_hub_models/models/wideresnet50_quantized/README.md) | ✔️ | ✔️ | ✔️ +| [MobileNet-v3-Large](https://aihub.qualcomm.com/models/mobilenet_v3_large) | [qai_hub_models.models.mobilenet_v3_large](qai_hub_models/models/mobilenet_v3_large/README.md) | ✔️ | ✔️ | ✔️ +| [MobileNet-v2-Quantized](https://aihub.qualcomm.com/models/mobilenet_v2_quantized) | [qai_hub_models.models.mobilenet_v2_quantized](qai_hub_models/models/mobilenet_v2_quantized/README.md) | ✔️ | ✔️ | ✔️ +| [ResNeXt101Quantized](https://aihub.qualcomm.com/models/resnext101_quantized) | [qai_hub_models.models.resnext101_quantized](qai_hub_models/models/resnext101_quantized/README.md) | ✔️ | ✔️ | ✔️ +| [DenseNet-121](https://aihub.qualcomm.com/models/densenet121) | [qai_hub_models.models.densenet121](qai_hub_models/models/densenet121/README.md) | ✔️ | ✔️ | ✔️ +| [ResNet101Quantized](https://aihub.qualcomm.com/models/resnet101_quantized) | [qai_hub_models.models.resnet101_quantized](qai_hub_models/models/resnet101_quantized/README.md) | ✔️ | ✔️ | ✔️ +| [ResNet18](https://aihub.qualcomm.com/models/resnet18) | [qai_hub_models.models.resnet18](qai_hub_models/models/resnet18/README.md) | ✔️ | ✔️ | ✔️ +| [ResNet101](https://aihub.qualcomm.com/models/resnet101) | [qai_hub_models.models.resnet101](qai_hub_models/models/resnet101/README.md) | ✔️ | ✔️ | ✔️ +| [Swin-Tiny](https://aihub.qualcomm.com/models/swin_tiny) | [qai_hub_models.models.swin_tiny](qai_hub_models/models/swin_tiny/README.md) | ✔️ | ✔️ | ✔️ +| [WideResNet50](https://aihub.qualcomm.com/models/wideresnet50) | [qai_hub_models.models.wideresnet50](qai_hub_models/models/wideresnet50/README.md) | ✔️ | ✔️ | ✔️ +| [ResNet18Quantized](https://aihub.qualcomm.com/models/resnet18_quantized) | [qai_hub_models.models.resnet18_quantized](qai_hub_models/models/resnet18_quantized/README.md) | ✔️ | ✔️ | ✔️ +| 
[MobileNet-v2](https://aihub.qualcomm.com/models/mobilenet_v2) | [qai_hub_models.models.mobilenet_v2](qai_hub_models/models/mobilenet_v2/README.md) | ✔️ | ✔️ | ✔️ +| [VIT](https://aihub.qualcomm.com/models/vit) | [qai_hub_models.models.vit](qai_hub_models/models/vit/README.md) | ✔️ | ✔️ | ✔️ +| [ResNeXt50](https://aihub.qualcomm.com/models/resnext50) | [qai_hub_models.models.resnext50](qai_hub_models/models/resnext50/README.md) | ✔️ | ✔️ | ✔️ +| [EfficientNet-B0](https://aihub.qualcomm.com/models/efficientnet_b0) | [qai_hub_models.models.efficientnet_b0](qai_hub_models/models/efficientnet_b0/README.md) | ✔️ | ✔️ | ✔️ +| [Inception-v3Quantized](https://aihub.qualcomm.com/models/inception_v3_quantized) | [qai_hub_models.models.inception_v3_quantized](qai_hub_models/models/inception_v3_quantized/README.md) | ✔️ | ✔️ | ✔️ +| [ConvNext-Tiny](https://aihub.qualcomm.com/models/convnext_tiny) | [qai_hub_models.models.convnext_tiny](qai_hub_models/models/convnext_tiny/README.md) | ✔️ | ✔️ | ✔️ +| [ResNeXt101](https://aihub.qualcomm.com/models/resnext101) | [qai_hub_models.models.resnext101](qai_hub_models/models/resnext101/README.md) | ✔️ | ✔️ | ✔️ +| [Shufflenet-v2](https://aihub.qualcomm.com/models/shufflenet_v2) | [qai_hub_models.models.shufflenet_v2](qai_hub_models/models/shufflenet_v2/README.md) | ✔️ | ✔️ | ✔️ +| [Shufflenet-v2Quantized](https://aihub.qualcomm.com/models/shufflenet_v2_quantized) | [qai_hub_models.models.shufflenet_v2_quantized](qai_hub_models/models/shufflenet_v2_quantized/README.md) | ✔️ | ✔️ | ✔️ +| [SqueezeNet-1_1](https://aihub.qualcomm.com/models/squeezenet1_1) | [qai_hub_models.models.squeezenet1_1](qai_hub_models/models/squeezenet1_1/README.md) | ✔️ | ✔️ | ✔️ +| [GoogLeNet](https://aihub.qualcomm.com/models/googlenet) | [qai_hub_models.models.googlenet](qai_hub_models/models/googlenet/README.md) | ✔️ | ✔️ | ✔️ +| [Inception-v3](https://aihub.qualcomm.com/models/inception_v3) | [qai_hub_models.models.inception_v3](qai_hub_models/models/inception_v3/README.md) | ✔️ | ✔️ | ✔️ +| | | | | +| **Image Editing** +| [LaMa-Dilated](https://aihub.qualcomm.com/models/lama_dilated) | [qai_hub_models.models.lama_dilated](qai_hub_models/models/lama_dilated/README.md) | ✔️ | ✔️ | ✔️ +| | | | | +| **Image Generation** +| [StyleGAN2](https://aihub.qualcomm.com/models/stylegan2) | [qai_hub_models.models.stylegan2](qai_hub_models/models/stylegan2/README.md) | ✔️ | ✔️ | ✔️ +| | | | | +| **Super Resolution** +| [QuickSRNetLarge](https://aihub.qualcomm.com/models/quicksrnetlarge) | [qai_hub_models.models.quicksrnetlarge](qai_hub_models/models/quicksrnetlarge/README.md) | ✔️ | ✔️ | ✔️ +| [XLSR-Quantized](https://aihub.qualcomm.com/models/xlsr_quantized) | [qai_hub_models.models.xlsr_quantized](qai_hub_models/models/xlsr_quantized/README.md) | ✔️ | ✔️ | ✔️ +| [QuickSRNetMedium](https://aihub.qualcomm.com/models/quicksrnetmedium) | [qai_hub_models.models.quicksrnetmedium](qai_hub_models/models/quicksrnetmedium/README.md) | ✔️ | ✔️ | ✔️ +| [SESR-M5](https://aihub.qualcomm.com/models/sesr_m5) | [qai_hub_models.models.sesr_m5](qai_hub_models/models/sesr_m5/README.md) | ✔️ | ✔️ | ✔️ +| [XLSR](https://aihub.qualcomm.com/models/xlsr) | [qai_hub_models.models.xlsr](qai_hub_models/models/xlsr/README.md) | ✔️ | ✔️ | ✔️ +| [Real-ESRGAN-General-x4v3](https://aihub.qualcomm.com/models/real_esrgan_general_x4v3) | [qai_hub_models.models.real_esrgan_general_x4v3](qai_hub_models/models/real_esrgan_general_x4v3/README.md) | ✔️ | ✔️ | ✔️ +| [QuickSRNetSmall](https://aihub.qualcomm.com/models/quicksrnetsmall) | 
[qai_hub_models.models.quicksrnetsmall](qai_hub_models/models/quicksrnetsmall/README.md) | ✔️ | ✔️ | ✔️ +| [SESR-M5-Quantized](https://aihub.qualcomm.com/models/sesr_m5_quantized) | [qai_hub_models.models.sesr_m5_quantized](qai_hub_models/models/sesr_m5_quantized/README.md) | ✔️ | ✔️ | ✔️ +| [Real-ESRGAN-x4plus](https://aihub.qualcomm.com/models/real_esrgan_x4plus) | [qai_hub_models.models.real_esrgan_x4plus](qai_hub_models/models/real_esrgan_x4plus/README.md) | ✔️ | ✔️ | ✔️ +| [ESRGAN](https://aihub.qualcomm.com/models/esrgan) | [qai_hub_models.models.esrgan](qai_hub_models/models/esrgan/README.md) | ✔️ | ✔️ | ✔️ +| | | | | +| **Semantic Segmentation** +| [FFNet-40S-Quantized](https://aihub.qualcomm.com/models/ffnet_40s_quantized) | [qai_hub_models.models.ffnet_40s_quantized](qai_hub_models/models/ffnet_40s_quantized/README.md) | ✔️ | ✔️ | ✔️ +| [FFNet-54S](https://aihub.qualcomm.com/models/ffnet_54s) | [qai_hub_models.models.ffnet_54s](qai_hub_models/models/ffnet_54s/README.md) | ✔️ | ✔️ | ✔️ +| [DDRNet23-Slim](https://aihub.qualcomm.com/models/ddrnet23_slim) | [qai_hub_models.models.ddrnet23_slim](qai_hub_models/models/ddrnet23_slim/README.md) | ✔️ | ✔️ | ✔️ +| [Yolo-v8-Segmentation](https://aihub.qualcomm.com/models/yolov8_seg) | [qai_hub_models.models.yolov8_seg](qai_hub_models/models/yolov8_seg/README.md) | ✔️ | ✔️ | ✔️ +| [FFNet-54S-Quantized](https://aihub.qualcomm.com/models/ffnet_54s_quantized) | [qai_hub_models.models.ffnet_54s_quantized](qai_hub_models/models/ffnet_54s_quantized/README.md) | ✔️ | ✔️ | ✔️ +| [SINet](https://aihub.qualcomm.com/models/sinet) | [qai_hub_models.models.sinet](qai_hub_models/models/sinet/README.md) | ✔️ | ✔️ | ✔️ +| [FFNet-40S](https://aihub.qualcomm.com/models/ffnet_40s) | [qai_hub_models.models.ffnet_40s](qai_hub_models/models/ffnet_40s/README.md) | ✔️ | ✔️ | ✔️ +| [FFNet-78S](https://aihub.qualcomm.com/models/ffnet_78s) | [qai_hub_models.models.ffnet_78s](qai_hub_models/models/ffnet_78s/README.md) | ✔️ | ✔️ | ✔️ +| [FFNet-78S-LowRes](https://aihub.qualcomm.com/models/ffnet_78s_lowres) | [qai_hub_models.models.ffnet_78s_lowres](qai_hub_models/models/ffnet_78s_lowres/README.md) | ✔️ | ✔️ | ✔️ +| [DeepLabV3-ResNet50](https://aihub.qualcomm.com/models/deeplabv3_resnet50) | [qai_hub_models.models.deeplabv3_resnet50](qai_hub_models/models/deeplabv3_resnet50/README.md) | ✔️ | ✔️ | ✔️ +| [FFNet-78S-Quantized](https://aihub.qualcomm.com/models/ffnet_78s_quantized) | [qai_hub_models.models.ffnet_78s_quantized](qai_hub_models/models/ffnet_78s_quantized/README.md) | ✔️ | ✔️ | ✔️ +| [Unet-Segmentation](https://aihub.qualcomm.com/models/unet_segmentation) | [qai_hub_models.models.unet_segmentation](qai_hub_models/models/unet_segmentation/README.md) | ✔️ | ✔️ | ✔️ +| [Segment-Anything-Model](https://aihub.qualcomm.com/models/sam) | [qai_hub_models.models.sam](qai_hub_models/models/sam/README.md) | ✔️ | ✔️ | ✔️ +| [FFNet-122NS-LowRes](https://aihub.qualcomm.com/models/ffnet_122ns_lowres) | [qai_hub_models.models.ffnet_122ns_lowres](qai_hub_models/models/ffnet_122ns_lowres/README.md) | ✔️ | ✔️ | ✔️ +| [FastSam-S](https://aihub.qualcomm.com/models/fastsam_s) | [qai_hub_models.models.fastsam_s](qai_hub_models/models/fastsam_s/README.md) | ✔️ | ✔️ | ✔️ +| [FCN_ResNet50](https://aihub.qualcomm.com/models/fcn_resnet50) | [qai_hub_models.models.fcn_resnet50](qai_hub_models/models/fcn_resnet50/README.md) | ✔️ | ✔️ | ✔️ +| [MediaPipe-Selfie-Segmentation](https://aihub.qualcomm.com/models/mediapipe_selfie) | 
[qai_hub_models.models.mediapipe_selfie](qai_hub_models/models/mediapipe_selfie/README.md) | ✔️ | ✔️ | ✔️ +| [FastSam-X](https://aihub.qualcomm.com/models/fastsam_x) | [qai_hub_models.models.fastsam_x](qai_hub_models/models/fastsam_x/README.md) | ✔️ | ✔️ | ✔️ +| | | | | +| **Object Detection** +| [MediaPipe-Hand-Detection](https://aihub.qualcomm.com/models/mediapipe_hand) | [qai_hub_models.models.mediapipe_hand](qai_hub_models/models/mediapipe_hand/README.md) | ✔️ | ✔️ | ✔️ +| [Yolo-v8-Detection](https://aihub.qualcomm.com/models/yolov8_det) | [qai_hub_models.models.yolov8_det](qai_hub_models/models/yolov8_det/README.md) | ✔️ | ✔️ | ✔️ +| [DETR-ResNet50-DC5](https://aihub.qualcomm.com/models/detr_resnet50_dc5) | [qai_hub_models.models.detr_resnet50_dc5](qai_hub_models/models/detr_resnet50_dc5/README.md) | ✔️ | ✔️ | ✔️ +| [DETR-ResNet101-DC5](https://aihub.qualcomm.com/models/detr_resnet101_dc5) | [qai_hub_models.models.detr_resnet101_dc5](qai_hub_models/models/detr_resnet101_dc5/README.md) | ✔️ | ✔️ | ✔️ +| [DETR-ResNet50](https://aihub.qualcomm.com/models/detr_resnet50) | [qai_hub_models.models.detr_resnet50](qai_hub_models/models/detr_resnet50/README.md) | ✔️ | ✔️ | ✔️ +| [Yolo-v7](https://aihub.qualcomm.com/models/yolov7) | [qai_hub_models.models.yolov7](qai_hub_models/models/yolov7/README.md) | ✔️ | ✔️ | ✔️ +| [Yolo-v6](https://aihub.qualcomm.com/models/yolov6) | [qai_hub_models.models.yolov6](qai_hub_models/models/yolov6/README.md) | ✔️ | ✔️ | ✔️ +| [MediaPipe-Face-Detection](https://aihub.qualcomm.com/models/mediapipe_face) | [qai_hub_models.models.mediapipe_face](qai_hub_models/models/mediapipe_face/README.md) | ✔️ | ✔️ | ✔️ +| [DETR-ResNet101](https://aihub.qualcomm.com/models/detr_resnet101) | [qai_hub_models.models.detr_resnet101](qai_hub_models/models/detr_resnet101/README.md) | ✔️ | ✔️ | ✔️ +| | | | | +| **Pose Estimation** +| [OpenPose](https://aihub.qualcomm.com/models/openpose) | [qai_hub_models.models.openpose](qai_hub_models/models/openpose/README.md) | ✔️ | ✔️ | ✔️ +| [MediaPipe-Pose-Estimation](https://aihub.qualcomm.com/models/mediapipe_pose) | [qai_hub_models.models.mediapipe_pose](qai_hub_models/models/mediapipe_pose/README.md) | ✔️ | ✔️ | ✔️ +| [HRNetPoseQuantized](https://aihub.qualcomm.com/models/hrnet_pose_quantized) | [qai_hub_models.models.hrnet_pose_quantized](qai_hub_models/models/hrnet_pose_quantized/README.md) | ✔️ | ✔️ | ✔️ +| [LiteHRNet](https://aihub.qualcomm.com/models/litehrnet) | [qai_hub_models.models.litehrnet](qai_hub_models/models/litehrnet/README.md) | ✔️ | ✔️ | ✔️ +| [HRNetPose](https://aihub.qualcomm.com/models/hrnet_pose) | [qai_hub_models.models.hrnet_pose](qai_hub_models/models/hrnet_pose/README.md) | ✔️ | ✔️ | ✔️ + +### Audio + +| Model | README | Torch App | Device Export | CLI Demo +| -- | -- | -- | -- | -- +| | | | | +| **Speech Recognition** +| [HuggingFace-WavLM-Base-Plus](https://aihub.qualcomm.com/models/huggingface_wavlm_base_plus) | [qai_hub_models.models.huggingface_wavlm_base_plus](qai_hub_models/models/huggingface_wavlm_base_plus/README.md) | ✔️ | ✔️ | ✔️ +| [Whisper-Base](https://aihub.qualcomm.com/models/whisper_asr) | [qai_hub_models.models.whisper_asr](qai_hub_models/models/whisper_asr/README.md) | ✔️ | ✔️ | ✔️ +| | | | | +| **Audio Enhancement** +| [Facebook-Denoiser](https://aihub.qualcomm.com/models/facebook_denoiser) | [qai_hub_models.models.facebook_denoiser](qai_hub_models/models/facebook_denoiser/README.md) | ✔️ | ✔️ | ✔️ + +### Multimodal + +| Model | README | Torch App | Device Export | CLI Demo +| -- | -- | -- | -- 
| -- +| | | | | +| [OpenAI-Clip](https://aihub.qualcomm.com/models/openai_clip) | [qai_hub_models.models.openai_clip](qai_hub_models/models/openai_clip/README.md) | ✔️ | ✔️ | ✔️ +| [TrOCR](https://aihub.qualcomm.com/models/trocr) | [qai_hub_models.models.trocr](qai_hub_models/models/trocr/README.md) | ✔️ | ✔️ | ✔️ + +### Generative AI + +| Model | README | Torch App | Device Export | CLI Demo +| -- | -- | -- | -- | -- +| | | | | +| **Image Generation** +| [ControlNet](https://aihub.qualcomm.com/models/controlnet_quantized) | [qai_hub_models.models.controlnet_quantized](qai_hub_models/models/controlnet_quantized/README.md) | ✔️ | ✔️ | ✔️ +| [Stable-Diffusion](https://aihub.qualcomm.com/models/stable_diffusion_quantized) | [qai_hub_models.models.stable_diffusion_quantized](qai_hub_models/models/stable_diffusion_quantized/README.md) | ✔️ | ✔️ | ✔️ +| | | | | +| **Text Generation** +| [Baichuan-7B](https://aihub.qualcomm.com/models/baichuan_7b_quantized) | [qai_hub_models.models.baichuan_7b_quantized](qai_hub_models/models/baichuan_7b_quantized/README.md) | ✔️ | ✔️ | ✔️ diff --git a/ai-solutions/QCS8550-embedded-linux/README.md b/ai-solutions/QCS8550-embedded-linux/README.md deleted file mode 100644 index 1a304a5d..00000000 --- a/ai-solutions/QCS8550-embedded-linux/README.md +++ /dev/null @@ -1,142 +0,0 @@ -## Table of Contents - -- [Table of Contents](#table-of-contents) -- [LE Build setup](#le-build-setup) -- [Generating ai-solutions binary](#generating-ai-solutions-binary) -- [Running ai-solutions application](#running-ai-solutions-application) - -## LE Build setup - -1. Follow "00067.1 Release Note for QCS8550.LE.1.0" to Setup "qti-distro-rb-debug" LE.1.0 build server for QCS8550 -2. Make sure "bitbake qti-robotics-image" is successful -3. Verify the "qti-distro-rb-debug" build by flashing on target using "fastboot". Commands to flash: - - ``` - cd build-qti-distro-rb-debug/tmp-glibc/deploy/images/kalama/qti-robotics-image/ - adb root - adb reboot bootloader - - fastboot flash abl_a abl.elf - fastboot flash abl_b abl.elf - fastboot flash dtbo_a dtbo.img - fastboot flash dtbo_b dtbo.img - fastboot flash boot_a boot.img - fastboot flash boot_b boot.img - fastboot flash system_a system.img - fastboot flash system_b system.img - fastboot flash userdata userdata.img - fastboot flash persist persist.img - - fastboot reboot - ``` - -## Generating ai-solutions binary - -1. Copy snpe-2.x folder to "/poky/meta-qti-ml-prop/recipes/snpe-sdk/files/snpe/". - ``` - cp -r /* /poky/meta-qti-ml-prop/recipes/snpe-sdk/files/snpe/ - ``` -2. Copy "meta-qti-ai-solutions" into "/poky/" folder - ``` - cp -r meta-qti-ai-solutions /poky/ - ``` -3. Copy SNPE,DiagLog,DlContainer,DlSystem and Wrapper.hpp - ``` - cp -r /include/SNPE/Dl* /poky/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ - cp -r /include/SNPE/DiagLog/ /poky/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ - cp -r /include/SNPE/Wrapper.hpp /poky/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ - cp -r /include/SNPE/SNPE/ /poky/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ - ``` -4. Update "snpe.bb" in "poky/meta-qti-ml-prop/recipes/snpe-sdk" folder - 1. Make sure platform "aarch64-oe-linux-gcc11.2" is selected - 2. Update DSP lib path - ``` - -- install -m 0755 ${S}/lib/dsp/* ${D}/${libdir}/rfsa/adsp - ++ install -m 0755 ${S}/lib/hexagon-v73/unsigned/lib* ${D}/${libdir}/rfsa/adsp - ``` -5. 
Run the following commands - ``` - cd /LE.PRODUCT.2.1.r1/apps_proc/poky - export MACHINE=kalama DISTRO=qti-distro-rb-debug - source qti-conf/set_bb_env.sh - export PREBUILT_SRC_DIR="/prebuilt_HY11" - bitbake qti-robotics-image - ``` -6. Flash the latest build on target. (Note: Check if "ai-solutions" binary is generated in the "build-qti-distro-fullstack-debug/tmp-glibc/work/qrb5165_rb5-oe-linux/qti-robotics-image/1.0-r0/rootfs/usr/bin/" path) - -## Running ai-solutions application -1. Execute the following commands to remount the target - ``` - adb root - adb disable-verity - adb reboot - adb root - adb shell "mount -o remount,rw /" - ``` -2. Push "meta-qti-ai-solutions/recipes/ai-solutions/files/app/" and "SNPE-2.14" onto the device - ``` - adb push - ``` -3. Execute the following commands to setup snpe on target - ``` - adb shell - cd - cp -r lib/aarch64-oe-linux-gcc11.2/lib* /usr/lib/ - cp bin/aarch64-oe-linux-gcc11.2/snpe-net-run /usr/bin/ - cp -r lib/hexagon-v73/unsigned/lib* /usr/lib/rfsa/adsp/ - chmod +x /usr/bin/snpe-net-run - snpe-net-run --version - ``` - Expected output: SNPE v2.14.2.230905160328_61726 -4. Run ai-solutions application - ``` - adb shell - cd - ai-solutions -c -i -o - ``` - Example: - - ``` - ai-solutions -c data/config.json -i Sample1.jpg -o output.jpg - ``` - - ### Details on Input arguments: - - #### Sample config.json - - ```json - "model-configs":[ - - "model-name":"QSrnet-medium", -> model name which is used while enabling solution - "model-type":"superresolution", -> To specify the use case such superresolution or detection or segmentation etc.. - "model-path":"models/quicksrnet_medium_quantized.dlc", -> Path at which model is located on target - "runtime":"DSP", -> Select Runtime either CPU or DSP - "input-layers":[ -> Input layer of the model - "t.1" - ], - "output-layers":[ - "depth_to_space#1" -> Output layer of the model - ], - "output-tensors":[ - "65" -> Output node for post processing - ] - ] - ``` - - solution-config: - ```json - "solution-configs":[ - { - "solution-name":"AI-Solutions", -> To identify usecase - "model-name":"SESR", -> Specify model name to be executed - "input-config-name":"image", -> To read input from image - "Enable":0 -> Enable specific solution - }, - { - "solution-name":"AI-Solutions", - "model-name":"SRGAN", - "input-config-name":"image", - "Enable":1 - } - ] - ``` diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/conf/layer.conf b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/conf/layer.conf deleted file mode 100644 index 4fa44c6f..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/conf/layer.conf +++ /dev/null @@ -1,7 +0,0 @@ -BBFILES += "${LAYERDIR}/recipes/*/*.bb ${LAYERDIR}/recipes/*/*.bbappend ${LAYERDIR}/recipes-*/*/*.bb ${LAYERDIR}/recipes-*/*/*.bbappend" -BBPATH .= ":${LAYERDIR}" -BBFILE_COLLECTIONS += "ai-solutions" -BBFILE_PRIORITY_ai-solutions = "17" -BBFILE_PATTERN_ai-solutions := "^${LAYERDIR}/" -LAYERSERIES_COMPAT_ai-solutions = " dunfell kirkstone " -IMAGE_INSTALL:append = " ai-solutions " diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/ai-solutions.bb b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/ai-solutions.bb deleted file mode 100644 index 2a4e4989..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/ai-solutions.bb +++ /dev/null @@ -1,25 +0,0 @@ -inherit cmake pkgconfig - -HOMEPAGE = "http://support.cdmatech.com" -LICENSE = 
"Qualcomm-Technologies-Inc.-Proprietary" -LIC_FILES_CHKSUM = "file://${COREBASE}/meta-qti-bsp-prop/files/qcom-licenses/\ -${LICENSE};md5=92b1d0ceea78229551577d4284669bb8" - -SUMMARY = "AI-Solutions on QCS8550" -DESCRIPTION = "AI-Solutions" - -LICENSE = "Qualcomm-Technologies-Inc.-Proprietary" - -SRC_URI = "file://app" -S = "${WORKDIR}/app" - -DEPENDS += " jsoncpp json-glib gflags gstreamer1.0 gstreamer1.0-plugins-base opencv snpe" - -do_install(){ - install -d ${D}/${bindir} - install -m 0777 ${WORKDIR}/build/out/ai-solutions ${D}/${bindir} -} - -INSANE_SKIP_${PN} += "arch" - -FILES_${PN} += "${bindir}/*" diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMake/FindGStreamer.cmake b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMake/FindGStreamer.cmake deleted file mode 100644 index 3e4148c0..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMake/FindGStreamer.cmake +++ /dev/null @@ -1,5 +0,0 @@ -find_package(PkgConfig) -pkg_search_module(GLIB REQUIRED glib-2.0) -pkg_check_modules(GSTREAMER REQUIRED gstreamer-1.0) -pkg_check_modules(GST_APP REQUIRED gstreamer-app-1.0) -pkg_check_modules(GST_VIDEO REQUIRED gstreamer-video-1.0) \ No newline at end of file diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMakeLists.txt b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMakeLists.txt deleted file mode 100644 index 62db1972..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMakeLists.txt +++ /dev/null @@ -1,25 +0,0 @@ -# CMake lowest version requirement -cmake_minimum_required(VERSION 3.5.1) - -# project information -project(AI-SOLUTIONS) - -include(FindPkgConfig) -pkg_check_modules(JSONCPP REQUIRED jsoncpp) -pkg_check_modules(JSON REQUIRED json-glib-1.0) -pkg_check_modules(GFLAGS REQUIRED gflags) - -set(PROJECT_ROOT ${CMAKE_CURRENT_LIST_DIR}) -set(CMAKE_MODULE_PATH ${PROJECT_ROOT}/CMake) -set(CMAKE_CXX_STANDARD 17) - -find_package(GStreamer REQUIRED) -find_package(OpenCV REQUIRED ) - -add_subdirectory("./src") - -link_directories( - ${JSONCPP_LIBRARY_DIRS} - ${JSON_LIBRARY_DIRS} - ${GFLAGS_LIBRARY_DIRS} -) diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/data/config.json b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/data/config.json deleted file mode 100644 index 5e887618..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/data/config.json +++ /dev/null @@ -1,345 +0,0 @@ -{ - "input-configs":[ - { - "input-config-name":"image", - "stream-type":"image" - } - ], - "model-configs":[ - { - "model-name":"QSrnet-small", - "model-type":"superresolution", - "model-path":"models/quicksrnet_small_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "t.1" - ], - "output-layers":[ - "depth_to_space#1" - ], - "output-tensors":[ - "41" - ], - "global-threshold":0.2 - }, - { - "model-name":"QSrnet-medium", - "model-type":"superresolution", - "model-path":"models/quicksrnet_medium_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "t.1" - ], - "output-layers":[ - "depth_to_space#1" - ], - "output-tensors":[ - "65" - ], - "global-threshold":0.2 - }, - { - "model-name":"QSrnet-large", - "model-type":"superresolution", - 
"model-path":"models/quicksrnet_large_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "t.1" - ], - "output-layers":[ - "depth_to_space#1" - ], - "output-tensors":[ - "124" - ], - "global-threshold":0.2 - }, - { - "model-name":"XLSR", - "model-type":"superresolution", - "model-path":"models/xlsr_quantized.dlc", - "runtime": "DSP", - "input-layers":[ - "t.1" - ], - "output-layers":[ - "clipped_relu" - ], - "output-tensors":[ - "100" - ], - "global-threshold":0.2 - }, - { - "model-name":"SESR", - "model-type":"superresolution", - "model-path":"models/sesr_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "lr" - ], - "output-layers":[ - "DepthToSpace_52" - ], - "output-tensors":[ - "sr" - ], - "global-threshold":0.2 - }, - { - "model-name":"ESRGAN", - "model-type":"superresolution", - "model-path":"models/esrgan_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "keras_layer_input" - ], - "output-layers":[ - "convolution_168" - ], - "output-tensors":[ - "Identity" - ], - "global-threshold":0.2 - }, - - { - "model-name":"ssd-mobilenet-v2", - "model-type":"detection", - "model-path":"models/ssd_mobilenetV2_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.3, - "conf-threshold":0.7, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "Softmax_350", - "Concat_397" - ], - "output-tensors":[ - "935", - "986" - ], - "global-threshold":0.2 - }, - { - "model-name":"yolo-nas", - "model-type":"detection", - "model-path":"models/yolo_nas_s_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/heads/Sigmoid", - "/heads/Mul" - ], - "output-tensors":[ - "877", - "885" - ], - "global-threshold":0.2 - }, - { - "model-name":"yolo-x", - "model-type":"detection", - "model-path":"models/yolox_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.3, - "input-layers":[ - "images" - ], - "output-layers":[ - "Transpose_570" - ], - "output-tensors":[ - "output" - ], - "global-threshold":0.2 - }, - - { - "model-name":"mbllen", - "model-type":"lowlight", - "model-path":"models/mbllen_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/model/Clip" - ], - "output-tensors":[ - "352" - ], - "global-threshold":0.2 - }, - { - "model-name":"ruas", - "model-type":"lowlight", - "model-path":"models/ruas_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "onnx::Pad_0" - ], - "output-layers":[ - "/denoise_net/Sub" - ], - "output-tensors":[ - "403" - ], - "global-threshold":0.2 - }, - { - "model-name":"SCI", - "model-type":"lowlight", - "model-path":"models/sci_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Clip" - ], - "output-tensors":[ - "30" - ], - "global-threshold":0.2 - }, - { - "model-name":"StableLLve", - "model-type":"lowlight", - "model-path":"models/StableLLVE_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/outc/conv/Conv" - ], - "output-tensors":[ - "248" - ], - "global-threshold":0.2 - }, - { - "model-name":"zero_dce", - "model-type":"lowlight", - "model-path":"models/zero_dce_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Add_7" - ], - "output-tensors":[ - "80" - ], - "global-threshold":0.2 - }, - - { - "model-name":"DeepLabv3Plus-resnet++", - "model-type":"segmentation", - "model-path":"models/DeepLabv3Plus_resnet101_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - 
"conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "Resize_284" - ], - "output-tensors":[ - "1089" - ], - "global-threshold":0.2 - }, - { - "model-name":"DeepLabv3-resnet101", - "model-type":"segmentation", - "model-path":"models/deeplabv3_resnet101_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Resize_1" - ], - "output-tensors":[ - "1089" - ], - "global-threshold":0.2 - }, - { - "model-name":"DeepLabv3-resnet50", - "model-type":"segmentation", - "model-path":"models/deeplabv3_resnet50_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Resize_1" - ], - "output-tensors":[ - "613" - ], - "global-threshold":0.2 - }, - { - "model-name":"FCN_resnet101", - "model-type":"segmentation", - "model-path":"models/fcn_resnet101_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Resize_1" - ], - "output-tensors":[ - "1018" - ], - "global-threshold":0.2 - }, - { - "model-name":"FCN_resnet50", - "model-type":"segmentation", - "model-path":"models/fcn_resnet50_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Resize_1" - ], - "output-tensors":[ - "542" - ], - "global-threshold":0.2 - } - ], - "solution-configs":[ - { - "solution-name":"AI-Solutions", - "model-name":"yolo-nas", - "input-config-name":"image", - "Enable":1 - } - ] -} diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Configuration.h b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Configuration.h deleted file mode 100644 index 6cce6ab6..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Configuration.h +++ /dev/null @@ -1,118 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef CONFIGURATION_H_ -#define CONFIGURATION_H_ - -#include -#include -#include -#include "Utils.h" - -using namespace cv; -using namespace std; - -const string input_configs = "input-configs"; -const string model_configs = "model-configs"; -const string solution_configs = "solution-configs"; - -// Input Configs; -const string pipeline_input_config = "input-config-name"; -const string stream_type = "stream-type"; -const string camera_url = "camera-url"; -const string skipframe = "SkipFrame"; - -// Model Configs -const string model_config_name = "model-name"; -const string model_type = "model-type"; -const string model_path = "model-path"; -const string runtime = "runtime"; -const string nms_threshold = "nms-threshold"; -const string conf_threshold = "conf-threshold"; -const string input_layers = "input-layers"; -const string output_layers = "output-layers"; -const string output_tensors = "output-tensors"; - -// Solution Configs -const string solution_name = "solution-name"; -const string model_name = "model-name"; -const string Enable = "Enable"; -const string solution_input_config = "input-config-name"; -const string output_type = "output-type"; - -class ObjectDetectionSnpeConfig { - public: - string model_name; - string model_type; - std::string model_path; - runtime_t runtime; - float nmsThresh; - float confThresh; - std::vector labels; - std::vector inputLayers; - std::vector outputLayers; - std::vector outputTensors; -}; - -class InputConfiguration{ - public: - int SkipFrame; - int StreamNumber=0; - string StreamType; - string Url; - string ConfigName; -}; - -class SolutionConfiguration { - public: - string solution_name; - string model_name; - string input_config_name; - bool Enable; - string output_type; - std::shared_ptr input_config; - std::shared_ptr model_config; -}; - -class DebugConfiguration -{ - public: - bool DumpData=false; - string Directory; -}; - -class Configuration -{ -public: - static Configuration &getInstance() - { - static Configuration instance; - return instance; - } - -private: - Configuration() {} -public: - Configuration(Configuration const &) = delete; - void operator=(Configuration const &) = delete; - - DebugConfiguration Debug; - ObjectDetectionSnpeConfig Config; - SolutionConfiguration Sol_Config; - std::unordered_map> inputconfigs; - std::unordered_map> modelsconfig; - std::unordered_map> solutionsconfig; - - void LoadConfiguration(string file); - int LoadInputConfig(Json::Value& input); - int LoadModelsConfig(Json::Value& models); - int LoadSolutionsConfig(Json::Value& solutions); -}; - -#endif diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Detection.h b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Detection.h deleted file mode 100644 index ffc2ad3a..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Detection.h +++ /dev/null @@ -1,61 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef DETECTION_H -#define DETECTION_H - -#include -#include -#include -#include - -using namespace std; -using namespace cv; - -struct ObjectData { - // Bounding box information: top-left coordinate and width, height - cv::Rect bbox; - // Confidence of this bounding box - float confidence = -1.0f; - // The label of this Bounding box - int label = -1; - // Time cost of detecting this frame - size_t time_cost = 0; - uint32_t Width=512; - uint32_t Height=512; - cv::Mat *output=NULL; - -}; - -struct Detection -{ - cv::Rect bbox; - float score; - int label; -}; - -struct DetectionDetail -{ - vector Result; - string ModelName; -}; - -struct DetectionItem -{ - uint32_t Width; - uint32_t Height; - uint32_t FrameId; - size_t Size; - string StreamName; - int StreamId; - shared_ptr ImageBuffer; - ObjectData Results; -}; - -#endif diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/DetectionSnpe.h b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/DetectionSnpe.h deleted file mode 100644 index 0aa944ab..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/DetectionSnpe.h +++ /dev/null @@ -1,52 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef __Detection_IMPL_H__ -#define __Detection_IMPL_H__ - -#include -#include -#include -#include - -#include "SNPERuntime.h" -#include "ModelInference.h" -#include "Configuration.h" -#include "Detection.h" -namespace detectionsnpe -{ - class DETECTIONSnpe - { - public: - DETECTIONSnpe(); - ~DETECTIONSnpe(); - bool Initialize(const ObjectDetectionSnpeConfig& config); - bool DeInitialize(); - bool Detect(cv::Mat input,cv::Mat& output_image,string model_name); - bool SetScoreThresh(const float& conf_thresh, const float& nms_thresh); - bool IsInitialized() const; - - private: - bool m_isInit; - float m_nmsThresh; - float m_confThresh; - std::unique_ptr m_snperuntime; - std::vector m_inputLayers; - std::vector m_outputLayers; - std::vector m_outputTensors; - - bool PreProcessInput(const cv::Mat& frame,string model_name); - bool PostProcess( cv::Mat image,cv::Mat& output_image,string model_name); - float computeIoU(const cv::Rect& a, const cv::Rect& b); - std::vector doNMS(std::vector winList, const float& nms_thresh); - }; - -} // namespace detection - -#endif // __DETECTION_IMPL_H__ diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/LowlightSnpe.h b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/LowlightSnpe.h deleted file mode 100644 index e6ee6b75..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/LowlightSnpe.h +++ /dev/null @@ -1,46 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef __LOWLIGHT_IMPL_H__ -#define __LOWLIGHT_IMPL_H__ - -#include -#include -#include -#include - -#include "SNPERuntime.h" -#include "ModelInference.h" -#include "Configuration.h" - -namespace lowlightsnpe -{ - class LOWLIGHTSnpe - { - public: - LOWLIGHTSnpe(); - ~LOWLIGHTSnpe(); - bool Initialize(const ObjectDetectionSnpeConfig& config); - bool DeInitialize(); - bool Detect(cv::Mat input,cv::Mat& output_image,string model_name); - bool IsInitialized() const; - - private: - bool m_isInit; - std::unique_ptr m_snperuntime; - std::vector m_inputLayers; - std::vector m_outputLayers; - std::vector m_outputTensors; - bool PreProcessInput(const cv::Mat& frame,string model_name); - bool PostProcess(cv::Mat& output_image,string model_name); - }; - -} // namespace lowlightsnpe - -#endif // __LOWLIGHT_IMPL_H__ diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ModelInference.h b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ModelInference.h deleted file mode 100644 index 7223e7e0..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ModelInference.h +++ /dev/null @@ -1,34 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef MODEL_INFERENCE_H_ -#define MODEL_INFERENCE_H_ -#include -#include -#include -#include -#include -#include "Configuration.h" - -class ModelInference{ -public: - ModelInference(); - ModelInference(const string model_name); - int Initialization(const ObjectDetectionSnpeConfig& config); - bool IsInitialized(); - bool UnInitialization(); - ~ModelInference(); - int Inference(cv::Mat input,cv::Mat& output_image,string model_name); -private: - void *Impl = nullptr; - enum Models{SUPERRESOLUTION, DETECTION,LOWLIGHT,SEGMENTATION}; - int Model; -}; - -#endif \ No newline at end of file diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SNPERuntime.h b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SNPERuntime.h deleted file mode 100644 index 854ae9bb..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SNPERuntime.h +++ /dev/null @@ -1,79 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef _SNPERUNTIME_H_ -#define _SNPERUNTIME_H_ - -#include -#include -#include -#include -#include - -#include "SNPE/SNPE.hpp" -#include "SNPE/SNPEFactory.hpp" -#include "SNPE/SNPEBuilder.hpp" -#include "DlSystem/DlEnums.hpp" -#include "DlSystem/DlError.hpp" -#include "DlSystem/ITensorFactory.hpp" -#include "DlSystem/IUserBufferFactory.hpp" -#include "DlSystem/TensorShape.hpp" -#include "DlContainer/IDlContainer.hpp" - -#include "Utils.h" - -namespace snperuntime { - -class SNPERuntime { -public: - SNPERuntime(); - - bool Initialize(const std::string& model_path, const runtime_t runtime); - bool Deinitialize(); - bool SetOutputLayers(std::vector& outputLayers); - - std::vector GetInputShape(const std::string& name); - std::vector GetOutputShape(const std::string& name); - - float* GetInputTensor(const std::string& name); - float* GetOutputTensor(const std::string& name); - - bool IsInit() { - return m_isInit; - } - - bool execute(); - -private: - bool m_isInit = false; - - std::unique_ptr m_container; - std::unique_ptr m_snpe; - zdl::DlSystem::Runtime_t m_runtime; - zdl::DlSystem::StringList m_outputLayers; - - std::map > m_inputShapes; - std::map > m_outputShapes; - - std::vector > m_inputUserBuffers; - std::vector > m_outputUserBuffers; - zdl::DlSystem::UserBufferMap m_inputUserBufferMap; - zdl::DlSystem::UserBufferMap m_outputUserBufferMap; - zdl::DlSystem::PerformanceProfile_t m_profile; - - void setTargetRuntime(const runtime_t runtime); - void setPerformanceProfile(const performance_t perfprofile); - - std::unordered_map> m_applicationInputBuffers; - std::unordered_map> m_applicationOutputBuffers; -}; - -} - -#endif // _SNPERUNTIME_H_ \ No newline at end of file diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SegmentationSnpe.h b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SegmentationSnpe.h deleted file mode 100644 index 022dd918..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SegmentationSnpe.h +++ /dev/null @@ -1,52 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef __Segmentation_IMPL_H__ -#define __Segmentation_IMPL_H__ - -#include -#include -#include -#include - -#include "SNPERuntime.h" -#include "ModelInference.h" -#include "Configuration.h" - -namespace segmentationsnpe -{ - - class SEGMENTATIONSnpe - { - public: - SEGMENTATIONSnpe(); - ~SEGMENTATIONSnpe(); - bool Initialize(const ObjectDetectionSnpeConfig& config); - bool DeInitialize(); - bool Detect(cv::Mat input,cv::Mat& output_image,string model_name); - bool SetScoreThresh(const float& conf_thresh, const float& nms_thresh); - bool IsInitialized() const; - - private: - bool m_isInit; - float m_nmsThresh; - float m_confThresh; - - std::unique_ptr m_snperuntime; - std::vector m_inputLayers; - std::vector m_outputLayers; - std::vector m_outputTensors; - - bool PreProcessInput(const cv::Mat& frame,string model_name); - bool PostProcess( cv::Mat image,cv::Mat& output_image,string model_name); - }; - -} // namespace segmentation - -#endif // __SEGMENTATION_IMPL_H__ diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SuperresolutionSnpe.h b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SuperresolutionSnpe.h deleted file mode 100644 index e90e91d9..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SuperresolutionSnpe.h +++ /dev/null @@ -1,46 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef __SUPERRES_IMPL_H__ -#define __SUPERRES_IMPL_H__ - -#include -#include -#include -#include - -#include "SNPERuntime.h" -#include "ModelInference.h" -#include "Configuration.h" - -namespace superressnpe -{ - class SUPERRESSnpe - { - public: - SUPERRESSnpe(); - ~SUPERRESSnpe(); - bool Initialize(const ObjectDetectionSnpeConfig& config); - bool DeInitialize(); - bool Detect(cv::Mat input,cv::Mat& output_image,string model_name); - bool IsInitialized() const; - - private: - bool m_isInit; - std::unique_ptr m_snperuntime; - std::vector m_inputLayers; - std::vector m_outputLayers; - std::vector m_outputTensors; - - bool PreProcessInput(const cv::Mat& frame,string model_name); - bool PostProcess(cv::Mat& output_image,string model_name); - }; -} // namespace superressnpe - -#endif // __SUPERRES_IMPL_H__ diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Utils.h b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Utils.h deleted file mode 100644 index 5f0c95d8..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Utils.h +++ /dev/null @@ -1,98 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef UTILS_H_ -#define UTILS_H_ - -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace std; -using namespace cv; - -using chrono::high_resolution_clock; -using chrono::duration_cast; -using chrono::duration; -using chrono::milliseconds; - -#define QS_SUCCESS 0 -#define QS_ERROR -1 - -#define PRINT(fmt, ...) { \ - printf(fmt, ##__VA_ARGS__); \ -} - -#define LOG(level, fmt, ...) { \ - PRINT("[%s] - %s: " fmt, #level, __func__, ##__VA_ARGS__); \ -} - -//#define DEBUG -#ifdef DEBUG - #define LOG_DEBUG(fmt, ...) LOG(DEBUG, fmt, ##__VA_ARGS__) -#else - #define LOG_DEBUG(fmt, ...) ((void)0) -#endif - -#define LOG_INFO(fmt, ...) { \ - LOG(INFO, fmt, ##__VA_ARGS__); \ -} - -#define LOG_WARN(fmt, ...) { \ - LOG(WARN, fmt, ##__VA_ARGS__); \ -} - -#define LOG_ERROR(fmt, ...) { \ - LOG(ERROR, fmt, ##__VA_ARGS__); \ -} - -#define IMAGE_CHAN_SIZE_F32(width, height) ((width) * (height)*4) -#define RGB_IMAGE_SIZE_F32(width, height) ((width) * (height)*3 * 4) - -// Inference hardware runtime. -typedef enum runtime { - CPU = 0, - DSP -}runtime_t; - -typedef enum PerformanceProfile { - DEFAULT = 0, - /// Run in a balanced mode. - BALANCED = 0, - /// Run in high performance mode - HIGH_PERFORMANCE = 1, - /// Run in a power sensitive mode, at the expense of performance. - POWER_SAVER = 2, - /// Use system settings. SNPE makes no calls to any performance related APIs. - SYSTEM_SETTINGS = 3, - /// Run in sustained high performance mode - SUSTAINED_HIGH_PERFORMANCE = 4, - /// Run in burst mode - BURST = 5, - /// Run in lower clock than POWER_SAVER, at the expense of performance. - LOW_POWER_SAVER = 6, - /// Run in higher clock and provides better performance than POWER_SAVER. 
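// Usage sketch for the logging macros above, assuming the deleted Utils.h: LOG(level, ...)
// prefixes the message with the level name and the calling function, and LOG_DEBUG compiles
// to a no-op unless DEBUG is defined before this header is included.
#include "Utils.h"

static bool load_model(const char* path)
{
    if (path == nullptr) {
        LOG_ERROR("model path is null\n");
        return false;
    }
    LOG_INFO("loading model from %s\n", path);   // prints "[INFO] - load_model: loading model from ..."
    LOG_DEBUG("path checked: %s\n", path);       // emitted only when DEBUG is defined
    return true;
}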
- HIGH_POWER_SAVER = 7, - /// Run in lower balanced mode - LOW_BALANCED = 8, -}performance_t; - -template -void ClearVector(std::vector& vt) -{ - std::vector vtTemp; - vtTemp.swap(vt); -} - -#endif diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/models/README.md b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/models/README.md deleted file mode 100644 index 22397b6f..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/models/README.md +++ /dev/null @@ -1 +0,0 @@ -Place dlc files in this folder diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/CMakeLists.txt b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/CMakeLists.txt deleted file mode 100644 index a0d4b817..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/CMakeLists.txt +++ /dev/null @@ -1,39 +0,0 @@ -cmake_minimum_required(VERSION 3.5.1) - - -# Compile options -add_compile_options(-std=c++11) - -set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "../out") -set(CMAKE_CXX_FLAGS_DEBUG "-fPIC -O0 -g -Wall") -set(CMAKE_CXX_FLAGS_RELEASE "-fPIC -O2 -Wall") - -message(STATUS "source file path" ${PROJECT_SRC_ROOT}) - -file(GLOB_RECURSE SRC_FILE - ../src/*.cpp -) - -set(SOURCE_FILE - ${SRC_FILE} -) - -add_executable(ai-solutions ${SRC_FILE}) -target_compile_options(ai-solutions PUBLIC -fPIC -O0 -g -Wall -Wnon-virtual-dtor) - -# Header path -include_directories( - "../inc" - "/usr/include/glib-2.0" - "/usr/lib/aarch64-linux-gnu/glib-2.0/include" - "/usr/include/gstreamer-1.0" - "/usr/local/include/opencv4" - ${OpenCV_INCLUDE_DIRS} - ${JSON_INCLUDE_DIRS} - ${JSONCPP_INCLUDE_DIRS} -) - -message(STATUS "JSON file path" ${JSON_INCLUDE_DIRS}) -message(STATUS "JSONCPP file path" ${JSONCPP_INCLUDE_DIRS}) - -target_link_libraries(ai-solutions PUBLIC pthread dl ${OpenCV_LIBS} ${GST_APP_LIBRARIES} ${JSON_LIBRARIES} jsoncpp SNPE jsoncpp) diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/Configuration.cpp b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/Configuration.cpp deleted file mode 100644 index 60e711dc..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/Configuration.cpp +++ /dev/null @@ -1,152 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
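// The ClearVector() helper above uses the swap idiom because std::vector::clear() keeps the
// allocation alive. Self-contained sketch of the difference; capacity values are
// implementation dependent, but the swap reliably releases the storage.
#include <cstdio>
#include <vector>

template <typename T>
static void ClearVector(std::vector<T>& vt)   // same idiom as the deleted Utils.h helper
{
    std::vector<T> tmp;
    tmp.swap(vt);                             // tmp takes the allocation and frees it on scope exit
}

int main()
{
    std::vector<float> v(1 << 20, 0.0f);
    v.clear();                                // size() == 0, capacity() usually unchanged
    std::printf("after clear(): capacity = %zu\n", v.capacity());
    ClearVector(v);                           // capacity released as well
    std::printf("after swap   : capacity = %zu\n", v.capacity());
    return 0;
}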
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include "Configuration.h" -#include "Utils.h" -#include - -/** @brief To convert runtime from string to int - * @param device which contains runtime as a string - * @return int value corresponding to runtime -*/ - -static runtime_t device2runtime(std::string&& device) -{ - /** - * To convert all characters to lower case - */ - std::transform(device.begin(), device.end(), device.begin(), - [](unsigned char ch){ return tolower(ch); }); - - if (0 == device.compare("dsp")) - { - return DSP; - } - else - { - return CPU; - } -} - -/** @brief To parse Input config from config file - * @param input contains input config array -*/ -int Configuration::LoadInputConfig(Json::Value& input) -{ - if (input.isArray()) - { - int size = input.size(); - for (int i = 0; i < size; ++i) - { - std::shared_ptr inputconfig = std::shared_ptr(new InputConfiguration()); - inputconfig->ConfigName = input[i][pipeline_input_config].asString(); - inputconfig->StreamType = input[i][stream_type].asString(); - inputconfig->Url = input[i][camera_url].asString(); - inputconfig->SkipFrame = input[i][skipframe].asInt(); - inputconfigs[inputconfig->ConfigName] = inputconfig; - } - } - LOG_INFO("Input streams size=%u \n", input.size()); - return 0; -} - -/** @brief To parse model config - * @param models contains model config array - */ - -int Configuration::LoadModelsConfig(Json::Value& models) -{ - std::string line; - if (models.isArray()) - { - int size = models.size(); - for (int i = 0; i < size; ++i) - { - std::shared_ptr modelconfig = - std::shared_ptr(new ObjectDetectionSnpeConfig()); - modelconfig->model_name = models[i][model_config_name].asString(); - modelconfig->model_type = models[i][model_type].asString(); - modelconfig->model_path = models[i][model_path].asString(); - modelconfig->runtime = device2runtime(models[i][runtime].asString()); - modelconfig->nmsThresh = models[i][nms_threshold].asFloat(); - modelconfig->confThresh = models[i][conf_threshold].asFloat(); - - /** - * To access input layer names from config - */ - if (models[i]["input-layers"].isArray()) { - int num = models[i]["input-layers"].size(); - for (int j= 0; j < num; j++) { - modelconfig->inputLayers.push_back(models[i]["input-layers"][j].asString()); - } - } - /** - * To access output layer names from config - */ - if (models[i][output_layers].isArray()) { - int num = models[i]["output-layers"].size(); - for (int j = 0; j < num; j++) { - modelconfig->outputLayers.push_back(models[i]["output-layers"][j].asString()); - } - } - /** - * To access output tensor names from config - */ - if (models[i][output_tensors].isArray()) { - int num = models[i]["output-tensors"].size(); - for (int j = 0; j < num; j++) { - modelconfig->outputTensors.push_back(models[i]["output-tensors"][j].asString()); - } - } - - modelsconfig[modelconfig->model_name] = modelconfig; - } - } - - LOG_INFO("modelsconfig size = %lu \n", modelsconfig.size()); - return 0; -} - -/** @brief To parse solution config - * @param solutions contains solution array - * -*/ - -int Configuration::LoadSolutionsConfig(Json::Value& solutions) { - if (solutions.isArray()) { - int size = solutions.size(); - for (int i = 0; i < size; ++i) { - std::shared_ptr solutionconfig = std::shared_ptr(new SolutionConfiguration()); - solutionconfig->solution_name = solutions[i][solution_name].asString(); - solutionconfig->model_name = 
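// Quick self-check of the device2runtime() mapping above: the comparison is made on a
// lower-cased copy, so "DSP", "Dsp" and "dsp" all select the DSP runtime and anything else
// falls back to CPU. Standalone sketch with a local copy of the runtime_t enum from Utils.h.
#include <algorithm>
#include <cctype>
#include <cstdio>
#include <string>

enum runtime_t { CPU = 0, DSP };

static runtime_t device2runtime(std::string device)
{
    std::transform(device.begin(), device.end(), device.begin(),
                   [](unsigned char ch) { return static_cast<char>(std::tolower(ch)); });
    return device == "dsp" ? DSP : CPU;
}

int main()
{
    std::printf("%d %d %d\n", device2runtime("DSP"), device2runtime("dsp"), device2runtime("gpu"));
    // prints: 1 1 0
    return 0;
}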
solutions[i][model_name].asString(); - solutionconfig->Enable = solutions[i][Enable].asBool(); - solutionconfig->input_config_name = solutions[i][solution_input_config].asString(); - solutionconfig->output_type = solutions[i][output_type].asString(); - solutionsconfig[i] = solutionconfig; - } - } - LOG_DEBUG("Solutions size %lu", solutionsconfig.size() ); - return 0; -} - - -/** @brief To parse config file - * @param configFilePath contains json file passed as an argument -*/ -void Configuration::LoadConfiguration(string configFilePath) -{ - Json::Reader reader; - Json::Value root; - std::ifstream in(configFilePath, std::ios::binary); - reader.parse(in, root); - - LoadInputConfig(root[input_configs]); - LoadModelsConfig(root[model_configs]); - LoadSolutionsConfig(root[solution_configs]); -} \ No newline at end of file diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/DetectionSnpe.cpp b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/DetectionSnpe.cpp deleted file mode 100644 index bcc8cc86..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/DetectionSnpe.cpp +++ /dev/null @@ -1,542 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -#include -#include -#include "Configuration.h" -#include "DetectionSnpe.h" - -namespace detectionsnpe -{ - - /** @brief Constructor - */ - DETECTIONSnpe::DETECTIONSnpe() : m_isInit(false),m_snperuntime(nullptr) - { - - } - - /** @brief Destructor - */ - DETECTIONSnpe::~DETECTIONSnpe() - { - DeInitialize(); - } - - /** @brief To read model config and set output layers - * @param config model config parameters - * @return true if success;false otherwise - */ - bool DETECTIONSnpe::Initialize(const ObjectDetectionSnpeConfig& config) - { - m_snperuntime = std::move(std::unique_ptr(new snperuntime::SNPERuntime())); - - m_inputLayers = config.inputLayers; - m_outputLayers = config.outputLayers; - m_outputTensors = config.outputTensors; - m_nmsThresh = config.nmsThresh; - m_confThresh = config.confThresh; - - /** - * To set output layer from model config - */ - m_snperuntime->SetOutputLayers(m_outputLayers); - /** - * To initialize snperuntime - */ - if (!m_snperuntime->Initialize(config.model_path, config.runtime)) - { - LOG_ERROR("Failed to Initialize snpe instance.\n"); - return false; - } - - m_isInit = true; - return true; - } - - /** @brief To deallocate buffers and reset - */ - bool DETECTIONSnpe::DeInitialize() - { - if (m_isInit) - { - m_snperuntime->Deinitialize(); - m_snperuntime.reset(nullptr); - } - m_isInit = false; - return true; - } - - bool DETECTIONSnpe::SetScoreThresh(const float& conf_thresh, const float& nms_thresh = 0.5) - { - this->m_nmsThresh = nms_thresh; - this->m_confThresh = conf_thresh; - return true; - } - - bool DETECTIONSnpe::IsInitialized() const - { - return m_isInit; - } - - /** @brief To preprocess input image - * @param input_image Input image for inference - * @return true if succuess; false otherwise - */ - bool DETECTIONSnpe::PreProcessInput(const cv::Mat& input_image,string model_name) - { - if (input_image.empty()) { - LOG_ERROR("Invalid 
image!\n"); - return false; - } - - auto inputShape = m_snperuntime->GetInputShape(m_inputLayers[0]); - size_t model_h = inputShape[1]; - size_t model_w = inputShape[2]; - - if (m_snperuntime->GetInputTensor(m_inputLayers[0]) == nullptr) - { - LOG_ERROR("Empty input tensor\n"); - return false; - } - - cv::Mat image = cv::Mat(model_h,model_w, CV_32FC3, Scalar(0.)); - cv::resize(input_image,image,cv::Size(model_h,model_w)); - cv::Mat input(model_h, model_w, CV_32FC3, m_snperuntime->GetInputTensor(m_inputLayers[0])); - - if(model_name.compare("ssd-mobilenet-v2") == 0 ) - { - cv::cvtColor(image, image, cv::COLOR_BGR2RGB); - image.convertTo(image, CV_32S); - subtract(image,Scalar(123.0, 117.0, 104.0),image); - image.convertTo(input, CV_32FC3, 1.0); - } - else if(model_name.compare("yolo-nas") == 0) - { - image.convertTo(input, CV_32FC3, 1/255.0); - } - else if(model_name.compare("yolo-x") == 0) - { - image.convertTo(input, CV_32FC3, 1.0); - } - - return true; - } - - /** @brief To preprocess,execute and postprocess - * @param input_image Input image for inference - * @param output_image Inference output image - * @param model_name To identify model for specific post-processing - * @return true if success; false otherwise - */ - bool DETECTIONSnpe::Detect(cv::Mat image,cv::Mat& output_image,string model_name) - { - /** - * Preprocessing image - */ - if(PreProcessInput(image, model_name) != true) - { - LOG_ERROR("PreProcess failed\n"); - return false; - } - /** - * Inferencing model on target - */ - if (!m_snperuntime->execute()) { - LOG_ERROR("SNPERuntime execute failed."); - return false; - } - /** - * Postprocessing to extract bounding boxes - */ - if(PostProcess(image,output_image,model_name) != true) - { - LOG_ERROR("PostProcess failed\n"); - return false; - } - return true; - } - - float DETECTIONSnpe::computeIoU(const cv::Rect& a, const cv::Rect& b) - { - float xOverlap = std::max( - 0., - std::min(a.x + a.width, b.x + b.width) - std::max(a.x, b.x) + 1.); - float yOverlap = std::max( - 0., - std::min(a.y + a.height, b.y + b.height) - std::max(a.y, b.y) + 1.); - float intersection = xOverlap * yOverlap; - float unio = - (a.width + 1.) * (a.height + 1.) + - (b.width + 1.) * (b.height + 1.) 
- intersection; - return intersection / unio; - } - - std::vector DETECTIONSnpe::doNMS(std::vector winList, const float& nms_thresh) - { - if (winList.empty()) { - return winList; - } - - std::sort(winList.begin(), winList.end(), [] (const ObjectData& left, const ObjectData& right) { - if (left.confidence > right.confidence) { - return true; - } else { - return false; - } - }); - - std::vector flag(winList.size(), false); - for (unsigned int i = 0; i < winList.size(); i++) { - if (flag[i]) { - continue; - } - - for (unsigned int j = i + 1; j < winList.size(); j++) { - if (computeIoU(winList[i].bbox, winList[j].bbox) > nms_thresh) { - flag[j] = true; - } - } - } - - std::vector ret; - for (unsigned int i = 0; i < winList.size(); i++) { - if (!flag[i]) - ret.push_back(winList[i]); - } - return ret; - } - - /** @brief Object Detection postprocess - * @param output_image Image with bounding boxes - * @param model_name To identify model for specific post-processing - */ - bool DETECTIONSnpe::PostProcess( cv::Mat image,cv::Mat& output_image,string model_name) - { - int width = image.cols, height = image.rows; - cv::resize(image,output_image,cv::Size(width,height)); - if(model_name.compare("ssd-mobilenet-v2") == 0) - { - vectorclasses = { - "background","aeroplane","bicycle","bird","boat", - "bottle","bus","car","cat","chair","cow", - "diningtable","dog","horse","motorbike","person", - "pottedplant","sheep","sofa","train","tvmonitor", - }; - - auto outputShape_score = m_snperuntime->GetOutputShape(m_outputTensors[0]); - int elements_score = outputShape_score[1]; - int channels_score = outputShape_score[2]; - - auto outputShape_box = m_snperuntime->GetOutputShape(m_outputTensors[1]); - float *score_confidence = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - float *box_coordinates = m_snperuntime->GetOutputTensor(m_outputTensors[1]); - - if( (score_confidence == nullptr) || (box_coordinates == nullptr)) - { - return false; - } - for(size_t class_index = 1; class_index winList; - for(int row=0; row m_confThresh && (class_pred==class_index) ) - { - ObjectData rect; - rect.bbox.x = box_coordinates[row*4 ] * width; - rect.bbox.y = box_coordinates[row*4+ 1] * height; - rect.bbox.width = box_coordinates[row*4 + 2] * width; - rect.bbox.height = box_coordinates[row*4 + 3] * height; - rect.confidence = value; - rect.label = class_pred; - winList.push_back(rect); - } - } - } - winList = doNMS(winList, m_nmsThresh); - for(size_t i =0;i classes = { - "person", "bicycle", "car", "motorcycle", "airplane", "bus", - "train", "truck", "boat", "traffic", "fire", "stop", "parking", - "bench", "bird", "cat", "dog", "horse", "sheep", "cow", - "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", - "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", - "sports", "kite", "baseball", "baseball", "skateboard", "surfboard", - "tennis", "bottle", "wine", "cup", "fork", "knife","spoon", - "bowl", "banana", "apple", "sandwich", "orange", "broccoli", - "carrot", "hot", "pizza", "donut", "cake", "chair", "couch", - "potted", "bed", "dining", "toilet", "tv", "laptop", "mouse", - "remote", "keyboard", "cell", "microwave", "oven", "toaster", - "sink", "refrigerator", "book", "clock", "vase", "scissors", - "teddy", "hair", "toothbrush" - }; - - float *class_scores = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - auto outputShape_scores = m_snperuntime->GetOutputShape(m_outputTensors[0]); - float *bboxes = m_snperuntime->GetOutputTensor(m_outputTensors[1]); - auto outputShape_bboxes = 
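// Worked example for computeIoU() above, which uses the inclusive "+1" convention for box
// extents: two 11x11-pixel boxes offset by 5 pixels overlap in a 6x6 region, so
// IoU = 36 / (121 + 121 - 36) ≈ 0.175. Standalone sketch using the same formula.
#include <algorithm>
#include <cstdio>
#include <opencv2/core.hpp>

static float iou(const cv::Rect& a, const cv::Rect& b)
{
    float xo = std::max(0.0f, std::min(float(a.x + a.width),  float(b.x + b.width))
                              - std::max(float(a.x), float(b.x)) + 1.0f);
    float yo = std::max(0.0f, std::min(float(a.y + a.height), float(b.y + b.height))
                              - std::max(float(a.y), float(b.y)) + 1.0f);
    float inter = xo * yo;
    float uni   = (a.width + 1.0f) * (a.height + 1.0f)
                + (b.width + 1.0f) * (b.height + 1.0f) - inter;
    return inter / uni;
}

int main()
{
    cv::Rect a(0, 0, 10, 10), b(5, 5, 10, 10);
    std::printf("IoU = %.3f\n", iou(a, b));   // 0.175
    return 0;
}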
m_snperuntime->GetOutputShape(m_outputTensors[1]); - - if( (class_scores == nullptr) || (bboxes == nullptr)) - { - return false; - } - float ratio1 = width/320.0; - float ratio2 = height/320.0; - - int out_coordinates = outputShape_scores[1]; - int out_scores = outputShape_scores[2]; - - std::vector winList; - for(int i =0;i= m_confThresh) - { - float x1 = bboxes[i*4 ]*ratio1; - float y1 = bboxes[i*4 + 1]*ratio2; - float x2 = bboxes[i*4 + 2]*ratio1; - float y2 = bboxes[i*4 + 3]*ratio2; - ObjectData rect; - rect.bbox.x = x1 ; - rect.bbox.y = y1 ; - rect.bbox.width = x2 - x1; - rect.bbox.height = y2 - y1; - rect.confidence = class_scores[out_scores*i + j]; - rect.label = j; - winList.push_back(rect); - } - } - } - winList = doNMS(winList,m_nmsThresh); - for(size_t i =0;i classes = { - "person", "bicycle", "car", "motorcycle", "airplane", "bus", - "train", "truck", "boat", "traffic", "fire", "stop", "parking", - "bench", "bird", "cat", "dog", "horse", "sheep", "cow", - "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", - "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", - "sports", "kite", "baseball", "baseball", "skateboard", "surfboard", - "tennis", "bottle", "wine", "cup", "fork", "knife","spoon", - "bowl", "banana", "apple", "sandwich", "orange", "broccoli", - "carrot", "hot", "pizza", "donut", "cake", "chair", "couch", - "potted", "bed", "dining", "toilet", "tv", "laptop", "mouse", - "remote", "keyboard", "cell", "microwave", "oven", "toaster", - "sink", "refrigerator", "book", "clock", "vase", "scissors", - "teddy", "hair", "toothbrush" - }; - - float *scores = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - auto outputShape = m_snperuntime->GetOutputShape(m_outputTensors[0]); - if(scores == nullptr) - { - return false; - } - int model_h = outputShape[1]; - int model_w = outputShape[2]; - float output[model_h][model_w]; - - for(int i=0;i grid; - static vector expanded_stride; - static int sum=0; - if(flag == false) - { - const int strides[3] = {8, 16, 32}; - int hsizes[3] = {80, 40, 20}; - int wsizes[3] = {80, 40, 20}; - - vector> grids, expanded_strides; - - for(int i=0;i<3;i++) - { - vector grid; - vector expanded_stride; - for(int j=0; j> boxes; - vector> scores_vec; - for(int i=0;i box; - for(int j=0;j<4;j++) - { - box.push_back(output[i][j]); - } - boxes.push_back(box); - } - - for(int i=0;i score; - float val = output[i][4]; - for(int j=5;j<85;j++) - { - score.push_back(output[i][j] * val); - } - scores_vec.push_back(score); - } - - std::vector winList; - for(int i=0;i=m_confThresh) - { - for(int j=0;j<4;j++) - { - int x1 = boxes[i][0]; - int y1 = boxes[i][1]; - int x2 = boxes[i][2]; - int y2 = boxes[i][3]; - - int x = (int)(x1 - x2/2); - int y = (int)(y1 - y2/2); - int w = (int)(x1 + x2/2); - int h = (int)(y1 + y2/2); - - ObjectData rect; - float ratio1 = width/640.0; - float ratio2 = height/640.0; - rect.bbox.x = x * ratio1; - rect.bbox.y = y * ratio2; - rect.bbox.width = w *ratio1; - rect.bbox.height = h *ratio2; - rect.confidence = maxScore; - rect.label = maxClassIndex.y; - - winList.push_back(rect); - } - } - } - - winList = doNMS(winList, m_nmsThresh); - for(size_t i =0;i -#include -#include -#include "Configuration.h" -#include "LowlightSnpe.h" - -namespace lowlightsnpe -{ - - /** @brief Constructor - */ - LOWLIGHTSnpe::LOWLIGHTSnpe() : m_isInit(false),m_snperuntime(nullptr) - { - - } - - /** @brief Destructor - */ - LOWLIGHTSnpe::~LOWLIGHTSnpe() - { - DeInitialize(); - } - - /** @brief To read model config and set output layers - * @param 
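// The yolo-x branch above rebuilds the per-cell grid for strides {8, 16, 32} on a 640x640
// input (80x80, 40x40 and 20x20 cells). Standalone sketch of the usual YOLO-X decode for a
// single cell; it assumes the head predicts the centre as a cell offset and the size as a
// log-scale, which is the common convention and may differ in detail from the deleted code.
#include <cmath>
#include <cstdio>

int main()
{
    const int stride = 8;                    // first detection level: 80x80 grid on a 640 input
    const int gx = 5, gy = 7;                // grid cell of this prediction
    const float px = 0.3f, py = 0.7f;        // raw network outputs for the cell (toy values)
    const float pw = 1.2f, ph = 0.9f;

    const float cx = (px + gx) * stride;     // centre in input-image pixels
    const float cy = (py + gy) * stride;
    const float w  = std::exp(pw) * stride;  // width/height in pixels
    const float h  = std::exp(ph) * stride;

    // Corner form, as used before NMS and drawing: (x1, y1) top-left, (x2, y2) bottom-right.
    std::printf("box (%.1f, %.1f) - (%.1f, %.1f)\n",
                cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2);
    return 0;
}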
config model config parameters - * @return true if success;false otherwise - */ - bool LOWLIGHTSnpe::Initialize(const ObjectDetectionSnpeConfig& config) - { - m_snperuntime = std::move(std::unique_ptr(new snperuntime::SNPERuntime())); - - m_inputLayers = config.inputLayers; - m_outputLayers = config.outputLayers; - m_outputTensors = config.outputTensors; - - /** - * To set output layer from model config - */ - m_snperuntime->SetOutputLayers(m_outputLayers); - /** - * To initialize snperuntime - */ - if (!m_snperuntime->Initialize(config.model_path, config.runtime)) - { - LOG_ERROR("Failed to Initialize snpe instance.\n"); - return false; - } - - m_isInit = true; - return true; - } - - - /** @brief To deallocate buffers and reset - */ - bool LOWLIGHTSnpe::DeInitialize() - { - if (m_isInit) - { - m_snperuntime->Deinitialize(); - m_snperuntime.reset(nullptr); - } - - m_isInit = false; - return true; - } - - bool LOWLIGHTSnpe::IsInitialized() const - { - return m_isInit; - } - - /** @brief To preprocess input image - * @param input_image Input image for inference - * @return true if succuess; false otherwise - */ - bool LOWLIGHTSnpe::PreProcessInput(const cv::Mat& input_image,string model_name) - { - if (input_image.empty()) { - LOG_ERROR("Invalid image!\n"); - return false; - } - - auto inputShape = m_snperuntime->GetInputShape(m_inputLayers[0]); - int model_h = inputShape[1]; - int model_w = inputShape[2]; - int channels = inputShape[3]; - - cv::Mat image(model_h, model_w, CV_32FC3,cv::Scalar(0.0)); - cv::resize(input_image,image,cv::Size(model_h,model_w),cv::INTER_CUBIC); - cv::cvtColor(image, image, cv::COLOR_BGR2RGB); - - cv::Mat input(model_h, model_w, CV_32FC3, cv::Scalar(0.0)); - image.convertTo(input, CV_32FC3,1.0); - - vector app_vect; - - if (input.isContinuous()) - { - app_vect.assign((float*)input.data, (float*)input.data + input.total()*input.channels()); - } - else - { - for (int i = 0; i < input.rows; ++i) - { - app_vect.insert(app_vect.end(), input.ptr(i), input.ptr(i)+input.cols*input.channels()); - } - } - - float ***app = new float**[model_w]; - for (int i = 0; i < model_w; ++i) - { - app[i] = new float*[model_h]; - for (int j = 0; j < model_h; ++j) - app[i][j] = new float[channels]; - } - - for(int i = 0;iGetInputTensor(m_inputLayers[0]); - if (input_tensor == nullptr) { - LOG_ERROR("Empty input tensor\n"); - return false; - } - - float* pdata = (float*)(input.data); - for(int i = 0;iexecute()) { - LOG_ERROR("SNPERuntime execute failed."); - return false; - } - /** - * Postprocessing - */ - if(PostProcess(output_image,model_name) != true) - { - LOG_ERROR("PostProcess failed\n"); - return false; - } - return true; - } - - /** @brief Superres postprocess - * @param output_image Enhanced image - * @param model_name To identify model for specific post-processing - */ - bool LOWLIGHTSnpe::PostProcess(cv::Mat& output_image,string model_name) - { - auto outputShape = m_snperuntime->GetOutputShape(m_outputTensors[0]); - float *predOutput = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - - if(predOutput == nullptr) - { - return false; - } - int height = outputShape[1]; - int width = outputShape[2]; - int channels = outputShape[3]; - - cv::Mat temp0(cv::Size(width,height), CV_32FC3, predOutput); - cv::cvtColor(temp0, temp0, cv::COLOR_RGB2BGR); - - vector app_vect; - - if (temp0.isContinuous()) - { - app_vect.assign((float*)temp0.data, (float*)temp0.data + temp0.total()*temp0.channels()); - } - else - { - for (int i = 0; i < temp0.rows; ++i) - { - 
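// The low-light preprocessing above copies a float HWC cv::Mat into the SNPE input tensor
// through nested heap arrays. When the network expects the same HWC float layout, a
// contiguous Mat can be copied with a single memcpy; minimal sketch, assuming a 3-channel
// BGR input and an HWC float32 tensor of size h * w * 3.
#include <cstring>
#include <opencv2/opencv.hpp>

static void copy_hwc_to_tensor(const cv::Mat& img, float* tensor, int h, int w)
{
    cv::Mat f;
    img.convertTo(f, CV_32FC3);              // ensure float32, 3 channels
    cv::resize(f, f, cv::Size(w, h));
    if (!f.isContinuous())
        f = f.clone();                       // make the pixel buffer contiguous
    std::memcpy(tensor, f.ptr<float>(0), sizeof(float) * h * w * 3);
}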
app_vect.insert(app_vect.end(), temp0.ptr(i), temp0.ptr(i)+temp0.cols*temp0.channels()); - } - } - - float ***app = new float**[channels]; - for (int i = 0; i < channels; ++i) - { - app[i] = new float*[width]; - for (int j = 0; j < width; ++j) - app[i][j] = new float[height]; - } - - for(int i = 0;i app_t_vec; - - for(int i = 0;i255.0) - x = 255.0; - app_t_vec.push_back(x); - } - } - } - - output_image = cv::Mat(width, height, CV_32FC3,cv::Scalar(0.0)); - float* pdata = (float*)(output_image.data); - for (int i = 0; i < channels*width*height; i++) - { - float x = app_t_vec[i]; - *pdata = x; - pdata += 1; - } - output_image.convertTo(output_image,CV_8UC3); - - for (int i = 0; i < channels; ++i) - { - for (int j = 0; j < width; ++j) - { - delete [] app[i][j]; - } - delete [] app[i]; - } - delete [] app; - app = NULL; - - return true; - } - -} // namespace lowlightsnpe diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/ModelInference.cpp b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/ModelInference.cpp deleted file mode 100644 index ae01891d..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/ModelInference.cpp +++ /dev/null @@ -1,221 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include "ModelInference.h" -#include "Configuration.h" -#include "SuperresolutionSnpe.h" -#include "DetectionSnpe.h" -#include "LowlightSnpe.h" -#include "SegmentationSnpe.h" - -using namespace std; -using namespace cv; -using namespace superressnpe; -using namespace detectionsnpe; -using namespace lowlightsnpe; -using namespace segmentationsnpe; - -/** @brief contructor -*/ -ModelInference::ModelInference() -{ - Impl = new SUPERRESSnpe(); -} - -/** @brief Parameter constructor - * @param model_type To check model type from config file -*/ -ModelInference::ModelInference(const string model_type) -{ - if (model_type.compare("superresolution") == 0) { - Impl = new SUPERRESSnpe(); - Model = SUPERRESOLUTION; - } - else if(model_type.compare("detection") == 0) - { - Impl = new DETECTIONSnpe(); - Model = DETECTION; - } - else if(model_type.compare("lowlight") == 0) - { - Impl = new LOWLIGHTSnpe(); - Model = LOWLIGHT; - } - else if(model_type.compare("segmentation") == 0) - { - Impl = new SEGMENTATIONSnpe(); - Model = SEGMENTATION; - } - else - LOG_ERROR("Model implementation not found\n"); - - LOG_INFO("Initialized model = %s \n", model_type.c_str()); - -} - -/** @brief destructor -*/ -ModelInference::~ModelInference() -{ - if (nullptr != Impl) - { - if (Model == SUPERRESOLUTION) - { - delete static_cast(Impl); - } - else if(Model == DETECTION) - { - delete static_cast(Impl); - } - else if(Model == LOWLIGHT) - { - delete static_cast(Impl); - } - else if(Model == SEGMENTATION) - { - delete static_cast(Impl); - } - Impl = nullptr; - } -} - -/** @brief For model inference - * @param item contains image buffer and results object to store results - * @return true if success -*/ -int ModelInference::Inference(cv::Mat input,cv::Mat& output_image,string model_name) -{ - int ret=0; - if (nullptr != Impl && IsInitialized()) - { - if (Model == 
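// ModelInference above stores its implementation as a void* plus a model enum and
// static_casts on every call. Minimal standalone sketch of that dispatch idea with
// hypothetical detector types; a shared abstract base class with virtual Detect() /
// DeInitialize() would remove the casts, at the cost of touching every wrapper class.
#include <cstdio>

struct DetectorA { bool Detect() { std::puts("A::Detect"); return true; } };
struct DetectorB { bool Detect() { std::puts("B::Detect"); return true; } };

enum ModelKind { KIND_A, KIND_B };

struct Dispatcher
{
    void*     impl;
    ModelKind kind;

    bool Detect()
    {
        if (kind == KIND_A) return static_cast<DetectorA*>(impl)->Detect();
        return static_cast<DetectorB*>(impl)->Detect();
    }
};

int main()
{
    DetectorA a;
    Dispatcher d{&a, KIND_A};
    return d.Detect() ? 0 : 1;
}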
SUPERRESOLUTION) - { - ret = static_cast(Impl)->Detect(input,output_image, model_name); - } - else if(Model == DETECTION) - { - ret = static_cast(Impl)->Detect(input, output_image,model_name); - } - else if(Model == LOWLIGHT) - { - ret = static_cast(Impl)->Detect(input, output_image,model_name); - } - else if(Model == SEGMENTATION) - { - ret = static_cast(Impl)->Detect(input,output_image, model_name); - } - } - return ret; -} - -/** @brief To intialize SNPE - * @param contains SNPE configuration - * @return true if success -*/ -int ModelInference::Initialization(const ObjectDetectionSnpeConfig& config) -{ - int ret=0; - if (IsInitialized()) { - if (Model == SUPERRESOLUTION) - { - ret = static_cast(Impl)->DeInitialize() && static_cast(Impl)->Initialize(config); - } - else if(Model == DETECTION) - { - ret = static_cast(Impl)->DeInitialize() && static_cast(Impl)->Initialize(config); - } - else if(Model == LOWLIGHT) - { - ret = static_cast(Impl)->DeInitialize() && static_cast(Impl)->Initialize(config); - } - else if(Model == SEGMENTATION) - { - ret = static_cast(Impl)->DeInitialize() && static_cast(Impl)->Initialize(config); - } - } - else - { - if (Model == SUPERRESOLUTION) - { - ret = static_cast(Impl)->Initialize(config); - } - else if(Model == DETECTION) - { - ret = static_cast(Impl)->Initialize(config); - } - else if(Model == LOWLIGHT) - { - ret = static_cast(Impl)->Initialize(config); - } - else if(Model == SEGMENTATION) - { - ret = static_cast(Impl)->Initialize(config); - } - } - return ret; -} - -/** @brief To uninitialize SNPE - * @return true if success -*/ -bool ModelInference::UnInitialization() -{ - bool ret=false; - if (nullptr != Impl && IsInitialized()) - { - if (Model == SUPERRESOLUTION) - { - ret = static_cast(Impl)->DeInitialize(); - } - else if(Model == DETECTION) - { - ret = static_cast(Impl)->DeInitialize(); - } - else if(Model == LOWLIGHT) - { - ret = static_cast(Impl)->DeInitialize(); - } - else if(Model == SEGMENTATION) - { - ret = static_cast(Impl)->DeInitialize(); - } - } - else - { - LOG_ERROR("ObjectDetection: deinit failed!\n"); - ret = false; - } - return ret; -} - -/** @brief To check if SNPE is initialized - * @return true if already inititalized -*/ -bool ModelInference::IsInitialized() -{ - bool ret=false; - if (Model == SUPERRESOLUTION) - { - ret = static_cast(Impl)->IsInitialized(); - } - else if(Model == DETECTION) - { - ret = static_cast(Impl)->IsInitialized(); - } - else if(Model == LOWLIGHT) - { - ret = static_cast(Impl)->IsInitialized(); - } - else if(Model == SEGMENTATION) - { - ret = static_cast(Impl)->IsInitialized(); - } - return ret; -} - diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/SNPERuntime.cpp b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/SNPERuntime.cpp deleted file mode 100644 index febc4e0f..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/SNPERuntime.cpp +++ /dev/null @@ -1,426 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include "SNPERuntime.h" - -namespace snperuntime{ - - /** @brief SNPE constructor - */ - SNPERuntime::SNPERuntime() - { - static zdl::DlSystem::Version_t version = zdl::SNPE::SNPEFactory::getLibraryVersion(); - LOG_INFO("Using SNPE: '%s' \n", version.asString().c_str()); - } - - /** @brief To calculate buffer size for memory allocation - * @return buffer size - */ - static size_t calcSizeFromDims(const zdl::DlSystem::Dimension* dims, size_t rank, size_t elementSize) - { - if (rank == 0) return 0; - size_t size = elementSize; - while (rank--) { - size *= *dims; - dims++; - } - return size; - } - - /** @brief To create userbuffer - */ - void CreateUserBuffer(zdl::DlSystem::UserBufferMap& userBufferMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - const zdl::DlSystem::TensorShape& bufferShape, - const char* name) - { - size_t bufferElementSize = sizeof(float); - - /** - * To calculate stride based on buffer strides - * Note: Strides = Number of bytes to advance to the next element in each dimension. - * For example, if a float tensor of dimension 2x4x3 is tightly packed in a buffer of 96 bytes, then the strides would be (48,12,4) - */ - std::vector strides(bufferShape.rank()); - strides[strides.size() - 1] = bufferElementSize; - size_t stride = strides[strides.size() - 1]; - for (size_t i = bufferShape.rank() - 1; i > 0; i--) - { - stride *= bufferShape[i]; - strides[i - 1] = stride; - } - - size_t bufSize = calcSizeFromDims(bufferShape.getDimensions(), bufferShape.rank(), bufferElementSize); - - /** - * To set the buffer encoding type - */ - zdl::DlSystem::UserBufferEncodingFloat userBufferEncodingFloat; - /** - * To create user-backed storage to load input data onto it - */ - applicationBuffers.emplace(name, std::vector(bufSize / bufferElementSize)); - /** - * To create SNPE user buffer from the user-backed buffer - */ - zdl::DlSystem::IUserBufferFactory& ubFactory = zdl::SNPE::SNPEFactory::getUserBufferFactory(); - snpeUserBackedBuffers.push_back(ubFactory.createUserBuffer((void*)applicationBuffers.at(name).data(), - bufSize, - strides, - &userBufferEncodingFloat)); - /** - * To add the user-backed buffer to the inputMap, which is later on fed to the network for execution - */ - if (snpeUserBackedBuffers.back() == nullptr) - { - std::cerr << "Error while creating user buffer." << std::endl; - } - userBufferMap.add(name, snpeUserBackedBuffers.back().get()); - } - - /** @brief To set SNPERuntime - * @param runtime contains SNPERuntime value - */ - void SNPERuntime::setTargetRuntime(const runtime_t runtime) - { - switch (runtime) { - case DSP: - m_runtime = zdl::DlSystem::Runtime_t::DSP; - break; - default: - m_runtime = zdl::DlSystem::Runtime_t::CPU; - break; - } - - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(m_runtime)) { - LOG_ERROR("Selected runtime not present. 
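// Worked example of the stride/size computation in CreateUserBuffer() above: a tightly packed
// float tensor of shape 2x4x3 (element size 4 bytes) has byte strides (48, 12, 4) and a total
// buffer size of 96 bytes. Standalone sketch with plain std::vector dimensions.
#include <cstdio>
#include <vector>

int main()
{
    const std::vector<size_t> dims = {2, 4, 3};
    const size_t elem = sizeof(float);

    std::vector<size_t> strides(dims.size());
    strides.back() = elem;                                // innermost stride = element size
    for (size_t i = dims.size() - 1; i > 0; --i)
        strides[i - 1] = strides[i] * dims[i];            // outer stride = inner stride * inner dim

    size_t bytes = elem;
    for (size_t d : dims) bytes *= d;

    std::printf("strides = (%zu, %zu, %zu), total = %zu bytes\n",
                strides[0], strides[1], strides[2], bytes);   // (48, 12, 4), 96
    return 0;
}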
Falling back to CPU.\n"); - m_runtime = zdl::DlSystem::Runtime_t::CPU; - } - } - - /** @brief To set performance profile - * @param perfprofile contains performance value - */ - void SNPERuntime::setPerformanceProfile(const performance_t perfprofile) - { - switch (perfprofile) { - case BALANCED: - m_profile = zdl::DlSystem::PerformanceProfile_t::BALANCED; - break; - case HIGH_PERFORMANCE: - m_profile = zdl::DlSystem::PerformanceProfile_t::HIGH_PERFORMANCE; - break; - case POWER_SAVER: - m_profile = zdl::DlSystem::PerformanceProfile_t::POWER_SAVER; - break; - case SUSTAINED_HIGH_PERFORMANCE: - m_profile = zdl::DlSystem::PerformanceProfile_t::SUSTAINED_HIGH_PERFORMANCE; - break; - case BURST: - m_profile = zdl::DlSystem::PerformanceProfile_t::BURST; - break; - case LOW_POWER_SAVER: - m_profile = zdl::DlSystem::PerformanceProfile_t::LOW_POWER_SAVER; - break; - case HIGH_POWER_SAVER: - m_profile = zdl::DlSystem::PerformanceProfile_t::HIGH_POWER_SAVER; - break; - case LOW_BALANCED: - m_profile = zdl::DlSystem::PerformanceProfile_t::LOW_BALANCED; - break; - case SYSTEM_SETTINGS: - m_profile = zdl::DlSystem::PerformanceProfile_t::SYSTEM_SETTINGS; - break; - default: - m_profile = zdl::DlSystem::PerformanceProfile_t::BALANCED; - break; - } - LOG_DEBUG("Choose performance: %d, Set performance: %d \n", perfprofile, (int)m_profile); - } - - /** @brief To initialize SNPERuntime - * @param dlc_path contains dlc path from the config file - * @param runtime SNPERuntime value - * @return true if success; false otherwise - */ - bool SNPERuntime::Initialize(const std::string& dlc_path, const runtime_t runtime) - { - setTargetRuntime(runtime); - setPerformanceProfile(BURST); - /** - * To read dlc from dlc_path - */ - m_container = zdl::DlContainer::IDlContainer::open(dlc_path); - /** - * To create snpeBuilder from m_container based on runtime,performance profile - */ - std::vector runtimeStrVector; - switch (runtime) - { - case CPU: - runtimeStrVector.push_back("cpu_float32"); - runtimeStrVector.push_back("dsp_fixed8_tf"); - LOG_INFO("Runtime = CPU \n"); - break; - - case DSP: - runtimeStrVector.push_back("dsp_fixed8_tf"); - runtimeStrVector.push_back("cpu_float32"); - LOG_INFO("Runtime = DSP \n"); - break; - - } - //std::vector runtimeStrVector = {"dsp_fixed8_tf","gpu_float16","cpu_float32"}; - zdl::DlSystem::RuntimeList runtimeList; - - runtimeList.clear(); - for(auto& runtimeStr : runtimeStrVector) - { - zdl::DlSystem::Runtime_t runtime = zdl::DlSystem::RuntimeList::stringToRuntime(runtimeStr.c_str()); - if(runtime != zdl::DlSystem::Runtime_t::UNSET) - { - bool ret = runtimeList.add(runtime); - if(ret == false) - { - std::cerr <getInputTensorNames(); - if (!inputNamesOpt) throw std::runtime_error("Error obtaining input tensor names\n"); - const zdl::DlSystem::StringList& inputNames = *inputNamesOpt; - - /** - * To create SNPE user buffers for each application storage buffer - */ - for (const char* name : inputNames) - { - /** - * To get attributes of buffer by name - */ - auto bufferAttributesOpt = m_snpe->getInputOutputBufferAttributes(name); - if (!bufferAttributesOpt) - { - LOG_ERROR("Error obtaining attributes for input tensor: %s\n", name); - return false; - } - - const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); - std::vector tensorShape; - for (size_t j = 0; j < bufferShape.rank(); j++) - { - tensorShape.push_back(bufferShape[j]); - } - m_inputShapes.emplace(name, tensorShape); - - CreateUserBuffer(m_inputUserBufferMap, m_applicationInputBuffers, m_inputUserBuffers, 
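// SNPERuntime::Initialize() above builds an ordered runtime preference list: DSP first with a
// CPU fallback, or CPU first with a DSP fallback, using the same runtime identifiers shown in
// the deleted code. Standalone sketch of that ordering (SNPE itself is not required here).
#include <cstdio>
#include <string>
#include <vector>

enum runtime_t { CPU = 0, DSP };

static std::vector<std::string> preference_list(runtime_t r)
{
    if (r == DSP)
        return {"dsp_fixed8_tf", "cpu_float32"};   // try the fixed-point DSP path, fall back to CPU
    return {"cpu_float32", "dsp_fixed8_tf"};       // CPU first
}

int main()
{
    for (const std::string& s : preference_list(DSP))
        std::printf("%s\n", s.c_str());
    return 0;
}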
bufferShape, name); - } - - /** - * To get output tensor names of the network that need to be populated - */ - const auto& outputNamesOpt = m_snpe->getOutputTensorNames(); - if (!outputNamesOpt) throw std::runtime_error("Error obtaining output tensor names\n"); - const zdl::DlSystem::StringList& outputNames = *outputNamesOpt; - - /** - * To create SNPE user buffers for each application storage buffer - */ - for (const char* name : outputNames) - { - // get attributes of buffer by name - auto bufferAttributesOpt = m_snpe->getInputOutputBufferAttributes(name); - if (!bufferAttributesOpt) - { - LOG_ERROR("Error obtaining attributes for input tensor: %s\n", name); - return false; - } - - const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); - std::vector tensorShape; - for (size_t j = 0; j < bufferShape.rank(); j++) { - tensorShape.push_back(bufferShape[j]); - } - m_outputShapes.emplace(name, tensorShape); - - CreateUserBuffer(m_outputUserBufferMap, m_applicationOutputBuffers, m_outputUserBuffers, bufferShape, name); - } - - m_isInit = true; - - return true; - } - - /** @brief To deinitialize SNPERuntime - */ - bool SNPERuntime::Deinitialize() - { - if (nullptr != m_snpe) - { - m_snpe.reset(nullptr); - } - - for (auto [k, v] : m_applicationInputBuffers) ClearVector(v); - for (auto [k, v] : m_applicationOutputBuffers) ClearVector(v); - return true; - } - - /** @brief To store output layers for each model - * @param outputlayers contains output layers defined in the config file - */ - bool SNPERuntime::SetOutputLayers(std::vector& outputLayers) - { - for (size_t i = 0; i < outputLayers.size(); i ++) - { - m_outputLayers.append(outputLayers[i].c_str()); - } - - return true; - } - - /** @brief To get input shape for each model - * @param name contains name of input layer - * @return shape of input layer if success; empty otherwise - */ - std::vector SNPERuntime::GetInputShape(const std::string& name) - { - /** - * To check if runtime is initialized and layer name is a part of input - */ - if (IsInit()) { - if (m_inputShapes.find(name) != m_inputShapes.end()) - { - return m_inputShapes.at(name); - } - LOG_ERROR("Can't find any input layer named %s\n", name.c_str()); - return {}; - } else { - LOG_ERROR("GetInputShape Failed: SNPE Init Failed !!!\n"); - return {}; - } - } - - /** @brief To get output shape for each model - * @param name contains name of output layers - * @return shape of output layer if success; empty otherwise - */ - std::vector SNPERuntime::GetOutputShape(const std::string& name) - { - /** - * To check if runtime is initialized and layer name is a part of output - */ - if (IsInit()) - { - if (m_outputShapes.find(name) != m_outputShapes.end()) - { - return m_outputShapes.at(name); - } - LOG_ERROR("Can't find any ouput layer named %s\n", name.c_str()); - return {}; - } - else - { - LOG_ERROR("GetOutputShape Failed: SNPE Init Failed !!!\n"); - return {}; - } - } - - - /** @brief To get input tensor for each model - * @param name contains name of input layer - * @return shape of input tensor if success; NULL otherwise - */ - float* SNPERuntime::GetInputTensor(const std::string& name) - { - /** - * To check if runtime is initialized and layer name is a part of input - */ - if (IsInit()) - { - if (m_applicationInputBuffers.find(name) != m_applicationInputBuffers.end()) - { - return m_applicationInputBuffers.at(name).data(); - } - LOG_ERROR("Can't find any input tensor named '%s' \n", name.c_str()); - return nullptr; - } - else - { - LOG_ERROR("GetInputTensor 
Failed: SNPE Init Failed !!!\n"); - return nullptr; - } - } - - /** @brief To get output tensor for each model - * @param name contains name of output layer - * @return shape of output tensor if success; NULL otherwise - */ - - float* SNPERuntime::GetOutputTensor(const std::string& name) - { - /** - * To check if runtime is initialized and layer name is a part of output - */ - if (IsInit()) - { - if (m_applicationOutputBuffers.find(name) != m_applicationOutputBuffers.end()) - { - return m_applicationOutputBuffers.at(name).data(); - } - LOG_ERROR("Can't find any output tensor named '%s' \n", name.c_str()); - return nullptr; - } - else - { - LOG_ERROR("GetOutputTensor Failed: SNPE Init Failed !!!"); - return nullptr; - } - } - - /** @brief To execute inference on target - * @return QS_SUCCESS if success; QS_FAIL otherwise - */ - bool SNPERuntime::execute() - { - if (!m_snpe->execute(m_inputUserBufferMap, m_outputUserBufferMap)) - { - LOG_ERROR("SNPE Task execute failed: %s\n", zdl::DlSystem::getLastErrorString()); - return false; - } - - return true; - } - -} // namespace snperuntime \ No newline at end of file diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/SegmentationSnpe.cpp b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/SegmentationSnpe.cpp deleted file mode 100644 index f51117bd..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/SegmentationSnpe.cpp +++ /dev/null @@ -1,481 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -#include -#include -#include "Configuration.h" -#include "SegmentationSnpe.h" - -namespace segmentationsnpe -{ - - /** @brief Constructor - */ - SEGMENTATIONSnpe::SEGMENTATIONSnpe() : m_isInit(false),m_snperuntime(nullptr) - { - - } - - /** @brief Destructor - */ - SEGMENTATIONSnpe::~SEGMENTATIONSnpe() - { - DeInitialize(); - } - - /** @brief To read model config and set output layers - * @param config model config parameters - * @return true if success;false otherwise - */ - bool SEGMENTATIONSnpe::Initialize(const ObjectDetectionSnpeConfig& config) - { - m_snperuntime = std::move(std::unique_ptr(new snperuntime::SNPERuntime())); - - m_inputLayers = config.inputLayers; - m_outputLayers = config.outputLayers; - m_outputTensors = config.outputTensors; - m_nmsThresh = config.nmsThresh; - m_confThresh = config.confThresh; - - /** - * To set output layer from model config - */ - m_snperuntime->SetOutputLayers(m_outputLayers); - /** - * To initialize snperuntime - */ - if (!m_snperuntime->Initialize(config.model_path, config.runtime)) { - LOG_ERROR("Failed to Initialize snpe instance.\n"); - return false; - } - - m_isInit = true; - return true; - } - - /** @brief To deallocate buffers - */ - bool SEGMENTATIONSnpe::DeInitialize() - { - if (m_isInit) { - m_snperuntime->Deinitialize(); - m_snperuntime.reset(nullptr); - } - - m_isInit = false; - return true; - } - - bool SEGMENTATIONSnpe::SetScoreThresh(const float& conf_thresh, const float& nms_thresh = 0.5) - { - this->m_nmsThresh = nms_thresh; - this->m_confThresh = conf_thresh; - return true; - } - - bool SEGMENTATIONSnpe::IsInitialized() const - { - return m_isInit; - } - - /** @brief To preprocess input image - * @param input_image Input image for inference - * @return true if succuess; false otherwise - */ - bool SEGMENTATIONSnpe::PreProcessInput(const cv::Mat& input_image,string model_name) - { - if (input_image.empty()) { - LOG_ERROR("Invalid image!\n"); - return false; - } - - auto inputShape = m_snperuntime->GetInputShape(m_inputLayers[0]); - int model_h = inputShape[1]; - int model_w = inputShape[2]; - - if (m_snperuntime->GetInputTensor(m_inputLayers[0]) == nullptr) - { - LOG_ERROR("Empty input tensor\n"); - return false; - } - - cv::Mat image = cv::Mat(model_h,model_w, CV_32FC3, Scalar(0.)); - cv::resize(input_image,image,cv::Size(model_h,model_w)); - cv::Mat input(model_h, model_w, CV_32FC3, m_snperuntime->GetInputTensor(m_inputLayers[0])); - - if(model_name.compare("DeepLabv3Plus-resnet++") == 0 || model_name.compare("DeepLabv3-resnet101") == 0 || model_name.compare("DeepLabv3-resnet50") == 0 || model_name.compare("FCN_resnet101") == 0 || model_name.compare("FCN_resnet50") == 0) - { - cv::resize(image,image,cv::Size(model_w,model_h)); - image.convertTo(input,CV_32FC3,1.0); - const float mean_vals[3] = {0.485, 0.456, 0.406}; - const float norm_vals[3] = {0.229, 0.224, 0.225}; - for (int i = 0; i < input.rows; i++) - { - float* pdata = (float*)(input.data + i * input.step); - for (int j = 0; j < input.cols; j++) - { - float x = pdata[2], y=pdata[1], z = pdata[0]; - pdata[0] = (x / 255.0 - mean_vals[0]) / norm_vals[0]; - pdata[1] = (y / 255.0 - mean_vals[1]) / norm_vals[1]; - pdata[2] = (z / 255.0 - mean_vals[2]) / norm_vals[2]; - pdata += 3; - } - } - } - return true; - } - - /** @brief To preprocess,execute and postprocess - * @param input_image Input image for inference - * 
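// The DeepLab/FCN preprocessing above applies the ImageNet normalisation per channel,
// (x / 255 - mean) / std, while also swapping BGR to RGB. The same result can be obtained
// with vectorised OpenCV channel operations; minimal sketch, assuming a CV_8UC3 BGR input.
#include <opencv2/opencv.hpp>

static cv::Mat normalize_imagenet(const cv::Mat& bgr, int w, int h)
{
    cv::Mat rgb, f;
    cv::resize(bgr, rgb, cv::Size(w, h));
    cv::cvtColor(rgb, rgb, cv::COLOR_BGR2RGB);
    rgb.convertTo(f, CV_32FC3, 1.0 / 255.0);                // scale to [0, 1]
    cv::subtract(f, cv::Scalar(0.485, 0.456, 0.406), f);    // per-channel mean
    cv::divide(f, cv::Scalar(0.229, 0.224, 0.225), f);      // per-channel std
    return f;                                               // HWC float32, RGB order
}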
@param output_image Inference output image - * @param model_name To identify model for specific post-processing - * @return true if success; false otherwise - */ - bool SEGMENTATIONSnpe::Detect(cv::Mat image,cv::Mat& output_image,string model_name) - { - /** - * Preprocessing image - */ - if(PreProcessInput(image, model_name) != true) - { - LOG_ERROR("PreProcess failed\n"); - return false; - } - /** - * Inferencing model on target - */ - if (!m_snperuntime->execute()) { - LOG_ERROR("SNPERuntime execute failed."); - return false; - } - /** - * Postprocessing - */ - if(PostProcess(image,output_image,model_name) != true) - { - LOG_ERROR("PostProcess failed\n"); - return false; - } - return true; - } - - /** @brief postprocess to overlay segmentation - * @param output_image Overlayed image - * @param model_name To identify model for specific post-processing - */ - bool SEGMENTATIONSnpe::PostProcess( cv::Mat image,cv::Mat& output_image,string model_name) - { - auto outputShape = m_snperuntime->GetOutputShape(m_outputTensors[0]); - float *predOutput = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - - if(predOutput == nullptr) - { - return false; - } - int height = outputShape[1]; - int width = outputShape[2]; - int channels = outputShape[3]; - - cv::Mat temp = cv::Mat(height,width, CV_8UC3); - vector app_vect; - - float ***app = new float**[height]; - for (int i = 0; i < height; ++i) - { - app[i] = new float*[width]; - for (int j = 0; j < width; ++j) - app[i][j] = new float[channels]; - } - - for(int i = 0;i app_t_vec; - - for(int i = 0;i < channels;i++) - { - for (int j = 0; j < width; j++) - { - for (int k = 0; k < height; k++) - { - float x = app[j][k][i]; - app_t_vec.push_back(x); - } - } - } - - float ***app_t=NULL; - - app_t = new float**[channels]; - for (int i = 0; i < channels; ++i) - { - app_t[i] = new float*[width]; - for (int j = 0; j < width; ++j) - app_t[i][j] = new float[height]; - } - - for(int i =0;i> colors_res = { - { 0, 0, 0},{128, 0, 0},{ 0, 128, 0},{128, 128, 0},{ 0, 0, 128}, - {128, 0, 128},{ 0, 128, 128},{128, 128, 128},{ 64, 0, 0},{192, 0, 0}, - { 64, 128, 0},{192, 128, 0},{ 64, 0, 128},{192, 0, 128},{ 64, 128, 128}, - {192, 128, 128},{ 0, 64, 0},{128, 64, 0},{ 0, 192, 0},{128, 192, 0}, - { 0, 64, 128},{128, 64, 128},{ 0, 192, 128},{128, 192, 128},{ 64, 64, 0}, - {192, 64, 0},{ 64, 192, 0},{192, 192, 0},{ 64, 64, 128},{192, 64, 128}, - { 64, 192, 128},{192, 192, 128},{ 0, 0, 64},{128, 0, 64},{ 0, 128, 64}, - {128, 128, 64},{ 0, 0, 192},{128, 0, 192},{ 0, 128, 192},{128, 128, 192}, - { 64, 0, 64},{192, 0, 64},{ 64, 128, 64},{192, 128, 64},{ 64, 0, 192}, - {192, 0, 192},{ 64, 128, 192},{192, 128, 192},{ 0, 64, 64},{128, 64, 64}, - { 0, 192, 64},{128, 192, 64},{ 0, 64, 192},{128, 64, 192},{ 0, 192, 192}, - {128, 192, 192},{ 64, 64, 64},{192, 64, 64},{ 64, 192, 64},{192, 192, 64}, - { 64, 64, 192},{192, 64, 192},{ 64, 192, 192},{192, 192, 192},{ 32, 0, 0}, - {160, 0, 0},{ 32, 128, 0},{160, 128, 0},{ 32, 0, 128},{160, 0, 128}, - { 32, 128, 128},{160, 128, 128},{ 96, 0, 0},{224, 0, 0},{ 96, 128, 0}, - {224, 128, 0},{ 96, 0, 128},{224, 0, 128},{ 96, 128, 128},{224, 128, 128}, - { 32, 64, 0},{160, 64, 0},{ 32, 192, 0},{160, 192, 0},{ 32, 64, 128}, - {160, 64, 128},{ 32, 192, 128},{160, 192, 128},{ 96, 64, 0},{224, 64, 0}, - { 96, 192, 0},{224, 192, 0},{ 96, 64, 128},{224, 64, 128},{ 96, 192, 128}, - {224, 192, 128},{ 32, 0, 64},{160, 0, 64},{ 32, 128, 64},{160, 128, 64}, - { 32, 0, 192},{160, 0, 192},{ 32, 128, 192},{160, 128, 192},{ 96, 0, 64}, - {224, 0, 64},{ 96, 128, 
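// The segmentation postprocess in this file takes, for every pixel, the arg-max over the
// class channel of the HWC network output and then paints that class id with a palette
// colour. Standalone sketch of the per-pixel arg-max on a flat HWC buffer (toy values).
#include <cstdio>
#include <vector>

int main()
{
    const int H = 2, W = 2, C = 3;                 // toy output: 2x2 pixels, 3 classes
    const std::vector<float> out = {               // HWC layout: out[(y * W + x) * C + c]
        0.1f, 0.7f, 0.2f,   0.9f, 0.05f, 0.05f,
        0.2f, 0.2f, 0.6f,   0.3f, 0.4f,  0.3f };

    std::vector<int> label(H * W);
    for (int p = 0; p < H * W; ++p) {
        int best = 0;
        for (int c = 1; c < C; ++c)
            if (out[p * C + c] > out[p * C + best]) best = c;
        label[p] = best;                           // class id used to index the colour table
    }
    std::printf("%d %d %d %d\n", label[0], label[1], label[2], label[3]);   // 1 0 2 1
    return 0;
}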
64},{224, 128, 64},{ 96, 0, 192},{224, 0, 192}, - { 96, 128, 192},{224, 128, 192},{ 32, 64, 64},{160, 64, 64},{ 32, 192, 64}, - {160, 192, 64},{ 32, 64, 192},{160, 64, 192},{ 32, 192, 192},{160, 192, 192}, - { 96, 64, 64},{224, 64, 64},{ 96, 192, 64},{224, 192, 64},{ 96, 64, 192}, - {224, 64, 192},{ 96, 192, 192},{224, 192, 192},{ 0, 32, 0},{128, 32, 0}, - { 0, 160, 0},{128, 160, 0},{ 0, 32, 128},{128, 32, 128},{ 0, 160, 128}, - {128, 160, 128},{ 64, 32, 0},{192, 32, 0},{ 64, 160, 0},{192, 160, 0}, - { 64, 32, 128},{192, 32, 128},{ 64, 160, 128},{192, 160, 128},{ 0, 96, 0}, - {128, 96, 0},{ 0, 224, 0},{128, 224, 0},{ 0, 96, 128},{128, 96, 128}, - { 0, 224, 128},{128, 224, 128},{ 64, 96, 0},{192, 96, 0},{ 64, 224, 0}, - {192, 224, 0},{ 64, 96, 128},{192, 96, 128},{ 64, 224, 128},{192, 224, 128}, - { 0, 32, 64},{128, 32, 64},{ 0, 160, 64},{128, 160, 64},{ 0, 32, 192}, - {128, 32, 192},{ 0, 160, 192},{128, 160, 192},{ 64, 32, 64},{192, 32, 64}, - { 64, 160, 64},{192, 160, 64},{ 64, 32, 192},{192, 32, 192},{ 64, 160, 192}, - {192, 160, 192},{ 0, 96, 64},{128, 96, 64},{ 0, 224, 64},{128, 224, 64}, - { 0, 96, 192},{128, 96, 192},{ 0, 224, 192},{128, 224, 192},{ 64, 96, 64}, - {192, 96, 64},{ 64, 224, 64},{192, 224, 64},{ 64, 96, 192},{192, 96, 192}, - { 64, 224, 192},{192, 224, 192},{ 32, 32, 0},{160, 32, 0},{ 32, 160, 0}, - {160, 160, 0},{ 32, 32, 128},{160, 32, 128},{ 32, 160, 128},{160, 160, 128}, - { 96, 32, 0},{224, 32, 0},{ 96, 160, 0},{224, 160, 0},{ 96, 32, 128}, - {224, 32, 128},{ 96, 160, 128},{224, 160, 128},{ 32, 96, 0},{160, 96, 0}, - { 32, 224, 0},{160, 224, 0},{ 32, 96, 128},{160, 96, 128},{ 32, 224, 128}, - {160, 224, 128},{ 96, 96, 0},{224, 96, 0},{ 96, 224, 0},{224, 224, 0}, - { 96, 96, 128},{224, 96, 128},{ 96, 224, 128},{224, 224, 128},{ 32, 32, 64}, - {160, 32, 64},{ 32, 160, 64},{160, 160, 64},{ 32, 32, 192},{160, 32, 192}, - { 32, 160, 192},{160, 160, 192},{ 96, 32, 64},{224, 32, 64},{ 96, 160, 64}, - {224, 160, 64},{ 96, 32, 192},{224, 32, 192},{ 96, 160, 192},{224, 160, 192}, - { 32, 96, 64},{160, 96, 64},{ 32, 224, 64},{160, 224, 64},{ 32, 96, 192}, - {160, 96, 192},{ 32, 224, 192},{160, 224, 192},{ 96, 96, 64},{224, 96, 64}, - { 96, 224, 64},{224, 224, 64},{ 96, 96, 192},{224, 96, 192},{ 96, 224, 192}, - {224, 224, 192} - }; - - int **app_t_max=NULL; - - app_t_max = new int*[width]; - for (int j = 0; j < width; ++j) - { - app_t_max[j] = new int[height]; - } - - vector max_values; - for(int i=0;i max) - { - max = temp; - app_t_max[i][j] = k; - } - } - max_values.push_back(max); - } - } - - vector max_vec; - - for(int i = 0; i< height;i++) - { - for(int j=0;j> color; - color = colors_res; - - for (int i = 0; i < temp.rows; i++) - { - char* pdata = (char*)(temp.data + i * temp.step); - for (int j = 0; j < temp.cols; j++) - { - int id = app_t_max[i][j]; - pdata[0] = color[id][2]; - pdata[1] = color[id][1]; - pdata[2] = color[id][0]; - pdata += 3; - } - } - - for (int j = 0; j < width; ++j) - { - delete [] app_t_max[j]; - } - delete [] app_t_max; - app_t_max = NULL; - - } - else if(model_name.compare("DeepLabv3-resnet101") == 0 || model_name.compare("DeepLabv3-resnet50") == 0 || model_name.compare("FCN_resnet101") == 0 || model_name.compare("FCN_resnet50") == 0) - { - - vector> label_map = { - {0, 0, 0}, // background - {128, 0, 0}, // aeroplane - {0, 128, 0}, // bicycle - {128, 128, 0}, // bird - {0, 0, 128}, // boat - {128, 0, 128}, // bottle - {0, 128, 128}, // bus - {128, 128, 128}, // car - {64, 0, 0}, // cat - {192, 0, 0}, // chair - {64, 128, 0}, // cow - {192, 128, 
0}, // dining table - {64, 0, 128}, // dog - {192, 0, 128}, // horse - {64, 128, 128}, // motorbike - {192, 128, 128}, // person - {0, 64, 0}, // potted plant - {128, 64, 0}, // sheep - {0, 192, 0}, // sofa - {128, 192, 0}, // train - {0, 64, 128} // tv/monitor - }; - - int **app_t_max=NULL; - - app_t_max = new int*[width]; - for (int j = 0; j < width; j++) - { - app_t_max[j] = new int[height]; - } - - vector max_values; - for(int i=0; i max) - { - max = temp; - app_t_max[i][j] = k; - } - } - max_values.push_back(max); - - } - } - - vector max_vec; - - for(int i = 0; i< height;i++) - { - for(int j=0;j -#include -#include -#include "Configuration.h" -#include "SuperresolutionSnpe.h" - -namespace superressnpe { - - /** @brief Constructor - */ - SUPERRESSnpe::SUPERRESSnpe() : m_isInit(false),m_snperuntime(nullptr) - { - - } - - /** @brief Destructor - */ - SUPERRESSnpe::~SUPERRESSnpe() { - DeInitialize(); - } - - /** @brief To read model config and set output layers - * @param config model config parameters - * @return true if success;false otherwise - */ - bool SUPERRESSnpe::Initialize(const ObjectDetectionSnpeConfig& config) - { - m_snperuntime = std::move(std::unique_ptr(new snperuntime::SNPERuntime())); - - m_inputLayers = config.inputLayers; - m_outputLayers = config.outputLayers; - m_outputTensors = config.outputTensors; - - /** - * To set output layer from model config - */ - m_snperuntime->SetOutputLayers(m_outputLayers); - /** - * To initialize snperuntime - */ - if (!m_snperuntime->Initialize(config.model_path, config.runtime)) { - LOG_ERROR("Failed to Initialize snpe instance.\n"); - return false; - } - m_isInit = true; - return true; - } - - /** @brief To deallocate buffers - */ - bool SUPERRESSnpe::DeInitialize() - { - if (m_isInit) { - m_snperuntime->Deinitialize(); - m_snperuntime.reset(nullptr); - } - m_isInit = false; - return true; - } - - bool SUPERRESSnpe::IsInitialized() const - { - return m_isInit; - } - - /** @brief To preprocess input image - * @param input_image Input image for inference - * @return true if succuess; false otherwise - */ - bool SUPERRESSnpe::PreProcessInput(const cv::Mat& input_image,string model_name) - { - if (input_image.empty()) - { - LOG_ERROR("Invalid image!\n"); - return false; - } - - auto inputShape = m_snperuntime->GetInputShape(m_inputLayers[0]); - size_t model_h = inputShape[1]; - size_t model_w = inputShape[2]; - - if (m_snperuntime->GetInputTensor(m_inputLayers[0]) == nullptr) - { - LOG_ERROR("Empty input tensor\n"); - return false; - } - - cv::Mat image; - cv::resize(input_image,image,cv::Size(model_h,model_w),cv::INTER_CUBIC); - cv::cvtColor(image, image, cv::COLOR_BGR2RGB); - cv::Mat input(model_h, model_w, CV_32FC3, m_snperuntime->GetInputTensor(m_inputLayers[0])); - if(model_name.compare("ESRGAN") == 0) - { - image.convertTo(input, CV_32FC3, 1.0); - } - else - { - image.convertTo(input, CV_32FC3, 1.0/255.0); - } - return true; - } - - /** @brief To preprocess,execute and postprocess - * @param input_image Input image for inference - * @param output_image Inference output image - * @param model_name To identify model for specific post-processing - * @return true if success; false otherwise - */ - bool SUPERRESSnpe::Detect(cv::Mat input_image,cv::Mat& output_image, string model_name) - { - /** - * Preprocessing image - */ - PreProcessInput(input_image, model_name); - /** - * Inferencing model on target - */ - if (!m_snperuntime->execute()) { - LOG_ERROR("SNPERuntime execute failed."); - return false; - } - /** - * Postprocessing - */ 
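// The super-resolution pre/post-processing in this file differs per model only in the value
// range: ESRGAN is fed and returns 0..255 floats, while the other models work in 0..1 and are
// scaled back by 255 before the final RGB-to-BGR swap. Minimal OpenCV sketch of that
// post-processing step, wrapping the raw network output without copying it.
#include <string>
#include <opencv2/opencv.hpp>

static cv::Mat to_displayable(float* raw, int h, int w, const std::string& model)
{
    cv::Mat out(cv::Size(w, h), CV_32FC3, raw);    // wraps the HWC float output (RGB order)
    cv::Mat img;
    out.convertTo(img, CV_8UC3, model == "ESRGAN" ? 1.0 : 255.0);
    cv::cvtColor(img, img, cv::COLOR_RGB2BGR);     // back to OpenCV's BGR convention
    return img;
}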
- PostProcess(output_image, model_name); - return true; - } - - /** @brief Superres postprocess - * @param output_image upscaled image - * @param model_name To identify model for specific post-processing - */ - bool SUPERRESSnpe::PostProcess(cv::Mat& output_image,string model_name) - { - auto outputShape = m_snperuntime->GetOutputShape(m_outputTensors[0]); - float *output = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - - int height = outputShape[1]; - int width = outputShape[2]; - - output_image = cv::Mat(cv::Size(width,height), CV_32FC3, output); - if(model_name.compare("ESRGAN") == 0) - { - output_image.convertTo(output_image, CV_8UC3, 1.0); - } - else - { - output_image.convertTo(output_image, CV_8UC3, 255.0); - } - cv::cvtColor(output_image, output_image, cv::COLOR_RGB2BGR); - return true; - } - -} // namespace superressnpe diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/main.cpp b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/main.cpp deleted file mode 100644 index b18c45c9..00000000 --- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/main.cpp +++ /dev/null @@ -1,180 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include "ModelInference.h" -#include "Configuration.h" -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace std; - -/** @brief To intialize and configure the runtime based on the solution - * @param sol_conf contains information about the solution -*/ -void Inference_Image(void *sol_conf, string inputimage, string outputimage) -{ - LOG_DEBUG("InferenceThread \n"); - - SolutionConfiguration *solution_config = (SolutionConfiguration *)sol_conf; - /** - * TO initialize layers and buffers based on model type - */ - shared_ptr shInference; - shInference = std::make_shared(solution_config->model_config->model_type); - shInference->Initialization(*solution_config->model_config.get()); - - /** - * start inferencing on the image buffer - */ - auto start1 = chrono::steady_clock::now(); - cv::Mat input = cv::imread(inputimage, cv::IMREAD_COLOR); - if(input.empty()) - { - LOG_ERROR("Invalid image!\n"); - return; - } - LOG_ERROR("model name = %s\n",solution_config->model_name.c_str()); - cv::Mat output_image; - if(shInference->Inference(input,output_image,solution_config->model_name) == true) - { - auto end1 = chrono::steady_clock::now(); - auto costTime1 = chrono::duration_cast(end1 - start1).count(); - LOG_INFO("Elapsed inference time in milliseconds: %ld ms\n",costTime1); - cv::imwrite(outputimage,output_image); - } - else - { - LOG_ERROR("Model Inference failed\n"); - } - shInference->UnInitialization(); -} - -/** @brief Execution starts from here - * @param argc for total argument count - * @param argv arguments to be passed -*/ - -int main(int argc, char **argv) -{ - /** - * To store config file name passed in argument - */ - const char* inputFile=NULL; - string inputimage,outputimage; - int opt = 0; - /** - * Check if 'h' or 'c' passed in argument - */ - while ((opt = getopt(argc, argv, ":hc:i:o:")) != EOF) - { - switch (opt) - { - case 'h': std::cout - << 
"\nDESCRIPTION:\n" - << "------------\n" - << "Example application demonstrating how to run the use case\n" - << "using the SNPE C++ API.\n" - << "REQUIRED ARGUMENTS:\n" - << "-------------------\n" - << " -c Path to the config json file.\n" - << "Example: ai-solutions -c data/config.json -i image_path -o Output_path\n"; - break; - case 'c': - inputFile = optarg; - LOG_INFO("Path to config file = %s \n", inputFile); - break; - case 'i': - inputimage = optarg; - LOG_INFO(" input image = %s \n",inputimage.c_str()); - break; - case 'o': - outputimage = optarg; - LOG_INFO(" output image = %s \n",outputimage.c_str()); - break; - - default: - LOG_INFO("Invalid parameter specified. Please run sample with the -h flag to see required arguments\n"); - exit(0); - }; - } - /** - * To parse input,model and solution config from inputFile - */ - Configuration::getInstance().LoadConfiguration(inputFile); - - /** - * To access enabled soultion model - */ - vector selected_model; - /** - * To access enabled solution configuration - */ - vector solutions_config; - /** - * To intialize each enabled solution - */ - - for (auto i : Configuration::getInstance().solutionsconfig) { - /** - * To access solution configuration - */ - std::shared_ptr config = i.second; - /** - * To check if solution is enabled - */ - if (config->Enable == true) { - /** - * To access the input configuration - */ - config->input_config = Configuration::getInstance().inputconfigs[config->input_config_name]; - if (config->input_config == NULL) { - LOG_ERROR("NULL Input configuration for selected solution name = %s \n", config->solution_name.c_str()); - exit(1); - } - config->input_config->StreamNumber = i.first; - /** - * To access the model configuration - */ - config->model_config = Configuration::getInstance().modelsconfig[config->model_name]; - if (config->model_config == NULL) { - LOG_ERROR("NULL Model configuration for selected solution name = %s \n", config->solution_name.c_str()); - exit(1); - } - /** - * To store the enabled solution configuration - */ - solutions_config.emplace_back(*config); - /** - * Append the selected models - */ - selected_model.push_back(config->model_name); - } - } - /** - * Check if any solution is enabled - */ - if (selected_model.size() == 0) { - LOG_ERROR("Solution not enabled, Enable the desired solution in config.json file\n"); - exit(1); - } - if(inputimage.empty() || outputimage.empty()) - { - LOG_ERROR("Example: ai-solutions -c data/config.json -i image_path -o Output_path\n"); - return 0; - } - Inference_Image((void *)(&solutions_config[0]), inputimage, outputimage ); - - return 0; -} diff --git a/ai-solutions/QRB5165-embedded-linux/README.md b/ai-solutions/QRB5165-embedded-linux/README.md deleted file mode 100644 index 4414675a..00000000 --- a/ai-solutions/QRB5165-embedded-linux/README.md +++ /dev/null @@ -1,134 +0,0 @@ -## Table of Contents - -- [Table of Contents](#table-of-contents) -- [LE Build setup](#le-build-setup) -- [Generating ai-solutions binary](#generating-ai-solutions-binary) -- [Running ai-solutions application](#running-ai-solutions-application) - * [Details on Input arguments:](#details-on-input-arguments) - + [Sample config.json](#sample-configjson) - -## LE Build setup - -1. Follow "00023.4 Release Note for QRB5165.LE.1.0" to Setup "qti-distro-fullstack-debug" LE.1.0 build server for QRB5165 -2. Make sure "bitbake qti-robotics-image" is successful -3. Verify the "qti-distro-fullstack-debug" build by flashing on target using "QFIL" - -## Generating ai-solutions binary -1. 
Copy snpe-2.x folder to "/poky/meta-qti-ml-prop/recipes/snpe-sdk/files/snpe/". - ``` - cp -r /* /poky/meta-qti-ml-prop/recipes/snpe-sdk/files/snpe/ - ``` -2. Copy "meta-qti-ai-solutions" into "/poky/" folder - ``` - cp -r meta-qti-ai-solutions /poky/ - ``` -3. Copy SNPE,DiagLog,DlContainer,DlSystem and Wrapper.hpp - ``` - cp -r /include/SNPE/Dl* /poky/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ - cp -r /include/SNPE/DiagLog/ /poky/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ - cp -r /include/SNPE/Wrapper.hpp /poky/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ - cp -r /include/SNPE/SNPE/ /poky/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ - ``` -4. Update "snpe.bb" in "poky/meta-qti-ml-prop/recipes/snpe-sdk" folder - 1. Make sure platform "aarch64-oe-linux-gcc9.3" is selected - 2. Update DSP lib path - ``` - -- install -m 0755 ${S}/lib/dsp/* ${D}/${libdir}/rfsa/adsp - ++ install -m 0755 ${S}/lib/hexagon-v66/unsigned/lib* ${D}/${libdir}/rfsa/adsp - ``` -5. Run the following commands - ```bash - cd /poky - export MACHINE=qrb5165-rb5 DISTRO=qti-distro-fullstack-debug - source qti-conf/set_bb_env.sh - export PREBUILT_SRC_DIR="/prebuilt_HY11" - bitbake qti-robotics-image - ``` -6. Flash the latest build on target. (Note: Check if "ai-solutions" binary is generated in the "build-qti-distro-fullstack-debug/tmp-glibc/work/qrb5165_rb5-oe-linux/qti-robotics-image/1.0-r0/rootfs/usr/bin/" path) - -## Running ai-solutions application -1. Execute the following commands to remount the target - - ```bash - adb root - adb disable-verity - adb reboot - adb root - adb shell "mount -o remount,rw /" - ``` -2. Push "meta-qti-ai-solutions/recipes/ai-solutions/files/app/" and "SNPE-2.12" onto the device - ```bash - adb push - ``` -3. Execute the following commands to setup snpe on target - ```bash - adb shell - cd - cp -r lib/aarch64-oe-linux-gcc9.3/* /usr/lib/ - cp bin/aarch64-oe-linux-gcc9.3/snpe-net-run /usr/bin/ - cp -r lib/hexagon-v66/unsigned/lib* /usr/lib/rfsa/adsp/ - chmod +x /usr/bin/snpe-net-run - snpe-net-run --version - ``` - Expected output: SNPE v2.12.0.230626174329_59328 -4. Run ai-solutions application - ``` - adb shell - cd - - ```bash - export XDG_RUNTIME_DIR=/run/user/root - ``` - #### To run inference on input image - NOTE: Make sure "input-config-name":"image" in data/config.json - ```bash - ./out/ai-solutions -c ../data/config.json -i Sample1.jpg -o output.jpg - ``` - #### To run inference on camera stream - NOTE: Make sure "input-config-name":"camera" in data/config.json - ```bash - ./out/ai-solutions -c ../data/config.json - ``` - - ### Details on Input arguments: - - #### Sample config.json - model-config: - ```json - "model-configs":[ - - "model-name":"QSrnet-medium", -> model name which is used while enabling solution - "model-type":"superresolution", -> To specify the use case such superresolution or detection or segmentation etc.. 
- "model-path":"models/quicksrnet_medium_quantized.dlc", -> Path at which model is located on target - "runtime":"DSP", -> Select Runtime either CPU or DSP - "input-layers":[ -> Input layer of the model - "t.1" - ], - "output-layers":[ - "depth_to_space#1" -> Output layer of the model - ], - "output-tensors":[ - "65" -> Output node for post processing - ] - ] - ``` - - solution-config: - ```json - "solution-configs":[ - { - "solution-name":"AI-Solutions", -> To identify usecase - "model-name":"mobilenet-ssd", -> Specify model name to be executed - "input-config-name":"camera", -> To read input from camera stream - "Enable":1, -> Enable specific solution - "output-type":"wayland" -> To display output on monitor - }, - { - "solution-name":"AI-Solutions", - "model-name":"SRGAN", - "input-config-name":"image", -> To read input from image - "Enable":0, - "output-type":"wayland" - } - ] - ``` \ No newline at end of file diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/conf/layer.conf b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/conf/layer.conf deleted file mode 100644 index a182c92a..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/conf/layer.conf +++ /dev/null @@ -1,6 +0,0 @@ -BBFILES += "${LAYERDIR}/recipes/*/*.bb ${LAYERDIR}/recipes/*/*.bbappend ${LAYERDIR}/recipes-*/*/*.bb ${LAYERDIR}/recipes-*/*/*.bbappend" -BBPATH .= ":${LAYERDIR}" -BBFILE_COLLECTIONS += "ai-solutions" -BBFILE_PRIORITY_ai-solutions = "17" -BBFILE_PATTERN_ai-solutions := "^${LAYERDIR}/" -IMAGE_INSTALL_append = " ai-solutions " diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/ai-solutions.bb b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/ai-solutions.bb deleted file mode 100644 index e4a9099f..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/ai-solutions.bb +++ /dev/null @@ -1,26 +0,0 @@ -inherit cmake pkgconfig - -HOMEPAGE = "http://support.cdmatech.com" -LICENSE = "Qualcomm-Technologies-Inc.-Proprietary" -LIC_FILES_CHKSUM = "file://${COREBASE}/meta-qti-bsp-prop/files/qcom-licenses/\ -${LICENSE};md5=92b1d0ceea78229551577d4284669bb8" - -SUMMARY = "AI-Solutions on QRB5165" -DESCRIPTION = "AI-Solutions" - -LICENSE = "Qualcomm-Technologies-Inc.-Proprietary" - -SRC_URI = "file://app" -S = "${WORKDIR}/app" - -DEPENDS += " jsoncpp json-glib gflags gstreamer1.0 gstreamer1.0-plugins-base opencv snpe wayland gbm wayland-protocols wayland-native libxkbcommon gdk-pixbuf" -IMAGE_INSTALL_append = " ai-solutions " - -do_install(){ - install -d ${D}/${bindir} - install -m 0777 ${WORKDIR}/build/out/ai-solutions ${D}/${bindir} -} - -INSANE_SKIP_${PN} += "arch" - -FILES_${PN} += "${bindir}/*" diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMake/FindGStreamer.cmake b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMake/FindGStreamer.cmake deleted file mode 100644 index 3e4148c0..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMake/FindGStreamer.cmake +++ /dev/null @@ -1,5 +0,0 @@ -find_package(PkgConfig) -pkg_search_module(GLIB REQUIRED glib-2.0) -pkg_check_modules(GSTREAMER REQUIRED gstreamer-1.0) -pkg_check_modules(GST_APP REQUIRED gstreamer-app-1.0) -pkg_check_modules(GST_VIDEO REQUIRED gstreamer-video-1.0) \ No newline at end of file diff --git 
a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMakeLists.txt b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMakeLists.txt deleted file mode 100644 index 62db1972..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMakeLists.txt +++ /dev/null @@ -1,25 +0,0 @@ -# CMake lowest version requirement -cmake_minimum_required(VERSION 3.5.1) - -# project information -project(AI-SOLUTIONS) - -include(FindPkgConfig) -pkg_check_modules(JSONCPP REQUIRED jsoncpp) -pkg_check_modules(JSON REQUIRED json-glib-1.0) -pkg_check_modules(GFLAGS REQUIRED gflags) - -set(PROJECT_ROOT ${CMAKE_CURRENT_LIST_DIR}) -set(CMAKE_MODULE_PATH ${PROJECT_ROOT}/CMake) -set(CMAKE_CXX_STANDARD 17) - -find_package(GStreamer REQUIRED) -find_package(OpenCV REQUIRED ) - -add_subdirectory("./src") - -link_directories( - ${JSONCPP_LIBRARY_DIRS} - ${JSON_LIBRARY_DIRS} - ${GFLAGS_LIBRARY_DIRS} -) diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/data/config.json b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/data/config.json deleted file mode 100644 index ba2c94a0..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/data/config.json +++ /dev/null @@ -1,339 +0,0 @@ -{ - "input-configs":[ - { - "input-config-name":"camera", - "stream-type":"camera", - "stream-width":1280, - "stream-height":720, - "SkipFrame":1, - "fps-n":30, - "fps-d":1 - }, - { - "input-config-name":"image", - "stream-type":"image" - } - ], - "model-configs":[ - { - "model-name":"QSrnet-small", - "model-type":"superresolution", - "model-path":"models/quicksrnet_small_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "t.1" - ], - "output-layers":[ - "depth_to_space#1" - ], - "output-tensors":[ - "41" - ], - "global-threshold":0.2 - }, - { - "model-name":"QSrnet-medium", - "model-type":"superresolution", - "model-path":"models/quicksrnet_medium_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "t.1" - ], - "output-layers":[ - "depth_to_space#1" - ], - "output-tensors":[ - "65" - ], - "global-threshold":0.2 - }, - { - "model-name":"QSrnet-large", - "model-type":"superresolution", - "model-path":"models/quicksrnet_large_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "t.1" - ], - "output-layers":[ - "depth_to_space#1" - ], - "output-tensors":[ - "124" - ], - "global-threshold":0.2 - }, - { - "model-name":"XLSR", - "model-type":"superresolution", - "model-path":"models/xlsr_quantized.dlc", - "runtime": "DSP", - "input-layers":[ - "t.1" - ], - "output-layers":[ - "clipped_relu" - ], - "output-tensors":[ - "100" - ], - "global-threshold":0.2 - }, - { - "model-name":"SESR", - "model-type":"superresolution", - "model-path":"models/sesr_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "lr" - ], - "output-layers":[ - "DepthToSpace_52" - ], - "output-tensors":[ - "sr" - ], - "global-threshold":0.2 - }, - { - "model-name":"ESRGAN", - "model-type":"superresolution", - "model-path":"models/esrgan_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "keras_layer_input" - ], - "output-layers":[ - "convolution_168" - ], - "output-tensors":[ - "Identity" - ], - "global-threshold":0.2 - }, - - { - "model-name":"ssd-mobilenet-v2", - "model-type":"detection", - "model-path":"models/ssd_mobilenetV2_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, 
- "input-layers":[ - "input.1" - ], - "output-layers":[ - "Softmax_350", - "Concat_397" - ], - "output-tensors":[ - "935", - "986" - ], - "global-threshold":0.2 - }, - { - "model-name":"yolo-nas", - "model-type":"detection", - "model-path":"models/yolo_nas_s_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/heads/Sigmoid", - "/heads/Mul" - ], - "output-tensors":[ - "877", - "885" - ], - "global-threshold":0.2 - }, - { - "model-name":"yolo-x", - "model-type":"detection", - "model-path":"models/yolox_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.3, - "input-layers":[ - "images" - ], - "output-layers":[ - "Transpose_570" - ], - "output-tensors":[ - "output" - ], - "global-threshold":0.2 - }, - - { - "model-name":"ruas", - "model-type":"lowlight", - "model-path":"models/ruas_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "onnx::Pad_0" - ], - "output-layers":[ - "/denoise_net/Sub" - ], - "output-tensors":[ - "403" - ], - "global-threshold":0.2 - }, - { - "model-name":"SCI", - "model-type":"lowlight", - "model-path":"models/sci_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Clip" - ], - "output-tensors":[ - "30" - ], - "global-threshold":0.2 - }, - { - "model-name":"StableLLve", - "model-type":"lowlight", - "model-path":"models/StableLLVE_quantized_212.dlc", - "runtime":"DSP", - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/outc/conv/Conv" - ], - "output-tensors":[ - "248" - ], - "global-threshold":0.2 - }, - { - "model-name":"zero_dce", - "model-type":"lowlight", - "model-path":"models/zero_dce_quantized.dlc", - "runtime":"DSP", - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Add_7" - ], - "output-tensors":[ - "80" - ], - "global-threshold":0.2 - }, - - { - "model-name":"DeepLabv3Plus-resnet++", - "model-type":"segmentation", - "model-path":"models/DeepLabv3Plus_resnet101_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "Resize_284" - ], - "output-tensors":[ - "1089" - ], - "global-threshold":0.2 - }, - { - "model-name":"DeepLabv3-resnet101", - "model-type":"segmentation", - "model-path":"models/deeplabv3_resnet101_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Resize_1" - ], - "output-tensors":[ - "1089" - ], - "global-threshold":0.2 - }, - { - "model-name":"DeepLabv3-resnet50", - "model-type":"segmentation", - "model-path":"models/deeplabv3_resnet50_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Resize_1" - ], - "output-tensors":[ - "613" - ], - "global-threshold":0.2 - }, - { - "model-name":"FCN_resnet101", - "model-type":"segmentation", - "model-path":"models/fcn_resnet101_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Resize_1" - ], - "output-tensors":[ - "1018" - ], - "global-threshold":0.2 - }, - { - "model-name":"FCN_resnet50", - "model-type":"segmentation", - "model-path":"models/fcn_resnet50_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Resize_1" - ], - "output-tensors":[ - "542" - ], - "global-threshold":0.2 - } - ], - 
"solution-configs":[ - { - "solution-name":"AI-Solutions", - "model-name":"yolo-nas", - "input-config-name":"camera", - "Enable":1, - "output-type":"wayland" - } - ] -} diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Configuration.h b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Configuration.h deleted file mode 100644 index 6cce6ab6..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Configuration.h +++ /dev/null @@ -1,118 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef CONFIGURATION_H_ -#define CONFIGURATION_H_ - -#include -#include -#include -#include "Utils.h" - -using namespace cv; -using namespace std; - -const string input_configs = "input-configs"; -const string model_configs = "model-configs"; -const string solution_configs = "solution-configs"; - -// Input Configs; -const string pipeline_input_config = "input-config-name"; -const string stream_type = "stream-type"; -const string camera_url = "camera-url"; -const string skipframe = "SkipFrame"; - -// Model Configs -const string model_config_name = "model-name"; -const string model_type = "model-type"; -const string model_path = "model-path"; -const string runtime = "runtime"; -const string nms_threshold = "nms-threshold"; -const string conf_threshold = "conf-threshold"; -const string input_layers = "input-layers"; -const string output_layers = "output-layers"; -const string output_tensors = "output-tensors"; - -// Solution Configs -const string solution_name = "solution-name"; -const string model_name = "model-name"; -const string Enable = "Enable"; -const string solution_input_config = "input-config-name"; -const string output_type = "output-type"; - -class ObjectDetectionSnpeConfig { - public: - string model_name; - string model_type; - std::string model_path; - runtime_t runtime; - float nmsThresh; - float confThresh; - std::vector labels; - std::vector inputLayers; - std::vector outputLayers; - std::vector outputTensors; -}; - -class InputConfiguration{ - public: - int SkipFrame; - int StreamNumber=0; - string StreamType; - string Url; - string ConfigName; -}; - -class SolutionConfiguration { - public: - string solution_name; - string model_name; - string input_config_name; - bool Enable; - string output_type; - std::shared_ptr input_config; - std::shared_ptr model_config; -}; - -class DebugConfiguration -{ - public: - bool DumpData=false; - string Directory; -}; - -class Configuration -{ -public: - static Configuration &getInstance() - { - static Configuration instance; - return instance; - } - -private: - Configuration() {} -public: - Configuration(Configuration const &) = delete; - void operator=(Configuration const &) = delete; - - DebugConfiguration Debug; - ObjectDetectionSnpeConfig Config; - SolutionConfiguration Sol_Config; - std::unordered_map> inputconfigs; - std::unordered_map> modelsconfig; - std::unordered_map> solutionsconfig; - - void LoadConfiguration(string file); - int LoadInputConfig(Json::Value& input); - int LoadModelsConfig(Json::Value& models); - int LoadSolutionsConfig(Json::Value& solutions); -}; - -#endif diff 
--git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/DecodeQueue.h b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/DecodeQueue.h deleted file mode 100644 index 8ffdb0fa..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/DecodeQueue.h +++ /dev/null @@ -1,40 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef DECODE_QUEUE_H -#define DECODE_QUEUE_H - -#include "Detection.h" -#include -#include -#include -#include - -static const int DEFAULT_MAX_QUEUE_SIZE = 64; - -class DecodeQueue -{ -public: - DecodeQueue(uint32_t maxSize = DEFAULT_MAX_QUEUE_SIZE) : max_size_(maxSize), is_stoped_(false) {} - ~DecodeQueue() {} - int Dequeue(shared_ptr& item, unsigned int timeOutMs); - int Enqueue(const shared_ptr& item, bool isWait); - void Unlock(); - std::list> GetRemainItems(); - int IsEmpty(); -private: - std::list> queue_; - std::mutex mutex_; - std::condition_variable empty_cond_; - std::condition_variable full_cond_; - uint32_t max_size_; - bool is_stoped_; -}; - -#endif \ No newline at end of file diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Detection.h b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Detection.h deleted file mode 100644 index de81ba1e..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Detection.h +++ /dev/null @@ -1,62 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef DETECTION_H -#define DETECTION_H - -#include -#include -#include -#include - -using namespace std; -using namespace cv; - -struct ObjectData { - // Bounding box information: top-left coordinate and width, height - cv::Rect bbox; - // Confidence of this bounding box - float confidence = -1.0f; - // The label of this Bounding box - int label = -1; - // Time cost of detecting this frame - size_t time_cost = 0; - uint32_t Width=512; - uint32_t Height=512; - cv::Mat *output=NULL; - -}; - -struct Detection -{ - cv::Rect bbox; - float score; - int label; -}; - -struct DetectionDetail -{ - vector Result; - string ModelName; -}; - -struct DetectionItem -{ - uint32_t Width; - uint32_t Height; - uint32_t FrameId; - size_t Size; - string StreamName; - int StreamId; - shared_ptr ImageBuffer; -// vector Results; - ObjectData Results; -}; - -#endif diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/DetectionSnpe.h b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/DetectionSnpe.h deleted file mode 100644 index 566f7048..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/DetectionSnpe.h +++ /dev/null @@ -1,52 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef __Detection_IMPL_H__ -#define __Detection_IMPL_H__ - -#include -#include -#include -#include - -#include "SNPERuntime.h" -#include "ModelInference.h" -#include "Configuration.h" - -namespace detectionsnpe -{ - class DETECTIONSnpe - { - public: - DETECTIONSnpe(); - ~DETECTIONSnpe(); - bool Initialize(const ObjectDetectionSnpeConfig& config); - bool DeInitialize(); - bool Detect(cv::Mat input,cv::Mat& output_image,string model_name); - bool SetScoreThresh(const float& conf_thresh, const float& nms_thresh); - bool IsInitialized() const; - - private: - bool m_isInit; - float m_nmsThresh; - float m_confThresh; - std::unique_ptr m_snperuntime; - std::vector m_inputLayers; - std::vector m_outputLayers; - std::vector m_outputTensors; - - bool PreProcessInput(const cv::Mat& frame,string model_name); - bool PostProcess( cv::Mat image,cv::Mat& output_image,string model_name); - float computeIoU(const cv::Rect& a, const cv::Rect& b); - std::vector doNMS(std::vector winList, const float& nms_thresh); - }; - -} // namespace detection - -#endif // __DETECTION_IMPL_H__ diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/LowlightSnpe.h b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/LowlightSnpe.h deleted file mode 100644 index e6ee6b75..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/LowlightSnpe.h +++ /dev/null @@ -1,46 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef __LOWLIGHT_IMPL_H__ -#define __LOWLIGHT_IMPL_H__ - -#include -#include -#include -#include - -#include "SNPERuntime.h" -#include "ModelInference.h" -#include "Configuration.h" - -namespace lowlightsnpe -{ - class LOWLIGHTSnpe - { - public: - LOWLIGHTSnpe(); - ~LOWLIGHTSnpe(); - bool Initialize(const ObjectDetectionSnpeConfig& config); - bool DeInitialize(); - bool Detect(cv::Mat input,cv::Mat& output_image,string model_name); - bool IsInitialized() const; - - private: - bool m_isInit; - std::unique_ptr m_snperuntime; - std::vector m_inputLayers; - std::vector m_outputLayers; - std::vector m_outputTensors; - bool PreProcessInput(const cv::Mat& frame,string model_name); - bool PostProcess(cv::Mat& output_image,string model_name); - }; - -} // namespace lowlightsnpe - -#endif // __LOWLIGHT_IMPL_H__ diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ModelInference.h b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ModelInference.h deleted file mode 100644 index e95ca4e1..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ModelInference.h +++ /dev/null @@ -1,35 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef MODEL_INFERENCE_H_ -#define MODEL_INFERENCE_H_ -#include "DecodeQueue.h" -#include -#include -#include -#include -#include -#include "Configuration.h" - -class ModelInference{ -public: - ModelInference(); - ModelInference(const string model_name); - int Initialization(const ObjectDetectionSnpeConfig& config); - bool IsInitialized(); - bool UnInitialization(); - ~ModelInference(); - int Inference(cv::Mat input,cv::Mat& output_image,string model_name); -private: - void *Impl = nullptr; - enum Models{SUPERRESOLUTION, DETECTION,LOWLIGHT,SEGMENTATION}; - int Model; -}; - -#endif \ No newline at end of file diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SNPERuntime.h b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SNPERuntime.h deleted file mode 100644 index 854ae9bb..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SNPERuntime.h +++ /dev/null @@ -1,79 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef _SNPERUNTIME_H_ -#define _SNPERUNTIME_H_ - -#include -#include -#include -#include -#include - -#include "SNPE/SNPE.hpp" -#include "SNPE/SNPEFactory.hpp" -#include "SNPE/SNPEBuilder.hpp" -#include "DlSystem/DlEnums.hpp" -#include "DlSystem/DlError.hpp" -#include "DlSystem/ITensorFactory.hpp" -#include "DlSystem/IUserBufferFactory.hpp" -#include "DlSystem/TensorShape.hpp" -#include "DlContainer/IDlContainer.hpp" - -#include "Utils.h" - -namespace snperuntime { - -class SNPERuntime { -public: - SNPERuntime(); - - bool Initialize(const std::string& model_path, const runtime_t runtime); - bool Deinitialize(); - bool SetOutputLayers(std::vector& outputLayers); - - std::vector GetInputShape(const std::string& name); - std::vector GetOutputShape(const std::string& name); - - float* GetInputTensor(const std::string& name); - float* GetOutputTensor(const std::string& name); - - bool IsInit() { - return m_isInit; - } - - bool execute(); - -private: - bool m_isInit = false; - - std::unique_ptr m_container; - std::unique_ptr m_snpe; - zdl::DlSystem::Runtime_t m_runtime; - zdl::DlSystem::StringList m_outputLayers; - - std::map > m_inputShapes; - std::map > m_outputShapes; - - std::vector > m_inputUserBuffers; - std::vector > m_outputUserBuffers; - zdl::DlSystem::UserBufferMap m_inputUserBufferMap; - zdl::DlSystem::UserBufferMap m_outputUserBufferMap; - zdl::DlSystem::PerformanceProfile_t m_profile; - - void setTargetRuntime(const runtime_t runtime); - void setPerformanceProfile(const performance_t perfprofile); - - std::unordered_map> m_applicationInputBuffers; - std::unordered_map> m_applicationOutputBuffers; -}; - -} - -#endif // _SNPERUNTIME_H_ \ No newline at end of file diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SegmentationSnpe.h b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SegmentationSnpe.h deleted file mode 100644 index 022dd918..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SegmentationSnpe.h +++ /dev/null @@ -1,52 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef __Segmentation_IMPL_H__ -#define __Segmentation_IMPL_H__ - -#include -#include -#include -#include - -#include "SNPERuntime.h" -#include "ModelInference.h" -#include "Configuration.h" - -namespace segmentationsnpe -{ - - class SEGMENTATIONSnpe - { - public: - SEGMENTATIONSnpe(); - ~SEGMENTATIONSnpe(); - bool Initialize(const ObjectDetectionSnpeConfig& config); - bool DeInitialize(); - bool Detect(cv::Mat input,cv::Mat& output_image,string model_name); - bool SetScoreThresh(const float& conf_thresh, const float& nms_thresh); - bool IsInitialized() const; - - private: - bool m_isInit; - float m_nmsThresh; - float m_confThresh; - - std::unique_ptr m_snperuntime; - std::vector m_inputLayers; - std::vector m_outputLayers; - std::vector m_outputTensors; - - bool PreProcessInput(const cv::Mat& frame,string model_name); - bool PostProcess( cv::Mat image,cv::Mat& output_image,string model_name); - }; - -} // namespace segmentation - -#endif // __SEGMENTATION_IMPL_H__ diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/StreamDecode.h b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/StreamDecode.h deleted file mode 100644 index ff8c03c4..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/StreamDecode.h +++ /dev/null @@ -1,91 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef INC_STREAM_DECODE_H -#define INC_STREAM_DECODE_H - -#include "DecodeQueue.h" -#include "Configuration.h" -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -using namespace std; -/* Structure to contain all our information, so we can pass it to callbacks */ -typedef struct _PipelineData -{ - shared_ptr pipeline; - shared_ptr source; - shared_ptr main_capsfilter;; - shared_ptr videoDepay; - shared_ptr videoParse; - shared_ptr h264dec; - shared_ptr transform; - shared_ptr sink; -} PipelineData; - -typedef struct _FrameProcessData -{ - uint32_t frameId; - int interval = 25; - shared_ptr blockQueue; - string streamName; - int StreamId; -} FrameProcessData; - - -class StreamDecode -{ -public: - StreamDecode(std::string streamtype, std::string rtspUrl); - ~StreamDecode(); - int Initialization(shared_ptr &queue); - void UnInitialization(); - void DecodeAndInference(); - void SetSkipFrame(int interval); - void SetStreamName(string name); - void SetStreamId(int uuid); - - static void OnPadAdd(GstElement *element, GstPad *pad, gpointer data); - static GstFlowReturn OnAppsinkNewSample(GstElement *appsink, gpointer user_data); - void Stop(); -protected: - static void UnRefElement(GstElement *elem); - -private: - PipelineData data_; - shared_ptr bus_ = nullptr; - bool terminate_ = FALSE; - std::string StreamType; - FrameProcessData *frameProcess_ = nullptr; - int gst_camera_pipeline_init(); -}; - -class CaptureController -{ - public: - void CreateCapture(shared_ptr &pipeline_config, shared_ptr &gDecodeQueue); - void EndOfStream(int streamId); - void 
StopAll(); - void InterruptClose(); - - private: - map> decoder; - vector threads; -}; - -#endif \ No newline at end of file diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/StreamEncode.h b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/StreamEncode.h deleted file mode 100644 index 3705ed11..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/StreamEncode.h +++ /dev/null @@ -1,74 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef INC_STREAM_ENCODE_H -#define INC_STREAM_ENCODE_H - -#include "DecodeQueue.h" -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "Utils.h" -#include "Configuration.h" - -using namespace std; -/* Structure to contain all our information, so we can pass it to callbacks */ -typedef struct _EncodePipeline -{ - shared_ptr pipeline; - shared_ptr appsrc; - shared_ptr vidconv; - shared_ptr vtransform; - shared_ptr capsfilter; - shared_ptr videoscale; - shared_ptr x264enc; - shared_ptr h264parse; - shared_ptr qtmux; - shared_ptr waylandsink; - shared_ptr videoconvert; -} EncodePipeline; - -class StreamEncode{ -public: - StreamEncode()=default; - ~StreamEncode()=default; - int Initialization(string output_type); - void UnInitialization(); - void PushData(uint8_t *data, int len); - int Loop(); - void Stop(); -private: - EncodePipeline data; - shared_ptr bus=nullptr; - bool terminate=false; - string outputFile; - int gst_wayland_pipeline_init(string output_type); -}; - -class EncodeController -{ - public: - void CreateEncoder(std::shared_ptr sol_conf); - void EncodeFrame(int streamId, uint8_t *pushData, int len); - void EndOfStream(int streamId); - void Stop(); - void InterruptClose(); - private: - map> encoders; - vector threads; -}; -#endif \ No newline at end of file diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SuperresolutionSnpe.h b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SuperresolutionSnpe.h deleted file mode 100644 index e90e91d9..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SuperresolutionSnpe.h +++ /dev/null @@ -1,46 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef __SUPERRES_IMPL_H__ -#define __SUPERRES_IMPL_H__ - -#include -#include -#include -#include - -#include "SNPERuntime.h" -#include "ModelInference.h" -#include "Configuration.h" - -namespace superressnpe -{ - class SUPERRESSnpe - { - public: - SUPERRESSnpe(); - ~SUPERRESSnpe(); - bool Initialize(const ObjectDetectionSnpeConfig& config); - bool DeInitialize(); - bool Detect(cv::Mat input,cv::Mat& output_image,string model_name); - bool IsInitialized() const; - - private: - bool m_isInit; - std::unique_ptr m_snperuntime; - std::vector m_inputLayers; - std::vector m_outputLayers; - std::vector m_outputTensors; - - bool PreProcessInput(const cv::Mat& frame,string model_name); - bool PostProcess(cv::Mat& output_image,string model_name); - }; -} // namespace superressnpe - -#endif // __SUPERRES_IMPL_H__ diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Utils.h b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Utils.h deleted file mode 100644 index 5f0c95d8..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Utils.h +++ /dev/null @@ -1,98 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef UTILS_H_ -#define UTILS_H_ - -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace std; -using namespace cv; - -using chrono::high_resolution_clock; -using chrono::duration_cast; -using chrono::duration; -using chrono::milliseconds; - -#define QS_SUCCESS 0 -#define QS_ERROR -1 - -#define PRINT(fmt, ...) { \ - printf(fmt, ##__VA_ARGS__); \ -} - -#define LOG(level, fmt, ...) { \ - PRINT("[%s] - %s: " fmt, #level, __func__, ##__VA_ARGS__); \ -} - -//#define DEBUG -#ifdef DEBUG - #define LOG_DEBUG(fmt, ...) LOG(DEBUG, fmt, ##__VA_ARGS__) -#else - #define LOG_DEBUG(fmt, ...) ((void)0) -#endif - -#define LOG_INFO(fmt, ...) { \ - LOG(INFO, fmt, ##__VA_ARGS__); \ -} - -#define LOG_WARN(fmt, ...) { \ - LOG(WARN, fmt, ##__VA_ARGS__); \ -} - -#define LOG_ERROR(fmt, ...) { \ - LOG(ERROR, fmt, ##__VA_ARGS__); \ -} - -#define IMAGE_CHAN_SIZE_F32(width, height) ((width) * (height)*4) -#define RGB_IMAGE_SIZE_F32(width, height) ((width) * (height)*3 * 4) - -// Inference hardware runtime. -typedef enum runtime { - CPU = 0, - DSP -}runtime_t; - -typedef enum PerformanceProfile { - DEFAULT = 0, - /// Run in a balanced mode. - BALANCED = 0, - /// Run in high performance mode - HIGH_PERFORMANCE = 1, - /// Run in a power sensitive mode, at the expense of performance. - POWER_SAVER = 2, - /// Use system settings. SNPE makes no calls to any performance related APIs. - SYSTEM_SETTINGS = 3, - /// Run in sustained high performance mode - SUSTAINED_HIGH_PERFORMANCE = 4, - /// Run in burst mode - BURST = 5, - /// Run in lower clock than POWER_SAVER, at the expense of performance. - LOW_POWER_SAVER = 6, - /// Run in higher clock and provides better performance than POWER_SAVER. 
- HIGH_POWER_SAVER = 7, - /// Run in lower balanced mode - LOW_BALANCED = 8, -}performance_t; - -template -void ClearVector(std::vector& vt) -{ - std::vector vtTemp; - vtTemp.swap(vt); -} - -#endif diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/models/README.md b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/models/README.md deleted file mode 100644 index 22397b6f..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/models/README.md +++ /dev/null @@ -1 +0,0 @@ -Place dlc files in this folder diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/CMakeLists.txt b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/CMakeLists.txt deleted file mode 100644 index a0d4b817..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/CMakeLists.txt +++ /dev/null @@ -1,39 +0,0 @@ -cmake_minimum_required(VERSION 3.5.1) - - -# Compile options -add_compile_options(-std=c++11) - -set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "../out") -set(CMAKE_CXX_FLAGS_DEBUG "-fPIC -O0 -g -Wall") -set(CMAKE_CXX_FLAGS_RELEASE "-fPIC -O2 -Wall") - -message(STATUS "source file path" ${PROJECT_SRC_ROOT}) - -file(GLOB_RECURSE SRC_FILE - ../src/*.cpp -) - -set(SOURCE_FILE - ${SRC_FILE} -) - -add_executable(ai-solutions ${SRC_FILE}) -target_compile_options(ai-solutions PUBLIC -fPIC -O0 -g -Wall -Wnon-virtual-dtor) - -# Header path -include_directories( - "../inc" - "/usr/include/glib-2.0" - "/usr/lib/aarch64-linux-gnu/glib-2.0/include" - "/usr/include/gstreamer-1.0" - "/usr/local/include/opencv4" - ${OpenCV_INCLUDE_DIRS} - ${JSON_INCLUDE_DIRS} - ${JSONCPP_INCLUDE_DIRS} -) - -message(STATUS "JSON file path" ${JSON_INCLUDE_DIRS}) -message(STATUS "JSONCPP file path" ${JSONCPP_INCLUDE_DIRS}) - -target_link_libraries(ai-solutions PUBLIC pthread dl ${OpenCV_LIBS} ${GST_APP_LIBRARIES} ${JSON_LIBRARIES} jsoncpp SNPE jsoncpp) diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/Configuration.cpp b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/Configuration.cpp deleted file mode 100644 index 60e711dc..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/Configuration.cpp +++ /dev/null @@ -1,152 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include "Configuration.h" -#include "Utils.h" -#include - -/** @brief To convert runtime from string to int - * @param device which contains runtime as a string - * @return int value corresponding to runtime -*/ - -static runtime_t device2runtime(std::string&& device) -{ - /** - * To convert all characters to lower case - */ - std::transform(device.begin(), device.end(), device.begin(), - [](unsigned char ch){ return tolower(ch); }); - - if (0 == device.compare("dsp")) - { - return DSP; - } - else - { - return CPU; - } -} - -/** @brief To parse Input config from config file - * @param input contains input config array -*/ -int Configuration::LoadInputConfig(Json::Value& input) -{ - if (input.isArray()) - { - int size = input.size(); - for (int i = 0; i < size; ++i) - { - std::shared_ptr inputconfig = std::shared_ptr(new InputConfiguration()); - inputconfig->ConfigName = input[i][pipeline_input_config].asString(); - inputconfig->StreamType = input[i][stream_type].asString(); - inputconfig->Url = input[i][camera_url].asString(); - inputconfig->SkipFrame = input[i][skipframe].asInt(); - inputconfigs[inputconfig->ConfigName] = inputconfig; - } - } - LOG_INFO("Input streams size=%u \n", input.size()); - return 0; -} - -/** @brief To parse model config - * @param models contains model config array - */ - -int Configuration::LoadModelsConfig(Json::Value& models) -{ - std::string line; - if (models.isArray()) - { - int size = models.size(); - for (int i = 0; i < size; ++i) - { - std::shared_ptr modelconfig = - std::shared_ptr(new ObjectDetectionSnpeConfig()); - modelconfig->model_name = models[i][model_config_name].asString(); - modelconfig->model_type = models[i][model_type].asString(); - modelconfig->model_path = models[i][model_path].asString(); - modelconfig->runtime = device2runtime(models[i][runtime].asString()); - modelconfig->nmsThresh = models[i][nms_threshold].asFloat(); - modelconfig->confThresh = models[i][conf_threshold].asFloat(); - - /** - * To access input layer names from config - */ - if (models[i]["input-layers"].isArray()) { - int num = models[i]["input-layers"].size(); - for (int j= 0; j < num; j++) { - modelconfig->inputLayers.push_back(models[i]["input-layers"][j].asString()); - } - } - /** - * To access output layer names from config - */ - if (models[i][output_layers].isArray()) { - int num = models[i]["output-layers"].size(); - for (int j = 0; j < num; j++) { - modelconfig->outputLayers.push_back(models[i]["output-layers"][j].asString()); - } - } - /** - * To access output tensor names from config - */ - if (models[i][output_tensors].isArray()) { - int num = models[i]["output-tensors"].size(); - for (int j = 0; j < num; j++) { - modelconfig->outputTensors.push_back(models[i]["output-tensors"][j].asString()); - } - } - - modelsconfig[modelconfig->model_name] = modelconfig; - } - } - - LOG_INFO("modelsconfig size = %lu \n", modelsconfig.size()); - return 0; -} - -/** @brief To parse solution config - * @param solutions contains solution array - * -*/ - -int Configuration::LoadSolutionsConfig(Json::Value& solutions) { - if (solutions.isArray()) { - int size = solutions.size(); - for (int i = 0; i < size; ++i) { - std::shared_ptr solutionconfig = std::shared_ptr(new SolutionConfiguration()); - solutionconfig->solution_name = solutions[i][solution_name].asString(); - solutionconfig->model_name = 
solutions[i][model_name].asString(); - solutionconfig->Enable = solutions[i][Enable].asBool(); - solutionconfig->input_config_name = solutions[i][solution_input_config].asString(); - solutionconfig->output_type = solutions[i][output_type].asString(); - solutionsconfig[i] = solutionconfig; - } - } - LOG_DEBUG("Solutions size %lu", solutionsconfig.size() ); - return 0; -} - - -/** @brief To parse config file - * @param configFilePath contains json file passed as an argument -*/ -void Configuration::LoadConfiguration(string configFilePath) -{ - Json::Reader reader; - Json::Value root; - std::ifstream in(configFilePath, std::ios::binary); - reader.parse(in, root); - - LoadInputConfig(root[input_configs]); - LoadModelsConfig(root[model_configs]); - LoadSolutionsConfig(root[solution_configs]); -} \ No newline at end of file diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/DecodeQueue.cpp b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/DecodeQueue.cpp deleted file mode 100644 index c6876372..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/DecodeQueue.cpp +++ /dev/null @@ -1,118 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include "DecodeQueue.h" - -/** @brief To access frames from source - * @param item to store the frame - * @param timeOutMs to wait for frame till timeout - * @return 0 if success -*/ - -int DecodeQueue::Dequeue(shared_ptr &item, unsigned int timeOutMs) -{ - std::unique_lock lock(mutex_); - auto realTime = std::chrono::milliseconds(timeOutMs); - - while (queue_.empty() && !is_stoped_) - { - empty_cond_.wait_for(lock, realTime); - } - /** - * To check if pipeline is stopped - */ - if (is_stoped_) - { - return 1; - } - /** - * To check if queue is emtpy - */ - else if (queue_.empty()) - { - return 2; - } - else - { - item = queue_.front(); - queue_.pop_front(); - } - - full_cond_.notify_one(); - - return 0; -} - -/** @brief To enqueue the frames to display or save - * @param item to push into the queue - * @param isWait to wait for frame till timeout -*/ - -int DecodeQueue::Enqueue(const shared_ptr &item, bool isWait) -{ - std::unique_lock lock(mutex_); - while (queue_.size() >= max_size_ && isWait && !is_stoped_) - { - full_cond_.wait(lock); - } - /** - * To check if pipeline is stopped - */ - if (is_stoped_) - { - return 1; - } - /** - * To check if queue_ size is greater than max size - */ - else if (queue_.size() >= max_size_) - { - return 3; - } - queue_.push_back(item); - empty_cond_.notify_one(); - return 0; -} - -/** @brief To stop the pipeline -*/ - -void DecodeQueue::Unlock() -{ - { - std::unique_lock lock(mutex_); - is_stoped_ = true; - } - - full_cond_.notify_all(); - empty_cond_.notify_all(); -} - -/** @brief To inference the remaining items -*/ -std::list> DecodeQueue::GetRemainItems() -{ - std::unique_lock lock(mutex_); - /** - * To check if pipeline is stopped - */ - if (!is_stoped_) - { - return std::list>(); - } - - return queue_; -} - -/** @brief To check if queue is empty -*/ -int DecodeQueue::IsEmpty() -{ - return queue_.empty(); -} \ No newline at end of file diff --git 
a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/DetectionSnpe.cpp b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/DetectionSnpe.cpp deleted file mode 100644 index aa2ba713..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/DetectionSnpe.cpp +++ /dev/null @@ -1,543 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -#include -#include -#include "Configuration.h" -#include "DetectionSnpe.h" - -namespace detectionsnpe -{ - - /** @brief Constructor - */ - DETECTIONSnpe::DETECTIONSnpe() : m_isInit(false),m_snperuntime(nullptr) - { - - } - - /** @brief Destructor - */ - DETECTIONSnpe::~DETECTIONSnpe() - { - DeInitialize(); - } - - /** @brief To read model config and set output layers - * @param config model config parameters - * @return true if success;false otherwise - */ - bool DETECTIONSnpe::Initialize(const ObjectDetectionSnpeConfig& config) - { - m_snperuntime = std::move(std::unique_ptr(new snperuntime::SNPERuntime())); - - m_inputLayers = config.inputLayers; - m_outputLayers = config.outputLayers; - m_outputTensors = config.outputTensors; - m_nmsThresh = config.nmsThresh; - m_confThresh = config.confThresh; - - /** - * To set output layer from model config - */ - m_snperuntime->SetOutputLayers(m_outputLayers); - /** - * To initialize snperuntime - */ - if (!m_snperuntime->Initialize(config.model_path, config.runtime)) - { - LOG_ERROR("Failed to Initialize snpe instance.\n"); - return false; - } - - m_isInit = true; - return true; - } - - /** @brief To deallocate buffers and reset - */ - bool DETECTIONSnpe::DeInitialize() - { - if (m_isInit) - { - m_snperuntime->Deinitialize(); - m_snperuntime.reset(nullptr); - } - m_isInit = false; - return true; - } - - bool DETECTIONSnpe::SetScoreThresh(const float& conf_thresh, const float& nms_thresh = 0.5) - { - this->m_nmsThresh = nms_thresh; - this->m_confThresh = conf_thresh; - return true; - } - - bool DETECTIONSnpe::IsInitialized() const - { - return m_isInit; - } - - /** @brief To preprocess input image - * @param input_image Input image for inference - * @return true if succuess; false otherwise - */ - bool DETECTIONSnpe::PreProcessInput(const cv::Mat& input_image,string model_name) - { - if (input_image.empty()) { - LOG_ERROR("Invalid image!\n"); - return false; - } - - auto inputShape = m_snperuntime->GetInputShape(m_inputLayers[0]); - size_t model_h = inputShape[1]; - size_t model_w = inputShape[2]; - - if (m_snperuntime->GetInputTensor(m_inputLayers[0]) == nullptr) - { - LOG_ERROR("Empty input tensor\n"); - return false; - } - - cv::Mat image = cv::Mat(model_h,model_w, CV_32FC3, Scalar(0.)); - cv::resize(input_image,image,cv::Size(model_h,model_w)); - cv::Mat input(model_h, model_w, CV_32FC3, m_snperuntime->GetInputTensor(m_inputLayers[0])); - - if(model_name.compare("ssd-mobilenet-v2") == 0 ) - { - cv::cvtColor(image, image, cv::COLOR_BGR2RGB); - image.convertTo(image, CV_32S); - subtract(image,Scalar(123.0, 117.0, 104.0),image); - image.convertTo(input, CV_32FC3, 1.0); - } - else if(model_name.compare("yolo-nas") == 0) - { - image.convertTo(input, CV_32FC3, 
1/255.0); - } - else if(model_name.compare("yolo-x") == 0) - { - image.convertTo(input, CV_32FC3, 1.0); - } - - return true; - } - - /** @brief To preprocess,execute and postprocess - * @param input_image Input image for inference - * @param output_image Inference output image - * @param model_name To identify model for specific post-processing - * @return true if success; false otherwise - */ - bool DETECTIONSnpe::Detect(cv::Mat image,cv::Mat& output_image,string model_name) - { - /** - * Preprocessing image - */ - if(PreProcessInput(image, model_name) != true) - { - LOG_ERROR("PreProcess failed\n"); - return false; - } - /** - * Inferencing model on target - */ - if (!m_snperuntime->execute()) { - LOG_ERROR("SNPERuntime execute failed."); - return false; - } - /** - * Postprocessing to extract bounding boxes - */ - if(PostProcess(image,output_image,model_name) != true) - { - LOG_ERROR("PostProcess failed\n"); - return false; - } - return true; - } - - float DETECTIONSnpe::computeIoU(const cv::Rect& a, const cv::Rect& b) - { - float xOverlap = std::max( - 0., - std::min(a.x + a.width, b.x + b.width) - std::max(a.x, b.x) + 1.); - float yOverlap = std::max( - 0., - std::min(a.y + a.height, b.y + b.height) - std::max(a.y, b.y) + 1.); - float intersection = xOverlap * yOverlap; - float unio = - (a.width + 1.) * (a.height + 1.) + - (b.width + 1.) * (b.height + 1.) - intersection; - return intersection / unio; - } - - std::vector DETECTIONSnpe::doNMS(std::vector winList, const float& nms_thresh) - { - if (winList.empty()) { - return winList; - } - - std::sort(winList.begin(), winList.end(), [] (const ObjectData& left, const ObjectData& right) { - if (left.confidence > right.confidence) { - return true; - } else { - return false; - } - }); - - std::vector flag(winList.size(), false); - for (unsigned int i = 0; i < winList.size(); i++) { - if (flag[i]) { - continue; - } - - for (unsigned int j = i + 1; j < winList.size(); j++) { - if (computeIoU(winList[i].bbox, winList[j].bbox) > nms_thresh) { - flag[j] = true; - } - } - } - - std::vector ret; - for (unsigned int i = 0; i < winList.size(); i++) { - if (!flag[i]) - ret.push_back(winList[i]); - } - return ret; - } - - /** @brief Object Detection postprocess - * @param output_image Image with bounding boxes - * @param model_name To identify model for specific post-processing - */ - bool DETECTIONSnpe::PostProcess( cv::Mat image,cv::Mat& output_image,string model_name) - { - int width = image.cols, height = image.rows; - cv::resize(image,output_image,cv::Size(width,height)); - if(model_name.compare("ssd-mobilenet-v2") == 0) - { - vectorclasses = { - "background","aeroplane","bicycle","bird","boat", - "bottle","bus","car","cat","chair","cow", - "diningtable","dog","horse","motorbike","person", - "pottedplant","sheep","sofa","train","tvmonitor", - }; - - auto outputShape_score = m_snperuntime->GetOutputShape(m_outputTensors[0]); - int elements_score = outputShape_score[1]; - int channels_score = outputShape_score[2]; - - auto outputShape_box = m_snperuntime->GetOutputShape(m_outputTensors[1]); - float *score_confidence = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - float *box_coordinates = m_snperuntime->GetOutputTensor(m_outputTensors[1]); - - if( (score_confidence == nullptr) || (box_coordinates == nullptr) ) - { - return false; - } - for(size_t class_index = 1; class_index winList; - for(int row=0; row m_confThresh && (class_pred==class_index) ) - { - ObjectData rect; - rect.bbox.x = box_coordinates[row*4 ] * width; - rect.bbox.y = 
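
For quick reference, the overlap used by computeIoU/doNMS above follows an inclusive-pixel convention: each box is treated as spanning (width + 1) by (height + 1) pixels, and IoU = intersection / (areaA + areaB - intersection). A standalone restatement of that same convention, assuming cv::Rect boxes and intended only as a sketch for reuse or unit testing:

```cpp
// Sketch of the IoU convention used by computeIoU above
// (inclusive pixel coordinates, hence the +1 terms).
#include <algorithm>
#include <opencv2/core.hpp>

static float IntersectionOverUnion(const cv::Rect& a, const cv::Rect& b)
{
    const float xOverlap = std::max(
        0.f, std::min(a.x + a.width, b.x + b.width) - std::max(a.x, b.x) + 1.f);
    const float yOverlap = std::max(
        0.f, std::min(a.y + a.height, b.y + b.height) - std::max(a.y, b.y) + 1.f);
    const float inter = xOverlap * yOverlap;
    const float uni = (a.width + 1.f) * (a.height + 1.f) +
                      (b.width + 1.f) * (b.height + 1.f) - inter;
    return uni > 0.f ? inter / uni : 0.f;
}
```

With the default nms threshold of 0.5, doNMS keeps the highest-scoring box and drops any later candidate whose IoU with an already-kept box exceeds 0.5.
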
box_coordinates[row*4+ 1] * height; - rect.bbox.width = box_coordinates[row*4 + 2] * width; - rect.bbox.height = box_coordinates[row*4 + 3] * height; - rect.confidence = value; - rect.label = class_pred; - winList.push_back(rect); - } - } - } - winList = doNMS(winList, m_nmsThresh); - for(size_t i =0;i classes = { - "person", "bicycle", "car", "motorcycle", "airplane", "bus", - "train", "truck", "boat", "traffic", "fire", "stop", "parking", - "bench", "bird", "cat", "dog", "horse", "sheep", "cow", - "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", - "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", - "sports", "kite", "baseball", "baseball", "skateboard", "surfboard", - "tennis", "bottle", "wine", "cup", "fork", "knife","spoon", - "bowl", "banana", "apple", "sandwich", "orange", "broccoli", - "carrot", "hot", "pizza", "donut", "cake", "chair", "couch", - "potted", "bed", "dining", "toilet", "tv", "laptop", "mouse", - "remote", "keyboard", "cell", "microwave", "oven", "toaster", - "sink", "refrigerator", "book", "clock", "vase", "scissors", - "teddy", "hair", "toothbrush" - }; - - float *class_scores = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - auto outputShape_scores = m_snperuntime->GetOutputShape(m_outputTensors[0]); - float *bboxes = m_snperuntime->GetOutputTensor(m_outputTensors[1]); - auto outputShape_bboxes = m_snperuntime->GetOutputShape(m_outputTensors[1]); - - if( ( class_scores == nullptr) || (bboxes == nullptr) ) - { - return false; - } - float ratio1 = width/320.0; - float ratio2 = height/320.0; - - int out_coordinates = outputShape_scores[1]; - int out_scores = outputShape_scores[2]; - - std::vector winList; - for(int i =0;i= m_confThresh) - { - float x1 = bboxes[i*4 ]*ratio1; - float y1 = bboxes[i*4 + 1]*ratio2; - float x2 = bboxes[i*4 + 2]*ratio1; - float y2 = bboxes[i*4 + 3]*ratio2; - ObjectData rect; - rect.bbox.x = x1 ; - rect.bbox.y = y1 ; - rect.bbox.width = x2 - x1; - rect.bbox.height = y2 - y1; - rect.confidence = class_scores[out_scores*i + j]; - rect.label = j; - winList.push_back(rect); - } - } - } - winList = doNMS(winList,m_nmsThresh); - for(size_t i =0;i classes = { - "person", "bicycle", "car", "motorcycle", "airplane", "bus", - "train", "truck", "boat", "traffic", "fire", "stop", "parking", - "bench", "bird", "cat", "dog", "horse", "sheep", "cow", - "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", - "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", - "sports", "kite", "baseball", "baseball", "skateboard", "surfboard", - "tennis", "bottle", "wine", "cup", "fork", "knife","spoon", - "bowl", "banana", "apple", "sandwich", "orange", "broccoli", - "carrot", "hot", "pizza", "donut", "cake", "chair", "couch", - "potted", "bed", "dining", "toilet", "tv", "laptop", "mouse", - "remote", "keyboard", "cell", "microwave", "oven", "toaster", - "sink", "refrigerator", "book", "clock", "vase", "scissors", - "teddy", "hair", "toothbrush" - }; - - float *scores = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - auto outputShape = m_snperuntime->GetOutputShape(m_outputTensors[0]); - - if(scores == nullptr) - { - return false; - } - int model_h = outputShape[1]; - int model_w = outputShape[2]; - float output[model_h][model_w]; - - for(int i=0;i grid; - static vector expanded_stride; - static int sum=0; - if(flag == false) - { - const int strides[3] = {8, 16, 32}; - int hsizes[3] = {80, 40, 20}; - int wsizes[3] = {80, 40, 20}; - - vector> grids, expanded_strides; - - for(int i=0;i<3;i++) - { - vector grid; - 
vector expanded_stride; - for(int j=0; j> boxes; - vector> scores_vec; - for(int i=0;i box; - for(int j=0;j<4;j++) - { - box.push_back(output[i][j]); - } - boxes.push_back(box); - } - - for(int i=0;i score; - float val = output[i][4]; - for(int j=5;j<85;j++) - { - score.push_back(output[i][j] * val); - } - scores_vec.push_back(score); - } - - std::vector winList; - for(int i=0;i=m_confThresh) - { - for(int j=0;j<4;j++) - { - int x1 = boxes[i][0]; - int y1 = boxes[i][1]; - int x2 = boxes[i][2]; - int y2 = boxes[i][3]; - - int x = (int)(x1 - x2/2); - int y = (int)(y1 - y2/2); - int w = (int)(x1 + x2/2); - int h = (int)(y1 + y2/2); - - ObjectData rect; - float ratio1 = width/640.0; - float ratio2 = height/640.0; - rect.bbox.x = x * ratio1; - rect.bbox.y = y * ratio2; - rect.bbox.width = w *ratio1; - rect.bbox.height = h *ratio2; - rect.confidence = maxScore; - rect.label = maxClassIndex.y; - - winList.push_back(rect); - } - } - } - - winList = doNMS(winList, m_nmsThresh); - for(size_t i =0;i -#include -#include -#include "Configuration.h" -#include "LowlightSnpe.h" - -namespace lowlightsnpe -{ - - /** @brief Constructor - */ - LOWLIGHTSnpe::LOWLIGHTSnpe() : m_isInit(false),m_snperuntime(nullptr) - { - - } - - /** @brief Destructor - */ - LOWLIGHTSnpe::~LOWLIGHTSnpe() - { - DeInitialize(); - } - - /** @brief To read model config and set output layers - * @param config model config parameters - * @return true if success;false otherwise - */ - bool LOWLIGHTSnpe::Initialize(const ObjectDetectionSnpeConfig& config) - { - m_snperuntime = std::move(std::unique_ptr(new snperuntime::SNPERuntime())); - - m_inputLayers = config.inputLayers; - m_outputLayers = config.outputLayers; - m_outputTensors = config.outputTensors; - - /** - * To set output layer from model config - */ - m_snperuntime->SetOutputLayers(m_outputLayers); - /** - * To initialize snperuntime - */ - if (!m_snperuntime->Initialize(config.model_path, config.runtime)) - { - LOG_ERROR("Failed to Initialize snpe instance.\n"); - return false; - } - - m_isInit = true; - return true; - } - - - /** @brief To deallocate buffers and reset - */ - bool LOWLIGHTSnpe::DeInitialize() - { - if (m_isInit) - { - m_snperuntime->Deinitialize(); - m_snperuntime.reset(nullptr); - } - - m_isInit = false; - return true; - } - - bool LOWLIGHTSnpe::IsInitialized() const - { - return m_isInit; - } - - /** @brief To preprocess input image - * @param input_image Input image for inference - * @return true if succuess; false otherwise - */ - bool LOWLIGHTSnpe::PreProcessInput(const cv::Mat& input_image,string model_name) - { - if (input_image.empty()) { - LOG_ERROR("Invalid image!\n"); - return false; - } - - auto inputShape = m_snperuntime->GetInputShape(m_inputLayers[0]); - int model_h = inputShape[1]; - int model_w = inputShape[2]; - int channels = inputShape[3]; - - cv::Mat image(model_h, model_w, CV_32FC3,cv::Scalar(0.0)); - cv::resize(input_image,image,cv::Size(model_h,model_w),cv::INTER_CUBIC); - cv::cvtColor(image, image, cv::COLOR_BGR2RGB); - - cv::Mat input(model_h, model_w, CV_32FC3, cv::Scalar(0.0)); - image.convertTo(input, CV_32FC3,1.0); - - vector app_vect; - - if (input.isContinuous()) - { - app_vect.assign((float*)input.data, (float*)input.data + input.total()*input.channels()); - } - else - { - for (int i = 0; i < input.rows; ++i) - { - app_vect.insert(app_vect.end(), input.ptr(i), input.ptr(i)+input.cols*input.channels()); - } - } - - float ***app = new float**[model_w]; - for (int i = 0; i < model_w; ++i) - { - app[i] = new float*[model_h]; - for 
(int j = 0; j < model_h; ++j) - app[i][j] = new float[channels]; - } - - for(int i = 0;iGetInputTensor(m_inputLayers[0]); - if (input_tensor == nullptr) { - LOG_ERROR("Empty input tensor\n"); - return false; - } - - float* pdata = (float*)(input.data); - for(int i = 0;iexecute()) { - LOG_ERROR("SNPERuntime execute failed."); - return false; - } - /** - * Postprocessing - */ - if(PostProcess(output_image,model_name) != true) - { - LOG_ERROR("PostProcess failed\n"); - return false; - } - return true; - } - - /** @brief Superres postprocess - * @param output_image Enhanced image - * @param model_name To identify model for specific post-processing - */ - bool LOWLIGHTSnpe::PostProcess(cv::Mat& output_image,string model_name) - { - auto outputShape = m_snperuntime->GetOutputShape(m_outputTensors[0]); - float *predOutput = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - - if(predOutput == nullptr) - { - return false; - } - int height = outputShape[1]; - int width = outputShape[2]; - int channels = outputShape[3]; - - cv::Mat temp0(cv::Size(width,height), CV_32FC3, predOutput); - cv::cvtColor(temp0, temp0, cv::COLOR_RGB2BGR); - - vector app_vect; - - if (temp0.isContinuous()) - { - app_vect.assign((float*)temp0.data, (float*)temp0.data + temp0.total()*temp0.channels()); - } - else - { - for (int i = 0; i < temp0.rows; ++i) - { - app_vect.insert(app_vect.end(), temp0.ptr(i), temp0.ptr(i)+temp0.cols*temp0.channels()); - } - } - - float ***app = new float**[channels]; - for (int i = 0; i < channels; ++i) - { - app[i] = new float*[width]; - for (int j = 0; j < width; ++j) - app[i][j] = new float[height]; - } - - for(int i = 0;i app_t_vec; - - for(int i = 0;i255.0) - x = 255.0; - app_t_vec.push_back(x); - } - } - } - - output_image = cv::Mat(width, height, CV_32FC3,cv::Scalar(0.0)); - float* pdata = (float*)(output_image.data); - for (int i = 0; i < channels*width*height; i++) - { - float x = app_t_vec[i]; - *pdata = x; - pdata += 1; - } - output_image.convertTo(output_image,CV_8UC3); - - for (int i = 0; i < channels; ++i) - { - for (int j = 0; j < width; ++j) - { - delete [] app[i][j]; - } - delete [] app[i]; - } - delete [] app; - app = NULL; - - return true; - } - -} // namespace lowlightsnpe diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/ModelInference.cpp b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/ModelInference.cpp deleted file mode 100644 index ae01891d..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/ModelInference.cpp +++ /dev/null @@ -1,221 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
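
The pre- and post-processing in LowlightSnpe above shuffles pixel data between OpenCV's interleaved HWC layout and a planar, channel-first view using nested heap-allocated arrays. The same kind of layout change can be written with flat indexing, which is easier to check; the sketch below is illustrative only (the `HwcToChw` name and the flat `std::vector<float>` buffers are assumptions, not the project's code).

```cpp
// Sketch: interleaved HWC -> planar CHW copy using flat indexing.
// src and dst each hold H * W * C floats.
#include <cstddef>
#include <vector>

static void HwcToChw(const std::vector<float>& src, std::vector<float>& dst,
                     int H, int W, int C)
{
    dst.resize(static_cast<std::size_t>(H) * W * C);
    for (int c = 0; c < C; ++c)
        for (int h = 0; h < H; ++h)
            for (int w = 0; w < W; ++w)
                // dst[c][h][w] = src[h][w][c] in flattened form
                dst[(static_cast<std::size_t>(c) * H + h) * W + w] =
                    src[(static_cast<std::size_t>(h) * W + w) * C + c];
}
```
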
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include "ModelInference.h" -#include "Configuration.h" -#include "SuperresolutionSnpe.h" -#include "DetectionSnpe.h" -#include "LowlightSnpe.h" -#include "SegmentationSnpe.h" - -using namespace std; -using namespace cv; -using namespace superressnpe; -using namespace detectionsnpe; -using namespace lowlightsnpe; -using namespace segmentationsnpe; - -/** @brief contructor -*/ -ModelInference::ModelInference() -{ - Impl = new SUPERRESSnpe(); -} - -/** @brief Parameter constructor - * @param model_type To check model type from config file -*/ -ModelInference::ModelInference(const string model_type) -{ - if (model_type.compare("superresolution") == 0) { - Impl = new SUPERRESSnpe(); - Model = SUPERRESOLUTION; - } - else if(model_type.compare("detection") == 0) - { - Impl = new DETECTIONSnpe(); - Model = DETECTION; - } - else if(model_type.compare("lowlight") == 0) - { - Impl = new LOWLIGHTSnpe(); - Model = LOWLIGHT; - } - else if(model_type.compare("segmentation") == 0) - { - Impl = new SEGMENTATIONSnpe(); - Model = SEGMENTATION; - } - else - LOG_ERROR("Model implementation not found\n"); - - LOG_INFO("Initialized model = %s \n", model_type.c_str()); - -} - -/** @brief destructor -*/ -ModelInference::~ModelInference() -{ - if (nullptr != Impl) - { - if (Model == SUPERRESOLUTION) - { - delete static_cast(Impl); - } - else if(Model == DETECTION) - { - delete static_cast(Impl); - } - else if(Model == LOWLIGHT) - { - delete static_cast(Impl); - } - else if(Model == SEGMENTATION) - { - delete static_cast(Impl); - } - Impl = nullptr; - } -} - -/** @brief For model inference - * @param item contains image buffer and results object to store results - * @return true if success -*/ -int ModelInference::Inference(cv::Mat input,cv::Mat& output_image,string model_name) -{ - int ret=0; - if (nullptr != Impl && IsInitialized()) - { - if (Model == SUPERRESOLUTION) - { - ret = static_cast(Impl)->Detect(input,output_image, model_name); - } - else if(Model == DETECTION) - { - ret = static_cast(Impl)->Detect(input, output_image,model_name); - } - else if(Model == LOWLIGHT) - { - ret = static_cast(Impl)->Detect(input, output_image,model_name); - } - else if(Model == SEGMENTATION) - { - ret = static_cast(Impl)->Detect(input,output_image, model_name); - } - } - return ret; -} - -/** @brief To intialize SNPE - * @param contains SNPE configuration - * @return true if success -*/ -int ModelInference::Initialization(const ObjectDetectionSnpeConfig& config) -{ - int ret=0; - if (IsInitialized()) { - if (Model == SUPERRESOLUTION) - { - ret = static_cast(Impl)->DeInitialize() && static_cast(Impl)->Initialize(config); - } - else if(Model == DETECTION) - { - ret = static_cast(Impl)->DeInitialize() && static_cast(Impl)->Initialize(config); - } - else if(Model == LOWLIGHT) - { - ret = static_cast(Impl)->DeInitialize() && static_cast(Impl)->Initialize(config); - } - else if(Model == SEGMENTATION) - { - ret = static_cast(Impl)->DeInitialize() && static_cast(Impl)->Initialize(config); - } - } - else - { - if (Model == SUPERRESOLUTION) - { - ret = static_cast(Impl)->Initialize(config); - } - else if(Model == DETECTION) - { - ret = static_cast(Impl)->Initialize(config); - } - else if(Model == LOWLIGHT) - { - ret = static_cast(Impl)->Initialize(config); - } - else if(Model == SEGMENTATION) - { - ret = static_cast(Impl)->Initialize(config); - } - } - return ret; -} - -/** @brief To 
uninitialize SNPE - * @return true if success -*/ -bool ModelInference::UnInitialization() -{ - bool ret=false; - if (nullptr != Impl && IsInitialized()) - { - if (Model == SUPERRESOLUTION) - { - ret = static_cast(Impl)->DeInitialize(); - } - else if(Model == DETECTION) - { - ret = static_cast(Impl)->DeInitialize(); - } - else if(Model == LOWLIGHT) - { - ret = static_cast(Impl)->DeInitialize(); - } - else if(Model == SEGMENTATION) - { - ret = static_cast(Impl)->DeInitialize(); - } - } - else - { - LOG_ERROR("ObjectDetection: deinit failed!\n"); - ret = false; - } - return ret; -} - -/** @brief To check if SNPE is initialized - * @return true if already inititalized -*/ -bool ModelInference::IsInitialized() -{ - bool ret=false; - if (Model == SUPERRESOLUTION) - { - ret = static_cast(Impl)->IsInitialized(); - } - else if(Model == DETECTION) - { - ret = static_cast(Impl)->IsInitialized(); - } - else if(Model == LOWLIGHT) - { - ret = static_cast(Impl)->IsInitialized(); - } - else if(Model == SEGMENTATION) - { - ret = static_cast(Impl)->IsInitialized(); - } - return ret; -} - diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/SNPERuntime.cpp b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/SNPERuntime.cpp deleted file mode 100644 index febc4e0f..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/SNPERuntime.cpp +++ /dev/null @@ -1,426 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include "SNPERuntime.h" - -namespace snperuntime{ - - /** @brief SNPE constructor - */ - SNPERuntime::SNPERuntime() - { - static zdl::DlSystem::Version_t version = zdl::SNPE::SNPEFactory::getLibraryVersion(); - LOG_INFO("Using SNPE: '%s' \n", version.asString().c_str()); - } - - /** @brief To calculate buffer size for memory allocation - * @return buffer size - */ - static size_t calcSizeFromDims(const zdl::DlSystem::Dimension* dims, size_t rank, size_t elementSize) - { - if (rank == 0) return 0; - size_t size = elementSize; - while (rank--) { - size *= *dims; - dims++; - } - return size; - } - - /** @brief To create userbuffer - */ - void CreateUserBuffer(zdl::DlSystem::UserBufferMap& userBufferMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - const zdl::DlSystem::TensorShape& bufferShape, - const char* name) - { - size_t bufferElementSize = sizeof(float); - - /** - * To calculate stride based on buffer strides - * Note: Strides = Number of bytes to advance to the next element in each dimension. 
- * For example, if a float tensor of dimension 2x4x3 is tightly packed in a buffer of 96 bytes, then the strides would be (48,12,4) - */ - std::vector strides(bufferShape.rank()); - strides[strides.size() - 1] = bufferElementSize; - size_t stride = strides[strides.size() - 1]; - for (size_t i = bufferShape.rank() - 1; i > 0; i--) - { - stride *= bufferShape[i]; - strides[i - 1] = stride; - } - - size_t bufSize = calcSizeFromDims(bufferShape.getDimensions(), bufferShape.rank(), bufferElementSize); - - /** - * To set the buffer encoding type - */ - zdl::DlSystem::UserBufferEncodingFloat userBufferEncodingFloat; - /** - * To create user-backed storage to load input data onto it - */ - applicationBuffers.emplace(name, std::vector(bufSize / bufferElementSize)); - /** - * To create SNPE user buffer from the user-backed buffer - */ - zdl::DlSystem::IUserBufferFactory& ubFactory = zdl::SNPE::SNPEFactory::getUserBufferFactory(); - snpeUserBackedBuffers.push_back(ubFactory.createUserBuffer((void*)applicationBuffers.at(name).data(), - bufSize, - strides, - &userBufferEncodingFloat)); - /** - * To add the user-backed buffer to the inputMap, which is later on fed to the network for execution - */ - if (snpeUserBackedBuffers.back() == nullptr) - { - std::cerr << "Error while creating user buffer." << std::endl; - } - userBufferMap.add(name, snpeUserBackedBuffers.back().get()); - } - - /** @brief To set SNPERuntime - * @param runtime contains SNPERuntime value - */ - void SNPERuntime::setTargetRuntime(const runtime_t runtime) - { - switch (runtime) { - case DSP: - m_runtime = zdl::DlSystem::Runtime_t::DSP; - break; - default: - m_runtime = zdl::DlSystem::Runtime_t::CPU; - break; - } - - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(m_runtime)) { - LOG_ERROR("Selected runtime not present. 
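
To make the stride note in CreateUserBuffer above concrete: strides are built from the innermost dimension outward, so for a float tensor of shape 2x4x3 the element size is 4 bytes and the strides come out as {48, 12, 4}, exactly as the comment states. A tiny standalone version of that loop (the `ComputeStrides` name and `std::vector<size_t>` signature are illustrative assumptions):

```cpp
// Sketch of the stride computation described above: the innermost stride is
// the element size, and each outer stride multiplies in the next dimension.
#include <cstddef>
#include <vector>

static std::vector<std::size_t> ComputeStrides(const std::vector<std::size_t>& dims,
                                               std::size_t element_size)
{
    if (dims.empty()) return {};
    std::vector<std::size_t> strides(dims.size());
    std::size_t stride = element_size;
    strides.back() = stride;
    for (std::size_t i = dims.size() - 1; i > 0; --i) {
        stride *= dims[i];
        strides[i - 1] = stride;
    }
    return strides;
}

// Example: ComputeStrides({2, 4, 3}, sizeof(float)) yields {48, 12, 4}.
```
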
Falling back to CPU.\n"); - m_runtime = zdl::DlSystem::Runtime_t::CPU; - } - } - - /** @brief To set performance profile - * @param perfprofile contains performance value - */ - void SNPERuntime::setPerformanceProfile(const performance_t perfprofile) - { - switch (perfprofile) { - case BALANCED: - m_profile = zdl::DlSystem::PerformanceProfile_t::BALANCED; - break; - case HIGH_PERFORMANCE: - m_profile = zdl::DlSystem::PerformanceProfile_t::HIGH_PERFORMANCE; - break; - case POWER_SAVER: - m_profile = zdl::DlSystem::PerformanceProfile_t::POWER_SAVER; - break; - case SUSTAINED_HIGH_PERFORMANCE: - m_profile = zdl::DlSystem::PerformanceProfile_t::SUSTAINED_HIGH_PERFORMANCE; - break; - case BURST: - m_profile = zdl::DlSystem::PerformanceProfile_t::BURST; - break; - case LOW_POWER_SAVER: - m_profile = zdl::DlSystem::PerformanceProfile_t::LOW_POWER_SAVER; - break; - case HIGH_POWER_SAVER: - m_profile = zdl::DlSystem::PerformanceProfile_t::HIGH_POWER_SAVER; - break; - case LOW_BALANCED: - m_profile = zdl::DlSystem::PerformanceProfile_t::LOW_BALANCED; - break; - case SYSTEM_SETTINGS: - m_profile = zdl::DlSystem::PerformanceProfile_t::SYSTEM_SETTINGS; - break; - default: - m_profile = zdl::DlSystem::PerformanceProfile_t::BALANCED; - break; - } - LOG_DEBUG("Choose performance: %d, Set performance: %d \n", perfprofile, (int)m_profile); - } - - /** @brief To initialize SNPERuntime - * @param dlc_path contains dlc path from the config file - * @param runtime SNPERuntime value - * @return true if success; false otherwise - */ - bool SNPERuntime::Initialize(const std::string& dlc_path, const runtime_t runtime) - { - setTargetRuntime(runtime); - setPerformanceProfile(BURST); - /** - * To read dlc from dlc_path - */ - m_container = zdl::DlContainer::IDlContainer::open(dlc_path); - /** - * To create snpeBuilder from m_container based on runtime,performance profile - */ - std::vector runtimeStrVector; - switch (runtime) - { - case CPU: - runtimeStrVector.push_back("cpu_float32"); - runtimeStrVector.push_back("dsp_fixed8_tf"); - LOG_INFO("Runtime = CPU \n"); - break; - - case DSP: - runtimeStrVector.push_back("dsp_fixed8_tf"); - runtimeStrVector.push_back("cpu_float32"); - LOG_INFO("Runtime = DSP \n"); - break; - - } - //std::vector runtimeStrVector = {"dsp_fixed8_tf","gpu_float16","cpu_float32"}; - zdl::DlSystem::RuntimeList runtimeList; - - runtimeList.clear(); - for(auto& runtimeStr : runtimeStrVector) - { - zdl::DlSystem::Runtime_t runtime = zdl::DlSystem::RuntimeList::stringToRuntime(runtimeStr.c_str()); - if(runtime != zdl::DlSystem::Runtime_t::UNSET) - { - bool ret = runtimeList.add(runtime); - if(ret == false) - { - std::cerr <getInputTensorNames(); - if (!inputNamesOpt) throw std::runtime_error("Error obtaining input tensor names\n"); - const zdl::DlSystem::StringList& inputNames = *inputNamesOpt; - - /** - * To create SNPE user buffers for each application storage buffer - */ - for (const char* name : inputNames) - { - /** - * To get attributes of buffer by name - */ - auto bufferAttributesOpt = m_snpe->getInputOutputBufferAttributes(name); - if (!bufferAttributesOpt) - { - LOG_ERROR("Error obtaining attributes for input tensor: %s\n", name); - return false; - } - - const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); - std::vector tensorShape; - for (size_t j = 0; j < bufferShape.rank(); j++) - { - tensorShape.push_back(bufferShape[j]); - } - m_inputShapes.emplace(name, tensorShape); - - CreateUserBuffer(m_inputUserBufferMap, m_applicationInputBuffers, m_inputUserBuffers, 
bufferShape, name); - } - - /** - * To get output tensor names of the network that need to be populated - */ - const auto& outputNamesOpt = m_snpe->getOutputTensorNames(); - if (!outputNamesOpt) throw std::runtime_error("Error obtaining output tensor names\n"); - const zdl::DlSystem::StringList& outputNames = *outputNamesOpt; - - /** - * To create SNPE user buffers for each application storage buffer - */ - for (const char* name : outputNames) - { - // get attributes of buffer by name - auto bufferAttributesOpt = m_snpe->getInputOutputBufferAttributes(name); - if (!bufferAttributesOpt) - { - LOG_ERROR("Error obtaining attributes for input tensor: %s\n", name); - return false; - } - - const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); - std::vector tensorShape; - for (size_t j = 0; j < bufferShape.rank(); j++) { - tensorShape.push_back(bufferShape[j]); - } - m_outputShapes.emplace(name, tensorShape); - - CreateUserBuffer(m_outputUserBufferMap, m_applicationOutputBuffers, m_outputUserBuffers, bufferShape, name); - } - - m_isInit = true; - - return true; - } - - /** @brief To deinitialize SNPERuntime - */ - bool SNPERuntime::Deinitialize() - { - if (nullptr != m_snpe) - { - m_snpe.reset(nullptr); - } - - for (auto [k, v] : m_applicationInputBuffers) ClearVector(v); - for (auto [k, v] : m_applicationOutputBuffers) ClearVector(v); - return true; - } - - /** @brief To store output layers for each model - * @param outputlayers contains output layers defined in the config file - */ - bool SNPERuntime::SetOutputLayers(std::vector& outputLayers) - { - for (size_t i = 0; i < outputLayers.size(); i ++) - { - m_outputLayers.append(outputLayers[i].c_str()); - } - - return true; - } - - /** @brief To get input shape for each model - * @param name contains name of input layer - * @return shape of input layer if success; empty otherwise - */ - std::vector SNPERuntime::GetInputShape(const std::string& name) - { - /** - * To check if runtime is initialized and layer name is a part of input - */ - if (IsInit()) { - if (m_inputShapes.find(name) != m_inputShapes.end()) - { - return m_inputShapes.at(name); - } - LOG_ERROR("Can't find any input layer named %s\n", name.c_str()); - return {}; - } else { - LOG_ERROR("GetInputShape Failed: SNPE Init Failed !!!\n"); - return {}; - } - } - - /** @brief To get output shape for each model - * @param name contains name of output layers - * @return shape of output layer if success; empty otherwise - */ - std::vector SNPERuntime::GetOutputShape(const std::string& name) - { - /** - * To check if runtime is initialized and layer name is a part of output - */ - if (IsInit()) - { - if (m_outputShapes.find(name) != m_outputShapes.end()) - { - return m_outputShapes.at(name); - } - LOG_ERROR("Can't find any ouput layer named %s\n", name.c_str()); - return {}; - } - else - { - LOG_ERROR("GetOutputShape Failed: SNPE Init Failed !!!\n"); - return {}; - } - } - - - /** @brief To get input tensor for each model - * @param name contains name of input layer - * @return shape of input tensor if success; NULL otherwise - */ - float* SNPERuntime::GetInputTensor(const std::string& name) - { - /** - * To check if runtime is initialized and layer name is a part of input - */ - if (IsInit()) - { - if (m_applicationInputBuffers.find(name) != m_applicationInputBuffers.end()) - { - return m_applicationInputBuffers.at(name).data(); - } - LOG_ERROR("Can't find any input tensor named '%s' \n", name.c_str()); - return nullptr; - } - else - { - LOG_ERROR("GetInputTensor 
Failed: SNPE Init Failed !!!\n"); - return nullptr; - } - } - - /** @brief To get output tensor for each model - * @param name contains name of output layer - * @return shape of output tensor if success; NULL otherwise - */ - - float* SNPERuntime::GetOutputTensor(const std::string& name) - { - /** - * To check if runtime is initialized and layer name is a part of output - */ - if (IsInit()) - { - if (m_applicationOutputBuffers.find(name) != m_applicationOutputBuffers.end()) - { - return m_applicationOutputBuffers.at(name).data(); - } - LOG_ERROR("Can't find any output tensor named '%s' \n", name.c_str()); - return nullptr; - } - else - { - LOG_ERROR("GetOutputTensor Failed: SNPE Init Failed !!!"); - return nullptr; - } - } - - /** @brief To execute inference on target - * @return QS_SUCCESS if success; QS_FAIL otherwise - */ - bool SNPERuntime::execute() - { - if (!m_snpe->execute(m_inputUserBufferMap, m_outputUserBufferMap)) - { - LOG_ERROR("SNPE Task execute failed: %s\n", zdl::DlSystem::getLastErrorString()); - return false; - } - - return true; - } - -} // namespace snperuntime \ No newline at end of file diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/SegmentationSnpe.cpp b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/SegmentationSnpe.cpp deleted file mode 100644 index 3481e2bb..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/SegmentationSnpe.cpp +++ /dev/null @@ -1,481 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -#include -#include -#include "Configuration.h" -#include "SegmentationSnpe.h" - -namespace segmentationsnpe -{ - - /** @brief Constructor - */ - SEGMENTATIONSnpe::SEGMENTATIONSnpe() : m_isInit(false),m_snperuntime(nullptr) - { - - } - - /** @brief Destructor - */ - SEGMENTATIONSnpe::~SEGMENTATIONSnpe() - { - DeInitialize(); - } - - /** @brief To read model config and set output layers - * @param config model config parameters - * @return true if success;false otherwise - */ - bool SEGMENTATIONSnpe::Initialize(const ObjectDetectionSnpeConfig& config) - { - m_snperuntime = std::move(std::unique_ptr(new snperuntime::SNPERuntime())); - - m_inputLayers = config.inputLayers; - m_outputLayers = config.outputLayers; - m_outputTensors = config.outputTensors; - m_nmsThresh = config.nmsThresh; - m_confThresh = config.confThresh; - - /** - * To set output layer from model config - */ - m_snperuntime->SetOutputLayers(m_outputLayers); - /** - * To initialize snperuntime - */ - if (!m_snperuntime->Initialize(config.model_path, config.runtime)) { - LOG_ERROR("Failed to Initialize snpe instance.\n"); - return false; - } - - m_isInit = true; - return true; - } - - /** @brief To deallocate buffers - */ - bool SEGMENTATIONSnpe::DeInitialize() - { - if (m_isInit) { - m_snperuntime->Deinitialize(); - m_snperuntime.reset(nullptr); - } - - m_isInit = false; - return true; - } - - bool SEGMENTATIONSnpe::SetScoreThresh(const float& conf_thresh, const float& nms_thresh = 0.5) - { - this->m_nmsThresh = nms_thresh; - this->m_confThresh = conf_thresh; - return true; - } - - bool SEGMENTATIONSnpe::IsInitialized() const - { - return m_isInit; - } - - /** @brief To preprocess input image - * @param input_image Input image for inference - * @return true if succuess; false otherwise - */ - bool SEGMENTATIONSnpe::PreProcessInput(const cv::Mat& input_image,string model_name) - { - if (input_image.empty()) { - LOG_ERROR("Invalid image!\n"); - return false; - } - - auto inputShape = m_snperuntime->GetInputShape(m_inputLayers[0]); - int model_h = inputShape[1]; - int model_w = inputShape[2]; - - if (m_snperuntime->GetInputTensor(m_inputLayers[0]) == nullptr) - { - LOG_ERROR("Empty input tensor\n"); - return false; - } - - cv::Mat image = cv::Mat(model_h,model_w, CV_32FC3, Scalar(0.)); - cv::resize(input_image,image,cv::Size(model_h,model_w)); - cv::Mat input(model_h, model_w, CV_32FC3, m_snperuntime->GetInputTensor(m_inputLayers[0])); - - if(model_name.compare("DeepLabv3Plus-resnet++") == 0 || model_name.compare("DeepLabv3-resnet101") == 0 || model_name.compare("DeepLabv3-resnet50") == 0 || model_name.compare("FCN_resnet101") == 0 || model_name.compare("FCN_resnet50") == 0) - { - cv::resize(image,image,cv::Size(model_w,model_h)); - image.convertTo(input,CV_32FC3,1.0); - const float mean_vals[3] = {0.485, 0.456, 0.406}; - const float norm_vals[3] = {0.229, 0.224, 0.225}; - for (int i = 0; i < input.rows; i++) - { - float* pdata = (float*)(input.data + i * input.step); - for (int j = 0; j < input.cols; j++) - { - float x = pdata[2], y=pdata[1], z = pdata[0]; - pdata[0] = (x / 255.0 - mean_vals[0]) / norm_vals[0]; - pdata[1] = (y / 255.0 - mean_vals[1]) / norm_vals[1]; - pdata[2] = (z / 255.0 - mean_vals[2]) / norm_vals[2]; - pdata += 3; - } - } - } - return true; - } - - /** @brief To preprocess,execute and postprocess - * @param input_image Input image for inference - * 
@param output_image Inference output image - * @param model_name To identify model for specific post-processing - * @return true if success; false otherwise - */ - bool SEGMENTATIONSnpe::Detect(cv::Mat image,cv::Mat& output_image,string model_name) - { - /** - * Preprocessing image - */ - if(PreProcessInput(image, model_name) != true) - { - LOG_ERROR("PreProcess failed\n"); - return false; - } - /** - * Inferencing model on target - */ - if (!m_snperuntime->execute()) { - LOG_ERROR("SNPERuntime execute failed."); - return false; - } - /** - * Postprocessing - */ - if(PostProcess(image,output_image,model_name) != true) - { - LOG_ERROR("PostProcess failed\n"); - return false; - } - return true; - } - - /** @brief postprocess to overlay segmentation - * @param output_image Overlayed image - * @param model_name To identify model for specific post-processing - */ - bool SEGMENTATIONSnpe::PostProcess( cv::Mat image,cv::Mat& output_image,string model_name) - { - auto outputShape = m_snperuntime->GetOutputShape(m_outputTensors[0]); - float *predOutput = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - - if( predOutput == nullptr) - { - return false; - } - - int height = outputShape[1]; - int width = outputShape[2]; - int channels = outputShape[3]; - - cv::Mat temp = cv::Mat(height,width, CV_8UC3); - vector app_vect; - - float ***app = new float**[height]; - for (int i = 0; i < height; ++i) - { - app[i] = new float*[width]; - for (int j = 0; j < width; ++j) - app[i][j] = new float[channels]; - } - - for(int i = 0;i app_t_vec; - - for(int i = 0;i < channels;i++) - { - for (int j = 0; j < width; j++) - { - for (int k = 0; k < height; k++) - { - float x = app[j][k][i]; - app_t_vec.push_back(x); - } - } - } - - float ***app_t=NULL; - - app_t = new float**[channels]; - for (int i = 0; i < channels; ++i) - { - app_t[i] = new float*[width]; - for (int j = 0; j < width; ++j) - app_t[i][j] = new float[height]; - } - - for(int i =0;i> colors_res = { - { 0, 0, 0},{128, 0, 0},{ 0, 128, 0},{128, 128, 0},{ 0, 0, 128}, - {128, 0, 128},{ 0, 128, 128},{128, 128, 128},{ 64, 0, 0},{192, 0, 0}, - { 64, 128, 0},{192, 128, 0},{ 64, 0, 128},{192, 0, 128},{ 64, 128, 128}, - {192, 128, 128},{ 0, 64, 0},{128, 64, 0},{ 0, 192, 0},{128, 192, 0}, - { 0, 64, 128},{128, 64, 128},{ 0, 192, 128},{128, 192, 128},{ 64, 64, 0}, - {192, 64, 0},{ 64, 192, 0},{192, 192, 0},{ 64, 64, 128},{192, 64, 128}, - { 64, 192, 128},{192, 192, 128},{ 0, 0, 64},{128, 0, 64},{ 0, 128, 64}, - {128, 128, 64},{ 0, 0, 192},{128, 0, 192},{ 0, 128, 192},{128, 128, 192}, - { 64, 0, 64},{192, 0, 64},{ 64, 128, 64},{192, 128, 64},{ 64, 0, 192}, - {192, 0, 192},{ 64, 128, 192},{192, 128, 192},{ 0, 64, 64},{128, 64, 64}, - { 0, 192, 64},{128, 192, 64},{ 0, 64, 192},{128, 64, 192},{ 0, 192, 192}, - {128, 192, 192},{ 64, 64, 64},{192, 64, 64},{ 64, 192, 64},{192, 192, 64}, - { 64, 64, 192},{192, 64, 192},{ 64, 192, 192},{192, 192, 192},{ 32, 0, 0}, - {160, 0, 0},{ 32, 128, 0},{160, 128, 0},{ 32, 0, 128},{160, 0, 128}, - { 32, 128, 128},{160, 128, 128},{ 96, 0, 0},{224, 0, 0},{ 96, 128, 0}, - {224, 128, 0},{ 96, 0, 128},{224, 0, 128},{ 96, 128, 128},{224, 128, 128}, - { 32, 64, 0},{160, 64, 0},{ 32, 192, 0},{160, 192, 0},{ 32, 64, 128}, - {160, 64, 128},{ 32, 192, 128},{160, 192, 128},{ 96, 64, 0},{224, 64, 0}, - { 96, 192, 0},{224, 192, 0},{ 96, 64, 128},{224, 64, 128},{ 96, 192, 128}, - {224, 192, 128},{ 32, 0, 64},{160, 0, 64},{ 32, 128, 64},{160, 128, 64}, - { 32, 0, 192},{160, 0, 192},{ 32, 128, 192},{160, 128, 192},{ 96, 0, 64}, - {224, 0, 64},{ 96, 
128, 64},{224, 128, 64},{ 96, 0, 192},{224, 0, 192}, - { 96, 128, 192},{224, 128, 192},{ 32, 64, 64},{160, 64, 64},{ 32, 192, 64}, - {160, 192, 64},{ 32, 64, 192},{160, 64, 192},{ 32, 192, 192},{160, 192, 192}, - { 96, 64, 64},{224, 64, 64},{ 96, 192, 64},{224, 192, 64},{ 96, 64, 192}, - {224, 64, 192},{ 96, 192, 192},{224, 192, 192},{ 0, 32, 0},{128, 32, 0}, - { 0, 160, 0},{128, 160, 0},{ 0, 32, 128},{128, 32, 128},{ 0, 160, 128}, - {128, 160, 128},{ 64, 32, 0},{192, 32, 0},{ 64, 160, 0},{192, 160, 0}, - { 64, 32, 128},{192, 32, 128},{ 64, 160, 128},{192, 160, 128},{ 0, 96, 0}, - {128, 96, 0},{ 0, 224, 0},{128, 224, 0},{ 0, 96, 128},{128, 96, 128}, - { 0, 224, 128},{128, 224, 128},{ 64, 96, 0},{192, 96, 0},{ 64, 224, 0}, - {192, 224, 0},{ 64, 96, 128},{192, 96, 128},{ 64, 224, 128},{192, 224, 128}, - { 0, 32, 64},{128, 32, 64},{ 0, 160, 64},{128, 160, 64},{ 0, 32, 192}, - {128, 32, 192},{ 0, 160, 192},{128, 160, 192},{ 64, 32, 64},{192, 32, 64}, - { 64, 160, 64},{192, 160, 64},{ 64, 32, 192},{192, 32, 192},{ 64, 160, 192}, - {192, 160, 192},{ 0, 96, 64},{128, 96, 64},{ 0, 224, 64},{128, 224, 64}, - { 0, 96, 192},{128, 96, 192},{ 0, 224, 192},{128, 224, 192},{ 64, 96, 64}, - {192, 96, 64},{ 64, 224, 64},{192, 224, 64},{ 64, 96, 192},{192, 96, 192}, - { 64, 224, 192},{192, 224, 192},{ 32, 32, 0},{160, 32, 0},{ 32, 160, 0}, - {160, 160, 0},{ 32, 32, 128},{160, 32, 128},{ 32, 160, 128},{160, 160, 128}, - { 96, 32, 0},{224, 32, 0},{ 96, 160, 0},{224, 160, 0},{ 96, 32, 128}, - {224, 32, 128},{ 96, 160, 128},{224, 160, 128},{ 32, 96, 0},{160, 96, 0}, - { 32, 224, 0},{160, 224, 0},{ 32, 96, 128},{160, 96, 128},{ 32, 224, 128}, - {160, 224, 128},{ 96, 96, 0},{224, 96, 0},{ 96, 224, 0},{224, 224, 0}, - { 96, 96, 128},{224, 96, 128},{ 96, 224, 128},{224, 224, 128},{ 32, 32, 64}, - {160, 32, 64},{ 32, 160, 64},{160, 160, 64},{ 32, 32, 192},{160, 32, 192}, - { 32, 160, 192},{160, 160, 192},{ 96, 32, 64},{224, 32, 64},{ 96, 160, 64}, - {224, 160, 64},{ 96, 32, 192},{224, 32, 192},{ 96, 160, 192},{224, 160, 192}, - { 32, 96, 64},{160, 96, 64},{ 32, 224, 64},{160, 224, 64},{ 32, 96, 192}, - {160, 96, 192},{ 32, 224, 192},{160, 224, 192},{ 96, 96, 64},{224, 96, 64}, - { 96, 224, 64},{224, 224, 64},{ 96, 96, 192},{224, 96, 192},{ 96, 224, 192}, - {224, 224, 192} - }; - - int **app_t_max=NULL; - - app_t_max = new int*[width]; - for (int j = 0; j < width; ++j) - { - app_t_max[j] = new int[height]; - } - - vector max_values; - for(int i=0;i max) - { - max = temp; - app_t_max[i][j] = k; - } - } - max_values.push_back(max); - } - } - - vector max_vec; - - for(int i = 0; i< height;i++) - { - for(int j=0;j> color; - color = colors_res; - - for (int i = 0; i < temp.rows; i++) - { - char* pdata = (char*)(temp.data + i * temp.step); - for (int j = 0; j < temp.cols; j++) - { - int id = app_t_max[i][j]; - pdata[0] = color[id][2]; - pdata[1] = color[id][1]; - pdata[2] = color[id][0]; - pdata += 3; - } - } - - for (int j = 0; j < width; ++j) - { - delete [] app_t_max[j]; - } - delete [] app_t_max; - app_t_max = NULL; - - } - else if(model_name.compare("DeepLabv3-resnet101") == 0 || model_name.compare("DeepLabv3-resnet50") == 0 || model_name.compare("FCN_resnet101") == 0 || model_name.compare("FCN_resnet50") == 0) - { - vector> label_map = { - {0, 0, 0}, // background - {128, 0, 0}, // aeroplane - {0, 128, 0}, // bicycle - {128, 128, 0}, // bird - {0, 0, 128}, // boat - {128, 0, 128}, // bottle - {0, 128, 128}, // bus - {128, 128, 128}, // car - {64, 0, 0}, // cat - {192, 0, 0}, // chair - {64, 128, 0}, // cow - {192, 
128, 0}, // dining table - {64, 0, 128}, // dog - {192, 0, 128}, // horse - {64, 128, 128}, // motorbike - {192, 128, 128}, // person - {0, 64, 0}, // potted plant - {128, 64, 0}, // sheep - {0, 192, 0}, // sofa - {128, 192, 0}, // train - {0, 64, 128} // tv/monitor - }; - - int **app_t_max=NULL; - - app_t_max = new int*[width]; - for (int j = 0; j < width; j++) - { - app_t_max[j] = new int[height]; - } - - vector max_values; - for(int i=0; i max) - { - max = temp; - app_t_max[i][j] = k; - } - } - max_values.push_back(max); - - } - } - - vector max_vec; - - for(int i = 0; i< height;i++) - { - for(int j=0;j(user_data); - GstSample *sample = NULL; - GstBuffer *buffer = NULL; - GstMapInfo map; - const GstStructure *info = NULL; - GstCaps *caps = NULL; - GstFlowReturn ret = GST_FLOW_OK; - int sample_width = 0; - int sample_height = 0; - - g_signal_emit_by_name(appsink, "pull-sample", &sample, &ret); - if (ret != GST_FLOW_OK) - { - LOG_ERROR("can't pull GstSample."); - return ret; - } - - if (sample) - { - buffer = gst_sample_get_buffer(sample); - if (buffer == NULL) - { - LOG_ERROR("get buffer is null"); - goto exit; - } - - gst_buffer_map(buffer, &map, GST_MAP_READ); - - caps = gst_sample_get_caps(sample); - if (caps == NULL) - { - LOG_ERROR("get caps is null"); - goto exit; - } - - info = gst_caps_get_structure(caps, 0); - if (info == NULL) - { - LOG_ERROR("get info is null"); - goto exit; - } - - gst_structure_get_int(info, "width", &sample_width); - gst_structure_get_int(info, "height", &sample_height); - - - if (map.data == NULL) - { - LOG_ERROR("appsink buffer data empty"); - return GST_FLOW_OK; - } - - frameProcess->frameId += 1; - LOG_DEBUG("Frame ID=%d \n", frameProcess->frameId); - if (frameProcess->frameId % frameProcess->interval == 0) - { - shared_ptr detail(new DetectionItem()); - detail->Size = (uint32_t)map.size; - detail->Width = sample_width; - detail->Height = sample_height; - detail->FrameId = frameProcess->frameId; - detail->StreamName = frameProcess->streamName; - detail->StreamId = frameProcess->StreamId; - - uint8_t *imgBuf = new uint8_t[map.size]; - memcpy(static_cast(imgBuf), map.data, map.size); - detail->ImageBuffer.reset((uint8_t *)imgBuf, [](uint8_t *p) - { delete[] (p); }); - - int ret = frameProcess->blockQueue->Enqueue(detail,true); - if (ret != 0) - { - LOG_ERROR("Enqueue Fail = %d \n", ret); - } - } - } - -exit: - if (buffer) - { - gst_buffer_unmap(buffer, &map); - } - if (sample) - { - gst_sample_unref(sample); - } - return GST_FLOW_OK; -} - -void StreamDecode::OnPadAdd(GstElement *element, GstPad *pad, gpointer data) -{ - // Link two Element with named pad - GstPad *sink_pad = gst_element_get_static_pad(GST_ELEMENT(data), "sink"); - if (gst_pad_is_linked(sink_pad)) - { - LOG_INFO("rtspsrc and depay are already linked. 
Ignoring\n"); - return; - } - gst_element_link_pads(element, gst_pad_get_name(pad), GST_ELEMENT(data), "sink"); -} - -StreamDecode::StreamDecode(std::string streamtype, std::string url) -{ - this->StreamType = streamtype; - frameProcess_ = new FrameProcessData(); - frameProcess_->frameId = 1; - this->frameProcess_->streamName = url; -} - -StreamDecode::~StreamDecode() -{ - UnInitialization(); - if (frameProcess_ != nullptr) - { - delete frameProcess_; - frameProcess_ = nullptr; - } -} - -void StreamDecode::UnInitialization() -{ - terminate_ = true; -} -void StreamDecode::DecodeAndInference() -{ - GstStateChangeReturn ret; - shared_ptr msg = nullptr; - /* Start playing */ - ret = gst_element_set_state(data_.pipeline.get(), GST_STATE_PLAYING); - if (ret == GST_STATE_CHANGE_FAILURE) - { - LOG_ERROR("Unable to set the pipeline to the playing state.\n"); - return; - } - - /* Listen to the bus */ - bus_.reset(gst_element_get_bus(data_.pipeline.get()), [](GstBus *obj) - { gst_object_unref(obj); }); - - GstMessageType msgType; - do - { - msgType = static_cast(GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS); - msg.reset(gst_bus_timed_pop_filtered(bus_.get(), GST_CLOCK_TIME_NONE, msgType), [](GstMessage *m) - { gst_message_unref(m); }); - /* Parse message */ - if (msg != NULL) - { - GError *err; - gchar *debug_info; - - switch (GST_MESSAGE_TYPE(msg.get())) - { - case GST_MESSAGE_ERROR: - gst_message_parse_error(msg.get(), &err, &debug_info); - LOG_ERROR("Error received from element = %s \t %s\n", GST_OBJECT_NAME(msg.get()->src), err->message); - g_clear_error(&err); - g_free(debug_info); - terminate_ = true; - break; - case GST_MESSAGE_EOS: - LOG_INFO("Stream = %s \t End-Of-Stream reached. total Frame = %d \n ", frameProcess_->streamName.c_str(), frameProcess_->frameId); - terminate_ = true; - break; - case GST_MESSAGE_STATE_CHANGED: - if (GST_MESSAGE_SRC(msg.get()) == GST_OBJECT(data_.pipeline.get())) - { - GstState old_state, new_state, pending_state; - gst_message_parse_state_changed(msg.get(), &old_state, &new_state, &pending_state); - } - break; - default: - LOG_ERROR("Unexpected message received.\n"); - break; - } - } - } while (!terminate_); - - gst_element_set_state(data_.pipeline.get(), GST_STATE_NULL); -} - -void StreamDecode::UnRefElement(GstElement *elem) -{ - // LOG_DEBUG("Pipeline parent manage this object instead of unreffing the object directly: %s\n", elem->object.name); -} - -int StreamDecode::gst_camera_pipeline_init() -{ - GstCaps *filtercaps; - /* Initialize GStreamer */ - gst_init(nullptr, nullptr); - /* Create the empty pipeline */ - data_.pipeline.reset(gst_pipeline_new("decode-pipeline"), [](GstElement *elem) { - gst_element_set_state(elem, GST_STATE_NULL); - gst_object_unref(elem); }); - - data_.source.reset(gst_element_factory_make("qtiqmmfsrc", "source"), UnRefElement); - data_.main_capsfilter.reset(gst_element_factory_make ("capsfilter", "main_capsfilter"), UnRefElement); - data_.transform.reset(gst_element_factory_make("qtivtransform", "transform"), UnRefElement); - data_.sink.reset(gst_element_factory_make("appsink", "sink"), UnRefElement); - - if (!data_.pipeline.get() || !data_.source.get() || !data_.main_capsfilter.get() || !data_.transform.get() || !data_.sink.get()) - { - LOG_ERROR("Not all elements could be created."); - return QS_ERROR; - } - - filtercaps = gst_caps_new_simple ("video/x-raw", - "format", G_TYPE_STRING, "NV12", - "width", G_TYPE_INT, 1280, - "height", G_TYPE_INT, 720, - "framerate", GST_TYPE_FRACTION, 30, 1, - NULL); - - 
gst_caps_set_features (filtercaps, 0, - gst_caps_features_new ("memory:GBM", NULL)); - g_object_set (data_.main_capsfilter.get(), "caps", filtercaps, NULL); - gst_caps_unref (filtercaps); - - gst_bin_add_many(GST_BIN(data_.pipeline.get()), data_.source.get(), data_.main_capsfilter.get(), - data_.transform.get(), data_.sink.get(), NULL); - if (!gst_element_link_many(data_.source.get(), data_.main_capsfilter.get(), data_.transform.get(), data_.sink.get(), NULL)) - { - LOG_ERROR("Elements could not be linked.\n"); - gst_bin_remove_many (GST_BIN(data_.pipeline.get()), data_.source.get(), data_.main_capsfilter.get(), - data_.transform.get(), data_.sink.get(), NULL); - return QS_ERROR; - } - shared_ptr caps(gst_caps_from_string("video/x-raw,format=BGR"), [](GstCaps *caps) - { gst_caps_unref(caps); }); - g_object_set(data_.sink.get(), "caps", caps.get(), NULL); - - /* Configure appsink */ - g_object_set(data_.sink.get(), "emit-signals", TRUE, NULL); - g_signal_connect(data_.sink.get(), "new-sample", G_CALLBACK(StreamDecode::OnAppsinkNewSample), frameProcess_); - - /* Connect to the pad-added signal */ - g_signal_connect(data_.source.get(), "pad-added", G_CALLBACK(StreamDecode::OnPadAdd), data_.transform.get()); - - return QS_SUCCESS; - -} - -int StreamDecode::Initialization(shared_ptr &queue) -{ - frameProcess_->blockQueue = queue; - - if(0 == StreamType.compare("camera")) { - return gst_camera_pipeline_init(); - } - else { - LOG_ERROR("Stream Type does not configured"); - return QS_ERROR; - } -} - -void StreamDecode::SetSkipFrame(int interval) -{ - if (interval < 1) - { - return; - } - frameProcess_->interval = interval; -} - -void StreamDecode::SetStreamName(string name) -{ - LOG_INFO("Set stream name =%s \n", name.c_str()); - frameProcess_->streamName = name; -} - -void StreamDecode::SetStreamId(int uuid) -{ - frameProcess_->StreamId = uuid; -} - -void StreamDecode::Stop() -{ - terminate_ = true; - gboolean res = gst_element_send_event(data_.pipeline.get(), gst_event_new_eos()); - if (!res) - { - LOG_ERROR("Error occurred! 
EOS signal cannot be sent!\n\r"); - } -} - -static void CaptureThreadFunc(shared_ptr decodePtr) -{ - decodePtr->DecodeAndInference(); -} - -void CaptureController::CreateCapture(shared_ptr &input_config, shared_ptr &queue) -{ - shared_ptr decodePtr = make_shared(input_config->StreamType, input_config->Url); - - decodePtr->SetStreamId(input_config->StreamNumber); - - decodePtr->Initialization(queue); - decodePtr->SetStreamName("stream_" + to_string(input_config->StreamNumber)); - - decodePtr->SetSkipFrame(input_config->SkipFrame); - - std::thread decodeThread = std::thread(CaptureThreadFunc, decodePtr); - threads.emplace_back(move(decodeThread)); - - decoder.insert(pair>(input_config->StreamNumber, decodePtr)); -} - -void CaptureController::InterruptClose() -{ - map>::reverse_iterator iter; - - for (iter = decoder.rbegin(); iter != decoder.rend(); iter++) - { - iter->second->Stop(); - } -} - -void CaptureController::StopAll() -{ - map>::reverse_iterator iter; - - for (size_t i = 0; i < threads.size(); i++) - { - threads[i].join(); - } - - for (iter = decoder.rbegin(); iter != decoder.rend(); iter++) - { - iter->second->Stop(); - } -} diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/StreamEncode.cpp b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/StreamEncode.cpp deleted file mode 100644 index aa375bb8..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/StreamEncode.cpp +++ /dev/null @@ -1,248 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include "StreamEncode.h" - -int StreamEncode::gst_wayland_pipeline_init(string output_type) -{ - gst_init(nullptr, nullptr); - data.pipeline.reset(gst_pipeline_new("pipeline"), [](GstElement *elem) - { - gst_element_set_state(elem, GST_STATE_NULL); - gst_object_unref(elem); - }); - - auto DefaultUnRefGstElement = [](GstElement *elem) { - // Per GStreamer design pipeline parent manage - // GstElement instead of unreffing the object directly - }; - - data.appsrc.reset(gst_element_factory_make("appsrc", "appsrc"), DefaultUnRefGstElement); - data.vidconv.reset(gst_element_factory_make("videoconvert", "vidconv"), DefaultUnRefGstElement); - data.videoscale.reset(gst_element_factory_make("videoscale", "videoscale"), DefaultUnRefGstElement); - data.waylandsink.reset(gst_element_factory_make("waylandsink", "waylandsink"), DefaultUnRefGstElement); - - if (!data.pipeline.get() || !data.appsrc.get() || !data.vidconv.get() || !data.videoscale.get() || !data.waylandsink.get()) - { - LOG_ERROR("[not all element created,(%s)(%s)(%s)(%s)(%s)]\n", - !data.pipeline.get() ? "ng" : "ok", - !data.appsrc.get() ? "ng" : "ok", - !data.vidconv.get() ? "ng" : "ok", - !data.videoscale.get() ? "ng" : "ok", - !data.waylandsink.get() ? 
"ng" : "ok"); - return QS_ERROR; - } - - gst_bin_add_many(GST_BIN(data.pipeline.get()), - data.appsrc.get(), - data.vidconv.get(), - data.videoscale.get(), - data.waylandsink.get(), - NULL); - - GstCaps *caps = gst_caps_from_string("video/x-raw, framerate=30/1,width=1280, height=720,format=BGR"); - g_object_set(data.appsrc.get(), "caps", caps, NULL); - gst_caps_unref(caps); - - g_object_set(G_OBJECT(data.waylandsink.get()), "async", true, NULL); - g_object_set(G_OBJECT(data.waylandsink.get()), "sync", false, NULL); - g_object_set (G_OBJECT (data.waylandsink.get()), "fullscreen", true, NULL); - - - gst_element_sync_state_with_parent(data.waylandsink.get()); - - - if (!gst_element_link(data.appsrc.get(), data.vidconv.get())) - { - LOG_ERROR("Link Fail %s %s \n", GST_ELEMENT_NAME(data.appsrc.get()), GST_ELEMENT_NAME(data.vidconv.get())); - return QS_ERROR; - } - - if (!gst_element_link(data.vidconv.get(), data.videoscale.get())) - { - LOG_ERROR("Link Fail %s %s \n", GST_ELEMENT_NAME(data.vidconv.get()), GST_ELEMENT_NAME(data.videoscale.get())); - return QS_ERROR; - } - if (!gst_element_link(data.videoscale.get(), data.waylandsink.get())) - { - LOG_ERROR("Link Fail %s %s \n", GST_ELEMENT_NAME(data.videoconvert.get()), GST_ELEMENT_NAME(data.waylandsink.get())); - return QS_ERROR; - } - - return QS_SUCCESS; -} - - -int StreamEncode::Initialization(string output_type) -{ - if(0 == output_type.compare("wayland")) - { - return gst_wayland_pipeline_init(output_type); - } - else - { - LOG_ERROR("Stream Type does not configured"); - return QS_ERROR; - } - - return QS_SUCCESS; - -} - -int StreamEncode::Loop() -{ - /* Start playing */ - GstStateChangeReturn ret = gst_element_set_state(data.pipeline.get(), GST_STATE_PLAYING); - if (ret == GST_STATE_CHANGE_FAILURE) - { - LOG_ERROR("Unable to set the pipeline to the playing state.\n"); - return QS_ERROR; - } - - /* Listen to the bus */ - bus.reset(gst_element_get_bus(data.pipeline.get()), [](GstBus *obj) - { gst_object_unref(obj); }); - - GstMessageType msgType; - gchar *debug_info; - GError *err; - GstMessage *msg = nullptr; - do - { - msgType = static_cast(GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS); - msg = gst_bus_timed_pop_filtered(bus.get(), GST_CLOCK_TIME_NONE, msgType); - - /* Parse message */ - if (msg != NULL) - { - switch (GST_MESSAGE_TYPE(msg)) - { - case GST_MESSAGE_ERROR: - gst_message_parse_error(msg, &err, &debug_info); - LOG_ERROR("Error received from element %s: %s\n", GST_OBJECT_NAME(msg->src), err->message); - g_clear_error(&err); - g_free(debug_info); - terminate = TRUE; - break; - case GST_MESSAGE_EOS: - LOG_INFO("End-Of-Stream reached. 
Encoder\n"); - terminate = TRUE; - break; - case GST_MESSAGE_STATE_CHANGED: - /* We are only interested in state-changed messages from the pipeline */ - if (GST_MESSAGE_SRC(msg) == GST_OBJECT(data.pipeline.get())) - { - GstState old_state, new_state, pending_state; - gst_message_parse_state_changed(msg, &old_state, &new_state, &pending_state); - } - break; - default: - /* We should not reach here */ - LOG_ERROR("Unexpected message received.\n"); - break; - } - gst_message_unref(msg); - } - } while (!terminate); - /* Free resources */ - gst_element_set_state(data.pipeline.get(), GST_STATE_NULL); - return QS_SUCCESS; -} - -void StreamEncode::UnInitialization() -{ - LOG_DEBUG("UnInitialization \n"); -} - -void StreamEncode::PushData(uint8_t *pushData, int len) -{ - GstBuffer *buffer = gst_buffer_new_and_alloc(len); - gst_buffer_fill(buffer, 0, pushData, len); - static GstClockTime timestamp = 0; - GST_BUFFER_PTS(buffer) = timestamp; - GST_BUFFER_DURATION(buffer) = gst_util_uint64_scale_int(1, GST_SECOND, 30); - - timestamp += GST_BUFFER_DURATION(buffer); - GstFlowReturn ret = GST_FLOW_OK; - - g_signal_emit_by_name(GST_APP_SRC(data.appsrc.get()), "push-buffer", buffer, &ret); - gst_buffer_unref(buffer); - - if ((ret != GST_FLOW_OK)) - { - LOG_ERROR("Error with gst_app_src_push_buffer for view_pipeline, return = %d \n", ret); - } -} - -void StreamEncode::Stop() -{ - terminate = TRUE; - gst_app_src_end_of_stream(GST_APP_SRC(data.appsrc.get())); // send eos -} - -static void EncodeThreadFunc(shared_ptr encodePtr) -{ - int ret; - ret = encodePtr->Loop(); - if(ret == QS_ERROR) - LOG_ERROR("Failed to run the gstreamer pipeline\n"); - -} - -void EncodeController::CreateEncoder(std::shared_ptr sol_conf) -{ - int streamId = sol_conf->input_config->StreamNumber; - string outputType = sol_conf->output_type; - shared_ptr encodePtr = make_shared(); - encodePtr->Initialization(outputType); - encoders.insert(pair>(streamId, encodePtr)); - - std::thread encodeThread = std::thread(EncodeThreadFunc, encodePtr); - threads.emplace_back(move(encodeThread)); -} - -void EncodeController::EncodeFrame(int streamId, uint8_t *pushData, int len) -{ - encoders[streamId]->PushData(pushData, len); -} - -void EncodeController::EndOfStream(int streamId) -{ - encoders[streamId]->Stop(); -} - -void EncodeController::InterruptClose() -{ - map>::reverse_iterator iter; - - for (iter = encoders.rbegin(); iter != encoders.rend(); iter++) - { - iter->second->Stop(); - } - - for (size_t i = 0; i < threads.size(); i++) - { - threads[i].join(); - } -} - -void EncodeController::Stop() -{ - map>::reverse_iterator iter; - - for (iter = encoders.rbegin(); iter != encoders.rend(); iter++) - { - iter->second->Stop(); - } - - for (size_t i = 0; i < threads.size(); i++) - { - threads[i].join(); - } -} diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/SuperresolutionSnpe.cpp b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/SuperresolutionSnpe.cpp deleted file mode 100644 index 9b704daf..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/SuperresolutionSnpe.cpp +++ /dev/null @@ -1,175 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -#include -#include -#include "Configuration.h" -#include "SuperresolutionSnpe.h" - -namespace superressnpe { - - /** @brief Constructor - */ - SUPERRESSnpe::SUPERRESSnpe() : m_isInit(false),m_snperuntime(nullptr) - { - - } - - /** @brief Destructor - */ - SUPERRESSnpe::~SUPERRESSnpe() { - DeInitialize(); - } - - /** @brief To read model config and set output layers - * @param config model config parameters - * @return true if success;false otherwise - */ - bool SUPERRESSnpe::Initialize(const ObjectDetectionSnpeConfig& config) - { - m_snperuntime = std::move(std::unique_ptr(new snperuntime::SNPERuntime())); - - m_inputLayers = config.inputLayers; - m_outputLayers = config.outputLayers; - m_outputTensors = config.outputTensors; - - /** - * To set output layer from model config - */ - m_snperuntime->SetOutputLayers(m_outputLayers); - /** - * To initialize snperuntime - */ - if (!m_snperuntime->Initialize(config.model_path, config.runtime)) { - LOG_ERROR("Failed to Initialize snpe instance.\n"); - return false; - } - m_isInit = true; - return true; - } - - /** @brief To deallocate buffers - */ - bool SUPERRESSnpe::DeInitialize() - { - if (m_isInit) { - m_snperuntime->Deinitialize(); - m_snperuntime.reset(nullptr); - } - m_isInit = false; - return true; - } - - bool SUPERRESSnpe::IsInitialized() const - { - return m_isInit; - } - - /** @brief To preprocess input image - * @param input_image Input image for inference - * @return true if succuess; false otherwise - */ - bool SUPERRESSnpe::PreProcessInput(const cv::Mat& input_image,string model_name) - { - if (input_image.empty()) - { - LOG_ERROR("Invalid image!\n"); - return false; - } - - auto inputShape = m_snperuntime->GetInputShape(m_inputLayers[0]); - size_t model_h = inputShape[1]; - size_t model_w = inputShape[2]; - - if (m_snperuntime->GetInputTensor(m_inputLayers[0]) == nullptr) - { - LOG_ERROR("Empty input tensor\n"); - return false; - } - - cv::Mat image; - cv::resize(input_image,image,cv::Size(model_h,model_w),cv::INTER_CUBIC); - cv::cvtColor(image, image, cv::COLOR_BGR2RGB); - cv::Mat input(model_h, model_w, CV_32FC3, m_snperuntime->GetInputTensor(m_inputLayers[0])); - if(model_name.compare("ESRGAN") == 0) - { - image.convertTo(input, CV_32FC3, 1.0); - } - else - { - image.convertTo(input, CV_32FC3, 1.0/255.0); - } - return true; - } - - /** @brief To preprocess,execute and postprocess - * @param input_image Input image for inference - * @param output_image Inference output image - * @param model_name To identify model for specific post-processing - * @return true if success; false otherwise - */ - bool SUPERRESSnpe::Detect(cv::Mat input_image,cv::Mat& output_image, string model_name) - { - /** - * Preprocessing image - */ - if(PreProcessInput(input_image, model_name) != true) - { - LOG_ERROR("PreProcess failed\n"); - return false; - } - /** - * Inferencing model on target - */ - if (!m_snperuntime->execute()) { - LOG_ERROR("SNPERuntime execute failed."); - return false; - } - /** - * Postprocessing - */ - if(PostProcess(output_image, model_name) != true) - { - LOG_ERROR("PostProcess failed\n"); - return false; - } - return true; - } - - /** @brief Superres postprocess - * @param output_image upscaled image - * @param model_name To identify model for specific post-processing - */ - bool SUPERRESSnpe::PostProcess(cv::Mat& output_image,string model_name) - { - auto 
outputShape = m_snperuntime->GetOutputShape(m_outputTensors[0]); - float *output = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - - if(output == nullptr) - { - return false; - } - int height = outputShape[1]; - int width = outputShape[2]; - - output_image = cv::Mat(cv::Size(width,height), CV_32FC3, output); - if(model_name.compare("ESRGAN") == 0) - { - output_image.convertTo(output_image, CV_8UC3, 1.0); - } - else - { - output_image.convertTo(output_image, CV_8UC3, 255.0); - } - cv::cvtColor(output_image, output_image, cv::COLOR_RGB2BGR); - return true; - } - -} // namespace superressnpe diff --git a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/main.cpp b/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/main.cpp deleted file mode 100644 index 5392701c..00000000 --- a/ai-solutions/QRB5165-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/src/main.cpp +++ /dev/null @@ -1,334 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include "ModelInference.h" -#include "Configuration.h" -#include "StreamDecode.h" -#include "StreamEncode.h" -#include "DecodeQueue.h" -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace std; - - -/** - * To decode frames from gstreamer -*/ -shared_ptr gDecodeQueue; - -/** - * To check for gstreamer exit -*/ -bool gExit = false; - -/** - * To encode frames for preview/file -*/ -shared_ptr encoderCtrl; - -/** - * To create object for frame capture -*/ -shared_ptr captureCtrl; - - - -/** @brief To intialize and configure the runtime based on the solution - * @param sol_conf contains information about the solution -*/ -void Inference_Image(void *sol_conf, string inputimage, string outputimage) -{ - LOG_DEBUG("InferenceThread \n"); - - SolutionConfiguration *solution_config = (SolutionConfiguration *)sol_conf; - /** - * TO initialize layers and buffers based on model type - */ - shared_ptr shInference; - shInference = std::make_shared(solution_config->model_config->model_type); - - shInference->Initialization(*solution_config->model_config.get()); - /** - * Run the loop until stream ends or interrupt from user - */ - shared_ptr item; - - - /** - * start inferencing on the image buffer - */ - auto start1 = chrono::steady_clock::now(); - cv::Mat input = cv::imread(inputimage, cv::IMREAD_COLOR); - if(input.empty()) - { - LOG_ERROR("Invalid image!\n"); - return; - } - - LOG_ERROR("model name = %s\n",solution_config->model_name.c_str()); - cv::Mat output_image; - if(shInference->Inference(input,output_image,solution_config->model_name) == true) - { - auto end1 = chrono::steady_clock::now(); - auto costTime1 = chrono::duration_cast(end1 - start1).count(); - LOG_INFO("Elapsed inference time in milliseconds: %ld ms\n",costTime1); - cv::imwrite(outputimage,output_image); - } - else - { - LOG_ERROR("Model Inference failed\n"); - } - shInference->UnInitialization(); -} - -void Inference_Camera(void *sol_conf) -{ - LOG_DEBUG("InferenceThread \n"); - - SolutionConfiguration *solution_config = (SolutionConfiguration *)sol_conf; - /** - * TO initialize layers and buffers based on model type - */ - shared_ptr 
shInference; - shInference = std::make_shared(solution_config->model_config->model_type); - - shInference->Initialization(*solution_config->model_config.get()); - - int ret = 0; - auto start = chrono::steady_clock::now(); - uint32_t frames = 0; - /** - * Run the loop until stream ends or interrupt from user - */ - do - { - shared_ptr item; - /** - * To retrieve gstreamer buffer from queue - */ - ret = gDecodeQueue->Dequeue(item, 300); - /** - * Check if Dequeue is successful - */ - if (ret == 0) - { - frames += 1; - auto start1 = chrono::steady_clock::now(); - /** - * start inferencing on the image buffer - */ - cv::Mat image(cv::Size(item->Width, item->Height), CV_8UC3, item->ImageBuffer.get(), cv::Mat::AUTO_STEP); - if(image.empty()) - { - LOG_ERROR("Invalid image!\n"); - return; - } - cv::Mat output_image; - shInference->Inference(image,output_image,solution_config->model_name); - auto end1 = chrono::steady_clock::now(); - auto costTime1 = chrono::duration_cast(end1 - start1).count(); - LOG_INFO("Elapsed inference time in milliseconds: %ld ms\n",costTime1); - - cv::resize(output_image,output_image,Size(1280,720)); - int size = output_image.total() * output_image.elemSize(); - /** - * To display on monitor - */ - encoderCtrl->EncodeFrame(item->StreamId, output_image.data, size); - } - /** - * If there are no items in the queue - */ - else - { - if (ret != 1) - { - LOG_ERROR("Error ret= %d\n", ret); - } - continue; - } - - } while (!gExit); - /** - * To infer on the remaining pending items if exited before completion - */ - auto remains = gDecodeQueue->GetRemainItems(); - LOG_INFO("Remain Items= %lu\n", remains.size()); - for (auto item : remains) - { - frames += 1; - cv::Mat image(cv::Size(item->Width, item->Height), CV_8UC3, item->ImageBuffer.get(), cv::Mat::AUTO_STEP); - cv::Mat output_image; - shInference->Inference(image,output_image,solution_config->model_name); - } - /** - * To deallocate the bufferes and runtime - */ - shInference->UnInitialization(); - - auto end = chrono::steady_clock::now(); - auto costTime = chrono::duration_cast(end - start).count(); - - LOG_INFO("Elapsed time in milliseconds: %ld ms \t Received Frames: %d \t Through rate: %ld \n", - costTime, frames, (frames * 1000)/costTime); -} - -/** @brief Execution starts from here - * @param argc for total argument count - * @param argv arguments to be passed -*/ - -int main(int argc, char **argv) -{ - /** - * To store config file name passed in argument - */ - const char* inputFile=NULL; - string inputimage,outputimage; - int opt = 0; - /** - * Check if 'h' or 'c' passed in argument - */ - while ((opt = getopt(argc, argv, ":hc:i:o:")) != EOF) - { - switch (opt) - { - case 'h': std::cout - << "\nDESCRIPTION:\n" - << "------------\n" - << "Example application demonstrating how to run the use case\n" - << "using the SNPE C++ API.\n" - << "REQUIRED ARGUMENTS:\n" - << "-------------------\n" - << " -c Path to the config json file.\n" - << "Example: ai-solutions -c data/config.json -i image_path -o Output_path\n"; - break; - case 'c': - inputFile = optarg; - LOG_INFO("Path to config file = %s \n", inputFile); - break; - case 'i': - inputimage = optarg; - LOG_INFO(" input image = %s \n",inputimage.c_str()); - break; - case 'o': - outputimage = optarg; - LOG_INFO(" output image = %s \n",outputimage.c_str()); - break; - - default: - LOG_INFO("Invalid parameter specified. 
Please run sample with the -h flag to see required arguments\n"); - exit(0); - }; - } - /** - * To parse input,model and solution config from inputFile - */ - Configuration::getInstance().LoadConfiguration(inputFile); - - /** - * To access enabled soultion model - */ - vector selected_model; - /** - * To access enabled solution configuration - */ - vector solutions_config; - /** - * To intialize each enabled solution - */ - - bool camera = false; - for (auto i : Configuration::getInstance().solutionsconfig) { - /** - * To access solution configuration - */ - std::shared_ptr config = i.second; - /** - * To check if solution is enabled - */ - if (config->Enable == true) { - /** - * To access the input configuration - */ - config->input_config = Configuration::getInstance().inputconfigs[config->input_config_name]; - if (config->input_config == NULL) { - LOG_ERROR("NULL Input configuration for selected solution name = %s \n", config->solution_name.c_str()); - exit(1); - } - config->input_config->StreamNumber = i.first; - /** - * To access the model configuration - */ - config->model_config = Configuration::getInstance().modelsconfig[config->model_name]; - if (config->model_config == NULL) { - LOG_ERROR("NULL Model configuration for selected solution name = %s \n", config->solution_name.c_str()); - exit(1); - } - /** - * To store the enabled solution configuration - */ - solutions_config.emplace_back(*config); - /** - * Append the selected models - */ - selected_model.push_back(config->model_name); - - if(config->input_config_name.compare("camera") == 0) - { - camera = true; - const int MAX_QUEUE_DEPTH = 1; - gDecodeQueue = make_shared(MAX_QUEUE_DEPTH); - encoderCtrl = make_shared(); - captureCtrl = make_shared(); - /** - * Intialize gstreamer pipeline to capture - */ - captureCtrl->CreateCapture(config->input_config, gDecodeQueue); - /** - * Intialze encoder to display or save frame - */ - encoderCtrl->CreateEncoder(config); - } - } - } - /** - * Check if any solution is enabled - */ - if (selected_model.size() == 0) { - LOG_ERROR("Solution not enabled, Enable the desired solution in config.json file\n"); - exit(1); - } - if(camera == true) - { - Inference_Camera((void *)(&solutions_config[0]) ); - gDecodeQueue->Unlock(); - captureCtrl->StopAll(); - captureCtrl->StopAll(); - } - else - { - if(inputimage.empty() || outputimage.empty()) - { - LOG_ERROR("Example: ai-solutions -c data/config.json -i image_path -o output_image\n"); - return 0; - } - Inference_Image((void *)(&solutions_config[0]), inputimage, outputimage ); - } - - - return 0; -} diff --git a/ai-solutions/Readme.md b/ai-solutions/Readme.md deleted file mode 100644 index 93d0e2d2..00000000 --- a/ai-solutions/Readme.md +++ /dev/null @@ -1,73 +0,0 @@ -# Qualcomm AI Stack Models - -## Introduction - AI Solutions - -AI solutions directory contains end-to-end AI solutions across various Qualcomm platforms. - -Workflow to for AI solutions across platforms is given below. - -

- - -- Use "models-for-solutions" in this repository, and prepare models -- Use the prepared models to prepare end-to-end AI solutions - -Summary of AI Solutions across platforms is given in below table - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Use Case | Python | Android | WoS | RB5-LU | RB5-LE | QCS8550-LE |
| --- | --- | --- | --- | --- | --- | --- |
| Image Super Resolution | Notebooks | APK | WoS App | RB5 APP | CLI | CLI |
| Low-Light Image Enhancement | Notebooks | APK | WoS App | RB5 APP | CLI | CLI |
| Object Detection | Notebooks | APK | WoS App | RB5 APP | CLI | CLI |
| Image Segmentation | Notebooks | APK | WoS App | RB5 APP | CLI | CLI |
| Question-Answering | Notebooks | APK | WoS App | Not Applicable | Not Applicable | Not Applicable |
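
For reference, the CLI solutions listed above are driven by the main.cpp removed earlier in this diff, which runs a simple dequeue–infer–encode loop over camera frames. The sketch below is a minimal, self-contained restatement of that loop; the `Frame`, `FrameQueue`, `InferFn` and `DisplayFn` types are hypothetical stand-ins for the deleted `DecodeQueue`, `ModelInference` and `EncodeController` classes, and the 1280x720 resize mirrors the caps set on the wayland display pipeline.

```cpp
// Minimal sketch of the dequeue -> infer -> encode loop implemented by the
// deleted main.cpp (Inference_Camera). Frame, FrameQueue, InferFn and
// DisplayFn are simplified stand-ins for the project's DecodeQueue,
// ModelInference and EncodeController interfaces, not their real APIs.
#include <atomic>
#include <chrono>
#include <cstdio>
#include <functional>
#include <opencv2/opencv.hpp>

struct Frame {
    int stream_id;
    cv::Mat image;   // BGR frame handed over by the capture pipeline
};

using FrameQueue = std::function<bool(Frame&)>;                    // pops one frame; false on timeout
using InferFn    = std::function<bool(const cv::Mat&, cv::Mat&)>;  // preprocess + execute + postprocess
using DisplayFn  = std::function<void(int, const cv::Mat&)>;       // pushes a frame to the display sink

void RunCameraLoop(FrameQueue dequeue, InferFn infer, DisplayFn display,
                   std::atomic<bool>& exit_requested)
{
    while (!exit_requested.load()) {
        Frame frame;
        if (!dequeue(frame))
            continue;  // queue empty or capture stopped; re-check the exit flag

        auto t0 = std::chrono::steady_clock::now();
        cv::Mat output;
        if (!infer(frame.image, output))
            continue;  // skip frames the model rejects
        auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                      std::chrono::steady_clock::now() - t0).count();
        std::printf("Elapsed inference time: %lld ms\n", static_cast<long long>(ms));

        // The deleted StreamEncode pipeline fixes its appsrc caps to
        // 1280x720 BGR, so the result is scaled to match before display.
        cv::resize(output, output, cv::Size(1280, 720));
        display(frame.stream_id, output);
    }
}
```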
diff --git a/ai-solutions/android/01-ImageSuperResolution/README.md b/ai-solutions/android/01-ImageSuperResolution/README.md deleted file mode 100644 index bfa626cf..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/README.md +++ /dev/null @@ -1,133 +0,0 @@ -# Table of Contents -- [Table of Contents](#table-of-contents) -- [Introduction](#introduction) - + [About "Image Super Resolution"](#about--image-super-resolution-) - + [Pre-Requisites](#pre-requisites) -- [Model Selection and DLC conversion](#model-selection-and-dlc-conversion) - + [Model Overview](#model-overview) - + [Steps to convert model to DLC](#steps-to-convert-model-to-dlc) -- [Source Overview](#source-overview) - + [Source Organization](#source-organization) - + [Code Implementation](#code-implementation) -- [Build APK file with Android Studio](#build-apk-file-with-android-studio) -- [Results](#results) -# Introduction - -### About "Image Super Resolution" - -- Current project is an sample Android application for AI-based Image Super Resolution using [Qualcomm® Neural Processing SDK for AI](https://developer.qualcomm.com/sites/default/files/docs/snpe/index.html) framework. -- We have used 5 Models in this Solution -- This sample enhances input image resolution by 4x along width, and height. If input resolution is wxh, output resolution will be 4*w x 4*h -- DLC models take only fixed input size. -- If users intend to use a different model in this demo framework, **image pre/post processing will be needed**. -- Current pre/post processing is specific to the models used. - -### Pre-Requisites - -- Qualcomm® Neural Processing SDK for AI setup should be completed by following the guide here : https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html -- Android Studio to import sample project -- Android NDK to build native code -- Install opencv using ```pip install opencv-python``` - -# Model Selection and DLC conversion - -### Model Overview - -Please refer to Models repository for model overview - Add public link - -### Steps to convert model to DLC -Please refer to Models repository for model overview - Add public link - -# Source Overview - -### Source Organization - --

demo: Contains demo video, GIF -- superresolution: Contains source files in standard Android app format. -- app\src\main\assets : Contains Model binary DLC -- superresolution\src\main\java\com\qcom\imagesuperres : Application java source code -- superresolution\src\main\cpp : Application C++(native) source code -- sdk : Contains openCV sdk (Will be generated using _ResolveDependencies.sh_ ) - -### Code Implementation - -- Model Initialization - - `public boolean loadingMODELS(char runtime_var, String dlc_name)` - - runtime_var: Possible options are D, G, C. - - dlc_name: Name of the DLC. - -- Running Model - - - Following is the Java Function, that handles model execution. This function iternally calls sub functions to handle pre-processing and post-processing - - `inferSNPE(inputMat.getNativeObjAddr(), outputMat.getNativeObjAddr())` - - inputMat is opencv Matrix that contains input image. - - outputMat is the destination for the output image - - - C++ function that handles preprocessing for the input image. - - `preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) ` - - - C++ function that handles postprocessing after we receive input from model - - `postprocess(cv::Mat &outputimg)` - - - SNPE API function that runs the network and give result - - `snpe->execute(inputMap, outputMap);` - - -# Build APK file with Android Studio - -1. Clone this repo. -2. Generate DLC using the steps mentioned. -3. Run below script, from the directory where it is present, to resolve dependencies of this project. - - `bash resolveDependencies.sh` - - * This script will download opencv and paste to sdk directory, to enable OpenCv for android Java. - * This script will copy snpe-release.aar file from $SNPE_ROOT to "snpe-release" directory in Android project. - - **NOTE - If you are using SNPE version 2.11 or greater, please change following line in resolveDependencies.sh.** - ``` - From: cp $SNPE_ROOT/android/snpe-release.aar snpe-release - To : cp $SNPE_ROOT/lib/android/snpe-release.aar snpe-release - ``` - -4. Import folder VisionSolution2-ImageSuperResolution as a project in Android Studio -5. Do gradle sync -6. Compile the project. -7. Output APK file should get generated : superresolution-debug.apk -8. Prepare the Qualcomm Innovators development kit(QIDK) to install the application (Do not run APK on emulator) -9. Install and test application : superresolution-debug.apk - -```java -adb install -r -t superresolution-debug.apk -``` - -10. launch the application - -Following is the basic "Image Super Resolution" Android App - -1. Select one of the models -2. Select one of the given images from the drop-down list -3. Select the run-time to run the model (CPU, GPU or DSP) -4. Observe the result of model on screen -5. Also note the performance indicator for the particular run-time in mSec - -Same results for the application are shown below - -# Results - -- Demo video, and performance details as seen below: - -![Demo video.](demo/VisionSolution2-ImageSuperResolution.gif) - - - - - -###### *Qualcomm Neural Processing SDK and Snapdragon are products of Qualcomm Technologies, Inc. and/or its subsidiaries. 
AIMET Model Zoo is a product of Qualcomm Innovation Center, Inc.* diff --git a/ai-solutions/android/01-ImageSuperResolution/build.gradle b/ai-solutions/android/01-ImageSuperResolution/build.gradle deleted file mode 100644 index 292efdaf..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/build.gradle +++ /dev/null @@ -1,26 +0,0 @@ -// Top-level build file where you can add configuration options common to all sub-projects/modules. -buildscript { - ext.kotlin_version = '1.6.10' - repositories { - google() - jcenter() - } - dependencies { - classpath 'com.android.tools.build:gradle:7.2.1' - classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version" - // NOTE: Do not place your application dependencies here; they belong - // in the individual module build.gradle files - } -} - -allprojects { - repositories { - google() - jcenter() - } - -} - -task clean(type: Delete) { - delete rootProject.buildDir -} \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/demo/VisionSolution2-ImageSuperResolution.gif b/ai-solutions/android/01-ImageSuperResolution/demo/VisionSolution2-ImageSuperResolution.gif deleted file mode 100644 index bca3a037..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/demo/VisionSolution2-ImageSuperResolution.gif and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/demo/VisionSolution2-ImageSuperResolution.mp4 b/ai-solutions/android/01-ImageSuperResolution/demo/VisionSolution2-ImageSuperResolution.mp4 deleted file mode 100644 index 9315d0bc..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/demo/VisionSolution2-ImageSuperResolution.mp4 and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/gradle.properties b/ai-solutions/android/01-ImageSuperResolution/gradle.properties deleted file mode 100644 index be1bd26a..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/gradle.properties +++ /dev/null @@ -1,19 +0,0 @@ -# Project-wide Gradle settings. -# IDE (e.g. Android Studio) users: -# Gradle settings configured through the IDE *will override* -# any settings specified in this file. -# For more details on how to configure your build environment visit -# http://www.gradle.org/docs/current/userguide/build_environment.html -# Specifies the JVM arguments used for the daemon process. -# The setting is particularly useful for tweaking memory settings. -org.gradle.jvmargs=-Xmx2048m -# When configured, Gradle will run in incubating parallel mode. -# This option should only be used with decoupled projects. 
More details, visit -# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects -# org.gradle.parallel=true -# AndroidX package structure to make it clearer which packages are bundled with the -# Android operating system, and which are packaged with your app"s APK -# https://developer.android.com/topic/libraries/support-library/androidx-rn -android.useAndroidX=true -# Automatically convert third-party libraries to use AndroidX -android.enableJetifier=true diff --git a/ai-solutions/android/01-ImageSuperResolution/gradle/wrapper/gradle-wrapper.properties b/ai-solutions/android/01-ImageSuperResolution/gradle/wrapper/gradle-wrapper.properties deleted file mode 100644 index cb6bd4ea..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/gradle/wrapper/gradle-wrapper.properties +++ /dev/null @@ -1,6 +0,0 @@ -#Fri Sep 09 10:14:39 IST 2022 -distributionBase=GRADLE_USER_HOME -distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-bin.zip -distributionPath=wrapper/dists -zipStorePath=wrapper/dists -zipStoreBase=GRADLE_USER_HOME diff --git a/ai-solutions/android/01-ImageSuperResolution/gradlew b/ai-solutions/android/01-ImageSuperResolution/gradlew deleted file mode 100644 index 4e395898..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/gradlew +++ /dev/null @@ -1,185 +0,0 @@ -#!/usr/bin/env sh - -# -# Copyright 2015 the original author or authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -############################################################################## -## -## Gradle start up script for UN*X -## -############################################################################## - -# Attempt to set APP_HOME -# Resolve links: $0 may be a link -PRG="$0" -# Need this for relative symlinks. -while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG=`dirname "$PRG"`"/$link" - fi -done -SAVED="`pwd`" -cd "`dirname \"$PRG\"`/" >/dev/null -APP_HOME="`pwd -P`" -cd "$SAVED" >/dev/null - -APP_NAME="Gradle" -APP_BASE_NAME=`basename "$0"` - -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' - -# Use the maximum available, or set MAX_FD != -1 to use that value. -MAX_FD="maximum" - -warn () { - echo "$*" -} - -die () { - echo - echo "$*" - echo - exit 1 -} - -# OS specific support (must be 'true' or 'false'). -cygwin=false -msys=false -darwin=false -nonstop=false -case "`uname`" in - CYGWIN* ) - cygwin=true - ;; - Darwin* ) - darwin=true - ;; - MSYS* | MINGW* ) - msys=true - ;; - NONSTOP* ) - nonstop=true - ;; -esac - -CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar - - -# Determine the Java command to use to start the JVM. 
-if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" - else - JAVACMD="$JAVA_HOME/bin/java" - fi - if [ ! -x "$JAVACMD" ] ; then - die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." - fi -else - JAVACMD="java" - which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." -fi - -# Increase the maximum file descriptors if we can. -if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then - MAX_FD_LIMIT=`ulimit -H -n` - if [ $? -eq 0 ] ; then - if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then - MAX_FD="$MAX_FD_LIMIT" - fi - ulimit -n $MAX_FD - if [ $? -ne 0 ] ; then - warn "Could not set maximum file descriptor limit: $MAX_FD" - fi - else - warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" - fi -fi - -# For Darwin, add options to specify how the application appears in the dock -if $darwin; then - GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" -fi - -# For Cygwin or MSYS, switch paths to Windows format before running java -if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then - APP_HOME=`cygpath --path --mixed "$APP_HOME"` - CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` - - JAVACMD=`cygpath --unix "$JAVACMD"` - - # We build the pattern for arguments to be converted via cygpath - ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` - SEP="" - for dir in $ROOTDIRSRAW ; do - ROOTDIRS="$ROOTDIRS$SEP$dir" - SEP="|" - done - OURCYGPATTERN="(^($ROOTDIRS))" - # Add a user-defined pattern to the cygpath arguments - if [ "$GRADLE_CYGPATTERN" != "" ] ; then - OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" - fi - # Now convert the arguments - kludge to limit ourselves to /bin/sh - i=0 - for arg in "$@" ; do - CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` - CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option - - if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition - eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` - else - eval `echo args$i`="\"$arg\"" - fi - i=`expr $i + 1` - done - case $i in - 0) set -- ;; - 1) set -- "$args0" ;; - 2) set -- "$args0" "$args1" ;; - 3) set -- "$args0" "$args1" "$args2" ;; - 4) set -- "$args0" "$args1" "$args2" "$args3" ;; - 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; - 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; - 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; - 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; - 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; - esac -fi - -# Escape application args -save () { - for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done - echo " " -} -APP_ARGS=`save "$@"` - -# Collect all arguments for the java command, following the shell quoting and substitution rules -eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" - -exec "$JAVACMD" "$@" diff --git 
a/ai-solutions/android/01-ImageSuperResolution/gradlew.bat b/ai-solutions/android/01-ImageSuperResolution/gradlew.bat deleted file mode 100644 index ac1b06f9..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/gradlew.bat +++ /dev/null @@ -1,89 +0,0 @@ -@rem -@rem Copyright 2015 the original author or authors. -@rem -@rem Licensed under the Apache License, Version 2.0 (the "License"); -@rem you may not use this file except in compliance with the License. -@rem You may obtain a copy of the License at -@rem -@rem https://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -@rem See the License for the specific language governing permissions and -@rem limitations under the License. -@rem - -@if "%DEBUG%" == "" @echo off -@rem ########################################################################## -@rem -@rem Gradle startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME% - -@rem Resolve any "." and ".." in APP_HOME to make it shorter. -for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi - -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto execute - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto execute - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar - - -@rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* - -:end -@rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd - -:fail -rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega diff --git a/ai-solutions/android/01-ImageSuperResolution/resolveDependencies.sh b/ai-solutions/android/01-ImageSuperResolution/resolveDependencies.sh deleted file mode 100644 index ac881bd4..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/resolveDependencies.sh +++ /dev/null @@ -1,36 +0,0 @@ -# -*- mode: shell script -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= - -#RESOLVING DEPENDENCIES - -# steps to copy opencv -wget https://sourceforge.net/projects/opencvlibrary/files/4.5.5/opencv-4.5.5-android-sdk.zip/download -unzip download -rm download -mkdir sdk -mv OpenCV-android-sdk/sdk/* sdk -rm -r OpenCV-android-sdk - -#Steps to paste files in JNI -##copying snpe-release.aar file -## Change $SNPE_ROOT/lib/android/snpe-release.aar to $SNPE_ROOT/android/snpe-release.aar for SNPE<=2.10 -mkdir snpe-release -cp $SNPE_ROOT/lib/android/snpe-release.aar snpe-release -unzip -o snpe-release/snpe-release.aar -d snpe-release/snpe-release - -mkdir -p app/src/main/jniLibs/arm64-v8a - -##writing jniLibs -cp snpe-release/snpe-release/jni/arm64-v8a/libc++_shared.so app/src/main/jniLibs/arm64-v8a/ -cp snpe-release/snpe-release/jni/arm64-v8a/libSNPE.so app/src/main/jniLibs/arm64-v8a/ -cp snpe-release/snpe-release/jni/arm64-v8a/libsnpe-android.so app/src/main/jniLibs/arm64-v8a/ -cp snpe-release/snpe-release/jni/arm64-v8a/libSnpeHtpPrepare.so app/src/main/jniLibs/arm64-v8a/ -cp snpe-release/snpe-release/jni/arm64-v8a/libSnpeHtpV73Skel.so app/src/main/jniLibs/arm64-v8a/ -cp snpe-release/snpe-release/jni/arm64-v8a/libSnpeHtpV73Stub.so app/src/main/jniLibs/arm64-v8a/ \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/sdk/ReadMe.txt b/ai-solutions/android/01-ImageSuperResolution/sdk/ReadMe.txt deleted file mode 100644 index 885d6d66..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/sdk/ReadMe.txt +++ /dev/null @@ -1,2 +0,0 @@ -OpenCV SDK needs to be placed here. -Please refer to : resolveDependencies.sh \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/settings.gradle b/ai-solutions/android/01-ImageSuperResolution/settings.gradle deleted file mode 100644 index ad22b161..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/settings.gradle +++ /dev/null @@ -1,3 +0,0 @@ -include ':superresolution' -rootProject.name = "superresolution" -include ':sdk' diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/.gitignore b/ai-solutions/android/01-ImageSuperResolution/superresolution/.gitignore deleted file mode 100644 index 42afabfd..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/build \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/build.gradle b/ai-solutions/android/01-ImageSuperResolution/superresolution/build.gradle deleted file mode 100644 index d4b426be..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/build.gradle +++ /dev/null @@ -1,66 +0,0 @@ -apply plugin: 'com.android.application' - - -android { - compileSdkVersion 33 - - defaultConfig { - applicationId "com.qcom.aistack_superres" - minSdkVersion 24 - targetSdkVersion 30 - versionCode 1 - versionName "1.0" - - testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner" - externalNativeBuild { - cmake { - cppFlags "-std=c++11 -frtti -fexceptions" - arguments "-DOpenCV_DIR=" + project(':sdk').projectDir + "/native/jni", - "-DANDROID_TOOLCHAIN=clang" - targets "ImageSuperResolution" - } - ndk { - abiFilters 'arm64-v8a' - } - } - } - - packagingOptions { - pickFirst 'lib/x86/libc++_shared.so' - pickFirst 'lib/x86_64/libc++_shared.so' - pickFirst 'lib/arm64-v8a/libc++_shared.so' - pickFirst 
'lib/armeabi-v7a/libc++_shared.so' - } - - buildTypes { - release { - minifyEnabled false - proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro' - } - } - - compileOptions { - sourceCompatibility JavaVersion.VERSION_1_8 - targetCompatibility JavaVersion.VERSION_1_8 - } - ndkVersion '21.4.7075529' - externalNativeBuild { - cmake { - path file('src/main/cpp/CMakeLists.txt') - } - } -} - -dependencies { - implementation fileTree(dir: 'libs', include: ['*.jar']) - implementation project(path: ':sdk') - implementation 'androidx.appcompat:appcompat:1.2.0' - testImplementation 'junit:junit:4.12' - androidTestImplementation 'androidx.test.ext:junit:1.1.1' - androidTestImplementation 'androidx.test.espresso:espresso-core:3.2.0' - implementation 'com.google.android.material:material:1.2.1' - implementation 'androidx.constraintlayout:constraintlayout:2.0.4' - androidTestImplementation 'androidx.test.ext:junit:1.1.2' - androidTestImplementation 'com.android.support.test:rules:1.0.2' - androidTestImplementation 'androidx.test.espresso:espresso-core:3.3.0' -} \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/proguard-rules.pro b/ai-solutions/android/01-ImageSuperResolution/superresolution/proguard-rules.pro deleted file mode 100644 index 64b4a059..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/proguard-rules.pro +++ /dev/null @@ -1,21 +0,0 @@ -# Add project specific ProGuard rules here. -# You can control the set of applied configuration files using the -# proguardFiles setting in build.gradle. -# -# For more details, see -# http://developer.android.com/guide/developing/tools/proguard.html - -# If your project uses WebView with JS, uncomment the following -# and specify the fully qualified class name to the JavaScript interface -# class: -#-keepclassmembers class fqcn.of.javascript.interface.for.webview { -# public *; -#} - -# Uncomment this to preserve the line number information for -# debugging stack traces. -#-keepattributes SourceFile,LineNumberTable - -# If you keep the line number information, uncomment this to -# hide the original source file name. 
-#-renamesourcefileattribute SourceFile \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/AndroidManifest.xml b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/AndroidManifest.xml deleted file mode 100644 index 02182513..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/AndroidManifest.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/assets/ReadMe.txt b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/assets/ReadMe.txt deleted file mode 100644 index aca5c44b..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/assets/ReadMe.txt +++ /dev/null @@ -1 +0,0 @@ -Generate model DLC and place here \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/assets/Sample1.jpg b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/assets/Sample1.jpg deleted file mode 100644 index f0ef1300..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/assets/Sample1.jpg and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/assets/Sample2.jpg b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/assets/Sample2.jpg deleted file mode 100644 index 962b02c0..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/assets/Sample2.jpg and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/CMakeLists.txt b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/CMakeLists.txt deleted file mode 100644 index b8348112..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/CMakeLists.txt +++ /dev/null @@ -1,74 +0,0 @@ - -# For more information about using CMake with Android Studio, read the -# documentation: https://d.android.com/studio/projects/add-native-code.html - -# Sets the minimum version of CMake required to build the native library. - -cmake_minimum_required(VERSION 3.18.1) - -# Declares and names the project. - -project("ImageSuperResolution") - -# Creates and names a library, sets it as either STATIC -# or SHARED, and provides the relative paths to its source code. -# You can define multiple libraries, and CMake builds them for you. -# Gradle automatically packages shared libraries with your APK. - -###OPENCV -#find_package(OpenCV REQUIRED) ##FAILED, cannot find libcpufeatures.so -#set(OpenCV_STATIC on) -#set(OpenCV_DIR C:/Users/shubgoya/Desktop/SNPEworkspace/github_workspace/HRNET_posenet/opencv45/native/jni) -find_package(OpenCV REQUIRED) -#INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS}) - - -###INCLUDE_DIRECTORIES -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/inc/) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/inc/zdl) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/inc/hpp) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}) - - -add_library( # Sets the name of the library. - ImageSuperResolution - - # Sets the library as a shared library. - SHARED - - # Provides a relative path to your source file(s). 
- inference.cpp inference_helper.cpp ImageSuperResolution.cpp Model.h - ESRGAN.cpp ESRGAN.h - SESR.cpp SESR.h - SRGAN.cpp SRGAN.h - XLSR.cpp XLSR.h - QuickSRNetLarge.cpp QuickSRNetLarge.h - QuickSRNetMedium.cpp QuickSRNetMedium.h - QuickSRNetSmall.cpp QuickSRNetSmall.h - ) - -# Searches for a specified prebuilt library and stores the path as a -# variable. Because CMake includes system libraries in the search path by -# default, you only need to specify the name of the public NDK library -# you want to add. CMake verifies that the library exists before -# completing its build. - -find_library( # Sets the name of the path variable. - log-lib - - # Specifies the name of the NDK library that - # you want CMake to locate. - log ) - -# Specifies libraries CMake should link to your target library. You -# can link multiple libraries, such as libraries you define in this -# build script, prebuilt third-party libraries, or system libraries. - -target_link_libraries( # Specifies the target library. - ImageSuperResolution - - # Links the target library to the log library - # included in the NDK. - ${CMAKE_CURRENT_SOURCE_DIR}/../jniLibs/arm64-v8a/libSNPE.so - - ${log-lib} ${OpenCV_LIBS}) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/ESRGAN.cpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/ESRGAN.cpp deleted file mode 100644 index bb10fe8e..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/ESRGAN.cpp +++ /dev/null @@ -1,45 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shubgoya on 8/2/2023. -// - -#include "ESRGAN.h" - -void ESRGAN::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) -{ - LOGI("ESRGAN_PREPROCESS is called"); - cv::Mat resized_img; - - //dims is of size [batchsize(1), height, width, channels(3)] - cv::resize(img,resized_img,cv::Size(dims[2],dims[1]),cv::INTER_LINEAR); //Not needed for this case - LOGI("inputimageSIZE width%d::%d height%d::%d",dims[1],resized_img.cols, dims[2],resized_img.rows); - - float * accumulator = reinterpret_cast (&dest_buffer[0]); - - //opencv read in BGRA by default, converting to BGR - cvtColor(resized_img, resized_img, CV_BGRA2RGB); - LOGI("num of channels: %d",resized_img.channels()); - int lim = resized_img.rows*resized_img.cols*3; - for(int idx = 0; idx &dest_buffer, cv::Mat &img, std::vector dims); - void postprocess(cv::Mat &outputimg); - void msg(); - -}; - - -#endif //SUPERRESOLUTION_ESRGAN_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/ImageSuperResolution.cpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/ImageSuperResolution.cpp deleted file mode 100644 index 30d7d7a8..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/ImageSuperResolution.cpp +++ /dev/null @@ -1,200 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -#include -#include -#include -#include -#include - -#include "hpp/inference.h" -#include "hpp/Util.hpp" - -#include "zdl/SNPE/SNPE.hpp" -#include "zdl/SNPE/SNPEFactory.hpp" -#include "ESRGAN.h" -#include "SESR.h" -#include "SRGAN.h" -#include "QuickSRNetLarge.h" -#include "QuickSRNetSmall.h" -#include "QuickSRNetMedium.h" -#include "XLSR.h" - -#include - -using namespace cv; - -Model *modelobj; - -extern "C" JNIEXPORT jstring JNICALL -Java_com_qcom_aistack_1superres_SNPEHelper_queryRuntimes( - JNIEnv* env, - jobject /* this */, - jstring native_dir_path) { - - const char *cstr = env->GetStringUTFChars(native_dir_path, nullptr); - env->ReleaseStringUTFChars(native_dir_path, cstr); - - std::string runT_Status; - std::string nativeLibPath = std::string(cstr); - -// runT_Status += "\nLibs Path : " + nativeLibPath + "\n"; - - if (!SetAdspLibraryPath(nativeLibPath)) { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "Failed to set ADSP Library Path\n"); - - runT_Status += "\nFailed to set ADSP Library Path\nTerminating"; - return env->NewStringUTF(runT_Status.c_str()); - } - else - { - LOGI("ADSP found"); - } - - // ====================================================================================== // - runT_Status = "Querying Runtimes : \n\n"; - // DSP unsignedPD check - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(zdl::DlSystem::Runtime_t::DSP,zdl::DlSystem::RuntimeCheckOption_t::UNSIGNEDPD_CHECK)) { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "UnsignedPD DSP runtime : Absent\n"); - runT_Status += "UnsignedPD DSP runtime : Absent\n"; - } - else { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "UnsignedPD DSP runtime : Present\n"); - runT_Status += "UnsignedPD DSP runtime : Present\n"; - } - // DSP signedPD check - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(zdl::DlSystem::Runtime_t::DSP)) { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "DSP runtime : Absent\n"); - runT_Status += "DSP runtime : Absent\n"; - } - else { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "DSP runtime : Present\n"); - runT_Status += "DSP runtime : Present\n"; - } - // GPU check - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(zdl::DlSystem::Runtime_t::GPU)) { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "GPU runtime : Absent\n"); - runT_Status += "GPU runtime : Absent\n"; - } - else { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "GPU runtime : Present\n"); - runT_Status += "GPU runtime : Present\n"; - } - // CPU check - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(zdl::DlSystem::Runtime_t::CPU)) { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "CPU runtime : Absent\n"); - runT_Status += "CPU runtime : Absent\n"; - } - else { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "CPU runtime : Present\n"); - runT_Status += "CPU runtime : Present\n"; - } - - return env->NewStringUTF(runT_Status.c_str()); -} - - -//initializing network -extern "C" -JNIEXPORT jstring JNICALL -Java_com_qcom_aistack_1superres_SNPEHelper_initSNPE(JNIEnv *env, jobject thiz, jobject asset_manager, jchar runtime, jstring jdlc_name) { - LOGI("Reading SNPE DLC ..."); - std::string result; - - const char *cstr = env->GetStringUTFChars(jdlc_name, 0); - AAssetManager* mgr = AAssetManager_fromJava(env, asset_manager); - AAsset* asset_model = AAssetManager_open(mgr, cstr, AASSET_MODE_UNKNOWN); - - //Changing Preprocessing/PostProcessing for SESR - 
if(strcmp(cstr,"sesr_quant_128_4.dlc")==0){ - modelobj = new SESR(); - } - //Changing Preprocessing/PostProcessing for SRGAN - else if(strcmp(cstr,"srgan_quant_128_4.dlc")==0){ - modelobj = new SRGAN(); - } - //Changing Preprocessing/PostProcessing for ESRGAN - else if(strcmp(cstr,"esrgan_quant_128_4.dlc")==0){ - modelobj = new ESRGAN(); - } - //Changing Preprocessing/PostProcessing for XLSR - else if(strcmp(cstr,"xlsr_quant_128_4.dlc")==0){ - modelobj = new XLSR(); - } - //Changing Preprocessing/PostProcessing for Quick_SR_Large - else if(strcmp(cstr,"quicksrnet_large_quant_128_4.dlc")==0){ - modelobj = new QuickSRNetLarge(); - } - //Changing Preprocessing/PostProcessing for Quick_SR_medium - else if(strcmp(cstr,"quicksrnet_medium_quant_128_4.dlc")==0){ - modelobj = new QuickSRNetMedium(); - } - //Changing Preprocessing/PostProcessing for Quick_SR_Small - else if(strcmp(cstr,"quicksrnet_small_quant_128_4.dlc")==0){ - modelobj = new QuickSRNetSmall(); - } - else - { - LOGE("Model pre and post is not defined"); - return NULL; - } - - modelobj->msg(); - env->ReleaseStringUTFChars(jdlc_name, cstr); - - if (NULL == asset_model) { - LOGE("Failed to load ASSET, needed to load DLC\n"); - result = "Failed to load ASSET, needed to load DLC\n"; - return env->NewStringUTF(result.c_str()); - } - - long dlc_size = AAsset_getLength(asset_model); - LOGI("DLC Size = %ld MB\n", dlc_size / (1024*1024)); - result += "DLC Size = " + std::to_string(dlc_size); - char* dlc_buffer = (char*) malloc(sizeof(char) * dlc_size); - AAsset_read(asset_model, dlc_buffer, dlc_size); - - result += "\n\nBuilding Models DLC Network:\n"; - result += build_network(reinterpret_cast(dlc_buffer), dlc_size,runtime); - - return env->NewStringUTF(result.c_str()); -} - -//inference -extern "C" -JNIEXPORT jfloat JNICALL -Java_com_qcom_aistack_1superres_SNPEHelper_inferSNPE(JNIEnv *env, jobject thiz, jlong inputMat, - jlong outputMat) { - - LOGI("infer SNPE S"); - - cv::Mat &inputimg = *(cv::Mat*) inputMat; - cvtColor(inputimg,inputimg,CV_BGR2RGB); - - cv::Mat &outputimg = *(cv::Mat*) outputMat; - - float milli_time; - - bool status = executeDLC(inputimg, outputimg, milli_time, modelobj); - - if(status == false) - { - LOGE("fatal ERROR"); - return 0; - } - else { - LOGI("status is TRUE"); - LOGI("rows: %d cols: %d",outputimg.rows,outputimg.cols); - } - LOGI("infer SNPE E"); - LOGI("milli_time: %f",milli_time); - return milli_time; - -} \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/Model.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/Model.h deleted file mode 100644 index 9b9f86b3..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/Model.h +++ /dev/null @@ -1,51 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shubgoya on 8/2/2023. -// - - - -#ifndef SUPERRESOLUTION_MODEL_H -#define SUPERRESOLUTION_MODEL_H - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "android/log.h" - - -#include -#include -#include -#define LOG_TAG "SNPE_INF" -#define LOGI(...) 
__android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__) -#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__) - - -class Model { - -public: - virtual void preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) = 0; - virtual void postprocess(cv::Mat &outputimg) = 0; - virtual void msg() = 0; - -}; - - -#endif //SUPERRESOLUTION_MODEL_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/QuickSRNetLarge.cpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/QuickSRNetLarge.cpp deleted file mode 100644 index c303b1e6..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/QuickSRNetLarge.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shubgoya on 8/2/2023. -// - -#include "QuickSRNetLarge.h" - -void QuickSRNetLarge::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) -{ - LOGI("SESR Class Preprocess is called"); - cv::Mat resized_img; - - //dims is of size [batchsize(1), height, width, channels(3)] - cv::resize(img,resized_img,cv::Size(dims[2],dims[1]),cv::INTER_LINEAR); //Resizing based on input - LOGI("inputimageSIZE width%d::%d height%d::%d",dims[1],resized_img.cols, dims[2],resized_img.rows); - - float inputScale = 0.00392156862745f; //normalization value, this is 1/255 - - float * accumulator = reinterpret_cast (&dest_buffer[0]); - - //opencv read in BGRA by default - cvtColor(resized_img, resized_img, CV_BGRA2RGB); - LOGI("num of channels: %d",resized_img.channels()); - int lim = resized_img.rows*resized_img.cols*3; - for(int idx = 0; idx &dest_buffer, cv::Mat &img, std::vector dims); - void postprocess(cv::Mat &outputimg); - void msg(); - -}; - - -#endif //SUPERRESOLUTION_QuickSRNetLarge_H \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/QuickSRNetMedium.cpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/QuickSRNetMedium.cpp deleted file mode 100644 index a5da4ac2..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/QuickSRNetMedium.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shubgoya on 8/2/2023. 
-// - -#include "QuickSRNetMedium.h" - -void QuickSRNetMedium::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) -{ - LOGI("SESR Class Preprocess is called"); - cv::Mat resized_img; - - //dims is of size [batchsize(1), height, width, channels(3)] - cv::resize(img,resized_img,cv::Size(dims[2],dims[1]),cv::INTER_LINEAR); //Resizing based on input - LOGI("inputimageSIZE width%d::%d height%d::%d",dims[1],resized_img.cols, dims[2],resized_img.rows); - - float inputScale = 0.00392156862745f; //normalization value, this is 1/255 - - float * accumulator = reinterpret_cast (&dest_buffer[0]); - - //opencv read in BGRA by default - cvtColor(resized_img, resized_img, CV_BGRA2RGB); - LOGI("num of channels: %d",resized_img.channels()); - int lim = resized_img.rows*resized_img.cols*3; - for(int idx = 0; idx &dest_buffer, cv::Mat &img, std::vector dims); - void postprocess(cv::Mat &outputimg); - void msg(); - -}; - - -#endif //SUPERRESOLUTION_QUICKSRNETMEDIUM_H \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/QuickSRNetSmall.cpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/QuickSRNetSmall.cpp deleted file mode 100644 index 704dedc6..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/QuickSRNetSmall.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shubgoya on 8/2/2023. -// - -#include "QuickSRNetSmall.h" - -void QuickSRNetSmall::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) -{ - LOGI("SESR Class Preprocess is called"); - cv::Mat resized_img; - - //dims is of size [batchsize(1), height, width, channels(3)] - cv::resize(img,resized_img,cv::Size(dims[2],dims[1]),cv::INTER_LINEAR); //Resizing based on input - LOGI("inputimageSIZE width%d::%d height%d::%d",dims[1],resized_img.cols, dims[2],resized_img.rows); - - float inputScale = 0.00392156862745f; //normalization value, this is 1/255 - - float * accumulator = reinterpret_cast (&dest_buffer[0]); - - //opencv read in BGRA by default - cvtColor(resized_img, resized_img, CV_BGRA2RGB); - LOGI("num of channels: %d",resized_img.channels()); - int lim = resized_img.rows*resized_img.cols*3; - for(int idx = 0; idx &dest_buffer, cv::Mat &img, std::vector dims); - void postprocess(cv::Mat &outputimg); - void msg(); - -}; - - -#endif //SUPERRESOLUTION_QuickSRNetSmall_H \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/SESR.cpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/SESR.cpp deleted file mode 100644 index 9c8a6a39..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/SESR.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shubgoya on 8/2/2023. -// - -#include "SESR.h" - -void SESR::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) -{ - LOGI("SESR Class Preprocess is called"); - cv::Mat resized_img; - - //dims is of size [batchsize(1), height, width, channels(3)] - cv::resize(img,resized_img,cv::Size(dims[2],dims[1]),cv::INTER_LINEAR); //Resizing based on input - LOGI("inputimageSIZE width%d::%d height%d::%d",dims[1],resized_img.cols, dims[2],resized_img.rows); - - float inputScale = 0.00392156862745f; //normalization value, this is 1/255 - - float * accumulator = reinterpret_cast (&dest_buffer[0]); - - //opencv read in BGRA by default - cvtColor(resized_img, resized_img, CV_BGRA2RGB); - LOGI("num of channels: %d",resized_img.channels()); - int lim = resized_img.rows*resized_img.cols*3; - for(int idx = 0; idx &dest_buffer, cv::Mat &img, std::vector dims); - void postprocess(cv::Mat &outputimg); - void msg(); - -}; - - -#endif //SUPERRESOLUTION_SESR_H \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/SRGAN.cpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/SRGAN.cpp deleted file mode 100644 index 8ee4dac7..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/SRGAN.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shubgoya on 8/2/2023. -// - -#include "SRGAN.h" - -void SRGAN::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) -{ - LOGI("SESR Class Preprocess is called"); - cv::Mat resized_img; - - //dims is of size [batchsize(1), height, width, channels(3)] - cv::resize(img,resized_img,cv::Size(dims[2],dims[1]),cv::INTER_LINEAR); //Resizing based on input - LOGI("inputimageSIZE width%d::%d height%d::%d",dims[1],resized_img.cols, dims[2],resized_img.rows); - - float inputScale = 0.00392156862745f; //normalization value, this is 1/255 - - float * accumulator = reinterpret_cast (&dest_buffer[0]); - - //opencv read in BGRA by default - cvtColor(resized_img, resized_img, CV_BGRA2RGB); - LOGI("num of channels: %d",resized_img.channels()); - int lim = resized_img.rows*resized_img.cols*3; - for(int idx = 0; idx &dest_buffer, cv::Mat &img, std::vector dims); - void postprocess(cv::Mat &outputimg); - void msg(); - -}; - - -#endif //SUPERRESOLUTION_SRGAN_H \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/XLSR.cpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/XLSR.cpp deleted file mode 100644 index 4c6cc8fa..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/XLSR.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shubgoya on 8/2/2023. -// - -#include "XLSR.h" - -void XLSR::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) -{ - LOGI("SESR Class Preprocess is called"); - cv::Mat resized_img; - - //dims is of size [batchsize(1), height, width, channels(3)] - cv::resize(img,resized_img,cv::Size(dims[2],dims[1]),cv::INTER_LINEAR); //Resizing based on input - LOGI("inputimageSIZE width%d::%d height%d::%d",dims[1],resized_img.cols, dims[2],resized_img.rows); - - float inputScale = 0.00392156862745f; //normalization value, this is 1/255 - - float * accumulator = reinterpret_cast (&dest_buffer[0]); - - //opencv read in BGRA by default - cvtColor(resized_img, resized_img, CV_BGRA2RGB); - LOGI("num of channels: %d",resized_img.channels()); - int lim = resized_img.rows*resized_img.cols*3; - for(int idx = 0; idx &dest_buffer, cv::Mat &img, std::vector dims); - void postprocess(cv::Mat &outputimg); - void msg(); - -}; - - -#endif //SUPERRESOLUTION_XLSR_H \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/CheckRuntime.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/CheckRuntime.hpp deleted file mode 100644 index 07538cd0..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/CheckRuntime.hpp +++ /dev/null @@ -1,17 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2017-2019 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef CHECKRUNTIME_H -#define CHECKRUNTIME_H - -#include "SNPE/SNPEFactory.hpp" - -zdl::DlSystem::Runtime_t checkRuntime(zdl::DlSystem::Runtime_t runtime); -bool checkGLCLInteropSupport(); - -#endif diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/CreateUserBuffer.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/CreateUserBuffer.hpp deleted file mode 100644 index b880b033..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/CreateUserBuffer.hpp +++ /dev/null @@ -1,59 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2017-2020 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
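CheckRuntime.hpp above only declares checkRuntime(); its body is not part of this hunk. A typical implementation, sketched below as an assumption, asks SNPEFactory whether the requested accelerator is present on the device and falls back to CPU otherwise.

```cpp
// Hypothetical body for checkRuntime(); the real implementation is not in this hunk.
#include "SNPE/SNPEFactory.hpp"
#include "DlSystem/DlEnums.hpp"

zdl::DlSystem::Runtime_t checkRuntime(zdl::DlSystem::Runtime_t runtime)
{
    if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(runtime)) {
        // Requested accelerator (DSP/GPU) is missing on this device: fall back to CPU.
        runtime = zdl::DlSystem::Runtime_t::CPU;
    }
    return runtime;
}
```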
-// -//============================================================================== - -#include"inference.h" -#include -#include -#include -#include "SNPE/SNPE.hpp" -#include "DlSystem/IUserBuffer.hpp" -#include "DlSystem/UserBufferMap.hpp" - -typedef unsigned int GLuint; - -// Helper function to fill a single entry of the UserBufferMap with the given user-backed buffer -void createUserBuffer(zdl::DlSystem::UserBufferMap& userBufferMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - const char * name, - const bool isTfNBuffer, - int bitWidth); - - -// Create a UserBufferMap of the SNPE network outputs -void createOutputBufferMap(zdl::DlSystem::UserBufferMap& outputMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - const bool isTfNBuffer, - int bitWidth); - -// Create a UserBufferMap of the SNPE network inputs -void createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - const bool isTfNBuffer, - int bitWidth); - -//// Create a UserBufferMap of the SNPE network outputs -//void createOutputBufferMap(zdl::DlSystem::UserBufferMap& outputMap, -// std::unordered_map>& applicationBuffers, -// std::vector>& snpeUserBackedBuffers, -// std::unique_ptr& snpe, -// const bool isTfNBuffer, -// int bitWidth); -// -//// Create a UserBufferMap of the SNPE network inputs -//void createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, -// std::unordered_map>& applicationBuffers, -// std::vector>& snpeUserBackedBuffers, -// std::unique_ptr& snpe, -// const bool isTfNBuffer, -// int bitWidth); diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/LoadContainer.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/LoadContainer.hpp deleted file mode 100644 index 85bf622a..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/LoadContainer.hpp +++ /dev/null @@ -1,19 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2019 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef LOADCONTAINER_H -#define LOADCONTAINER_H - -#include - -#include "DlContainer/IDlContainer.hpp" - -std::unique_ptr loadContainerFromFile(std::string containerPath); -std::unique_ptr loadContainerFromBuffer(const uint8_t * buffer, const size_t size); - -#endif diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/LoadInputTensor.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/LoadInputTensor.hpp deleted file mode 100644 index 7aec3b24..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/LoadInputTensor.hpp +++ /dev/null @@ -1,27 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2017-2019 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
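The two loaders declared in LoadContainer.hpp map directly onto IDlContainer::open(); their stripped return type is presumably std::unique_ptr<zdl::DlContainer::IDlContainer>. A minimal sketch under that assumption:

```cpp
// Minimal sketch assuming the stripped return type is std::unique_ptr<zdl::DlContainer::IDlContainer>.
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include "DlContainer/IDlContainer.hpp"

std::unique_ptr<zdl::DlContainer::IDlContainer> loadContainerFromFile(std::string containerPath)
{
    return zdl::DlContainer::IDlContainer::open(containerPath);
}

std::unique_ptr<zdl::DlContainer::IDlContainer> loadContainerFromBuffer(const uint8_t *buffer, const size_t size)
{
    // Used on Android, where the DLC arrives as a byte array read from assets.
    return zdl::DlContainer::IDlContainer::open(buffer, size);
}
```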
-// -//============================================================================== - -#ifndef LOADINPUTTENSOR_H -#define LOADINPUTTENSOR_H - -#include -#include -#include - -#include "SNPE/SNPE.hpp" -#include "DlSystem/ITensorFactory.hpp" -#include "DlSystem/TensorMap.hpp" -#include "../../Model.h" - - -bool loadInputUserBuffer(std::unordered_map>& applicationBuffers, - std::unique_ptr& snpe, - cv::Mat &model_input, - zdl::DlSystem::UserBufferMap& inputMap, - int bitWidth, Model *modelobj); -#endif diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/SetBuilderOptions.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/SetBuilderOptions.hpp deleted file mode 100644 index 3b760147..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/SetBuilderOptions.hpp +++ /dev/null @@ -1,25 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2017-2019 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef SETBUILDEROPTIONS_H -#define SETBUILDEROPTIONS_H - -#include "DlSystem/RuntimeList.hpp" -#include "SNPE/SNPE.hpp" -#include "DlSystem/DlEnums.hpp" -//#include "DlSystem/UDLFunc.hpp" -#include "DlContainer/IDlContainer.hpp" -#include "DlSystem/PlatformConfig.hpp" - -std::unique_ptr setBuilderOptions(std::unique_ptr & container, - zdl::DlSystem::Runtime_t runtime, - zdl::DlSystem::RuntimeList runtimeList, - bool useUserSuppliedBuffers, - bool useCaching); - -#endif //SETBUILDEROPTIONS_H \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/Util.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/Util.hpp deleted file mode 100644 index 346e7ac0..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/Util.hpp +++ /dev/null @@ -1,41 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2017-2019 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
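setBuilderOptions() above returns a built SNPE instance (the stripped template arguments are presumably zdl::SNPE::SNPE and zdl::DlContainer::IDlContainer). Its body is outside this hunk; the sketch below shows the conventional SNPEBuilder chain such a helper wraps, and is an assumption rather than the app's exact code.

```cpp
// Sketch of the conventional SNPEBuilder chain behind setBuilderOptions(); an assumption,
// not the app's exact code. Template arguments are assumed to be SNPE / IDlContainer.
#include "SNPE/SNPEBuilder.hpp"
#include "DlContainer/IDlContainer.hpp"

std::unique_ptr<zdl::SNPE::SNPE> setBuilderOptions(std::unique_ptr<zdl::DlContainer::IDlContainer> &container,
                                                   zdl::DlSystem::Runtime_t runtime,
                                                   zdl::DlSystem::RuntimeList runtimeList,
                                                   bool useUserSuppliedBuffers,
                                                   bool useCaching)
{
    if (runtimeList.empty())
        runtimeList.add(runtime);                       // fall back to the single requested runtime

    zdl::SNPE::SNPEBuilder snpeBuilder(container.get());
    return snpeBuilder.setRuntimeProcessorOrder(runtimeList)
                      .setUseUserSuppliedBuffers(useUserSuppliedBuffers)
                      .setInitCacheMode(useCaching)
                      .build();
}
```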
-// -//============================================================================== - -#ifndef UTIL_H -#define UTIL_H - -#include -#include -#include -#include - -#include "DlSystem/ITensorFactory.hpp" -#include "DlSystem/TensorShape.hpp" - -template Container& split(Container& result, const typename Container::value_type & s, typename Container::value_type::value_type delimiter ) -{ - result.clear(); - std::istringstream ss( s ); - while (!ss.eof()) - { - typename Container::value_type field; - getline( ss, field, delimiter ); - if (field.empty()) continue; - result.push_back( field ); - } - return result; -} - - -cv::Mat get_affine_transform(int dst_w, int dst_h, int inv, double center[], double scale[]); -//void getcenterscale(int image_width, int image_height, double center[2], double scale[2]); -void getcenterscale(int image_width, int image_height, double center[2], double scale[2],float bottom, float left, float top, float right); -float** getCoords(std::vector buff, double center[], double scale[]); - -#endif - diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/inference.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/inference.h deleted file mode 100644 index 1903cc74..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/hpp/inference.h +++ /dev/null @@ -1,54 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shubpate on 12/11/2021. -// - -#ifndef NATIVEINFERENCE_INFERENCE_H -#define NATIVEINFERENCE_INFERENCE_H - -#include "zdl/DlSystem/TensorShape.hpp" -#include "zdl/DlSystem/TensorMap.hpp" -#include "zdl/DlSystem/TensorShapeMap.hpp" -#include "zdl/DlSystem/IUserBufferFactory.hpp" -#include "zdl/DlSystem/IUserBuffer.hpp" -#include "zdl/DlSystem/UserBufferMap.hpp" -#include "zdl/DlSystem/IBufferAttributes.hpp" - -#include "zdl/DlSystem/StringList.hpp" - -#include "zdl/SNPE/SNPE.hpp" -#include "zdl/SNPE/SNPEFactory.hpp" -#include "zdl/DlSystem/DlVersion.hpp" -#include "zdl/DlSystem/DlEnums.hpp" -#include "zdl/DlSystem/String.hpp" -#include "zdl/DlContainer/IDlContainer.hpp" -#include "zdl/SNPE/SNPEBuilder.hpp" - -#include "zdl/DlSystem/ITensor.hpp" -#include "zdl/DlSystem/ITensorFactory.hpp" - -#include -#include "android/log.h" - -#include - -#include "../../Model.h" - -#define LOG_TAG "SNPE_INF" -#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__) -#define LOGE(...) 
__android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__) - -std::string build_network(const uint8_t * dlc_buffer, const size_t dlc_size, const char runtime_arg); -bool SetAdspLibraryPath(std::string nativeLibPath); - -bool executeDLC(cv::Mat &inputimg, cv::Mat &outputimg, float &milli_time, Model *modelobj); - -#endif //NATIVEINFERENCE_INFERENCE_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DiagLog/IDiagLog.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DiagLog/IDiagLog.h deleted file mode 100644 index 9a084071..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DiagLog/IDiagLog.h +++ /dev/null @@ -1,102 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef _DIAGLOG_IDIAGLOG_H_ -#define _DIAGLOG_IDIAGLOG_H_ - -#include "DiagLog/Options.h" -#include "DlSystem/SnpeApiExportDefine.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE IDiagLog handle - */ -typedef void* Snpe_IDiagLog_Handle_t; - -/** - * @brief . - * - * Sets the options after initialization occurs. - * - * @param[in] handle : Handle to access IDiagLog - * @param[in] loggingOptions : The options to set up diagnostic logging. - * - * @return Error code if the options could not be set. Ensure logging is not started/ - * SNPE_SUCCESS otherwise - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IDiagLog_SetOptions(Snpe_IDiagLog_Handle_t handle, Snpe_Options_Handle_t loggingOptionsHandle); - -/** - * @brief . - * - * Gets the curent options for the diag logger. - * - * @param[in] handle : Handle to access IDiagLog - * @return Handle to access DiagLog options. - */ -SNPE_API -Snpe_Options_Handle_t Snpe_IDiagLog_GetOptions(Snpe_IDiagLog_Handle_t handle); - -/** - * @brief . - * - * @param[in] handle : Handle to access IDiagLog - * @param[in] mask : Allows for setting the log mask once diag logging has started - * @return SNPE_SUCCESS if the level was set successfully. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IDiagLog_SetDiagLogMask(Snpe_IDiagLog_Handle_t handle, const char* mask) ; - -/** - * @brief . - * - * Enables logging. - * - * Logging should be started prior to the instantiation of other SNPE_APIs - * to ensure all events are captured. - * - * @param[in] handle : Handle to access IDiagLog - * @return SNPE_SUCCESS if diagnostic logging started successfully. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IDiagLog_Start(Snpe_IDiagLog_Handle_t handle); - -/** - * @brief Disables logging. - * - * @param[in] handle : Handle to access IDiagLog - * - * @return SNPE_SUCCESS if logging stopped successfully. Error code otherwise. 
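inference.h above is the boundary between the JNI layer and SNPE: build_network() builds the network once from the in-memory DLC, and executeDLC() runs one frame through it. The snippet below is only an illustrative calling sequence; the runtime selector character and local names are hypothetical.

```cpp
// Illustrative calling sequence only; the runtime selector and local names are hypothetical.
#include "inference.h"

bool runSuperResolution(const uint8_t *dlc_buffer, size_t dlc_size,
                        cv::Mat &inputimg, cv::Mat &outputimg, Model *modelobj)
{
    const char runtime_arg = 'D';                      // hypothetical selector; the app defines the real mapping
    std::string status = build_network(dlc_buffer, dlc_size, runtime_arg);
    LOGI("build_network: %s", status.c_str());

    float milli_time = 0.0f;                           // filled with the measured inference time
    return executeDLC(inputimg, outputimg, milli_time, modelobj);
}
```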
- */ -SNPE_API -Snpe_ErrorCode_t Snpe_IDiagLog_Stop(Snpe_IDiagLog_Handle_t handle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _DIAGLOG_IDIAGLOG_H_ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DiagLog/IDiagLog.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DiagLog/IDiagLog.hpp deleted file mode 100644 index 64b81eba..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DiagLog/IDiagLog.hpp +++ /dev/null @@ -1,133 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include -#include - -#include "Options.hpp" -#include "DlSystem/String.hpp" - -#include "DiagLog/IDiagLog.h" - - -namespace DiagLog{ -class IDiagLog : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static Snpe_ErrorCode_t InvalidDeleteCall(Snpe_IDiagLog_Handle_t ){ - return SNPE_ERRORCODE_CAPI_DELETE_FAILURE; - } - - static constexpr DeleteFunctionType DeleteFunction{InvalidDeleteCall}; - - class OptionsInternal : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_Options_Delete}; - public: - OptionsInternal() - : BaseType(Snpe_Options_Create()) - { } - - explicit OptionsInternal(const Options& options) - : BaseType(Snpe_Options_Create()) - { - setDiagLogMask(options.DiagLogMask.c_str()); - setLogFileDirectory(options.LogFileDirectory.c_str()); - setLogFileName(options.LogFileName.c_str()); - setLogFileRotateCount(options.LogFileRotateCount); - setLogFileReplace(options.LogFileReplace); - } - - const char* getDiagLogMask() const{ - return Snpe_Options_GetDiagLogMask(handle()); - } - void setDiagLogMask(const char* diagLogMask){ - Snpe_Options_SetDiagLogMask(handle(), diagLogMask); - } - - const char* getLogFileDirectory() const{ - return Snpe_Options_GetLogFileDirectory(handle()); - } - void setLogFileDirectory(const char* logFileDirectory){ - Snpe_Options_SetLogFileDirectory(handle(), logFileDirectory); - } - - const char* getLogFileName() const{ - return Snpe_Options_GetLogFileName(handle()); - } - void setLogFileName(const char* logFileName){ - Snpe_Options_SetLogFileName(handle(), logFileName); - } - - uint32_t getLogFileRotateCount() const{ - return Snpe_Options_GetLogFileRotateCount(handle()); - } - void setLogFileRotateCount(uint32_t logFileRotateCount){ - Snpe_Options_SetLogFileRotateCount(handle(), logFileRotateCount); - } - - bool getLogFileReplace() const{ - return Snpe_Options_GetLogFileReplace(handle()); - } - void setLogFileReplace(bool logFileReplace){ - Snpe_Options_SetLogFileReplace(handle(), logFileReplace); - } - - explicit operator Options() const{ - return { - getDiagLogMask(), - getLogFileDirectory(), - getLogFileName(), - getLogFileRotateCount(), - getLogFileReplace() - }; - } - - }; - - - -public: - bool setOptions(const Options& loggingOptions){ - OptionsInternal 
optionsInternal(loggingOptions); - return SNPE_SUCCESS == Snpe_IDiagLog_SetOptions(handle(), getHandle(optionsInternal)); - } - Options getOptions() const{ - OptionsInternal optionsInternal(moveHandle(Snpe_IDiagLog_GetOptions(handle()))); - return Options(optionsInternal); - } - - bool setDiagLogMask(const std::string& mask){ - return SNPE_SUCCESS == Snpe_IDiagLog_SetDiagLogMask(handle(), mask.c_str()); - } - bool setDiagLogMask(const DlSystem::String& mask){ - return setDiagLogMask(static_cast(mask)); - } - - bool start(void){ - return SNPE_SUCCESS == Snpe_IDiagLog_Start(handle()); - } - bool stop(void){ - return SNPE_SUCCESS == Snpe_IDiagLog_Stop(handle()); - } - -}; - -} // ns DiagLog - -ALIAS_IN_ZDL_NAMESPACE(DiagLog, IDiagLog) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DiagLog/Options.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DiagLog/Options.h deleted file mode 100644 index ad641cca..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DiagLog/Options.h +++ /dev/null @@ -1,164 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef _DIAGLOG_OPTIONS_H_ -#define _DIAGLOG_OPTIONS_H_ - -#include - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE Options handle - */ -typedef void* Snpe_Options_Handle_t; - - -SNPE_API -Snpe_Options_Handle_t Snpe_Options_Create(); - -/** - * Destroys/frees a Options - * - * @param[in] handle : Handle to access Options object - * @return indication of success/failures - */ -SNPE_API -Snpe_ErrorCode_t Snpe_Options_Delete(Snpe_Options_Handle_t handle); - -/** - * Gets DiagLogMask - * diagLogMask: Enables diag logging only on the specified area mask - * - * @param[in] handle : Handle to access Options object - * @return diagLogMask as a const char* - */ -SNPE_API -const char* Snpe_Options_GetDiagLogMask(Snpe_Options_Handle_t handle); - -/** - * Sets DiagLogMask - * diagLogMask: Enables diag logging only on the specified area mask - * - * @param[in] handle : Handle to access Options object - * @param[in] diagLogMask : specific area where logging needs to be enabed - */ -SNPE_API -void Snpe_Options_SetDiagLogMask(Snpe_Options_Handle_t handle, const char* diagLogMask); - -/** - * Gets logFileDirectory - * logFileDirectory: The path to the directory where log files will be written. - * The path may be relative or absolute. Relative paths are interpreted - * - * @param[in] handle : Handle to access Options object - * @return logFileDirectory as a const char* - */ -SNPE_API -const char* Snpe_Options_GetLogFileDirectory(Snpe_Options_Handle_t handle); - -/** - * Sets logFileDirectory - * logFileDirectory: The path to the directory where log files will be written. 
- * The path may be relative or absolute. Relative paths are interpreted - * - * @param[in] handle : Handle to access Options object - * @param[in] logFileDirectory : path for saving the log files - */ -SNPE_API -void Snpe_Options_SetLogFileDirectory(Snpe_Options_Handle_t handle, const char* logFileDirectory); - - -/** - * Gets logFileName - * logFileName: The name used for log files. If this value is empty then BaseName will be - * used as the default file name. - * - * @param[in] handle : Handle to access Options object - * @return logFileName as a const char* - */ -SNPE_API -const char* Snpe_Options_GetLogFileName(Snpe_Options_Handle_t handle); - -/** - * Sets logFileName - * logFileName: The name used for log files. If this value is empty then BaseName will be - * used as the default file name. - * - * @param[in] handle : Handle to access Options object - * @param[in] logFileName : name of log file - */ -SNPE_API -void Snpe_Options_SetLogFileName(Snpe_Options_Handle_t handle, const char* logFileName); - -/** - * Gets the maximum number of log files to create. If set to 0 no log rotation - * will be used and the log file name specified will be used each time, overwriting - * any existing log file that may exist. - * - * @param[in] handle : Handle to access options object. - * @return max log files to create - */ -SNPE_API -uint32_t Snpe_Options_GetLogFileRotateCount(Snpe_Options_Handle_t handle); - -/** - * Sets the maximum number of log files to create. If set to 0 no log rotation - * will be used and the log file name specified will be used each time, overwriting - * any existing log file that may exist. - * - * @param[in] handle : Handle to access options object. - * @param[in] logFileRotateCount : max log files to create - */ -SNPE_API -void Snpe_Options_SetLogFileRotateCount(Snpe_Options_Handle_t handle, uint32_t logFileRotateCount); - -/** - * If the log file already exists, control whether it will be replaced - * - * @param[in] handle : Handle to access options object - * @return 1 if log file will be replaced, 0 otherwise - */ -SNPE_API -int Snpe_Options_GetLogFileReplace(Snpe_Options_Handle_t handle); - -/** - * If the log file already exists, control whether it will be replaced - * - * @param[in] handle : Handle to access options object - * @param[in] logFileReplace : 1 if log file to be replaced, 0 otherwise - */ -SNPE_API -void Snpe_Options_SetLogFileReplace(Snpe_Options_Handle_t handle, int logFileReplace); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _DIAGLOG_OPTIONS_H_ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DiagLog/Options.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DiagLog/Options.hpp deleted file mode 100644 index c9ad48b6..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DiagLog/Options.hpp +++ /dev/null @@ -1,50 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include -#include - -#include "DiagLog/IDiagLog.h" - - -namespace DiagLog { - -class Options -{ -public: - Options( - std::string diagLogMask = "", - std::string logFileDirectory = "diaglogs", - std::string logFileName = "DiagLog", - uint32_t logFileRotateCount = 20, - bool logFileReplace = true - ) - : DiagLogMask(std::move(diagLogMask)), - LogFileDirectory(std::move(logFileDirectory)), - LogFileName(std::move(logFileName)), - LogFileRotateCount(logFileRotateCount), - LogFileReplace(logFileReplace) - { - // Solves the empty string problem with multiple std libs - DiagLogMask.reserve(1); - } - - std::string DiagLogMask; - std::string LogFileDirectory; - std::string LogFileName; - uint32_t LogFileRotateCount; - - bool LogFileReplace; -}; - -} // ns DiagLog - -ALIAS_IN_ZDL_NAMESPACE(DiagLog, Options) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlContainer/DlContainer.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlContainer/DlContainer.h deleted file mode 100644 index 6ce7cd25..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlContainer/DlContainer.h +++ /dev/null @@ -1,185 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
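Options.hpp and the IDiagLog wrapper shown above are what an app would use to turn on SNPE diagnostic logging. The sketch below assumes an IDiagLog reference already obtained from the SNPE instance (e.g. via its getDiagLogInterface() accessor, which is not part of this diff) and uses only the wrapper calls declared here; the log directory is a hypothetical example.

```cpp
// Sketch using only the wrapper calls declared in IDiagLog.hpp/Options.hpp above.
// The IDiagLog reference is assumed to come from the SNPE instance; the directory is an example.
#include "DiagLog/IDiagLog.hpp"
#include "DiagLog/Options.hpp"

void enableDiagLogging(DiagLog::IDiagLog &diagLog)
{
    DiagLog::Options options;                          // defaults: "diaglogs" dir, "DiagLog" base name
    options.DiagLogMask = ".*";                        // log every area
    options.LogFileDirectory = "/data/local/tmp/diaglogs";   // hypothetical path
    diagLog.setOptions(options);
    diagLog.start();                                   // start before inference so all events are captured
    // ... run the network ...
    diagLog.stop();
}
```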
-// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_CONTAINER_DLCONTAINER_H -#define DL_CONTAINER_DLCONTAINER_H - -#ifdef __cplusplus -#include // uint8_t -#include // size_t -#else -#include -#include -#endif - -#include "DlSystem/DlError.h" -#include "DlSystem/StringList.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE DlcRecord handle - */ -typedef void* Snpe_DlcRecord_Handle_t; - -/** - * Constructs a DlcRecord and returns a handle to it - * - * @return the handle to the created DlcRecord - */ -SNPE_API -Snpe_DlcRecord_Handle_t Snpe_DlcRecord_Create(); - -/** - * Constructs a DlcRecord with a provided name and returns a handle to it - * - * @param[in] name : the name of the record - * - * @return the handle to the created DlcRecord - */ -SNPE_API -Snpe_DlcRecord_Handle_t Snpe_DlcRecord_CreateName(const char* name); - - -/** - * Destroys/frees a DlcRecord - * - * @param[in] dlcRecordHandle : Handle to access DlcRecord - * - * @return indication of success/failures - */ -SNPE_API -Snpe_ErrorCode_t Snpe_DlcRecord_Delete(Snpe_DlcRecord_Handle_t dlcRecordHandle); - -/** - * Gets the size of a DlcRecord in bytes - * - * @param[in] dlcRecordHandle : Handle to access DlcRecord - * - * @return the size of the DlcRecord in bytes - */ -SNPE_API -size_t Snpe_DlcRecord_Size(Snpe_DlcRecord_Handle_t dlcRecordHandle); - -/** - * Gets a pointer to the start of the DlcRecord's data - * - * @param[in] dlcRecordHandle : Handle to access DlcRecord - * - * @return uint8_t pointer to the DlcRecord's data - */ -SNPE_API -uint8_t* Snpe_DlcRecord_Data(Snpe_DlcRecord_Handle_t dlcRecordHandle); - -/** - * Gets the name of the DlcRecord - * - * @param[in] dlcRecordHandle : Handle to access DlcRecord - * - * @return the record's name - */ -SNPE_API -const char* Snpe_DlcRecord_Name(Snpe_DlcRecord_Handle_t dlcRecordHandle); - -/** - * A typedef to indicate a SNPE DlContainer handle - */ -typedef void* Snpe_DlContainer_Handle_t; - -/** - * Destroys/frees a DlContainer - * - * @param[in] dlContainerHandle : Handle to access DlContainer - * - * @return indication of success/failures - */ -SNPE_API -Snpe_ErrorCode_t Snpe_DlContainer_Delete(Snpe_DlContainer_Handle_t dlContainerHandle); - - -/** - * Initializes a container from a container archive file. - * - * @param[in] filename Container archive file path. - * - * @return Status of container open call - */ -SNPE_API -Snpe_DlContainer_Handle_t Snpe_DlContainer_Open(const char* filename); - -/** - * Initializes a container from a byte buffer. - * - * @param[in] buffer Byte buffer holding the contents of an archive - * file. - * - * @param[in] size Size of the byte buffer. - * - * @return A Snpe_DlContainer_Handle_t to access the dlContainer - */ -SNPE_API -Snpe_DlContainer_Handle_t Snpe_DlContainer_OpenBuffer(const uint8_t* buffer, const size_t size); - -/** - * Get the record catalog for a container. - * - * @param[in] dlContainerHandle : Handle to access DlContainer - * - * @return A Snpe_StringListHandle_t that holds the record names of the DlContainer - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_DlContainer_GetCatalog(Snpe_DlContainer_Handle_t dlContainerHandle); - -/** - * Get a record from a container by name. - * - * @param[in] dlContainerHandle : Handle to access DlContainer - * @param[in] recordName : Name of the record to fetch. 
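For completeness, the handle-based C API in DlContainer.h can be exercised on its own, e.g. to check that a DLC archive contains an expected record. This is a minimal sketch using only the functions declared in this header; error handling is reduced to early returns, and the helper name is hypothetical.

```cpp
// Minimal sketch of the handle-based C API: open a DLC, pull one record out of it,
// then release both handles.
#include <cstdio>
#include "DlContainer/DlContainer.h"

int dumpRecordSize(const char *dlcPath, const char *recordName)
{
    Snpe_DlContainer_Handle_t container = Snpe_DlContainer_Open(dlcPath);
    if (!container) return -1;

    Snpe_DlcRecord_Handle_t record = Snpe_DlContainer_GetRecord(container, recordName);
    if (record) {
        std::printf("%s: %zu bytes\n", Snpe_DlcRecord_Name(record), Snpe_DlcRecord_Size(record));
        Snpe_DlcRecord_Delete(record);        // the returned handle owns the record
    }
    Snpe_DlContainer_Delete(container);
    return 0;
}
```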
- * - * @return A Snpe_DlcRecordHandle_t that owns the record read from the DlContainer - */ -SNPE_API -Snpe_DlcRecord_Handle_t Snpe_DlContainer_GetRecord(Snpe_DlContainer_Handle_t dlContainerHandle, const char* recordName); - -/** - * Save the container to an archive on disk. This function will save the - * container if the filename is different from the file that it was opened - * from, or if at least one record was modified since the container was - * opened. - * - * It will truncate any existing file at the target path. - * - * @param[in] dlContainerHandle : Handle to access DlContainer - * @param[in] filename : Container archive file path. - * - * @return indication of success/failure - */ -SNPE_API -Snpe_ErrorCode_t Snpe_DlContainer_Save(Snpe_DlContainer_Handle_t dlContainerHandle, const char* filename); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_CONTAINER_DLCONTAINER_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlContainer/IDlContainer.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlContainer/IDlContainer.hpp deleted file mode 100644 index 482dbd02..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlContainer/IDlContainer.hpp +++ /dev/null @@ -1,146 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include -#include -#include -#include -#include - -#include "Wrapper.hpp" -#include "DlSystem/String.hpp" - -#include "DlContainer/DlContainer.h" -#include "DlSystem/StringList.hpp" - - - -namespace DlContainer { - -struct DlcRecord -{ - std::string name; - std::vector data; - - DlcRecord() - : name{}, - data{} - { } - - DlcRecord( DlcRecord&& other ) noexcept - : name(std::move(other.name)), - data(std::move(other.data)) - { } - DlcRecord(const std::string& new_name) - : name(new_name), - data() - { - if(name.empty()) { - name.reserve(1); - } - } - DlcRecord(const DlcRecord&) = delete; -}; - - -class IDlContainer : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_DlContainer_Delete}; - - template - void getCatalog_(std::set& catalog) const{ - DlSystem::StringList sl(moveHandle(Snpe_DlContainer_GetCatalog(handle()))); - for(auto s : sl){ - catalog.emplace(s); - } - } - - - class DlcRecordInternal : public Wrapper { - friend BaseType; - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_DlcRecord_Delete}; - public: - DlcRecordInternal() - : BaseType(Snpe_DlcRecord_Create()) - { } - explicit DlcRecordInternal(const std::string& name) - : BaseType(Snpe_DlcRecord_CreateName(name.c_str())) - { } - - uint8_t* getData(){ - return Snpe_DlcRecord_Data(handle()); - } - size_t size() const{ - return Snpe_DlcRecord_Size(handle()); - } - const char* getName(){ - return Snpe_DlcRecord_Name(handle()); - } - }; - - -public: - static std::unique_ptr open(const std::string& filename) noexcept{ - return makeUnique(Snpe_DlContainer_Open(filename.c_str())); - } - - static std::unique_ptr open(const uint8_t* buffer, 
const size_t size) noexcept{ - return makeUnique(Snpe_DlContainer_OpenBuffer(buffer, size)); - - } - static std::unique_ptr open(const std::vector& buffer) noexcept{ - return open(buffer.data(), buffer.size()); - } - static std::unique_ptr open(const DlSystem::String &filename) noexcept{ - return open(static_cast(filename)); - } - - - void getCatalog(std::set& catalog) const{ - return getCatalog_(catalog); - } - void getCatalog(std::set& catalog) const{ - return getCatalog_(catalog); - } - - bool getRecord(const std::string& name, DlcRecord& record) const{ - auto h = Snpe_DlContainer_GetRecord(handle(), name.c_str()); - if(!h) return false; - DlcRecordInternal internal(moveHandle(h)); - auto data = internal.getData(); - - record.name.assign(internal.getName()); - record.data.assign(data, data+internal.size()); - return true; - } - - bool getRecord(const DlSystem::String& name, DlcRecord& record) const{ - return getRecord(static_cast(name), record); - } - - bool save(const std::string& filename){ - return Snpe_DlContainer_Save(handle(), filename.c_str()); - } - - bool save(const DlSystem::String& filename){ - return save(static_cast(filename)); - } -}; - - -} // ns DlContainer - -ALIAS_IN_ZDL_NAMESPACE(DlContainer, DlcRecord) -ALIAS_IN_ZDL_NAMESPACE(DlContainer, IDlContainer) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlEnums.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlEnums.h deleted file mode 100644 index 85a0f4d3..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlEnums.h +++ /dev/null @@ -1,267 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _DL_ENUMS_H_ -#define _DL_ENUMS_H_ - -#include "DlSystem/SnpeApiExportDefine.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * Enumeration of supported target runtimes. - */ -typedef enum -{ - /// Special value indicating the property is unset. - SNPE_RUNTIME_UNSET = -1, - /// Run the processing on Snapdragon CPU. - /// Data: float 32bit - /// Math: float 32bit - SNPE_RUNTIME_CPU_FLOAT32 = 0, - /// Default legacy enum to retain backward compatibility. - /// CPU = CPU_FLOAT32 - SNPE_RUNTIME_CPU = SNPE_RUNTIME_CPU_FLOAT32, - - /// Run the processing on the Adreno GPU. - /// Data: float 16bit - /// Math: float 32bit - SNPE_RUNTIME_GPU_FLOAT32_16_HYBRID = 1, - /// Default legacy enum to retain backward compatibility. - /// GPU = GPU_FLOAT32_16_HYBRID - SNPE_RUNTIME_GPU = SNPE_RUNTIME_GPU_FLOAT32_16_HYBRID, - - /// Run the processing on the Hexagon DSP. - /// Data: 8bit fixed point Tensorflow style format - /// Math: 8bit fixed point Tensorflow style format - SNPE_RUNTIME_DSP_FIXED8_TF = 2, - /// Default legacy enum to retain backward compatibility. 
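The IDlContainer wrapper above offers the same functionality with RAII semantics. A short sketch, assuming the stripped catalog element type is std::string:

```cpp
// Sketch of the RAII wrapper; assumes the stripped catalog element type is std::string.
#include <set>
#include <string>
#include "DlContainer/IDlContainer.hpp"

void inspectContainer(const std::string &dlcPath)
{
    auto container = zdl::DlContainer::IDlContainer::open(dlcPath);
    if (!container) return;

    std::set<std::string> catalog;
    container->getCatalog(catalog);                    // names of the records stored in the archive

    for (const auto &name : catalog) {
        zdl::DlContainer::DlcRecord record;
        if (container->getRecord(name, record)) {
            // record.data now holds the raw bytes of this record; record.name its identifier.
        }
    }
}
```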
- /// DSP = DSP_FIXED8_TF - SNPE_RUNTIME_DSP = SNPE_RUNTIME_DSP_FIXED8_TF, - - /// Run the processing on the Adreno GPU. - /// Data: float 16bit - /// Math: float 16bit - SNPE_RUNTIME_GPU_FLOAT16 = 3, - - /// Run the processing on Snapdragon AIX+HVX. - /// Data: 8bit fixed point Tensorflow style format - /// Math: 8bit fixed point Tensorflow style format - SNPE_RUNTIME_AIP_FIXED8_TF = 5, - SNPE_RUNTIME_AIP_FIXED_TF = SNPE_RUNTIME_AIP_FIXED8_TF -} Snpe_Runtime_t; - -/** - * Enumeration of runtime available check options. - */ -typedef enum -{ - /// Perform standard runtime available check - SNPE_RUNTIME_CHECK_OPTION_DEFAULT = 2, - /// Perform standard runtime available check - SNPE_RUNTIME_CHECK_OPTION_NORMAL_CHECK = 0, - /// Perform basic runtime available check, may be runtime specific - SNPE_RUNTIME_CHECK_OPTION_BASIC_CHECK = 1, - /// Perform unsignedPD runtime available check - SNPE_RUNTIME_CHECK_OPTION_UNSIGNEDPD_CHECK = 2, -} Snpe_RuntimeCheckOption_t; - -/** - * Enumeration of various performance profiles that can be requested. - */ -typedef enum -{ - /// Run in a standard mode. - /// This mode will be deprecated in the future and replaced with BALANCED. - SNPE_PERFORMANCE_PROFILE_DEFAULT = 0, - /// Run in a balanced mode. - SNPE_PERFORMANCE_PROFILE_BALANCED = 0, - - /// Run in high performance mode - SNPE_PERFORMANCE_PROFILE_HIGH_PERFORMANCE = 1, - - /// Run in a power sensitive mode, at the expense of performance. - SNPE_PERFORMANCE_PROFILE_POWER_SAVER = 2, - - /// Use system settings. SNPE makes no calls to any performance related APIs. - SNPE_PERFORMANCE_PROFILE_SYSTEM_SETTINGS = 3, - - /// Run in sustained high performance mode - SNPE_PERFORMANCE_PROFILE_SUSTAINED_HIGH_PERFORMANCE = 4, - - /// Run in burst mode - SNPE_PERFORMANCE_PROFILE_BURST = 5, - - /// Run in lower clock than POWER_SAVER, at the expense of performance. - SNPE_PERFORMANCE_PROFILE_LOW_POWER_SAVER = 6, - - /// Run in higher clock and provides better performance than POWER_SAVER. - SNPE_PERFORMANCE_PROFILE_HIGH_POWER_SAVER = 7, - - /// Run in lower balanced mode - SNPE_PERFORMANCE_PROFILE_LOW_BALANCED = 8, - - /// Run in lowest clock at the expense of performance - SNPE_PERFORMANCE_PROFILE_EXTREME_POWER_SAVER = 9, - -} Snpe_PerformanceProfile_t; - -/** - * Enumeration of various profilngLevels that can be requested. - */ -typedef enum -{ - /// No profiling. - /// Collects no runtime stats in the DiagLog - SNPE_PROFILING_LEVEL_OFF = 0, - - /// Basic profiling - /// Collects some runtime stats in the DiagLog - SNPE_PROFILING_LEVEL_BASIC = 1, - - /// Detailed profiling - /// Collects more runtime stats in the DiagLog, including per-layer statistics - /// Performance may be impacted - SNPE_PROFILING_LEVEL_DETAILED = 2, - - /// Moderate profiling - /// Collects more runtime stats in the DiagLog, no per-layer statistics - SNPE_PROFILING_LEVEL_MODERATE = 3, - - /// Linting profiling - /// HTP exclusive profiling level that collects in-depth performance metrics - /// for each op in the graph including main thread execution time and time spent - /// on parallel background ops - SNPE_PROFILING_LEVEL_LINTING = 4 - -} Snpe_ProfilingLevel_t; - -/** - * Enumeration of various execution priority hints. 
- */ -typedef enum -{ - /// Normal priority - SNPE_EXECUTION_PRIORITY_NORMAL = 0, - - /// Higher than normal priority - SNPE_EXECUTION_PRIORITY_HIGH = 1, - - /// Lower priority - SNPE_EXECUTION_PRIORITY_LOW = 2, - - /// Between Normal and High priority - SNPE_EXECUTION_PRIORITY_NORMAL_HIGH = 3 - -} Snpe_ExecutionPriorityHint_t; - -/** - * Enumeration that lists the supported image encoding formats. - */ -typedef enum -{ - /// For unknown image type. Also used as a default value for ImageEncoding_t. - SNPE_IMAGE_ENCODING_UNKNOWN = 0, - - /// The RGB format consists of 3 bytes per pixel: one byte for - /// Red, one for Green, and one for Blue. The byte ordering is - /// endian independent and is always in RGB byte order. - SNPE_IMAGE_ENCODING_RGB = 1, - - /// The ARGB32 format consists of 4 bytes per pixel: one byte for - /// Red, one for Green, one for Blue, and one for the alpha channel. - /// The alpha channel is ignored. The byte ordering depends on the - /// underlying CPU. For little endian CPUs, the byte order is BGRA. - /// For big endian CPUs, the byte order is ARGB. - SNPE_IMAGE_ENCODING_ARGB32 = 2, - - /// The RGBA format consists of 4 bytes per pixel: one byte for - /// Red, one for Green, one for Blue, and one for the alpha channel. - /// The alpha channel is ignored. The byte ordering is endian independent - /// and is always in RGBA byte order. - SNPE_IMAGE_ENCODING_RGBA = 3, - - /// The GRAYSCALE format is for 8-bit grayscale. - SNPE_IMAGE_ENCODING_GRAYSCALE = 4, - - /// NV21 is the Android version of YUV. The Chrominance is down - /// sampled and has a subsampling ratio of 4:2:0. Note that this - /// image format has 3 channels, but the U and V channels - /// are subsampled. For every four Y pixels there is one U and one V pixel. @newpage - SNPE_IMAGE_ENCODING_NV21 = 5, - - /// The BGR format consists of 3 bytes per pixel: one byte for - /// Red, one for Green and one for Blue. The byte ordering is - /// endian independent and is always BGR byte order. - SNPE_IMAGE_ENCODING_BGR = 6 -} Snpe_ImageEncoding_t; - -/** - * Enumeration that lists the supported LogLevels that can be set by users. - */ -typedef enum -{ - /// Enumeration variable to be used by user to set logging level to FATAL. - SNPE_LOG_LEVEL_FATAL = 0, - - /// Enumeration variable to be used by user to set logging level to ERROR. - SNPE_LOG_LEVEL_ERROR = 1, - - /// Enumeration variable to be used by user to set logging level to WARN. - SNPE_LOG_LEVEL_WARN = 2, - - /// Enumeration variable to be used by user to set logging level to INFO. - SNPE_LOG_LEVEL_INFO = 3, - - /// Enumeration variable to be used by user to set logging level to VERBOSE. 
- SNPE_LOG_LEVEL_VERBOSE = 4 -} Snpe_LogLevel_t; - -/** - * Enumeration that list the supported data types for buffers - */ -typedef enum -{ - /// Unspecified - SNPE_IO_BUFFER_DATATYPE_UNSPECIFIED = 0, - - /// 32-bit floating point - SNPE_IO_BUFFER_DATATYPE_FLOATING_POINT_32 = 1, - - /// 16-bit floating point - SNPE_IO_BUFFER_DATATYPE_FLOATING_POINT_16 = 2, - - /// 8-bit fixed point - SNPE_IO_BUFFER_DATATYPE_FIXED_POINT_8 = 3, - - /// 16-bit fixed point - SNPE_IO_BUFFER_DATATYPE_FIXED_POINT_16 = 4 -} Snpe_IOBufferDataType_t; - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _DL_ENUMS_H_ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlEnums.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlEnums.hpp deleted file mode 100644 index 9158f594..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlEnums.hpp +++ /dev/null @@ -1,266 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -namespace DlSystem { -/** @addtogroup c_plus_plus_apis C++ -@{ */ - -/** - * Enumeration of supported target runtimes. - */ -enum class Runtime_t -{ - /// Special value indicating the property is unset. - UNSET = -1, - /// Run the processing on Snapdragon CPU. - /// Data: float 32bit - /// Math: float 32bit - CPU_FLOAT32 = 0, - /// Default legacy enum to retain backward compatibility. - /// CPU = CPU_FLOAT32 - CPU = CPU_FLOAT32, - - /// Run the processing on the Adreno GPU. - /// Data: float 16bit - /// Math: float 32bit - GPU_FLOAT32_16_HYBRID = 1, - /// Default legacy enum to retain backward compatibility. - /// GPU = GPU_FLOAT32_16_HYBRID - GPU = GPU_FLOAT32_16_HYBRID, - - /// Run the processing on the Hexagon DSP. - /// Data: 8bit fixed point Tensorflow style format - /// Math: 8bit fixed point Tensorflow style format - DSP_FIXED8_TF = 2, - /// Default legacy enum to retain backward compatibility. - /// DSP = DSP_FIXED8_TF - DSP = DSP_FIXED8_TF, - - /// Run the processing on the Adreno GPU. - /// Data: float 16bit - /// Math: float 16bit - GPU_FLOAT16 = 3, - - /// Run the processing on Snapdragon AIX+HVX. - /// Data: 8bit fixed point Tensorflow style format - /// Math: 8bit fixed point Tensorflow style format - AIP_FIXED8_TF = 5, - AIP_FIXED_TF = AIP_FIXED8_TF, - - /// Any new enums should be added above this line - NUM_RUNTIME_TARGETS -}; - -/** - * Enumeration of runtime available check options. - */ -enum class RuntimeCheckOption_t -{ - /// Perform standard runtime available check - NORMAL_CHECK = 0, - /// Perform basic runtime available check, may be runtime specific - BASIC_CHECK = 1, - /// Perform unsignedPD runtime available check - UNSIGNEDPD_CHECK = 2, - /// Perform standard runtime available check - DEFAULT = 2, - /// Any new enums should be added above this line - NUM_RUNTIMECHECK_OPTIONS -}; - -/** - * Enumeration of various performance profiles that can be requested. - */ -enum class PerformanceProfile_t -{ - /// Run in a standard mode. - /// This mode will be deprecated in the future and replaced with BALANCED. - DEFAULT = 0, - /// Run in a balanced mode. 
- BALANCED = 0, - - /// Run in high performance mode - HIGH_PERFORMANCE = 1, - - /// Run in a power sensitive mode, at the expense of performance. - POWER_SAVER = 2, - - /// Use system settings. SNPE makes no calls to any performance related APIs. - SYSTEM_SETTINGS = 3, - - /// Run in sustained high performance mode - SUSTAINED_HIGH_PERFORMANCE = 4, - - /// Run in burst mode - BURST = 5, - - /// Run in lower clock than POWER_SAVER, at the expense of performance. - LOW_POWER_SAVER = 6, - - /// Run in higher clock and provides better performance than POWER_SAVER. - HIGH_POWER_SAVER = 7, - - /// Run in lower balanced mode - LOW_BALANCED = 8, - - /// Run in lowest clock at the expense of performance - EXTREME_POWER_SAVER = 9, - - /// Any new enums should be added above this line - NUM_PERF_PROFILES -}; - -/** - * Enumeration of various profilngLevels that can be requested. - */ -enum class ProfilingLevel_t -{ - /// No profiling. - /// Collects no runtime stats in the DiagLog - OFF = 0, - - /// Basic profiling - /// Collects some runtime stats in the DiagLog - BASIC = 1, - - /// Detailed profiling - /// Collects more runtime stats in the DiagLog, including per-layer statistics - /// Performance may be impacted - DETAILED = 2, - - /// Moderate profiling - /// Collects more runtime stats in the DiagLog, no per-layer statistics - MODERATE = 3, - - /// Linting profiling - /// HTP exclusive profiling level that collects in-depth performance metrics - /// for each op in the graph including main thread execution time and time spent - /// on parallel background ops - LINTING = 4 -}; - -/** - * Enumeration of various execution priority hints. - */ -enum class ExecutionPriorityHint_t -{ - /// Normal priority - NORMAL = 0, - - /// Higher than normal priority - HIGH = 1, - - /// Lower priority - LOW = 2, - - /// Between Normal and High priority - NORMAL_HIGH = 3, - - /// Any new enums should be added above this line - NUM_EXECUTION_PRIORITY_HINTS -}; - -/** @} */ /* end_addtogroup c_plus_plus_apis C++*/ - -/** - * Enumeration that lists the supported image encoding formats. - */ -enum class ImageEncoding_t -{ - /// For unknown image type. Also used as a default value for ImageEncoding_t. - UNKNOWN = 0, - - /// The RGB format consists of 3 bytes per pixel: one byte for - /// Red, one for Green, and one for Blue. The byte ordering is - /// endian independent and is always in RGB byte order. - RGB = 1, - - /// The ARGB32 format consists of 4 bytes per pixel: one byte for - /// Red, one for Green, one for Blue, and one for the alpha channel. - /// The alpha channel is ignored. The byte ordering depends on the - /// underlying CPU. For little endian CPUs, the byte order is BGRA. - /// For big endian CPUs, the byte order is ARGB. - ARGB32 = 2, - - /// The RGBA format consists of 4 bytes per pixel: one byte for - /// Red, one for Green, one for Blue, and one for the alpha channel. - /// The alpha channel is ignored. The byte ordering is endian independent - /// and is always in RGBA byte order. - RGBA = 3, - - /// The GRAYSCALE format is for 8-bit grayscale. - GRAYSCALE = 4, - - /// NV21 is the Android version of YUV. The Chrominance is down - /// sampled and has a subsampling ratio of 4:2:0. Note that this - /// image format has 3 channels, but the U and V channels - /// are subsampled. For every four Y pixels there is one U and one V pixel. @newpage - NV21 = 5, - - /// The BGR format consists of 3 bytes per pixel: one byte for - /// Red, one for Green and one for Blue. 
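Runtime_t above is what the single runtime character passed to build_network() ultimately has to be translated into. The mapping below is illustrative only; the characters chosen are an assumed convention, not taken from this diff.

```cpp
// Illustrative mapping from the one-character runtime selector to Runtime_t.
// The specific characters are an assumed convention.
#include "DlSystem/DlEnums.hpp"

zdl::DlSystem::Runtime_t runtimeFromChar(char runtime_arg)
{
    switch (runtime_arg) {
        case 'D': return zdl::DlSystem::Runtime_t::DSP;   // Hexagon DSP / HTP
        case 'G': return zdl::DlSystem::Runtime_t::GPU;   // Adreno GPU (float16 math)
        default:  return zdl::DlSystem::Runtime_t::CPU;   // float32 CPU fallback
    }
}
```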
The byte ordering is - /// endian independent and is always BGR byte order. - BGR = 6 -}; - -/** - * Enumeration that lists the supported LogLevels that can be set by users. - */ -enum class LogLevel_t -{ - /// Enumeration variable to be used by user to set logging level to FATAL. - LOG_FATAL = 0, - - /// Enumeration variable to be used by user to set logging level to ERROR. - LOG_ERROR = 1, - - /// Enumeration variable to be used by user to set logging level to WARN. - LOG_WARN = 2, - - /// Enumeration variable to be used by user to set logging level to INFO. - LOG_INFO = 3, - - /// Enumeration variable to be used by user to set logging level to VERBOSE. - LOG_VERBOSE = 4, - - /// Any new enums should be added above this line - NUM_LOG_LEVELS -}; - -enum class IOBufferDataType_t : int -{ - UNSPECIFIED = 0, - FLOATING_POINT_32 = 1, - FLOATING_POINT_16 = 2, - FIXED_POINT_8 = 3, - FIXED_POINT_16 = 4, - INT_32 = 5, - UINT_32 = 6, - INT_8 = 7, - UINT_8 = 8, - INT_16 = 9, - UINT_16 = 10, - BOOL_8 = 11, - INT_64 = 12, - UINT_64 = 13 -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, Runtime_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, RuntimeCheckOption_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, PerformanceProfile_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ProfilingLevel_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ExecutionPriorityHint_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ImageEncoding_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, LogLevel_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, IOBufferDataType_t) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlError.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlError.h deleted file mode 100644 index f8c216ea..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlError.h +++ /dev/null @@ -1,299 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _DL_ERROR_H_ -#define _DL_ERROR_H_ - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "SnpeApiExportDefine.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * Enumeration of error codes - */ -typedef enum -{ - /// Indicate success: SNPE_SUCCESS = 0 - SNPE_SUCCESS = 0, - - // C API Error Codes - // This is a temporary place for them. 
We still have to figure out how to manage - // passing error codes from the C API to C++ if we want to use things like SetLastError - SNPE_ERRORCODE_CAPI_CREATE_FAILURE = 10, - SNPE_ERRORCODE_CAPI_HANDLEGEN_FAILURE = 11, - SNPE_ERRORCODE_CAPI_DELETE_FAILURE = 12, - SNPE_ERRORCODE_CAPI_BAD_HANDLE = 13, - SNPE_ERRORCODE_CAPI_BAD_ARGUMENT = 14, - SNPE_ERRORCODE_CAPI_BAD_ALLOC = 15, - - // System config errors - SNPE_ERRORCODE_CONFIG_MISSING_PARAM = 100, - SNPE_ERRORCODE_CONFIG_INVALID_PARAM = 101, - SNPE_ERRORCODE_CONFIG_MISSING_FILE = 102, - SNPE_ERRORCODE_CONFIG_NNCONFIG_NOT_SET = 103, - SNPE_ERRORCODE_CONFIG_NNCONFIG_INVALID = 104, - SNPE_ERRORCODE_CONFIG_WRONG_INPUT_NAME = 105, - SNPE_ERRORCODE_CONFIG_INCORRECT_INPUT_DIMENSIONS = 106, - SNPE_ERRORCODE_CONFIG_DIMENSIONS_MODIFICATION_NOT_SUPPORTED = 107, - SNPE_ERRORCODE_CONFIG_BOTH_OUTPUT_LAYER_TENSOR_NAMES_SET = 108, - - SNPE_ERRORCODE_CONFIG_NNCONFIG_ONLY_TENSOR_SUPPORTED = 120, - SNPE_ERRORCODE_CONFIG_NNCONFIG_ONLY_USER_BUFFER_SUPPORTED = 121, - - // DlSystem errors - SNPE_ERRORCODE_DLSYSTEM_MISSING_BUFFER = 200, - SNPE_ERRORCODE_DLSYSTEM_TENSOR_CAST_FAILED = 201, - SNPE_ERRORCODE_DLSYSTEM_FIXED_POINT_PARAM_INVALID = 202, - SNPE_ERRORCODE_DLSYSTEM_SIZE_MISMATCH = 203, - SNPE_ERRORCODE_DLSYSTEM_NAME_NOT_FOUND = 204, - SNPE_ERRORCODE_DLSYSTEM_VALUE_MISMATCH = 205, - SNPE_ERRORCODE_DLSYSTEM_INSERT_FAILED = 206, - SNPE_ERRORCODE_DLSYSTEM_TENSOR_FILE_READ_FAILED = 207, - SNPE_ERRORCODE_DLSYSTEM_DIAGLOG_FAILURE = 208, - SNPE_ERRORCODE_DLSYSTEM_LAYER_NOT_SET = 209, - SNPE_ERRORCODE_DLSYSTEM_WRONG_NUMBER_INPUT_BUFFERS = 210, - SNPE_ERRORCODE_DLSYSTEM_RUNTIME_TENSOR_SHAPE_MISMATCH = 211, - SNPE_ERRORCODE_DLSYSTEM_TENSOR_MISSING = 212, - SNPE_ERRORCODE_DLSYSTEM_TENSOR_ITERATION_UNSUPPORTED = 213, - SNPE_ERRORCODE_DLSYSTEM_BUFFER_MANAGER_MISSING = 214, - SNPE_ERRORCODE_DLSYSTEM_RUNTIME_BUFFER_SOURCE_UNSUPPORTED = 215, - SNPE_ERRORCODE_DLSYSTEM_BUFFER_CAST_FAILED = 216, - SNPE_ERRORCODE_DLSYSTEM_WRONG_TRANSITION_TYPE = 217, - SNPE_ERRORCODE_DLSYSTEM_LAYER_ALREADY_REGISTERED = 218, - SNPE_ERRORCODE_DLSYSTEM_TENSOR_DIM_INVALID = 219, - - SNPE_ERRORCODE_DLSYSTEM_BUFFERENCODING_UNKNOWN = 240, - SNPE_ERRORCODE_DLSYSTEM_BUFFER_INVALID_PARAM = 241, - - // DlContainer errors - SNPE_ERRORCODE_DLCONTAINER_MODEL_PARSING_FAILED = 300, - SNPE_ERRORCODE_DLCONTAINER_UNKNOWN_LAYER_CODE = 301, - SNPE_ERRORCODE_DLCONTAINER_MISSING_LAYER_PARAM = 302, - SNPE_ERRORCODE_DLCONTAINER_LAYER_PARAM_NOT_SUPPORTED = 303, - SNPE_ERRORCODE_DLCONTAINER_LAYER_PARAM_INVALID = 304, - SNPE_ERRORCODE_DLCONTAINER_TENSOR_DATA_MISSING = 305, - SNPE_ERRORCODE_DLCONTAINER_MODEL_LOAD_FAILED = 306, - SNPE_ERRORCODE_DLCONTAINER_MISSING_RECORDS = 307, - SNPE_ERRORCODE_DLCONTAINER_INVALID_RECORD = 308, - SNPE_ERRORCODE_DLCONTAINER_WRITE_FAILURE = 309, - SNPE_ERRORCODE_DLCONTAINER_READ_FAILURE = 310, - SNPE_ERRORCODE_DLCONTAINER_BAD_CONTAINER = 311, - SNPE_ERRORCODE_DLCONTAINER_BAD_DNN_FORMAT_VERSION = 312, - SNPE_ERRORCODE_DLCONTAINER_UNKNOWN_AXIS_ANNOTATION = 313, - SNPE_ERRORCODE_DLCONTAINER_UNKNOWN_SHUFFLE_TYPE = 314, - SNPE_ERRORCODE_DLCONTAINER_TEMP_FILE_FAILURE = 315, - - // Network errors - SNPE_ERRORCODE_NETWORK_EMPTY_NETWORK = 400, - SNPE_ERRORCODE_NETWORK_CREATION_FAILED = 401, - SNPE_ERRORCODE_NETWORK_PARTITION_FAILED = 402, - SNPE_ERRORCODE_NETWORK_NO_OUTPUT_DEFINED = 403, - SNPE_ERRORCODE_NETWORK_MISMATCH_BETWEEN_NAMES_AND_DIMS = 404, - SNPE_ERRORCODE_NETWORK_MISSING_INPUT_NAMES = 405, - SNPE_ERRORCODE_NETWORK_MISSING_OUTPUT_NAMES = 406, - SNPE_ERRORCODE_NETWORK_EXECUTION_FAILED = 
407, - - // Host runtime errors - SNPE_ERRORCODE_HOST_RUNTIME_TARGET_UNAVAILABLE = 500, - - // CPU runtime errors - SNPE_ERRORCODE_CPU_LAYER_NOT_SUPPORTED = 600, - SNPE_ERRORCODE_CPU_LAYER_PARAM_NOT_SUPPORTED = 601, - SNPE_ERRORCODE_CPU_LAYER_PARAM_INVALID = 602, - SNPE_ERRORCODE_CPU_LAYER_PARAM_COMBINATION_INVALID = 603, - SNPE_ERRORCODE_CPU_BUFFER_NOT_FOUND = 604, - SNPE_ERRORCODE_CPU_NETWORK_NOT_SUPPORTED = 605, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_ERRORCODE_CPU_UDO_OPERATION_FAILED = 606, -#endif //DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // CPU fixed-point runtime errors - SNPE_ERRORCODE_CPU_FXP_LAYER_NOT_SUPPORTED = 700, - SNPE_ERRORCODE_CPU_FXP_LAYER_PARAM_NOT_SUPPORTED = 701, - SNPE_ERRORCODE_CPU_FXP_LAYER_PARAM_INVALID = 702, - SNPE_ERRORCODE_CPU_FXP_OPTION_INVALID = 703, - - // GPU runtime errors - SNPE_ERRORCODE_GPU_LAYER_NOT_SUPPORTED = 800, - SNPE_ERRORCODE_GPU_LAYER_PARAM_NOT_SUPPORTED = 801, - SNPE_ERRORCODE_GPU_LAYER_PARAM_INVALID = 802, - SNPE_ERRORCODE_GPU_LAYER_PARAM_COMBINATION_INVALID = 803, - SNPE_ERRORCODE_GPU_KERNEL_COMPILATION_FAILED = 804, - SNPE_ERRORCODE_GPU_CONTEXT_NOT_SET = 805, - SNPE_ERRORCODE_GPU_KERNEL_NOT_SET = 806, - SNPE_ERRORCODE_GPU_KERNEL_PARAM_INVALID = 807, - SNPE_ERRORCODE_GPU_OPENCL_CHECK_FAILED = 808, - SNPE_ERRORCODE_GPU_OPENCL_FUNCTION_ERROR = 809, - SNPE_ERRORCODE_GPU_BUFFER_NOT_FOUND = 810, - SNPE_ERRORCODE_GPU_TENSOR_DIM_INVALID = 811, - SNPE_ERRORCODE_GPU_MEMORY_FLAGS_INVALID = 812, - SNPE_ERRORCODE_GPU_UNEXPECTED_NUMBER_OF_IO = 813, - SNPE_ERRORCODE_GPU_LAYER_PROXY_ERROR = 814, - SNPE_ERRORCODE_GPU_BUFFER_IN_USE = 815, - SNPE_ERRORCODE_GPU_BUFFER_MODIFICATION_ERROR = 816, - SNPE_ERRORCODE_GPU_DATA_ARRANGEMENT_INVALID = 817, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_ERRORCODE_GPU_UDO_OPERATION_FAILED = 818, -#endif //DNN_RUNTIME_HAVE_UDO_CAPABILITY - // DSP runtime errors - SNPE_ERRORCODE_DSP_LAYER_NOT_SUPPORTED = 900, - SNPE_ERRORCODE_DSP_LAYER_PARAM_NOT_SUPPORTED = 901, - SNPE_ERRORCODE_DSP_LAYER_PARAM_INVALID = 902, - SNPE_ERRORCODE_DSP_LAYER_PARAM_COMBINATION_INVALID = 903, - SNPE_ERRORCODE_DSP_STUB_NOT_PRESENT = 904, - SNPE_ERRORCODE_DSP_LAYER_NAME_TRUNCATED = 905, - SNPE_ERRORCODE_DSP_LAYER_INPUT_BUFFER_NAME_TRUNCATED = 906, - SNPE_ERRORCODE_DSP_LAYER_OUTPUT_BUFFER_NAME_TRUNCATED = 907, - SNPE_ERRORCODE_DSP_RUNTIME_COMMUNICATION_ERROR = 908, - SNPE_ERRORCODE_DSP_RUNTIME_INVALID_PARAM_ERROR = 909, - SNPE_ERRORCODE_DSP_RUNTIME_SYSTEM_ERROR = 910, - SNPE_ERRORCODE_DSP_RUNTIME_CRASHED_ERROR = 911, - SNPE_ERRORCODE_DSP_BUFFER_SIZE_ERROR = 912, - SNPE_ERRORCODE_DSP_UDO_EXECUTE_ERROR = 913, - SNPE_ERRORCODE_DSP_UDO_LIB_NOT_REGISTERED_ERROR = 914, - SNPE_ERRORCODE_DSP_UDO_INVALID_QUANTIZATION_TYPE_ERROR = 915, - - // Model validataion errors - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_NOT_SUPPORTED = 1000, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_PARAM_NOT_SUPPORTED = 1001, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_PARAM_INVALID = 1002, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_PARAM_MISSING = 1003, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_PARAM_COMBINATION_INVALID = 1004, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_ORDERING_INVALID = 1005, - SNPE_ERRORCODE_MODEL_VALIDATION_INVALID_CONSTRAINT = 1006, - SNPE_ERRORCODE_MODEL_VALIDATION_MISSING_BUFFER = 1007, - SNPE_ERRORCODE_MODEL_VALIDATION_BUFFER_REUSE_NOT_SUPPORTED = 1008, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_COULD_NOT_BE_ASSIGNED = 1009, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_ERRORCODE_MODEL_VALIDATION_UDO_LAYER_FAILED = 1010, -#endif // DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // UDL 
errors - SNPE_ERRORCODE_UDL_LAYER_EMPTY_UDL_NETWORK = 1100, - SNPE_ERRORCODE_UDL_LAYER_PARAM_INVALID = 1101, - SNPE_ERRORCODE_UDL_LAYER_INSTANCE_MISSING = 1102, - SNPE_ERRORCODE_UDL_LAYER_SETUP_FAILED = 1103, - SNPE_ERRORCODE_UDL_EXECUTE_FAILED = 1104, - SNPE_ERRORCODE_UDL_BUNDLE_INVALID = 1105, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_ERRORCODE_UDO_REGISTRATION_FAILED = 1106, - SNPE_ERRORCODE_UDO_GET_PACKAGE_FAILED = 1107, - SNPE_ERRORCODE_UDO_GET_IMPLEMENTATION_FAILED = 1108, -#endif // DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // Dependent library errors - SNPE_ERRORCODE_STD_LIBRARY_ERROR = 1200, - - // Unknown exception (catch (...)), Has no component attached to this - SNPE_ERRORCODE_UNKNOWN_EXCEPTION = 1210, - - // Storage Errors - SNPE_ERRORCODE_STORAGE_INVALID_KERNEL_REPO = 1300, - -#ifdef DNN_RUNTIME_HAVE_AIP_RUNTIME - // AIP runtime errors - SNPE_ERRORCODE_AIP_LAYER_NOT_SUPPORTED = 1400, - SNPE_ERRORCODE_AIP_LAYER_PARAM_NOT_SUPPORTED = 1401, - SNPE_ERRORCODE_AIP_LAYER_PARAM_INVALID = 1402, - SNPE_ERRORCODE_AIP_LAYER_PARAM_COMBINATION_INVALID = 1403, - SNPE_ERRORCODE_AIP_STUB_NOT_PRESENT = 1404, - SNPE_ERRORCODE_AIP_LAYER_NAME_TRUNCATED = 1405, - SNPE_ERRORCODE_AIP_LAYER_INPUT_BUFFER_NAME_TRUNCATED = 1406, - SNPE_ERRORCODE_AIP_LAYER_OUTPUT_BUFFER_NAME_TRUNCATED = 1407, - SNPE_ERRORCODE_AIP_RUNTIME_COMMUNICATION_ERROR = 1408, - SNPE_ERRORCODE_AIP_RUNTIME_INVALID_PARAM_ERROR = 1409, - SNPE_ERRORCODE_AIP_RUNTIME_SYSTEM_ERROR = 1410, - SNPE_ERRORCODE_AIP_RUNTIME_TENSOR_MISSING = 1411, - SNPE_ERRORCODE_AIP_RUNTIME_TENSOR_SHAPE_MISMATCH = 1412, - SNPE_ERRORCODE_AIP_RUNTIME_BAD_AIX_RECORD = 1413, -#endif // DNN_RUNTIME_HAVE_AIP_RUNTIME - - // DlCaching errors - SNPE_ERRORCODE_DLCACHING_INVALID_METADATA = 1500, - SNPE_ERRORCODE_DLCACHING_INVALID_INITBLOB = 1501, - - // Infrastructure Errors - SNPE_ERRORCODE_INFRA_CLUSTERMGR_INSTANCE_INVALID = 1600, - SNPE_ERRORCODE_INFRA_CLUSTERMGR_EXECUTE_SYNC_FAILED = 1601, - - // Memory Errors - SNPE_ERRORCODE_MEMORY_CORRUPTION_ERROR = 1700 - -} Snpe_ErrorCode_t; - - - -/** - * Clear the last error code - */ -SNPE_API void Snpe_ErrorCode_clearLastErrorCode(); - -/** -* Returns the error code of the last error encountered. -* -* @return The error code. -* -* @note The returned error code is significant only when the return -* value of the call indicated an error. -*/ -SNPE_API Snpe_ErrorCode_t Snpe_ErrorCode_getLastErrorCode(); - -/** -* Returns the error string of the last error encountered. -* -* @return The error string. -* -* @note The returned error string is significant only when the return -* value of the call indicated an error. -*/ -SNPE_API const char* Snpe_ErrorCode_GetLastErrorString(); - -/** - * Returns the info string of the last error encountered. - */ -SNPE_API const char* Snpe_ErrorCode_getLastInfoString(); - -/** - * Returns the uint32_t representation of the error code enum. - * - * @param[in] code The error code to be converted. - * - * @return uint32_t representation of the error code. 
- */ -SNPE_API uint32_t Snpe_ErrorCode_enumToUInt32(Snpe_ErrorCode_t code); - - -#ifdef __cplusplus -} // extern "C" -#endif - - -#endif // _DL_ERROR_H_ - diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlError.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlError.hpp deleted file mode 100644 index 55dc2140..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlError.hpp +++ /dev/null @@ -1,261 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include - -#include "DlSystem/DlError.h" - - -namespace DlSystem { - -enum class ErrorCode : uint32_t { - NONE = 0, - - // C API Error Codes - // This is a temporary place for them. We still have to figure out how to manage - // passing error codes from the C API to C++ if we want to use things like SetLastError - SNPE_CAPI_CREATE_FAILURE = 10, - SNPE_CAPI_HANDLEGEN_FAILURE = 11, - SNPE_CAPI_DELETE_FAILURE = 12, - SNPE_CAPI_BAD_HANDLE = 13, - SNPE_CAPI_BAD_ARGUMENT = 14, - SNPE_CAPI_BAD_ALLOC = 15, - - - // System config errors - SNPE_CONFIG_MISSING_PARAM = 100, - SNPE_CONFIG_INVALID_PARAM = 101, - SNPE_CONFIG_MISSING_FILE = 102, - SNPE_CONFIG_NNCONFIG_NOT_SET = 103, - SNPE_CONFIG_NNCONFIG_INVALID = 104, - SNPE_CONFIG_WRONG_INPUT_NAME = 105, - SNPE_CONFIG_INCORRECT_INPUT_DIMENSIONS = 106, - SNPE_CONFIG_DIMENSIONS_MODIFICATION_NOT_SUPPORTED = 107, - SNPE_CONFIG_BOTH_OUTPUT_LAYER_TENSOR_NAMES_SET = 108, - - SNPE_CONFIG_NNCONFIG_ONLY_TENSOR_SUPPORTED = 120, - SNPE_CONFIG_NNCONFIG_ONLY_USER_BUFFER_SUPPORTED = 121, - - // DlSystem errors - SNPE_DLSYSTEM_MISSING_BUFFER = 200, - SNPE_DLSYSTEM_TENSOR_CAST_FAILED = 201, - SNPE_DLSYSTEM_FIXED_POINT_PARAM_INVALID = 202, - SNPE_DLSYSTEM_SIZE_MISMATCH = 203, - SNPE_DLSYSTEM_NAME_NOT_FOUND = 204, - SNPE_DLSYSTEM_VALUE_MISMATCH = 205, - SNPE_DLSYSTEM_INSERT_FAILED = 206, - SNPE_DLSYSTEM_TENSOR_FILE_READ_FAILED = 207, - SNPE_DLSYSTEM_DIAGLOG_FAILURE = 208, - SNPE_DLSYSTEM_LAYER_NOT_SET = 209, - SNPE_DLSYSTEM_WRONG_NUMBER_INPUT_BUFFERS = 210, - SNPE_DLSYSTEM_RUNTIME_TENSOR_SHAPE_MISMATCH = 211, - SNPE_DLSYSTEM_TENSOR_MISSING = 212, - SNPE_DLSYSTEM_TENSOR_ITERATION_UNSUPPORTED = 213, - SNPE_DLSYSTEM_BUFFER_MANAGER_MISSING = 214, - SNPE_DLSYSTEM_RUNTIME_BUFFER_SOURCE_UNSUPPORTED = 215, - SNPE_DLSYSTEM_BUFFER_CAST_FAILED = 216, - SNPE_DLSYSTEM_WRONG_TRANSITION_TYPE = 217, - SNPE_DLSYSTEM_LAYER_ALREADY_REGISTERED = 218, - SNPE_DLSYSTEM_TENSOR_DIM_INVALID = 219, - - SNPE_DLSYSTEM_BUFFERENCODING_UNKNOWN = 240, - SNPE_DLSYSTEM_BUFFER_INVALID_PARAM = 241, - - // DlContainer errors - SNPE_DLCONTAINER_MODEL_PARSING_FAILED = 300, - SNPE_DLCONTAINER_UNKNOWN_LAYER_CODE = 301, - SNPE_DLCONTAINER_MISSING_LAYER_PARAM = 302, - SNPE_DLCONTAINER_LAYER_PARAM_NOT_SUPPORTED = 303, - SNPE_DLCONTAINER_LAYER_PARAM_INVALID = 304, - SNPE_DLCONTAINER_TENSOR_DATA_MISSING = 305, - SNPE_DLCONTAINER_MODEL_LOAD_FAILED = 306, - SNPE_DLCONTAINER_MISSING_RECORDS = 307, - SNPE_DLCONTAINER_INVALID_RECORD = 308, - SNPE_DLCONTAINER_WRITE_FAILURE = 309, - SNPE_DLCONTAINER_READ_FAILURE = 310, - SNPE_DLCONTAINER_BAD_CONTAINER = 311, - SNPE_DLCONTAINER_BAD_DNN_FORMAT_VERSION = 312, - 
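As an illustration of the last-error query functions declared in DlError.h above, the following minimal sketch checks the thread's last SNPE error after some API call has reported failure. The failing call itself (represented by the succeeded flag) is hypothetical; only the Snpe_ErrorCode_* functions come from that header.

    #include <cstdio>
    #include "DlSystem/DlError.h"   // assuming .../inc/zdl is on the include path

    // 'succeeded' stands in for the outcome of some SNPE C API call (hypothetical).
    static void logLastSnpeError(bool succeeded) {
        if (!succeeded) {
            Snpe_ErrorCode_t code = Snpe_ErrorCode_getLastErrorCode();
            std::printf("SNPE error %u: %s\n",
                        (unsigned) Snpe_ErrorCode_enumToUInt32(code),
                        Snpe_ErrorCode_GetLastErrorString());
            Snpe_ErrorCode_clearLastErrorCode();  // reset before the next call
        }
    }

The C++ wrapper in DlError.hpp exposes the same queries as DlSystem::getLastErrorCode and DlSystem::getLastErrorString.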
SNPE_DLCONTAINER_UNKNOWN_AXIS_ANNOTATION = 313, - SNPE_DLCONTAINER_UNKNOWN_SHUFFLE_TYPE = 314, - SNPE_DLCONTAINER_TEMP_FILE_FAILURE = 315, - - // Network errors - SNPE_NETWORK_EMPTY_NETWORK = 400, - SNPE_NETWORK_CREATION_FAILED = 401, - SNPE_NETWORK_PARTITION_FAILED = 402, - SNPE_NETWORK_NO_OUTPUT_DEFINED = 403, - SNPE_NETWORK_MISMATCH_BETWEEN_NAMES_AND_DIMS = 404, - SNPE_NETWORK_MISSING_INPUT_NAMES = 405, - SNPE_NETWORK_MISSING_OUTPUT_NAMES = 406, - SNPE_NETWORK_EXECUTION_FAILED = 407, - - // Host runtime errors - SNPE_HOST_RUNTIME_TARGET_UNAVAILABLE = 500, - - // CPU runtime errors - SNPE_CPU_LAYER_NOT_SUPPORTED = 600, - SNPE_CPU_LAYER_PARAM_NOT_SUPPORTED = 601, - SNPE_CPU_LAYER_PARAM_INVALID = 602, - SNPE_CPU_LAYER_PARAM_COMBINATION_INVALID = 603, - SNPE_CPU_BUFFER_NOT_FOUND = 604, - SNPE_CPU_NETWORK_NOT_SUPPORTED = 605, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_CPU_UDO_OPERATION_FAILED = 606, -#endif //DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // CPU fixed-point runtime errors - SNPE_CPU_FXP_LAYER_NOT_SUPPORTED = 700, - SNPE_CPU_FXP_LAYER_PARAM_NOT_SUPPORTED = 701, - SNPE_CPU_FXP_LAYER_PARAM_INVALID = 702, - SNPE_CPU_FXP_OPTION_INVALID = 703, - - // GPU runtime errors - SNPE_GPU_LAYER_NOT_SUPPORTED = 800, - SNPE_GPU_LAYER_PARAM_NOT_SUPPORTED = 801, - SNPE_GPU_LAYER_PARAM_INVALID = 802, - SNPE_GPU_LAYER_PARAM_COMBINATION_INVALID = 803, - SNPE_GPU_KERNEL_COMPILATION_FAILED = 804, - SNPE_GPU_CONTEXT_NOT_SET = 805, - SNPE_GPU_KERNEL_NOT_SET = 806, - SNPE_GPU_KERNEL_PARAM_INVALID = 807, - SNPE_GPU_OPENCL_CHECK_FAILED = 808, - SNPE_GPU_OPENCL_FUNCTION_ERROR = 809, - SNPE_GPU_BUFFER_NOT_FOUND = 810, - SNPE_GPU_TENSOR_DIM_INVALID = 811, - SNPE_GPU_MEMORY_FLAGS_INVALID = 812, - SNPE_GPU_UNEXPECTED_NUMBER_OF_IO = 813, - SNPE_GPU_LAYER_PROXY_ERROR = 814, - SNPE_GPU_BUFFER_IN_USE = 815, - SNPE_GPU_BUFFER_MODIFICATION_ERROR = 816, - SNPE_GPU_DATA_ARRANGEMENT_INVALID = 817, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_GPU_UDO_OPERATION_FAILED = 818, -#endif //DNN_RUNTIME_HAVE_UDO_CAPABILITY - // DSP runtime errors - SNPE_DSP_LAYER_NOT_SUPPORTED = 900, - SNPE_DSP_LAYER_PARAM_NOT_SUPPORTED = 901, - SNPE_DSP_LAYER_PARAM_INVALID = 902, - SNPE_DSP_LAYER_PARAM_COMBINATION_INVALID = 903, - SNPE_DSP_STUB_NOT_PRESENT = 904, - SNPE_DSP_LAYER_NAME_TRUNCATED = 905, - SNPE_DSP_LAYER_INPUT_BUFFER_NAME_TRUNCATED = 906, - SNPE_DSP_LAYER_OUTPUT_BUFFER_NAME_TRUNCATED = 907, - SNPE_DSP_RUNTIME_COMMUNICATION_ERROR = 908, - SNPE_DSP_RUNTIME_INVALID_PARAM_ERROR = 909, - SNPE_DSP_RUNTIME_SYSTEM_ERROR = 910, - SNPE_DSP_RUNTIME_CRASHED_ERROR = 911, - SNPE_DSP_BUFFER_SIZE_ERROR = 912, - SNPE_DSP_UDO_EXECUTE_ERROR = 913, - SNPE_DSP_UDO_LIB_NOT_REGISTERED_ERROR = 914, - SNPE_DSP_UDO_INVALID_QUANTIZATION_TYPE_ERROR = 915, - SNPE_DSP_RUNTIME_INVALID_RPC_DRIVER = 916, - SNPE_DSP_RUNTIME_RPC_PERMISSION_ERROR = 917, - SNPE_DSP_RUNTIME_DSP_FILE_OPEN_ERROR = 918, - - // Model validataion errors - SNPE_MODEL_VALIDATION_LAYER_NOT_SUPPORTED = 1000, - SNPE_MODEL_VALIDATION_LAYER_PARAM_NOT_SUPPORTED = 1001, - SNPE_MODEL_VALIDATION_LAYER_PARAM_INVALID = 1002, - SNPE_MODEL_VALIDATION_LAYER_PARAM_MISSING = 1003, - SNPE_MODEL_VALIDATION_LAYER_PARAM_COMBINATION_INVALID = 1004, - SNPE_MODEL_VALIDATION_LAYER_ORDERING_INVALID = 1005, - SNPE_MODEL_VALIDATION_INVALID_CONSTRAINT = 1006, - SNPE_MODEL_VALIDATION_MISSING_BUFFER = 1007, - SNPE_MODEL_VALIDATION_BUFFER_REUSE_NOT_SUPPORTED = 1008, - SNPE_MODEL_VALIDATION_LAYER_COULD_NOT_BE_ASSIGNED = 1009, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_MODEL_VALIDATION_UDO_LAYER_FAILED = 1010, -#endif // 
DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // UDL errors - SNPE_UDL_LAYER_EMPTY_UDL_NETWORK = 1100, - SNPE_UDL_LAYER_PARAM_INVALID = 1101, - SNPE_UDL_LAYER_INSTANCE_MISSING = 1102, - SNPE_UDL_LAYER_SETUP_FAILED = 1103, - SNPE_UDL_EXECUTE_FAILED = 1104, - SNPE_UDL_BUNDLE_INVALID = 1105, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_UDO_REGISTRATION_FAILED = 1106, - SNPE_UDO_GET_PACKAGE_FAILED = 1107, - SNPE_UDO_GET_IMPLEMENTATION_FAILED = 1108, -#endif // DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // Dependent library errors - SNPE_STD_LIBRARY_ERROR = 1200, - - // Unknown exception (catch (...)), Has no component attached to this - SNPE_UNKNOWN_EXCEPTION = 1210, - - // Storage Errors - SNPE_STORAGE_INVALID_KERNEL_REPO = 1300, - -#ifdef DNN_RUNTIME_HAVE_AIP_RUNTIME - // AIP runtime errors - SNPE_AIP_LAYER_NOT_SUPPORTED = 1400, - SNPE_AIP_LAYER_PARAM_NOT_SUPPORTED = 1401, - SNPE_AIP_LAYER_PARAM_INVALID = 1402, - SNPE_AIP_LAYER_PARAM_COMBINATION_INVALID = 1403, - SNPE_AIP_STUB_NOT_PRESENT = 1404, - SNPE_AIP_LAYER_NAME_TRUNCATED = 1405, - SNPE_AIP_LAYER_INPUT_BUFFER_NAME_TRUNCATED = 1406, - SNPE_AIP_LAYER_OUTPUT_BUFFER_NAME_TRUNCATED = 1407, - SNPE_AIP_RUNTIME_COMMUNICATION_ERROR = 1408, - SNPE_AIP_RUNTIME_INVALID_PARAM_ERROR = 1409, - SNPE_AIP_RUNTIME_SYSTEM_ERROR = 1410, - SNPE_AIP_RUNTIME_TENSOR_MISSING = 1411, - SNPE_AIP_RUNTIME_TENSOR_SHAPE_MISMATCH = 1412, - SNPE_AIP_RUNTIME_BAD_AIX_RECORD = 1413, - SNPE_AIP_AXIS_QUANT_UNSUPPORTED = 1414, - -#endif // DNN_RUNTIME_HAVE_AIP_RUNTIME - - // DlCaching errors - SNPE_DLCACHING_INVALID_METADATA = 1500, - SNPE_DLCACHING_INVALID_INITBLOB = 1501, - - // Infrastructure Errors - SNPE_INFRA_CLUSTERMGR_INSTANCE_INVALID = 1600, - SNPE_INFRA_CLUSTERMGR_EXECUTE_SYNC_FAILED = 1601, - - // Memory Errors - SNPE_MEMORY_CORRUPTION_ERROR = 1700 - -}; - - -inline ErrorCode getLastErrorCode(){ - return static_cast(Snpe_ErrorCode_getLastErrorCode()); -} - -inline const char* getLastErrorString(){ - return Snpe_ErrorCode_GetLastErrorString(); -} - -inline const char* getLastInfoString(){ - return Snpe_ErrorCode_getLastInfoString(); -} - - -inline uint32_t enumToUInt32(ErrorCode code){ - return Snpe_ErrorCode_enumToUInt32(static_cast(code)); -} - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ErrorCode); - - -namespace zdl{ namespace DlSystem { - inline ErrorCode getLastErrorCode() { return ::DlSystem::getLastErrorCode() ; } - inline const char* getLastErrorString() { return ::DlSystem::getLastErrorString() ; } - inline const char* getLastInfoString() { return ::DlSystem::getLastInfoString() ; } - inline uint32_t enumToUInt32(ErrorCode code){ return ::DlSystem::enumToUInt32(code); } -}} // ns zdl::DlSystem diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlOptional.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlOptional.hpp deleted file mode 100644 index e7bbf666..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlOptional.hpp +++ /dev/null @@ -1,244 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include -#include - - -//============================================================================== -// -// Copyright (c) 2016, 2020 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -//#include -#include -//#include - - -namespace DlSystem { - - -/** @addtogroup c_plus_plus_apis C++ -@{ */ - -/** - * @brief . - * - * Class to manage a value that may or may not exist. The boolean value - * of the Optional class is true if the object contains a value and false - * if it does not contain a value. - * - * The class must be evaluated and confirmed as true (containing a value) - * before being dereferenced. - */ -template -class Optional { -public: - enum class LIFECYCLE { - NONE = 0, - REFERENCE_OWNED = 1, - POINTER_OWNED = 2, - POINTER_NOT_OWNED = 3 - }; - - struct ReferenceCount { - size_t count = 0; - - void increment() { count++; } - - size_t decrement() { - if (count > 0) { - count--; - } - return count; - } - }; - - using U = typename std::remove_pointer::type; - - /** - * The default constructor is set to not have any value, and is - * therefore evaluated as false. - */ - // Do not explicit it so we can return {} - Optional() { - m_Type = LIFECYCLE::NONE; - } - - /** - * Construct an Optional class using an object. - * @param[in] Reference to an object v - * @param[out] Optional instance of object v - */ - template - Optional (const T& v, typename std::enable_if::value>::type* = 0) - : m_Type(LIFECYCLE::REFERENCE_OWNED) { - try { - m_StoragePtr = new T(v); - } catch (...) { - m_StoragePtr = nullptr; - m_Type = LIFECYCLE::NONE; - } - } - - template - Optional(U* v, LIFECYCLE type, typename std::enable_if::value>::type* = 0) - : m_Type(type) { - switch (m_Type) { - case LIFECYCLE::POINTER_OWNED: - m_StoragePtr = v; - m_Count = new ReferenceCount(); - m_Count->increment(); - break; - case LIFECYCLE::POINTER_NOT_OWNED: - m_StoragePtr = v; - break; - case LIFECYCLE::REFERENCE_OWNED: - throw std::bad_exception(); - case LIFECYCLE::NONE: - break; - } - } - - Optional(const Optional &other) : m_Type(other.m_Type), m_Count(other.m_Count) { - if (isReference()) { - m_StoragePtr = new U(*other.m_StoragePtr); - } else if (isPointer()) { - m_StoragePtr = other.m_StoragePtr; - if (isOwned()) { - m_Count->increment(); - } - } - } - - Optional& operator=(const Optional& other) noexcept { - Optional tmp(other); - swap(std::move(tmp)); - return *this; - } - - Optional(Optional&& other) noexcept { - swap(std::move(other)); - } - - Optional& operator=(Optional&& other) noexcept { - swap(std::move(other)); - return *this; - } - - ~Optional() { - if (isOwned()) { - if (isReference() || (isPointer() && m_Count->decrement() == 0)) { - delete m_StoragePtr; - delete m_Count; - } - } - } - - /** - * Boolean value of Optional class is only true when there exists a value. - */ - operator bool() const noexcept { return isValid(); } - - bool operator!() const noexcept { return !isValid(); } - - /** - * Get reference of Optional object - * @warning User must validate Optional has value before. - */ - const T& operator*() { return this->GetReference(); } - - /** - * Get reference of Optional object - * @warning User must validate Optional has value before. 
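To make the warning above concrete, here is a minimal usage sketch for DlSystem::Optional. The producer function makeConfig is hypothetical and used only for illustration; it is not part of this header.

    #include <iostream>
    #include <string>
    #include "DlSystem/DlOptional.hpp"   // assuming .../inc/zdl is on the include path

    // Hypothetical producer: returns an owned copy of a value, or an empty Optional.
    DlSystem::Optional<std::string> makeConfig(bool available) {
        if (!available) return {};                        // empty; evaluates to false
        return DlSystem::Optional<std::string>(std::string("example-config"));
    }

    void useConfig() {
        auto cfg = makeConfig(true);
        if (cfg) {                                        // validate before dereferencing
            std::cout << *cfg << std::endl;               // safe: a value is present
        }
    }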
- */ - const T& operator*() const { return this->GetReference(); } - - operator T&() { return this->GetReference(); } - - T operator->() { - T self = this->GetReference(); - return self; - } - - void release(){ - if(isOwned() && isPointer()){ - m_Type = LIFECYCLE::POINTER_NOT_OWNED; - if(m_Count && m_Count->decrement() == 0){ - delete m_Count; - m_Count = nullptr; - } - } - } -private: - void swap(Optional&& other) { - m_Type = other.m_Type; - m_StoragePtr = other.m_StoragePtr; - m_Count = other.m_Count; - - other.m_Type = LIFECYCLE::NONE; - other.m_StoragePtr = nullptr; - other.m_Count = nullptr; - } - - template - typename std::enable_if::value, const Q&>::type GetReference() const noexcept { - if (!isReference()) std::terminate(); - return *static_cast(m_StoragePtr); - } - - template - typename std::enable_if::value, const Q&>::type GetReference() const noexcept { - if (!isPointer()) std::terminate(); - return static_cast(m_StoragePtr); - } - - template - typename std::enable_if::value, Q&>::type GetReference() noexcept { - if (!isReference()) std::terminate(); - return *m_StoragePtr; - } - - template - typename std::enable_if::value, Q&>::type GetReference() noexcept { - if (!isPointer()) std::terminate(); - return m_StoragePtr; - } - - bool isPointer() const { - return m_Type == LIFECYCLE::POINTER_OWNED || m_Type == LIFECYCLE::POINTER_NOT_OWNED; - } - - bool isOwned() const { - return m_Type == LIFECYCLE::REFERENCE_OWNED || m_Type == LIFECYCLE::POINTER_OWNED; - } - - bool isReference() const { - return m_Type == LIFECYCLE::REFERENCE_OWNED; - } - - bool isValid() const { - return m_Type != LIFECYCLE::NONE; - } - - U* m_StoragePtr = nullptr; - LIFECYCLE m_Type; - ReferenceCount *m_Count = nullptr; -}; - -} // ns DlSystem - - - -namespace zdl { namespace DlSystem { template using Optional = ::DlSystem::Optional; }} diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlVersion.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlVersion.h deleted file mode 100644 index fac01d1c..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlVersion.h +++ /dev/null @@ -1,122 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - - -/** - * @file - */ - -#ifndef _DL_VERSION_H_ -#define _DL_VERSION_H_ - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" -#include - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A class that contains the different portions of a version number. 
- * A typedef to indicate a SNPE DlVersion handle - */ -typedef void* Snpe_DlVersion_Handle_t; - -/** - * Construct a DlVersion - * - * @return a handle to the created DlVersion - */ -SNPE_API -Snpe_DlVersion_Handle_t Snpe_DlVersion_Create(); - - -/** - * Destroys/frees DlVersion - * - * @param[in] handle : Handle to access DlVersion - * - * @return SNPE_SUCCESS if Delete operation successful. -*/ -SNPE_API -Snpe_ErrorCode_t Snpe_DlVersion_Delete(Snpe_DlVersion_Handle_t handle); - -/** - * Get the major version number. - * @param[in] handle : Handle to access DlVersion - * @return Major version - */ -SNPE_API -int32_t Snpe_DlVersion_GetMajor(Snpe_DlVersion_Handle_t handle); - -/** - * Get the minor version number. - * @param[in] handle : Handle to access DlVersion - * @return Minor version - */ -SNPE_API -int32_t Snpe_DlVersion_GetMinor(Snpe_DlVersion_Handle_t handle); - -/** - * Get the teeny version number. - * @param[in] handle : Handle to access DlVersion - * @return Teeny version - */ -SNPE_API -int32_t Snpe_DlVersion_GetTeeny(Snpe_DlVersion_Handle_t handle); - -/** - * Get the string holding information about the build version. - * - * @param[in] handle : Handle to access DlVersion - * @return Build information - */ -SNPE_API -const char* Snpe_DlVersion_GetBuild(Snpe_DlVersion_Handle_t handle); - -/** - * @brief Returns a string in the form Major.Minor.Teeny.Build - * - * @param[in] handle : Handle to access DlVersion - * @return A formatted char* holding the version information. - * - * @note the returned string will be invalidated by subsequent calls to this function - */ -SNPE_API -const char* Snpe_DlVersion_ToString(Snpe_DlVersion_Handle_t handle); - -/** - * @brief Create a DlVersion from a string - * - * @param stringValue The formatted DlVersion string - * - * @return A handle to the created DlVersion - */ -SNPE_API -Snpe_DlVersion_Handle_t Snpe_DlVersion_FromString(const char* stringValue); - - - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _DL_VERSION_H_ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlVersion.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlVersion.hpp deleted file mode 100644 index 7badab1f..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/DlVersion.hpp +++ /dev/null @@ -1,118 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include -#include - -#include "Wrapper.hpp" -#include "String.hpp" - -#include "DlSystem/DlVersion.h" -#include "SNPE/SNPEUtil.h" - - -namespace DlSystem { - -class Version_t : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_DlVersion_Delete}; - - template - using MajorReference = WrapperDetail::GenericConstMemberReference; - - template - using MinorReference = WrapperDetail::GenericConstMemberReference; - - template - using TeenyReference = WrapperDetail::GenericConstMemberReference; - - - static std::string BuildGetter(Snpe_DlVersion_Handle_t handle){ - return Snpe_DlVersion_GetBuild(handle); - } - - template - using BuildReference = WrapperDetail::GenericConstMemberReference; - - - static const std::string& toString(int32_t Major, int32_t Minor, int32_t Teeny, const std::string& Build){ - thread_local std::string toret; - - toret = std::to_string(Major); - toret += '.'; - toret += std::to_string(Minor); - toret += '.'; - toret += std::to_string(Teeny); - if(!Build.empty()){ - toret += '.'; - toret += Build; - } - - return toret; - } - -public: - Version_t() - : BaseType(Snpe_DlVersion_Create()) - { } - - Version_t(int32_t Major, int32_t Minor, int32_t Teeny, const std::string& Build) - : BaseType(Snpe_DlVersion_FromString(toString(Major, Minor, Teeny, Build).c_str())) - { } - - - /// Holds the major version number. Changes in this value indicate - /// major changes that break backward compatibility. - MajorReference Major{*this}; - - /// Holds the minor version number. Changes in this value indicate - /// minor changes made to library that are backwards compatible - /// (such as additions to the interface). - MinorReference Minor{*this}; - - /// Holds the teeny version number. Changes in this value indicate - /// changes such as bug fixes and patches made to the library that - /// do not affect the interface. - TeenyReference Teeny{*this}; - - /// This string holds information about the build version. - BuildReference Build{*this}; - - - static Version_t fromString(const std::string& stringValue){ - return moveHandle(Snpe_DlVersion_FromString(stringValue.c_str())); - } - - /** - * @brief Returns a string in the form Major.Minor.Teeny.Build - * - * @return A formatted string holding the version information. - */ - std::string toString() const{ - return Snpe_DlVersion_ToString(handle()); - } - - /** - * @brief Returns a string in the form Major.Minor.Teeny.Build - * - * @return A formatted string holding the version information. 
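As a brief usage sketch for the Version_t wrapper above (the version numbers here are made up for illustration):

    #include <iostream>
    #include "DlSystem/DlVersion.hpp"   // assuming .../inc/zdl is on the include path

    void versionExample() {
        // Construct from components; the values are illustrative only.
        DlSystem::Version_t v(2, 14, 0, "example-build");
        std::cout << "Version: " << v.toString() << std::endl;      // "2.14.0.example-build"

        // The same value can be reconstructed from its string form.
        DlSystem::Version_t parsed = DlSystem::Version_t::fromString("2.14.0.example-build");
        std::cout << "Parsed:  " << parsed.toString() << std::endl;
    }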
- */ - String asString() const{ - return String(toString()); - } -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, Version_t) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.h deleted file mode 100644 index 96453ef9..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.h +++ /dev/null @@ -1,117 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _IBUFFER_ATTRIBUTES_H -#define _IBUFFER_ATTRIBUTES_H - -#include "DlSystem/IUserBuffer.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE IBufferAttributes handle - */ -typedef void* Snpe_IBufferAttributes_Handle_t; - - -/** - * @brief Gets the buffer's element size, in bytes - * - * This can be used to compute the memory size required - * to back this buffer. - * - * @param[in] handle : Handle to access IBufferAttributes - * - * @return Element size, in bytes - */ -SNPE_API -size_t Snpe_IBufferAttributes_GetElementSize(Snpe_IBufferAttributes_Handle_t handle); - -/** - * @brief Gets the element's encoding type - * - * @param[in] handle : Handle to access IBufferAttributes - * - * @return encoding type - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_IBufferAttributes_GetEncodingType(Snpe_IBufferAttributes_Handle_t handle); - -/** - * @brief Gets the number of elements in each dimension - * - * @param[in] handle : Handle to access IBufferAttributes - * - * @return Dimension size, in terms of number of elements - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_IBufferAttributes_GetDims(Snpe_IBufferAttributes_Handle_t handle); - -/** - * @brief Gets the alignment requirement of each dimension - * - * Alignment per each dimension is expressed as an multiple, for - * example, if one particular dimension can accept multiples of 8, - * the alignment will be 8. - * - * @param[in] handle : Handle to access IBufferAttributes - * - * @return Alignment in each dimension, in terms of multiple of - * number of elements - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_IBufferAttributes_GetAlignments(Snpe_IBufferAttributes_Handle_t handle); - -/** - * @brief Gets the buffer encoding returned from the network responsible - * for generating this buffer. Depending on the encoding type, this will - * be an instance of an encoding type specific derived class. - * - * @param[in] handle : Handle to access IBufferAttributes - * - * @return Derived user buffer encoding object. 
- */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_IBufferAttributes_GetEncoding_Ref(Snpe_IBufferAttributes_Handle_t handle); - -/** - * @brief Destroys the IBufferAttributes object - * - * @param[handle] handle : Handle to access IBufferAttributes - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IBufferAttributes_Delete(Snpe_IBufferAttributes_Handle_t handle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _IBUFFER_ATTRIBUTES_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.hpp deleted file mode 100644 index 2a86fcec..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.hpp +++ /dev/null @@ -1,85 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include -#include "TensorShape.hpp" - -#include "DlSystem/IBufferAttributes.h" -#include "IUserBuffer.hpp" - -namespace DlSystem { - - -class IBufferAttributes : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_IBufferAttributes_Delete}; -public: - - size_t getElementSize() const noexcept{ - return Snpe_IBufferAttributes_GetElementSize(handle()); - } - - UserBufferEncoding::ElementType_t getEncodingType() const noexcept{ - return static_cast(Snpe_IBufferAttributes_GetEncodingType(handle())); - } - - TensorShape getDims() const{ - return moveHandle(Snpe_IBufferAttributes_GetDims(handle())); - } - - TensorShape getAlignments() const{ - return moveHandle(Snpe_IBufferAttributes_GetAlignments(handle())); - } - - UserBufferEncoding* getEncoding() const{ - auto h = Snpe_IBufferAttributes_GetEncoding_Ref(handle()); - switch(Snpe_UserBufferEncoding_GetElementType(h)){ - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNSIGNED8BIT: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT32: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT32: - return makeReference(h); - - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT16: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF16: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_BOOL8: - return makeReference(h); - - default: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNKNOWN: - return makeReference(h); - } - } - -}; - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, IBufferAttributes) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.h 
b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.h deleted file mode 100644 index a3c3c623..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.h +++ /dev/null @@ -1,156 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_IOBUFFER_DATATYPE_MAP_H -#define DL_SYSTEM_IOBUFFER_DATATYPE_MAP_H - -#include - -#include "DlSystem/DlError.h" -#include "DlSystem/DlEnums.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE IOBufferDataTypeMap handle - */ -typedef void* Snpe_IOBufferDataTypeMap_Handle_t; - -/** - * @brief . - * - * Creates a new Buffer Data type map - * - */ -SNPE_API -Snpe_IOBufferDataTypeMap_Handle_t Snpe_IOBufferDataTypeMap_Create(); - -/** - * @brief Destroys the map - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IOBufferDataTypeMap_Delete(Snpe_IOBufferDataTypeMap_Handle_t handle); -/** - * @brief Adds a name and the corresponding buffer data type - * to the map - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @param[in] name : The name of the buffer - * - * @param[in] bufferDataType : data type of the buffer - * - * @note If a buffer with the same name already exists, no new - * buffer is added. - */ -SNPE_API -Snpe_ErrorCode_t -Snpe_IOBufferDataTypeMap_Add(Snpe_IOBufferDataTypeMap_Handle_t handle, const char* name, Snpe_IOBufferDataType_t bufferDataType); - -/** - * @brief Removes a buffer name from the map - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @param[in] name : The name of the buffer - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IOBufferDataTypeMap_Remove(Snpe_IOBufferDataTypeMap_Handle_t handle, const char* name); - -/** - * @brief Returns the type of the named buffer - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @param[in] name : The name of the buffer - * - * @return The type of the buffer, or UNSPECIFIED if the buffer does not exist - * - */ -SNPE_API -Snpe_IOBufferDataType_t Snpe_IOBufferDataTypeMap_GetBufferDataType(Snpe_IOBufferDataTypeMap_Handle_t handle, const char* name); - -/** - * @brief Returns the type of the first buffer - * - * @param handle : Handle to access the IOBufferDataType map - * - * @return The type of the first buffer, or SNPE_IO_BUFFER_DATATYPE_UNSPECIFIED if the map is empty. - */ -SNPE_API -Snpe_IOBufferDataType_t Snpe_IOBufferDataTypeMap_GetBufferDataTypeOfFirst(Snpe_IOBufferDataTypeMap_Handle_t handle); - -/** - * @brief Returns the size of the buffer type map. 
- * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @return The size of the map - * - */ -SNPE_API -size_t Snpe_IOBufferDataTypeMap_Size(Snpe_IOBufferDataTypeMap_Handle_t handle); - -/** - * @brief Checks the existence of the named buffer in the map - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @param[in] name : The name of the buffer - * - * @return 1 if the named buffer exists, 0 otherwise. - * - */ -SNPE_API -int Snpe_IOBufferDataTypeMap_Find(Snpe_IOBufferDataTypeMap_Handle_t handle, const char* name); - -/** - * @brief Resets the map - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IOBufferDataTypeMap_Clear(Snpe_IOBufferDataTypeMap_Handle_t handle); - -/** - * @brief Checks whether the map is empty - * - * @return 1 if the map is empty, 0 otherwise. - * - */ -SNPE_API -int Snpe_IOBufferDataTypeMap_Empty(Snpe_IOBufferDataTypeMap_Handle_t handle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_IOBUFFER_DATATYPE_MAP_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.hpp deleted file mode 100644 index c39d3320..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.hpp +++ /dev/null @@ -1,69 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include - -#include "DlEnums.hpp" - - -#include "DlSystem/IOBufferDataTypeMap.h" - -namespace DlSystem { - -class IOBufferDataTypeMap : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_IOBufferDataTypeMap_Delete}; - -public: - - IOBufferDataTypeMap() - : BaseType(Snpe_IOBufferDataTypeMap_Create()) - { } - - void add(const char* name, IOBufferDataType_t bufferDataType){ - Snpe_IOBufferDataTypeMap_Add(handle(), name, static_cast(bufferDataType)); - } - - void remove(const char* name){ - Snpe_IOBufferDataTypeMap_Remove(handle(), name); - } - - IOBufferDataType_t getBufferDataType(const char* name){ - return static_cast(Snpe_IOBufferDataTypeMap_GetBufferDataType(handle(), name)); - } - - IOBufferDataType_t getBufferDataType(){ - return static_cast(Snpe_IOBufferDataTypeMap_GetBufferDataTypeOfFirst(handle())); - } - - size_t size() const{ - return Snpe_IOBufferDataTypeMap_Size(handle()); - } - - bool find(const char* name) const{ - return Snpe_IOBufferDataTypeMap_Find(handle(), name); - } - - void clear(){ - Snpe_IOBufferDataTypeMap_Clear(handle()); - } - - bool empty() const{ - return Snpe_IOBufferDataTypeMap_Empty(handle()); - } -}; - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, IOBufferDataTypeMap) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/ITensor.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/ITensor.h deleted file mode 100644 index 913f3bdc..00000000 --- 
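The IOBufferDataTypeMap wrapper shown just above can be driven as in the following sketch. The buffer name is illustrative, and the concrete IOBufferDataType_t value is assumed to come from DlEnums.hpp, which is not reproduced here.

    #include "DlSystem/IOBufferDataTypeMap.hpp"   // assuming .../inc/zdl is on the include path

    void ioBufferTypeExample(DlSystem::IOBufferDataType_t floatType) {
        // 'floatType' is assumed to be one of the values defined in DlEnums.hpp.
        DlSystem::IOBufferDataTypeMap bufferTypes;
        bufferTypes.add("input:0", floatType);            // "input:0" is an illustrative name

        if (bufferTypes.find("input:0")) {
            DlSystem::IOBufferDataType_t t = bufferTypes.getBufferDataType("input:0");
            (void) t;                                     // would normally feed SNPE configuration
        }

        bufferTypes.clear();                              // empty() now returns true
    }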
a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/ITensor.h +++ /dev/null @@ -1,118 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef _DL_SYSTEM_ITENSOR_H_ -#define _DL_SYSTEM_ITENSOR_H_ - -#include - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/DlError.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * Represents a tensor which holds n-dimensional data. It is important to - * understand how the tensor data is represented in memory - * relative to the tensor dimensions. Tensors store data in - * memory in row-major order (i.e. the last tensor dimension is - * the fastest varying one). For example, if you have a two - * dimensional tensor with 3 rows and 2 columns (i.e. the tensor - * dimensions are 3,2 as returned in tensor dimension vectors) - * with the following data in terms rows and columns: - * - * | 1 2 |
- * | 3 4 |
- * | 5 6 |
- * - * This data would be stored in memory as 1,2,3,4,5,6. - */ -typedef void* Snpe_ITensor_Handle_t; - - -/** - * Destroys/frees an ITensor - * - * @param[in] userBufferHandle : Handle to access the IUserBuffer - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_ITensor_Delete(Snpe_ITensor_Handle_t iTensorHandle); - -/** - * Returns a tensor iterator pointing to the beginning - * of the data in the tensor. - * - * @param[in] tensorHandle : Handle to access ITensor - * - * @return The tensor data as a void pointer. - */ -SNPE_API -void* Snpe_ITensor_GetData(Snpe_ITensor_Handle_t tensorHandle); - -/** - * @brief Gets the shape of this tensor. - * - * The last element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying dimension, etc. - * - * @param[in] tensorHandle : Handle to access ITensor - * - * @return A TensorShape handle holding the tensor dimensions. - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_ITensor_GetShape(Snpe_ITensor_Handle_t tensorHandle); - -/** - * Returns the element size of the data in the tensor - * (discounting strides). This is how big a buffer would - * need to be to hold the tensor data contiguously in - * memory. - * - * @param[in] tensorHandle : Handle to access ITensor - * - * @return The size of the tensor (in elements). - */ -SNPE_API -size_t Snpe_ITensor_GetSize(Snpe_ITensor_Handle_t tensorHandle); - -SNPE_API -int Snpe_ITensor_IsQuantized(Snpe_ITensor_Handle_t tensorHandle); - -SNPE_API -float Snpe_ITensor_GetDelta(Snpe_ITensor_Handle_t tensorHandle); - -SNPE_API -float Snpe_ITensor_GetOffset(Snpe_ITensor_Handle_t tensorHandle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _DL_SYSTEM_ITENSOR_H_ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/ITensor.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/ITensor.hpp deleted file mode 100644 index 4785a39d..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/ITensor.hpp +++ /dev/null @@ -1,95 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "TensorShape.hpp" -#include "ITensorItr.hpp" - -#include "DlSystem/ITensor.h" - - -namespace DlSystem { - - -class ITensor : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_ITensor_Delete}; - - template - T* getData(){ - return static_cast(Snpe_ITensor_GetData(handle())); - } - - template - const T* getData() const{ - return static_cast(Snpe_ITensor_GetData(handle())); - } - -public: - using iterator = DlSystem::ITensorItr; - using const_iterator = DlSystem::ITensorItr; - - - iterator begin(){ - return iterator(getData()); - } - - const_iterator begin() const{ - return const_iterator(getData()); - } - - const_iterator cbegin() const{ - return begin(); - } - - iterator end(){ - return begin() + getSize(); - } - - const_iterator end() const{ - return cbegin() + getSize(); - } - - const_iterator cend() const{ - return end(); - } - - TensorShape getShape() const{ - return moveHandle(Snpe_ITensor_GetShape(handle())); - } - - size_t getSize() const{ - return Snpe_ITensor_GetSize(handle()); - } - - // Serialize to std::ostream is no longer supported - void serialize(std::ostream &output) const = delete; - - bool isQuantized() const{ - return Snpe_ITensor_IsQuantized(handle()); - } - - float GetDelta() const{ - return Snpe_ITensor_GetDelta(handle()); - } - - float GetOffset() const{ - return Snpe_ITensor_GetOffset(handle()); - } -}; - - -} //ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ITensor) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/ITensorFactory.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/ITensorFactory.hpp deleted file mode 100644 index 5ef1e9d3..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/ITensorFactory.hpp +++ /dev/null @@ -1,52 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
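Since ITensor exposes STL-style iterators (shown above), standard algorithms work directly on the flattened, row-major element data. A small sketch follows, assuming the tensor itself was obtained elsewhere, for example from ITensorFactory or as a network output.

    #include <algorithm>
    #include <numeric>
    #include "DlSystem/ITensor.hpp"   // assuming .../inc/zdl is on the include path

    // 'tensor' is assumed to have been created elsewhere; only iteration is shown here.
    float sumThenZero(DlSystem::ITensor& tensor) {
        float total = std::accumulate(tensor.begin(), tensor.end(), 0.0f);
        std::fill(tensor.begin(), tensor.end(), 0.0f);    // reset all elements in place
        return total;
    }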
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "ITensor.hpp" - -#include - - -#include "SNPE/SNPEUtil.h" - -namespace DlSystem{ -// NOTE: These factories use a different handle type because they are singletons -// Never copy this pattern unless you're also implementing a singleton -class ITensorFactory : public Wrapper{ - friend BaseType; - - using BaseType::BaseType; - static constexpr DeleteFunctionType DeleteFunction{NoOpDeleter}; - -public: - ITensorFactory() - : BaseType(nullptr) - { } - - - std::unique_ptr createTensor(const TensorShape &shape) noexcept{ - return makeUnique(Snpe_Util_CreateITensor(getHandle(shape))); - } - - // Create from std::istream is no longer supported - std::unique_ptr createTensor(std::istream &input) noexcept = delete; - - std::unique_ptr createTensor(const TensorShape &shape, - const unsigned char *data, - size_t dataSize) noexcept{ - auto handle = Snpe_Util_CreateITensorDataSize(getHandle(shape), data, dataSize); - return makeUnique(handle); - } - -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ITensorFactory) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/ITensorItr.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/ITensorItr.hpp deleted file mode 100644 index 801aa217..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/ITensorItr.hpp +++ /dev/null @@ -1,199 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include -#include -#include - -#include "Wrapper.hpp" -#include "ITensorItrImpl.hpp" - -namespace DlSystem{ - -template -class ITensorItr{ -public: - using iterator_category = std::bidirectional_iterator_tag; - using pointer = typename std::conditional::type; - using value_type = float; - using difference_type = std::ptrdiff_t; - using reference = typename std::conditional::type; - - - ITensorItr() = delete; - virtual ~ITensorItr() = default; - - explicit ITensorItr(pointer data) noexcept - : m_Impl{nullptr}, - m_IsTrivial{true}, - m_Data{data}, - m_DataStart{data} - { } - - ITensorItr(std::unique_ptr impl, - bool isTrivial = false, - float* data = nullptr) - : m_Impl(impl->clone()), - m_IsTrivial(isTrivial), - m_Data(data), - m_DataStart(data) - { } - - ITensorItr(const ITensorItr& itr) - : m_Impl(itr.m_Impl ? itr.m_Impl->clone() : nullptr), - m_IsTrivial(itr.m_IsTrivial), - m_Data(itr.m_Data), - m_DataStart(itr.m_DataStart) - { } - - ITensorItr(ITensorItr&& itr) noexcept - : m_Impl(std::move(itr.m_Impl)), - m_IsTrivial(itr.m_IsTrivial), - m_Data(itr.m_Data), - m_DataStart(itr.m_DataStart) - { } - - ITensorItr& operator=(const ITensorItr& other){ - if (this == &other) return *this; - - m_Impl = other.m_Impl ? 
other.m_Impl->clone() : nullptr; - m_IsTrivial = other.m_IsTrivial; - m_Data = other.m_Data; - m_DataStart = other.m_DataStart; - return *this; - } - ITensorItr& operator=(ITensorItr&& other) noexcept{ - if(this != &other){ - m_Impl = std::move(other.m_Impl); - m_IsTrivial = other.m_IsTrivial; - m_Data = other.m_Data; - m_DataStart = other.m_DataStart; - } - return *this; - } - - inline ITensorItr& operator++(){ - if (m_IsTrivial){ - m_Data++; - } else { - m_Impl->increment(); - } - return *this; - } - inline ITensorItr operator++(int){ - ITensorItr tmp(*this); - operator++(); - return tmp; - } - inline ITensorItr& operator--(){ - if (m_IsTrivial){ - m_Data--; - } else { - m_Impl->decrement(); - } - return *this; - } - inline ITensorItr operator--(int){ - ITensorItr tmp(*this); - operator--(); - return tmp; - } - inline ITensorItr& operator+=(int rhs){ - if (m_IsTrivial){ - m_Data += rhs; - } else { - m_Impl->increment(rhs); - } - return *this; - } - inline friend ITensorItr operator+(ITensorItr lhs, int rhs){ - lhs += rhs; - return lhs; - } - inline ITensorItr& operator-=(int rhs){ - if (m_IsTrivial){ - m_Data -= rhs; - } else { - m_Impl->decrement(rhs); - } - return *this; - } - inline friend ITensorItr operator-(ITensorItr lhs, int rhs){ - lhs -= rhs; - return lhs; - } - - inline size_t operator-(const ITensorItr& rhs){ - if (m_IsTrivial) return (m_Data - m_DataStart) - (rhs.m_Data - rhs.m_DataStart); - return m_Impl->getPosition() - rhs.m_Impl->getPosition(); - } - - inline friend bool operator<(const ITensorItr& lhs, const ITensorItr& rhs){ - if (lhs.m_IsTrivial) return lhs.m_Data < rhs.m_Data; - return lhs.m_Impl->dataPointer() < rhs.m_Impl->dataPointer(); - } - inline friend bool operator>(const ITensorItr& lhs, const ITensorItr& rhs){ - return rhs < lhs; - } - inline friend bool operator<=(const ITensorItr& lhs, const ITensorItr& rhs){ - return !(lhs > rhs); - } - inline friend bool operator>=(const ITensorItr& lhs, const ITensorItr& rhs){ - return !(lhs < rhs); - } - - inline bool operator==(const ITensorItr& rhs) const{ - if (m_IsTrivial) return m_Data == rhs.m_Data; - return m_Impl->dataPointer() == rhs.m_Impl->dataPointer(); - } - inline bool operator!=(const ITensorItr& rhs) const{ - return !operator==(rhs); - } - - inline reference operator[](size_t idx){ - if (m_IsTrivial) return *(m_DataStart + idx); - return m_Impl->getReferenceAt(idx); - } - inline reference operator*(){ - if (m_IsTrivial) return *m_Data; - return m_Impl->getReference(); - } - inline reference operator->(){ - return *(*this); - } - inline float* dataPointer() const{ - if (m_IsTrivial) return m_Data; - return m_Impl->dataPointer(); - } - - -protected: - std::unique_ptr<::DlSystem::ITensorItrImpl> m_Impl; - bool m_IsTrivial = false; - pointer m_Data = nullptr; - pointer m_DataStart = nullptr; -}; - - -inline void fill(ITensorItr first, ITensorItr end, float val){ - std::fill(first, end, val); -} -template -OutItr copy(InItr first, InItr last, OutItr result){ - return std::copy(first, last, result); -} - -} // ns DlSystem - - -// ALIAS_IN_ZDL_NAMESPACE -namespace zdl{ namespace DlSystem{ - template - using ITensorItr = ::DlSystem::ITensorItr; -}} diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/ITensorItrImpl.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/ITensorItrImpl.hpp deleted file mode 100644 index 6b9a497b..00000000 --- 
a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/ITensorItrImpl.hpp +++ /dev/null @@ -1,32 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once -#include "Wrapper.hpp" - -namespace DlSystem { - -class ITensorItrImpl { -public: - ITensorItrImpl() = default; - virtual ~ITensorItrImpl() = default; - - virtual float getValue() const = 0; - virtual float& getReference() = 0; - virtual float& getReferenceAt(size_t idx) = 0; - virtual float* dataPointer() const = 0; - virtual void increment(int incVal = 1) = 0; - virtual void decrement(int decVal = 1) = 0; - virtual size_t getPosition() = 0; - virtual std::unique_ptr clone() = 0; - -private: - ITensorItrImpl& operator=(const ITensorItrImpl& other) = delete; - ITensorItrImpl(const ITensorItrImpl& other) = delete; -}; - -} // ns DlSystem diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.h deleted file mode 100644 index fc4cc316..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.h +++ /dev/null @@ -1,714 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _IUSER_BUFFER_H -#define _IUSER_BUFFER_H - -#include -#include - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/DlError.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE UserByfferEncoding handle - */ -typedef void* Snpe_UserBufferEncoding_Handle_t; - -/** - * @brief . - * - * An enum class of all supported element types in a IUserBuffer - */ -//enum class Snpe_UserBufferEncoding_ElementType_t -typedef enum -{ - /// Unknown element type. - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNKNOWN = 0, - - /// Each element is presented by float. - SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT = 1, - - /// Each element is presented by an unsigned int. - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNSIGNED8BIT = 2, - - /// Each element is presented by float16. - SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT16 = 3, - - /// Each element is presented by an 8-bit quantized value. - SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8 = 10, - - /// Each element is presented by an 16-bit quantized value. 
- SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF16 = 11, - - /// Each element is presented by Int32 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT32 = 12, - - /// Each element is presented by UInt32 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT32 = 13, - - /// Each element is presented by Int8 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT8 = 14, - - /// Each element is presented by UInt8 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT8 = 15, - - /// Each element is presented by Int16 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT16 = 16, - - /// Each element is presented by UInt16 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT16 = 17, - - /// Each element is present by Bool8 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_BOOL8 = 18, - - /// Each element is present by Int64 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT64 = 19, - - /// Each element is present by UInt64 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT64 = 20 - -}Snpe_UserBufferEncoding_ElementType_t; - - -/** - * @brief Retrieves the element type - * - * @param[in] userBufferEncodingHandle : Handle to access userBufferEncoding - * - * @return Element type - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_UserBufferEncoding_GetElementType(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access userBufferEncoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncoding_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Destroys/frees a UserBufferEncoding - * - * @param[in] userBufferEncodingHandle : Handle to access UserBufferEncoding - * - * @return indication of success/failures - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncoding_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - - -/** - * @brief . - * - * A base class buffer source type - * - * @note User buffer from CPU support all kinds of runtimes; - * User buffer from GLBUFFER support only GPU runtime. - */ -typedef void* Snpe_UserBufferSource_Handle_t; - -typedef enum -{ - /// Unknown buffer source type. - SNPE_USERBUFFERSOURCE_SOURCETYPE_UNKNOWN = 0, - - /// The network inputs are from CPU buffer. - SNPE_USERBUFFERSOURCE_SOURCETYPE_CPU = 1, - - /// The network inputs are from OpenGL buffer. - SNPE_USERBUFFERSOURCE_SOURCETYPE_GLBUFFER = 2 -}Snpe_UserBufferSource_SourceType_t; - -/** - * @brief Retrieves the source type - * - * @param[in] userBufferSourceHandle : Handle to access userBufferSource - * - * @return Source type - */ -SNPE_API -Snpe_UserBufferSource_SourceType_t Snpe_UserBufferSource_GetSourceType(Snpe_UserBufferSource_Handle_t userBufferSourceHandle); - -/** - * @brief Destroys/frees a UserBufferSource - * - * @param[in] userBufferSourceHandle : Handle to access UserBufferSource - * - * @return indication of success/failures - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferSource_Delete(Snpe_UserBufferSource_Handle_t userBufferSourceHandle); - -/** - * @brief . - * - * An source type where input data is delivered from OpenGL buffer - */ -SNPE_API -Snpe_UserBufferSource_Handle_t Snpe_UserBufferSourceGLBuffer_Create(); - -/** - * @brief Destroys the userBuffer - * - * @param[in] userBufferSourceHandle : Handle to access the UserBuffer - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferSourceGLBuffer_Delete(Snpe_UserBufferSource_Handle_t userBufferSourceHandle); - -// Encoding 8 Bit -/** - * @brief . 
- * - * An encoding type where each element is represented by an unsigned int. - * - * Userbuffer size assumes uint8 encoding for each element. - * (i.e., a tensor with dimensions (2,3) will be represented by (2 * 3) * 1 = 6 bytes in memory). - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingUnsigned8Bit_Create(); - -/** - * @brief Copy Constructor for UserBufferEncodingUnsigned8Bit - * - * An encoding type where each element is represented by an unsigned int. - * - * Userbuffer size assumes uint8 encoding for each element. - * (i.e., a tensor with dimensions (2,3) will be represented by (2 * 3) * 1 = 6 bytes in memory). - * - * @param[in] otherHandle : a handle to another UserBufferEncodingUnsigned8Bit to copy - * - * @return a handle to the UserBufferEncodingUnsigned8Bit - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingUnsigned8Bit_CreateCopy(Snpe_UserBufferEncoding_Handle_t otherHandle); - -/** - * @brief Destroys the encodingUnsigned8Bit - * - * @param[in] userBufferEncodingHandle : Handle to access the encodingUnsigned8Bit - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingUnsigned8Bit_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingUnsigned8Bit_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - - -// Encoding Float -/** - * @brief . - * - * An encoding type where each element is represented by a float. - * - * Userbuffer size assumes float encoding for each element. - * (i.e., a tensor with dimensions (2,3) will be represented by (2 * 3) * 4 = 24 bytes in memory). - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingFloat_Create(); - -/** - * @brief Copy Constructor for UserBufferEncodingFloat - * - * An encoding type where each element is represented by a float. - * - * Userbuffer size assumes float encoding for each element. - * (i.e., a tensor with dimensions (2,3) will be represented by (2 * 3) * 4 = 24 bytes in memory). - * - * @param[in] otherHandle : a handle to another UserBufferEncodingFloat to copy - * - * @return a handle to the constructed UserBufferEncodingFloat - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingFloat_CreateCopy(Snpe_UserBufferEncoding_Handle_t otherHandle); - -/** - * @brief Destroys the encodingFloat - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingFloat_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingFloat_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -// Encoding FloatN -/** - * @brief . - * - * An encoding type where each element is represented by a float N - * - * Userbuffer size assumes float N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 16 will be represented by (2 * 3) * 2 = 12 bytes in memory). 
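The byte counts quoted in the comments above follow directly from Snpe_UserBufferEncoding_GetElementSize. A hedged sketch, assuming only the C header being removed here (DlSystem/IUserBuffer.h) is on the include path: it sizes a hypothetical (2, 3) float tensor the same way the documentation does (6 elements * 4 bytes = 24 bytes).

```cpp
// Illustrative only: sizing a user buffer for a hypothetical (2, 3) float
// tensor with the element-size query from the header above.
#include <cstddef>
#include <cstdio>
#include "DlSystem/IUserBuffer.h"

int main() {
    Snpe_UserBufferEncoding_Handle_t enc = Snpe_UserBufferEncodingFloat_Create();

    const size_t dims[] = {2, 3};                      // example tensor shape
    const size_t elements = dims[0] * dims[1];         // 6 elements
    const size_t bytes =
        elements * Snpe_UserBufferEncoding_GetElementSize(enc);  // 6 * 4 = 24 bytes

    std::printf("user buffer needs %zu bytes\n", bytes);

    Snpe_UserBufferEncodingFloat_Delete(enc);
    return 0;
}
```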
- */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingFloatN_Create(uint8_t bWidth); - -/** - * @brief Copy Constructor for UserBufferEncodingFloatN - * - * An encoding type where each element is represented by a float N - * - * Userbuffer size assumes float N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 16 will be represented by (2 * 3) * 2 = 12 bytes in memory). - * - * @param[in] otherHandle : a handle to another UserBufferEncodingFloatN to copy - * - * @return a handle to the constructed UserBufferEncodingFloatN - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingFloatN_CreateCopy(Snpe_UserBufferEncoding_Handle_t otherHandle); - - -/** - * @brief Destroys the encodingFloatN - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingFloatN_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingFloatN_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - - -/** - * @brief Get the Float type corresponding to a given bitwidth - * - * @param width bitwidth of Float type - * - * @return ElementType corresponding to a Float of width bits - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_UserBufferEncodingFloatN_GetTypeFromWidth(uint8_t width); - -/** - * @brief . - * - * An encoding type where each element is represented by tfN, which is an - * N-bit quantized value, which has an exact representation of 0.0 - * - * Userbuffer size assumes tf N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 16 will be represented by (2 * 3) * 2 = 12 bytes in memory). - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingTfN_Create(uint64_t stepFor0, float stepSize, uint8_t bWidth); - -/** - * @brief Copy Constructor for UserBufferEncodingTfN - * - * An encoding type where each element is represented by tfN, which is an - * N-bit quantized value, which has an exact representation of 0.0 - * - * Userbuffer size assumes tf N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 16 will be represented by (2 * 3) * 2 = 12 bytes in memory). - * @param otherHandle the UserBufferEncodingTfN to copy - * @return a handle to a newly constructed UserBufferEncodingTfN - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingTfN_CreateCopy(Snpe_UserBufferEncoding_Handle_t otherHandle); - -/** - * @brief Destroys the encodingTfN - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingTfN_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. 
- */ -SNPE_API -size_t Snpe_UserBufferEncodingTfN_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Sets the step value that represents 0 - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @param[in] stepExactly0 : The step value that represents 0 - * - */ -SNPE_API -void Snpe_UserBufferEncodingTfN_SetStepExactly0(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle, uint64_t stepExactly0); - -/** - * @brief Sets the float value that each step represents - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @param[in] quantizedStepSize : The float value of each step size - * - */ -SNPE_API -void Snpe_UserBufferEncodingTfN_SetQuantizedStepSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle, float quantizedStepSize); - -/** - * @brief Retrieves the step that represents 0.0 - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Step value - */ -SNPE_API -uint64_t Snpe_UserBufferEncodingTfN_GetStepExactly0(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the step size - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Step size - */ -SNPE_API -float Snpe_UserBufferEncodingTfN_GetQuantizedStepSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * Calculates the minimum floating point value that - * can be represented with this encoding. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Minimum representable floating point value - */ -SNPE_API -float Snpe_UserBufferEncodingTfN_GetMin(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * Calculates the maximum floating point value that - * can be represented with this encoding. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Maximum representable floating point value - */ -SNPE_API -float Snpe_UserBufferEncodingTfN_GetMax(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Get the tfN type corresponding to a given bitwidth - * - * @param width bitwidth of tfN type - * - * @return ElementType corresponding to a tfN of width bits - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_UserBufferEncodingTfN_GetTypeFromWidth(uint8_t width); - -// Encoding Int N -/** - * @brief . - * - * An encoding type where each element is represented by a Int - * - * Userbuffer size assumes int N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 32 will be represented by (2 * 3) * 4 = 24 bytes in memory). - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingIntN_Create(uint8_t bWidth); - -/** - * @brief Copy Constructor for UserBufferEncodingIntN - * - * An encoding type where each element is represented by a Int - * - * Userbuffer size assumes int N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 32 will be represented by (2 * 3) * 4 = 24 bytes in memory). 
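The TfN functions above describe a quantized encoding whose zero point (stepExactly0) and step size determine both the element width and the representable float range. A short illustrative sketch; the 8-bit zero point of 128 and step size of 0.05f are hypothetical values, not taken from this header.

```cpp
// Illustrative only: an 8-bit TfN ("TF8") encoding with a hypothetical
// zero point (stepExactly0 = 128) and step size (0.05f).
#include <cstdio>
#include "DlSystem/IUserBuffer.h"

int main() {
    Snpe_UserBufferEncoding_Handle_t tf8 =
        Snpe_UserBufferEncodingTfN_Create(128, 0.05f, 8);

    // Element width and the representable float range both follow from the
    // bitwidth, zero point and step size supplied at creation time.
    std::printf("element size: %zu bytes\n",
                Snpe_UserBufferEncodingTfN_GetElementSize(tf8));
    std::printf("representable range: [%f, %f]\n",
                Snpe_UserBufferEncodingTfN_GetMin(tf8),
                Snpe_UserBufferEncodingTfN_GetMax(tf8));

    Snpe_UserBufferEncodingTfN_Delete(tf8);
    return 0;
}
```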
- * @param otherHandle the UserBufferEncodingIntN to copy - * @return a handle to a newly constructed UserBufferEncodingIntN - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingIntN_CreateCopy(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Destroys the encodingIntN - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingIntN_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingIntN_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Get the int type corresponding to a given bitwidth - * - * @param width bitwidth of int type - * - * @return ElementType corresponding to a int of width bits - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_UserBufferEncodingIntN_GetTypeFromWidth(uint8_t bWidth); - -// Encoding Uint N -/** - * @brief . - * - * An encoding type where each element is represented by a Uint - * - * Userbuffer size assumes uint N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 32 will be represented by (2 * 3) * 4 = 24 bytes in memory). - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingUintN_Create(uint8_t bWidth); - -/** - * @brief Copy Constructor for UserBufferEncodingUintN - * - * An encoding type where each element is represented by a Uint - * - * Userbuffer size assumes uint N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 32 will be represented by (2 * 3) * 4 = 24 bytes in memory). - * @param otherHandle the UserBufferEncodingUintN to copy - * @return a handle to a newly constructed UserBufferEncodingUintN - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingUintN_CreateCopy(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Destroys the encodingUintN - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingUintN_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingUintN_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Get the uint type corresponding to a given bitwidth - * - * @param width bitwidth of uint type - * - * @return ElementType corresponding to a uint of width bits - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_UserBufferEncodingUintN_GetTypeFromWidth(uint8_t bWidth); - - -// Encoding Bool -/** - * @brief . 
- * - * An encoding type where each element is represented by a Bool - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingBool_Create(uint8_t bWidth); - -/** - * @brief Copy Constructor for UserBufferEncodingBool - * - * An encoding type where each element is represented by a bool - * - * @param otherHandle the UserBufferEncodingBool to copy - * @return a handle to a newly constructed UserBufferEncodingBool - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingBool_CreateCopy(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Destroys the encodingBool - * - * @param[in] userBufferHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingBool_Delete(Snpe_UserBufferEncoding_Handle_t userBufferHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingBool_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferHandle); - - - -/** - * A typedef to indicate a SNPE IUserBuffer handle - * UserBuffer contains a pointer and info on how to walk it and interpret its content. - */ -typedef void* Snpe_IUserBuffer_Handle_t; - -/** - * Destroys/frees an IUserBuffer - * - * @param[in] userBufferHandle : Handle to access the IUserBuffer - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IUserBuffer_Delete(Snpe_IUserBuffer_Handle_t userBufferHandle); - - -/** - * @brief Retrieves the total number of bytes between elements in each dimension if - * the buffer were to be interpreted as a multi-dimensional array. - * - * @param[in] userBufferHandle : Handle to access the user Buffer - * - * @warning Do not modify the TensorShape returned by reference. Treat it as a const reference. - * - * @return A const reference to the number of bytes between elements in each dimension. - * e.g. A tightly packed tensor of floats with dimensions [4, 3, 2] would - * return strides of [24, 8, 4]. - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_IUserBuffer_GetStrides_Ref(Snpe_IUserBuffer_Handle_t userBufferHandle); - -/** - * @brief Retrieves the size of the buffer, in bytes. - * - * @param[in] userBufferHandle : Handle to access the user Buffer - * - * @return Size of the underlying buffer, in bytes. - */ -SNPE_API -size_t Snpe_IUserBuffer_GetSize(Snpe_IUserBuffer_Handle_t userBufferHandle); - -/** - * @brief Retrieves the size of the inference data in the buffer, in bytes. - * - * The inference results from a dynamic-sized model may not be exactly the same size - * as the UserBuffer provided to SNPE. This function can be used to get the amount - * of output inference data, which may be less or greater than the size of the UserBuffer. - * - * If the inference results fit in the UserBuffer, getOutputSize() would be less than - * or equal to getSize(). But if the inference results were more than the capacity of - * the provided UserBuffer, the results would be truncated to fit the UserBuffer. But, - * getOutputSize() would be greater than getSize(), which indicates a bigger buffer - * needs to be provided to SNPE to hold all of the inference results. 
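Because getOutputSize() can legitimately exceed getSize() for dynamic-sized outputs, callers are expected to compare the two after execution and re-provision the buffer when the results were truncated. A small sketch of that check, using only the accessors declared above; the error handling is illustrative.

```cpp
// Illustrative only: after execution, verify that a dynamic-sized output
// actually fit into the user buffer handed to SNPE.
#include <cstddef>
#include <cstdio>
#include "DlSystem/IUserBuffer.h"

bool outputWasTruncated(Snpe_IUserBuffer_Handle_t outputBuffer) {
    const size_t capacity = Snpe_IUserBuffer_GetSize(outputBuffer);       // bytes the app allocated
    const size_t produced = Snpe_IUserBuffer_GetOutputSize(outputBuffer); // bytes the network needed
    if (produced > capacity) {
        std::fprintf(stderr, "output truncated: need %zu bytes, buffer holds %zu\n",
                     produced, capacity);
        return true;  // caller should supply a larger buffer and run again
    }
    return false;
}
```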
- * - * @param[in] userBufferHandle : Handle to access the user Buffer - * - * @return Size required for the buffer to hold all inference results, which can be less - * or more than the size of the buffer, in bytes. - */ -SNPE_API -size_t Snpe_IUserBuffer_GetOutputSize(Snpe_IUserBuffer_Handle_t userBufferHandle); - -/** - * @brief Changes the underlying memory that backs the UserBuffer. - * - * This can be used to avoid creating multiple UserBuffer objects - * when the only thing that differs is the memory location. - * - * @param[in] userBufferHandle : Handle to access the user Buffer - * - * @param[in] buffer : Pointer to the memory location - * - * @return Whether the set succeeds. - */ -SNPE_API -int Snpe_IUserBuffer_SetBufferAddress(Snpe_IUserBuffer_Handle_t userBufferHandle, void* buffer); - -/** - * @brief Gets a reference to the data encoding object of - * the underlying buffer - * - * This is necessary when the UserBuffer is re-used, and the encoding - * parameters can change. For example, each input can be quantized with - * different step sizes. - * - * @param[in] userBufferHandle : Handle to access the user Buffer - * - * @return Data encoding meta-data - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_IUserBuffer_GetEncoding_Ref(Snpe_IUserBuffer_Handle_t userBufferHandle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _IUSER_BUFFER_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.hpp deleted file mode 100644 index 727c195b..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.hpp +++ /dev/null @@ -1,390 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include -#include "TensorShape.hpp" - -#include "DlSystem/IUserBuffer.h" - - -namespace DlSystem { - - -class UserBufferEncoding: public Wrapper { - friend BaseType; - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserBufferEncoding_Delete}; -protected: - UserBufferEncoding(HandleType handle) - : BaseType(handle) - { } -public: - - virtual ~UserBufferEncoding() = default; - - UserBufferEncoding(UserBufferEncoding&& other) noexcept - : BaseType(std::move(other)) - { } - - enum class ElementType_t - { - /// Unknown element type. - UNKNOWN = 0, - - /// Each element is presented by 32-bit float. - FLOAT = 1, - - /// Each element is presented by an unsigned int. - UNSIGNED8BIT = 2, - - /// Each element is presented by 16-bit float. - FLOAT16 = 3, - - /// Each element is presented by an 8-bit quantized value. - TF8 = 10, - - /// Each element is presented by an 16-bit quantized value. 
- TF16 = 11, - - /// Each element is presented by Int32 - INT32 = 12, - - /// Each element is presented by UInt32 - UINT32 = 13, - - /// Each element is presented by Int8 - INT8 = 14, - - /// Each element is presented by UInt8 - UINT8 = 15, - - /// Each element is presented by Int16 - INT16 = 16, - - /// Each element is presented by UInt16 - UINT16 = 17, - - // Each element is presented by Bool8 - BOOL8 = 18, - - // Each element is presented by Int64 - INT64 = 19, - - // Each element is presented by UInt64 - UINT64 = 20 - }; - - ElementType_t getElementType() const noexcept{ - return static_cast(Snpe_UserBufferEncoding_GetElementType(handle())); - } - - size_t getElementSize() const noexcept{ - return Snpe_UserBufferEncoding_GetElementSize(handle()); - } -}; - - -class UserBufferSource: public Wrapper { - friend BaseType; - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserBufferSource_Delete}; - -public: - enum class SourceType_t - { - /// Unknown buffer source type. - UNKNOWN = 0, - - /// The network inputs are from CPU buffer. - CPU = 1, - - /// The network inputs are from OpenGL buffer. - GLBUFFER = 2 - }; -protected: - UserBufferSource(HandleType handle) - : BaseType(handle) - { } -public: - SourceType_t getSourceType() const noexcept{ - return static_cast(Snpe_UserBufferSource_GetSourceType(handle())); - } - -}; - -class UserBufferSourceGLBuffer : public UserBufferSource{ -public: - UserBufferSourceGLBuffer() - : UserBufferSource(Snpe_UserBufferSourceGLBuffer_Create()) - { } -}; - -class UserBufferEncodingUnsigned8Bit : public UserBufferEncoding{ -public: - using UserBufferEncoding::UserBufferEncoding; - UserBufferEncodingUnsigned8Bit() - : UserBufferEncoding(Snpe_UserBufferEncodingUnsigned8Bit_Create()) - { } -}; - -class UserBufferEncodingFloatN : public UserBufferEncoding{ -public: - using UserBufferEncoding::UserBufferEncoding; - - UserBufferEncodingFloatN(uint8_t bWidth=32) - : UserBufferEncoding(Snpe_UserBufferEncodingFloatN_Create(bWidth)) - { } - - UserBufferEncodingFloatN(const UserBufferEncodingFloatN& other) - : UserBufferEncoding(Snpe_UserBufferEncodingFloatN_CreateCopy(other.handle())) - { } - - static ElementType_t getTypeFromWidth(uint8_t width){ - return static_cast(Snpe_UserBufferEncodingFloatN_GetTypeFromWidth(width)); - } -}; - -class UserBufferEncodingFloat : public UserBufferEncoding{ -public: - using UserBufferEncoding::UserBufferEncoding; - UserBufferEncodingFloat() - : UserBufferEncoding(Snpe_UserBufferEncodingFloat_Create()) - { } - UserBufferEncodingFloat(const UserBufferEncodingFloat& other) - : UserBufferEncoding(Snpe_UserBufferEncodingFloat_CreateCopy(other.handle())) - { } - - UserBufferEncodingFloat(UserBufferEncodingFloat&& other) noexcept - : UserBufferEncoding(std::move(other)) - { } -}; - - -class UserBufferEncodingTfN : public UserBufferEncoding{ -public: - - using UserBufferEncoding::UserBufferEncoding; - template::value && std::is_floating_point::value, int>::type = 0> - UserBufferEncodingTfN(T stepFor0, U stepSize, uint8_t bWidth=8) - : UserBufferEncoding(Snpe_UserBufferEncodingTfN_Create(stepFor0, stepSize, bWidth)) - { } - - UserBufferEncodingTfN(const UserBufferEncoding& ubEncoding) - : UserBufferEncoding(Snpe_UserBufferEncodingTfN_CreateCopy(getHandle(ubEncoding))) - { } - UserBufferEncodingTfN(const UserBufferEncodingTfN& ubEncoding) - : UserBufferEncoding(Snpe_UserBufferEncodingTfN_CreateCopy(getHandle(ubEncoding))) - { } - - void setStepExactly0(uint64_t stepExactly0){ - 
Snpe_UserBufferEncodingTfN_SetStepExactly0(handle(), stepExactly0); - } - - void setQuantizedStepSize(const float quantizedStepSize){ - Snpe_UserBufferEncodingTfN_SetQuantizedStepSize(handle(), quantizedStepSize); - } - - uint64_t getStepExactly0() const{ - return Snpe_UserBufferEncodingTfN_GetStepExactly0(handle()); - } - - float getMin() const{ - return Snpe_UserBufferEncodingTfN_GetMin(handle()); - } - float getMax() const{ - return Snpe_UserBufferEncodingTfN_GetMax(handle()); - } - - float getQuantizedStepSize() const{ - return Snpe_UserBufferEncodingTfN_GetQuantizedStepSize(handle()); - } - - static ElementType_t getTypeFromWidth(uint8_t width){ - return static_cast(Snpe_UserBufferEncodingTfN_GetTypeFromWidth(width)); - } -}; - -class UserBufferEncodingIntN : public UserBufferEncoding{ -public: - - UserBufferEncodingIntN(uint8_t bWidth=32) - : UserBufferEncoding(Snpe_UserBufferEncodingIntN_Create(bWidth)) - { } - - UserBufferEncodingIntN(const UserBufferEncoding& ubEncoding) - : UserBufferEncoding(Snpe_UserBufferEncodingIntN_CreateCopy(getHandle(ubEncoding))) - { } - - static ElementType_t getTypeFromWidth(uint8_t width){ - return static_cast(Snpe_UserBufferEncodingIntN_GetTypeFromWidth(width)); - } -}; - - - -class UserBufferEncodingUintN : public UserBufferEncoding{ -public: - - UserBufferEncodingUintN(uint8_t bWidth=32) - : UserBufferEncoding(Snpe_UserBufferEncodingUintN_Create(bWidth)) - { } - - UserBufferEncodingUintN(const UserBufferEncoding& ubEncoding) - : UserBufferEncoding(Snpe_UserBufferEncodingUintN_CreateCopy(getHandle(ubEncoding))) - { } - - static ElementType_t getTypeFromWidth(uint8_t width){ - return static_cast(Snpe_UserBufferEncodingUintN_GetTypeFromWidth(width)); - } -}; - - -class UserBufferEncodingTf8 : public UserBufferEncodingTfN{ -public: - using UserBufferEncodingTfN::UserBufferEncodingTfN; - UserBufferEncodingTf8() = delete; - - template::value && std::is_floating_point::value, int>::type = 0> - UserBufferEncodingTf8(T stepFor0, U stepSize) - : UserBufferEncodingTfN(stepFor0, stepSize, 8) - { } - - UserBufferEncodingTf8(const UserBufferEncoding& ubEncoding) - : UserBufferEncodingTfN(ubEncoding) - { } - -}; - -class UserBufferEncodingBool : public UserBufferEncoding{ -public: - UserBufferEncodingBool(uint8_t bWidth=8) - : UserBufferEncoding(Snpe_UserBufferEncodingBool_Create(bWidth)) - { } - - UserBufferEncodingBool(const UserBufferEncoding& ubEncoding) - : UserBufferEncoding(Snpe_UserBufferEncodingBool_CreateCopy(getHandle(ubEncoding))) - { } -}; - -class IUserBuffer: public Wrapper { - friend BaseType; - using BaseType::BaseType; - static constexpr DeleteFunctionType DeleteFunction{Snpe_IUserBuffer_Delete}; - -public: - const TensorShape& getStrides() const{ - return *makeReference(Snpe_IUserBuffer_GetStrides_Ref(handle())); - } - - size_t getSize() const{ - return Snpe_IUserBuffer_GetSize(handle()); - } - - size_t getOutputSize() const{ - return Snpe_IUserBuffer_GetOutputSize(handle()); - } - - bool setBufferAddress(void* buffer) noexcept{ - return Snpe_IUserBuffer_SetBufferAddress(handle(), buffer); - } - - const UserBufferEncoding& getEncoding() const noexcept{ - auto h = Snpe_IUserBuffer_GetEncoding_Ref(handle()); - switch(Snpe_UserBufferEncoding_GetElementType(h)){ - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNSIGNED8BIT: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT16: - case 
SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT32: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT16: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT32: - return *makeReference(h); - - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT16: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8: - return *makeReference(h); - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF16: - return *makeReference(h); - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_BOOL8: - return *makeReference(h); - - default: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNKNOWN: - return *makeReference(h); - } - } - UserBufferEncoding& getEncoding() noexcept{ - auto h = Snpe_IUserBuffer_GetEncoding_Ref(handle()); - switch(Snpe_UserBufferEncoding_GetElementType(h)){ - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNSIGNED8BIT: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT16: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT32: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT16: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT32: - return *makeReference(h); - - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT16: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8: - return *makeReference(h); - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF16: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_BOOL8: - return *makeReference(h); - - default: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNKNOWN: - return *makeReference(h); - } - } - -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncoding) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferSource) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferSourceGLBuffer) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingUnsigned8Bit) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingFloatN) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingFloat) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingTfN) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingIntN) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingUintN) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingTf8) - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, IUserBuffer) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IUserBufferFactory.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IUserBufferFactory.hpp deleted file mode 100644 index b3bbb087..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/IUserBufferFactory.hpp +++ /dev/null @@ -1,68 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include - -#include "Wrapper.hpp" -#include "IUserBuffer.hpp" -#include "TensorShape.hpp" - - -#include "SNPE/SNPEUtil.h" - -namespace DlSystem{ - - -// NOTE: These factories use a different handle type because they are singletons -// Never copy this pattern unless you're also implementing a singleton -class IUserBufferFactory : public Wrapper{ - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{NoOpDeleter}; - -public: - IUserBufferFactory() - : BaseType(nullptr) - { } - - std::unique_ptr createUserBuffer(void *buffer, - size_t bufSize, - const TensorShape &strides, - UserBufferEncoding* userBufferEncoding) noexcept{ - if(!userBufferEncoding) return {}; - auto handle = Snpe_Util_CreateUserBuffer(buffer, - bufSize, - getHandle(strides), - getHandle(userBufferEncoding)); - return makeUnique(handle); - } - - std::unique_ptr createUserBuffer(void *buffer, - size_t bufSize, - const TensorShape &strides, - UserBufferEncoding* userBufferEncoding, - UserBufferSource* userBufferSource) noexcept{ - if(!userBufferEncoding || !userBufferSource) return {}; - auto handle = Snpe_Util_CreateUserBufferFromSource(buffer, - bufSize, - getHandle(strides), - getHandle(*userBufferEncoding), - getHandle(*userBufferSource)); - return makeUnique(handle); - } - -}; - - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, IUserBufferFactory) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.h deleted file mode 100644 index 15b2a089..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.h +++ /dev/null @@ -1,329 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_PLATFORMCONFIG_H -#define DL_SYSTEM_PLATFORMCONFIG_H - -#include "DlSystem/DlError.h" -#include "DlSystem/DlEnums.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * @brief . - * - * A structure OpenGL configuration - * - * @note When certain OpenGL context and display are provided to UserGLConfig for using - * GPU buffer as input directly, the user MUST ensure the particular OpenGL - * context and display remain vaild throughout the execution of neural network models. - */ -typedef void* Snpe_UserGLConfig_Handle_t; - -/** - * @brief . 
- * - * Creates a new userGLConfig - * - */ -SNPE_API -Snpe_UserGLConfig_Handle_t Snpe_UserGLConfig_Create(); - -/** - * @brief Destroys the userGLConfig - * - * @param[in] handle : Handle to access the userGLConfig - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserGLConfig_Delete(Snpe_UserGLConfig_Handle_t handle); - -/** - * @brief Sets the EGL context - * - * @param[in] handle : Handle to access userGLConfig - * - * @param[in] userGLContext : void pointer - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserGLConfig_SetUserGLContext(Snpe_UserGLConfig_Handle_t handle, void* userGLContext); - -/** - * @brief Sets the EGL Display - * - * @param[in] handle : Handle to access userGLConfig - * - * @param[in] userGLDisplay : void pointer - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserGLConfig_SetUserGLDisplay(Snpe_UserGLConfig_Handle_t handle, void* userGLDisplay); - - -/** - * @brief Get EGL context - * - * @param[in] handle : Handle to access userGLConfig - * - * @return userGLContext of type void pointer - * - */ -SNPE_API -void* Snpe_UserGLConfig_GetUserGLContext(Snpe_UserGLConfig_Handle_t handle); - -/** - * @brief Get EGL Display - * - * @param[in] handle : Handle to access userGLConfig - * - * @return userGLDisplay of type void pointer - * - */ -SNPE_API -void* Snpe_UserGLConfig_GetUserGLDisplay(Snpe_UserGLConfig_Handle_t handle); - - -/** - * @brief . - * - * A structure Gpu configuration - */ -typedef void* Snpe_UserGpuConfig_Handle_t; - -/** - * @brief . - * - * Creates a new userGpuConfig - * - */ -SNPE_API -Snpe_UserGpuConfig_Handle_t Snpe_UserGpuConfig_Create(); - -/** - * @brief Destroys the userGpuConfig - * - * @param[in] handle : Handle to access userGLConfig - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserGpuConfig_Delete(Snpe_UserGpuConfig_Handle_t handle); - -/** - * @brief Set the userGpuConfig - * - * @param[in] handle : Handle to access userGpuConfig - * - * @param[in] glHandle : Handle needed to access userGlConfig - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -void Snpe_UserGpuConfig_Set(Snpe_UserGpuConfig_Handle_t handle, Snpe_UserGLConfig_Handle_t glHandle); - -/** - * @brief Get the userGpuConfig - * - * @param[in] handle : Handle to access userGpuConfig - * - * @return Handle needed to access userGlConfig - */ -SNPE_API -Snpe_UserGLConfig_Handle_t Snpe_UserGpuConfig_Get_Ref(Snpe_UserGpuConfig_Handle_t handle); - - - -/** - * A typedef to indicate a SNPE PlatformConfig handle - */ -typedef void* Snpe_PlatformConfig_Handle_t; - - -/** - * @brief . - * - * Creates a new PlatformConfig - * - */ -SNPE_API -Snpe_PlatformConfig_Handle_t Snpe_PlatformConfig_Create(); - - -/** - * @brief Copy-Construct a PlatformConfig from another PlatformConfig - * - * @param[in] otherHandle Handle to the other PlatformConfig - * - * @return Handle to the Copy-Constructed PlatformConfig - */ -SNPE_API -Snpe_PlatformConfig_Handle_t Snpe_PlatformConfig_CreateCopy(Snpe_PlatformConfig_Handle_t otherHandle); - -/** - * @brief Destroys the PlatformConfig - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PlatformConfig_Delete(Snpe_PlatformConfig_Handle_t handle); - - -typedef enum -{ - /// Unknown platform type. - SNPE_PLATFORMCONFIG_PLATFORMTYPE_UNKNOWN = 0, - - /// Snapdragon CPU. 
- SNPE_PLATFORMCONFIG_PLATFORMTYPE_CPU = 1, - - /// Adreno GPU. - SNPE_PLATFORMCONFIG_PLATFORMTYPE_GPU = 2, - - /// Hexagon DSP. - SNPE_PLATFORMCONFIG_PLATFORMTYPE_DSP = 3 -} Snpe_PlatformConfig_PlatformType_t; - - -/** - * @brief Retrieves the platform type - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return Platform type - */ -SNPE_API -Snpe_PlatformConfig_PlatformType_t Snpe_PlatformConfig_GetPlatformType(Snpe_PlatformConfig_Handle_t handle); - -/** - * @brief Indicates whther the plaform configuration is valid. - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return 1 if the platform configuration is valid; 0 otherwise. - */ -SNPE_API -int Snpe_PlatformConfig_IsValid(Snpe_PlatformConfig_Handle_t handle); - -/** - * @brief Retrieves the Gpu configuration - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return userGpuConfig populated with the Gpu configuration. - * - */ -SNPE_API -Snpe_UserGpuConfig_Handle_t Snpe_PlatformConfig_GetUserGpuConfig(Snpe_PlatformConfig_Handle_t handle); - -/** - * @brief Sets the Gpu configuration - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @param[in] gpuHandle : Gpu Configuration handle - * - * @return 1 if Gpu configuration was successfully set; 0 otherwise. - */ -SNPE_API -int Snpe_PlatformConfig_SetUserGpuConfig(Snpe_PlatformConfig_Handle_t handle, Snpe_UserGpuConfig_Handle_t gpuHandle); - -/** - * @brief Sets the platform options - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @param[in] options : Options as a const char* in the form of "keyword:options" - * - * @return 1 if options are pass validation; otherwise 0. If false, the options are not updated. - */ -SNPE_API -int Snpe_PlatformConfig_SetPlatformOptions(Snpe_PlatformConfig_Handle_t handle, const char* options); - -/** - * @brief Indicates whther the plaform configuration is valid. - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return 1 if the platform configuration is valid; 0 otherwise. - */ -SNPE_API -int Snpe_PlatformConfig_IsOptionsValid(Snpe_PlatformConfig_Handle_t handle); - -/** - * @brief Gets the platform options - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return Options as a const char* - */ -SNPE_API -const char* Snpe_PlatformConfig_GetPlatformOptions(Snpe_PlatformConfig_Handle_t handle); - -/** - * @brief Sets the platform options - * - * @note the returned string will be invalidated by subsequent calls to this function - * - * @param[in] handle : Handle needed to access the platformConfig - * @param[in] optionName : Name of platform options" - * @param[in] value : Value of specified optionName - * - * @return If 1, add "optionName:value" to platform options if optionName don't exist, otherwise update the - * value of specified optionName. - * If 0, the platform options will not be changed. - */ -SNPE_API -int Snpe_PlatformConfig_SetPlatformOptionValue(Snpe_PlatformConfig_Handle_t handle, const char* optionName, const char* value); - -/** - * @brief Removes the platform options - * - * @param[in] handle : Handle needed to access the platformConfig - * @param[in] optionName : Name of platform options" - * @param[in] value : Value of specified optionName - * - * @return If 1, removed "optionName:value" to platform options if optionName don't exist, do nothing. - * If 0, the platform options will not be changed. 
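The platform-option setters above all operate on "keyword:value" strings. A hedged sketch of the round trip with the C API; the "exampleKeyword:exampleValue" pair is a placeholder, since this header does not document concrete keywords.

```cpp
// Illustrative only: round-tripping a platform option string on a
// PlatformConfig created through the C API above.
#include <cstdio>
#include "DlSystem/PlatformConfig.h"

int main() {
    Snpe_PlatformConfig_Handle_t cfg = Snpe_PlatformConfig_Create();

    if (Snpe_PlatformConfig_SetPlatformOptions(cfg, "exampleKeyword:exampleValue") &&
        Snpe_PlatformConfig_IsOptionsValid(cfg)) {
        std::printf("platform options: %s\n", Snpe_PlatformConfig_GetPlatformOptions(cfg));
    }

    Snpe_PlatformConfig_Delete(cfg);
    return 0;
}
```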
- */ -SNPE_API -int Snpe_PlatformConfig_RemovePlatformOptionValue(Snpe_PlatformConfig_Handle_t handle, const char* optionName, const char* value); - -SNPE_API -void Snpe_PlatformConfig_SetIsUserGLBuffer(int isUserGLBuffer); - -SNPE_API -int Snpe_PlatformConfig_GetIsUserGLBuffer(); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_PLATFORMCONFIG_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.hpp deleted file mode 100644 index 5995c51b..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.hpp +++ /dev/null @@ -1,265 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include "DlSystem/PlatformConfig.h" - -namespace DlSystem { - -struct UserGLConfig -{ - /// Holds user EGL context. - /// - void* userGLContext = nullptr; - - /// Holds user EGL display. - void* userGLDisplay = nullptr; -}; - -struct UserGpuConfig{ - /// Holds user OpenGL configuration. - /// - UserGLConfig userGLConfig; -}; - -class PlatformConfig : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_PlatformConfig_Delete}; - - class UserGLConfigInternal : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserGLConfig_Delete}; - - public: - UserGLConfigInternal() - : BaseType(Snpe_UserGLConfig_Create()) - { } - UserGLConfigInternal(const UserGLConfig& uglc) - : UserGLConfigInternal() - { - setUserGLContext(uglc.userGLContext); - setUserGLDisplay(uglc.userGLDisplay); - } - void setUserGLContext(void* userGLContext){ - Snpe_UserGLConfig_SetUserGLContext(handle(), userGLContext); - } - void setUserGLDisplay(void* userGLDisplay){ - Snpe_UserGLConfig_SetUserGLDisplay(handle(), userGLDisplay); - } - - void* getUserGLContext(){ - return Snpe_UserGLConfig_GetUserGLContext(handle()); - } - void* getUserGLDisplay(){ - return Snpe_UserGLConfig_GetUserGLDisplay(handle()); - } - }; - - - - class UserGpuConfigInternal : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserGpuConfig_Delete}; - - public: - UserGpuConfigInternal() - : BaseType(Snpe_UserGpuConfig_Create()) - { } - - void set(const UserGLConfig& userGLConfig){ - UserGLConfigInternal uglc(userGLConfig); - Snpe_UserGpuConfig_Set(handle(), getHandle(uglc)); - } - - void get(UserGLConfig& uglc){ - UserGLConfigInternal uglci(moveHandle(Snpe_UserGpuConfig_Get_Ref(handle()), true)); - - uglc.userGLContext = uglci.getUserGLContext(); - uglc.userGLDisplay = 
uglci.getUserGLDisplay(); - } - - }; -public: - - /** - * @brief . - * - * An enum class of all supported platform types - */ - enum class PlatformType_t - { - /// Unknown platform type. - UNKNOWN = 0, - - /// Snapdragon CPU. - CPU = 1, - - /// Adreno GPU. - GPU = 2, - - /// Hexagon DSP. - DSP = 3 - }; - - /** - * @brief . - * - * A union class user platform configuration information - */ - struct PlatformConfigInfo - { - /// Holds user GPU Configuration. - /// - UserGpuConfig userGpuConfig; - - }; - - ~PlatformConfig() = default; - - PlatformConfig() - : BaseType(Snpe_PlatformConfig_Create()) - { } - - PlatformConfig(const PlatformConfig& other) - : BaseType(Snpe_PlatformConfig_CreateCopy(other.handle())) - { } - - /** - * @brief Retrieves the platform type - * - * @return Platform type - */ - PlatformType_t getPlatformType() const{ - return static_cast(Snpe_PlatformConfig_GetPlatformType(handle())); - }; - - /** - * @brief Indicates whther the plaform configuration is valid. - * - * @return True if the platform configuration is valid; false otherwise. - */ - bool isValid() const{ - return Snpe_PlatformConfig_IsValid(handle()); - }; - - /** - * @brief Retrieves the Gpu configuration - * - * @param[out] userGpuConfig The passed in userGpuConfig populated with the Gpu configuration on return. - * - * @return True if Gpu configuration was retrieved; false otherwise. - */ - bool getUserGpuConfig(UserGpuConfig& userGpuConfig) const{ - auto platformType = static_cast(Snpe_PlatformConfig_GetPlatformType(handle())); - if(platformType != PlatformType_t::GPU) return false; - - UserGpuConfigInternal gpuConf(moveHandle(Snpe_PlatformConfig_GetUserGpuConfig(handle()))); - - gpuConf.get(userGpuConfig.userGLConfig); - return true; - } - - /** - * @brief Sets the Gpu configuration - * - * @param[in] userGpuConfig Gpu Configuration - * - * @return True if Gpu configuration was successfully set; false otherwise. - */ - bool setUserGpuConfig(UserGpuConfig& userGpuConfig){ - UserGpuConfigInternal gpuConf; - gpuConf.set(userGpuConfig.userGLConfig); - return Snpe_PlatformConfig_SetUserGpuConfig(handle(), getHandle(gpuConf)); - } - - /** - * @brief Sets the platform options - * - * @param[in] options Options as a string in the form of "keyword:options" - * - * @return True if options are pass validation; otherwise false. If false, the options are not updated. - */ - bool setPlatformOptions(const std::string& options){ - return Snpe_PlatformConfig_SetPlatformOptions(handle(), options.c_str()); - } - - /** - * @brief Indicates whther the plaform configuration is valid. - * - * @return True if the platform configuration is valid; false otherwise. - */ - bool isOptionsValid() const{ - return Snpe_PlatformConfig_IsOptionsValid(handle()); - } - - /** - * @brief Gets the platform options - * - * @return Options as a string - */ - std::string getPlatformOptions() const { - return Snpe_PlatformConfig_GetPlatformOptions(handle()); - } - - /** - * @brief Sets the platform options - * - * @param[in] optionName Name of platform options" - * @param[in] value Value of specified optionName - * - * @return If true, add "optionName:value" to platform options if optionName don't exist, otherwise update the - * value of specified optionName. - * If false, the platform options will not be changed. 
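Putting the wrapper pieces above together, an application that already owns an EGL context and display would hand them to SNPE roughly as follows. This is an illustrative sketch: the include path mirrors the header locations in this hunk, and the two void pointers are placeholders for handles the app obtained elsewhere. As the UserGLConfig note above says, both must remain valid for as long as the network executes.

```cpp
// Illustrative only: eglContext / eglDisplay are placeholders for handles the
// application already owns.
#include "DlSystem/PlatformConfig.hpp"

bool configureGpuPlatform(void* eglContext, void* eglDisplay) {
    DlSystem::UserGLConfig glConfig;
    glConfig.userGLContext = eglContext;
    glConfig.userGLDisplay = eglDisplay;

    DlSystem::UserGpuConfig gpuConfig;
    gpuConfig.userGLConfig = glConfig;

    DlSystem::PlatformConfig platformConfig;
    return platformConfig.setUserGpuConfig(gpuConfig);
}
```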
- */ - bool setPlatformOptionValue(const std::string& optionName, const std::string& value){ - return Snpe_PlatformConfig_SetPlatformOptionValue(handle(), optionName.c_str(), value.c_str()); - } - - /** - * @brief Removes the platform options - * - * @param[in] optionName Name of platform options" - * @param[in] value Value of specified optionName - * - * @return If true, removed "optionName:value" to platform options if optionName don't exist, do nothing. - * If false, the platform options will not be changed. - */ - bool removePlatformOptionValue(const std::string& optionName, const std::string& value){ - return Snpe_PlatformConfig_RemovePlatformOptionValue(handle(), optionName.c_str(), value.c_str()); - } - - static void SetIsUserGLBuffer(bool isUserGLBuffer){ - Snpe_PlatformConfig_SetIsUserGLBuffer(isUserGLBuffer); - } - static bool GetIsUserGLBuffer(){ - return Snpe_PlatformConfig_GetIsUserGLBuffer(); - } - -}; - - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserGLConfig) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserGpuConfig) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, PlatformConfig) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/RuntimeList.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/RuntimeList.h deleted file mode 100644 index 2b699a7a..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/RuntimeList.h +++ /dev/null @@ -1,203 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_RUNTIME_LIST_H -#define DL_SYSTEM_RUNTIME_LIST_H - -#include - -#include "DlSystem/DlEnums.h" -#include "DlSystem/DlError.h" - -#include "StringList.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE RuntimeList handle - */ -typedef void* Snpe_RuntimeList_Handle_t; - -/** - * @brief . - * - * Creates a new runtime list - * - */ -SNPE_API -Snpe_RuntimeList_Handle_t Snpe_RuntimeList_Create(); - - -/** - * Copy-Constructs a RuntimeList and returns a handle to it - * - * @param runtimeListHandle the other RuntimeList to copy - * - * @return the handle to the created RuntimeList - */ -SNPE_API -Snpe_RuntimeList_Handle_t Snpe_RuntimeList_CreateCopy(Snpe_RuntimeList_Handle_t runtimeListHandle); - -/** - * @brief Destroys the RuntimeList - * - * @param[in] runtimeListHandle : Handle needed to access the runtimeList - * - * @return Error code. 
Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_Delete(Snpe_RuntimeList_Handle_t runtimeListHandle); - -/** - * Copy-assigns the contents of srcHandle into dstHandle - * - * @param src Source RuntimeList handle - * - * @param dst Destination RuntimeList handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_Assign(Snpe_RuntimeList_Handle_t src, Snpe_RuntimeList_Handle_t dst); - -/** - * @brief Returns the Runtime from list at position index - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @param[in] index : position in runtimeList - * - * @return The Runtime from list at position index - */ -SNPE_API -Snpe_Runtime_t Snpe_RuntimeList_GetRuntime(Snpe_RuntimeList_Handle_t runtimeListHandle, int index); - -/** - * @brief Set the Runtime of the list at position index - * - * @param[in] runtimeListHandle : Handle needed to access the runtimeList - * - * @param[in] index : position in runtimeList - * - * @param[in] runtime : The Runtime to assign to position index - * - * @return SNPE_SUCCESS on success - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_SetRuntime(Snpe_RuntimeList_Handle_t runtimeListHandle, size_t index, Snpe_Runtime_t runtime); - -/** - * @brief Adds runtime to the end of the runtime list - * order of precedence is former followed by latter entry - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @param[in] runtime to add - * - * @return Error code. Ruturns SNPE_SUCCESS If the runtime added successfully - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_Add(Snpe_RuntimeList_Handle_t runtimeListHandle, Snpe_Runtime_t runtime); - -/** - * @brief Removes the runtime from the list - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @param[in] runtime to be removed - * - * @return Error code. Ruturns SNPE_SUCCESS If the runtime removed successfully - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_Remove(Snpe_RuntimeList_Handle_t runtimeListHandle, Snpe_Runtime_t runtime) ; - -/** - * @brief Returns the number of runtimes in the list - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @return number of entries in the runtimeList. - */ -SNPE_API -size_t Snpe_RuntimeList_Size(Snpe_RuntimeList_Handle_t runtimeListHandle) ; - -/** - * @brief Returns 1 if the list is empty - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @return 1 if list empty, 0 otherwise. - */ -SNPE_API -int Snpe_RuntimeList_Empty(Snpe_RuntimeList_Handle_t runtimeListHandle) ; - -/** - * @brief . - * - * Removes all runtime from the list - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @return Error code. Returns SNPE_SUCCESS if runtime list is cleared successfully. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_Clear(Snpe_RuntimeList_Handle_t runtimeListHandle); - -/** - * @brief Get a StringList of names from the runtime list in order of precedence - * - * @param runtimeListHandle Handle to a RuntimeList - * - * @return Handle to a StringList - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_RuntimeList_GetRuntimeListNames(Snpe_RuntimeList_Handle_t runtimeListHandle); - -/** - * @brief . - * - * @param[in] runtime const char* - * Returns a Runtime enum corresponding to the in param string - * - */ -SNPE_API -Snpe_Runtime_t Snpe_RuntimeList_StringToRuntime(const char* str); - -/** - * @brief . 
- * - * @param[in] runtime - * Returns a const char* corresponding to the in param runtime enum - * - */ -SNPE_API -const char* Snpe_RuntimeList_RuntimeToString(Snpe_Runtime_t runtime); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_RUNTIME_LIST_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/RuntimeList.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/RuntimeList.hpp deleted file mode 100644 index a2abf2b7..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/RuntimeList.hpp +++ /dev/null @@ -1,115 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "StringList.hpp" -#include "DlEnums.hpp" -#include "DlSystem/RuntimeList.h" - - - - - - -namespace DlSystem { - -class RuntimeList : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_RuntimeList_Delete}; - - static Runtime_t GetRuntime(HandleType handle, size_t idx){ - return static_cast(Snpe_RuntimeList_GetRuntime(handle, int(idx))); - } - static Snpe_ErrorCode_t SetRuntime(HandleType handle, size_t idx, Runtime_t runtime){ - return Snpe_RuntimeList_SetRuntime(handle, idx, static_cast(runtime)); - } - -private: - using RuntimeReference = WrapperDetail::MemberIndexedReference; - friend RuntimeReference; - -public: - - RuntimeList() - : BaseType(Snpe_RuntimeList_Create()) - { } - RuntimeList(const RuntimeList& other) - : BaseType(Snpe_RuntimeList_CreateCopy(other.handle())) - { } - RuntimeList(RuntimeList&& other) noexcept - : BaseType(std::move(other)) - { } - - RuntimeList(const Runtime_t& runtime) - : BaseType(Snpe_RuntimeList_Create()) - { - Snpe_RuntimeList_Add(handle(), static_cast(runtime)); - } - - RuntimeList& operator=(const RuntimeList& other){ - if(this != &other){ - Snpe_RuntimeList_Assign(other.handle(), handle()); - } - return *this; - } - - RuntimeList& operator=(RuntimeList&& other) noexcept{ - return moveAssign(std::move(other)); - } - - Runtime_t operator[](size_t idx) const{ - return GetRuntime(handle(), idx); - } - - RuntimeReference operator[](size_t idx) noexcept{ - return {*this, idx}; - } - - bool add(const Runtime_t& runtime){ - return SNPE_SUCCESS == Snpe_RuntimeList_Add(handle(), static_cast(runtime)); - } - - void remove(Runtime_t runtime) noexcept{ - Snpe_RuntimeList_Remove(handle(), static_cast(runtime)); - } - - size_t size() const noexcept{ - return Snpe_RuntimeList_Size(handle()); - } - - bool empty() const noexcept{ - return Snpe_RuntimeList_Empty(handle()); - } - - void clear() noexcept{ - Snpe_RuntimeList_Clear(handle()); - } - - StringList getRuntimeListNames() const{ - return moveHandle(Snpe_RuntimeList_GetRuntimeListNames(handle())); - } - - static Runtime_t stringToRuntime(const char* runtimeStr){ - return static_cast(Snpe_RuntimeList_StringToRuntime(runtimeStr)); - } - static const char* runtimeToString(Runtime_t runtime){ - return Snpe_RuntimeList_RuntimeToString(static_cast(runtime)); - } - 
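Since the headers above only document the API surface, here is a minimal usage sketch of the deleted RuntimeList wrapper expressing a runtime fallback order. It assumes the SNPE SDK is on the include/link path and that DlEnums.hpp provides Runtime_t enumerators such as Runtime_t::DSP, Runtime_t::GPU and Runtime_t::CPU (those names are not shown in this diff).

```cpp
// Illustrative sketch only: build a runtime preference list (DSP -> GPU -> CPU)
// with the DlSystem::RuntimeList wrapper deleted in this diff.
#include <iostream>
#include "DlSystem/RuntimeList.hpp"
#include "DlSystem/DlEnums.hpp"   // assumed to define DlSystem::Runtime_t enumerators

static DlSystem::RuntimeList buildPreferredRuntimes()
{
    DlSystem::RuntimeList runtimes;                 // empty list
    runtimes.add(DlSystem::Runtime_t::DSP);         // highest precedence first
    runtimes.add(DlSystem::Runtime_t::GPU);
    runtimes.add(DlSystem::Runtime_t::CPU);         // CPU as the final fallback
    return runtimes;
}

int main()
{
    const DlSystem::RuntimeList runtimes = buildPreferredRuntimes();
    std::cout << "Configured " << runtimes.size() << " runtimes:\n";
    for (size_t i = 0; i < runtimes.size(); ++i) {
        // runtimeToString mirrors the C helper Snpe_RuntimeList_RuntimeToString
        std::cout << "  " << DlSystem::RuntimeList::runtimeToString(runtimes[i]) << '\n';
    }
    return 0;
}
```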
-}; - - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, RuntimeList) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/SnpeApiExportDefine.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/SnpeApiExportDefine.h deleted file mode 100644 index 62c6718f..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/SnpeApiExportDefine.h +++ /dev/null @@ -1,34 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -// Macro controlling visibility of SNPE API - -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef SNPE_API -#define SNPE_API -#endif - -#ifdef __cplusplus -} // extern "C" -#endif diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/String.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/String.hpp deleted file mode 100644 index 85b2ef22..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/String.hpp +++ /dev/null @@ -1,70 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - - -#include - - -#include "Wrapper.hpp" - -namespace DlSystem{ - - -// Just a backwards compatible wrapper for std::string -class String{ -public: - String() = delete; - explicit String(const std::string& str) - : m_String(str) - { } - explicit String(std::string&& str) noexcept - : m_String(std::move(str)) - { } - - explicit String(const char* str) - : m_String(str) - { } - - String(String&& other) noexcept = default; - String(const String& other) = delete; - - - String& operator=(String&& other) noexcept = default; - String& operator=(const String& other) = delete; - - bool operator<(const String& rhs) const noexcept{ return m_String < rhs.m_String; } - bool operator>(const String& rhs) const noexcept{ return m_String > rhs.m_String; } - bool operator<=(const String& rhs) const noexcept{ return m_String <= rhs.m_String; } - bool operator>=(const String& rhs) const noexcept{ return m_String >= rhs.m_String; } - bool operator==(const String& rhs) const noexcept{ return m_String == rhs.m_String; } - bool operator!=(const String& rhs) const noexcept{ return m_String != rhs.m_String; } - - - bool operator<(const std::string& rhs) const noexcept{ return m_String < rhs; } - bool operator>(const std::string& rhs) const noexcept{ return m_String > rhs; } - bool operator<=(const std::string& rhs) const noexcept{ return m_String <= rhs; } - bool operator>=(const std::string& rhs) const noexcept{ return m_String >= rhs; } - bool operator==(const std::string& rhs) const noexcept{ return m_String == rhs; } - bool operator!=(const std::string& rhs) const noexcept{ return m_String != rhs; } - - - const char* c_str() const noexcept{ return m_String.c_str(); } - - explicit operator std::string&() noexcept{ return m_String; } - explicit operator const std::string&() const noexcept{ return m_String; } - -private: - std::string m_String; -}; - - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, String) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/StringList.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/StringList.h deleted file mode 100644 index faa793b3..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/StringList.h +++ /dev/null @@ -1,154 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
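A small, hypothetical usage sketch of the backwards-compatibility String wrapper deleted above; it only exercises members visible in the header (construction, comparison against std::string, and c_str()).

```cpp
// Illustrative sketch: DlSystem::String is a thin, move-only wrapper around
// std::string kept for backwards compatibility, as the deleted header notes.
#include <iostream>
#include <string>
#include "DlSystem/String.hpp"

int main()
{
    DlSystem::String layerName("conv2d_1");   // explicit construction from const char*
    std::string expected = "conv2d_1";

    // Comparison operators against both DlSystem::String and std::string are provided
    if (layerName == expected) {
        std::cout << "layer: " << layerName.c_str() << '\n';
    }
    return 0;
}
```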
-// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_STRING_LIST_H -#define DL_SYSTEM_STRING_LIST_H - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * A typedef to indicate a SNPE StringList handle - */ -typedef void* Snpe_StringList_Handle_t; - -/** - * Constructs a StringList and returns a handle to it - * - * @return the handle to the created StringList - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_StringList_Create(); - -/** - * Constructs a StringList and returns a handle to it - * - * @param[in] size : size of list - * - * @return the handle to the created StringList - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_StringList_CreateSize(size_t size); - -/** - * Constructs a StringList and returns a handle to it - * - * @param[in] other : StringList handle to be copied from - * - * @return the handle to the created StringList - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_StringList_CreateCopy(Snpe_StringList_Handle_t other); - -/** - * Destroys/frees a StringList - * - * @param[in] stringListHandle : Handle to access the stringList - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_StringList_Delete(Snpe_StringList_Handle_t stringListHandle); - - -/** - * Append a string to the list. - * - * @param[in] stringListHandle : Handle to access the stringList - * @param[in] str Null-terminated ASCII string to append to the list. - * - * @return SNPE_SUCCESS if Append operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_StringList_Append(Snpe_StringList_Handle_t stringListHandle, const char* string); - -/** - * Returns the string at the indicated position, - * or an empty string if the positions is greater than the size - * of the list. - * - * @param[in] stringListHandle : Handle to access the stringList - * @param[in] idx Position in the list of the desired string - * - * @return the string at the indicated position - */ -SNPE_API -const char* Snpe_StringList_At(Snpe_StringList_Handle_t stringListHandle, size_t idx); - -/** - * Pointer to the first string in the list. - * Can be used to iterate through the list. - * - * @param[in] stringListHandle : Handle to access the stringList - * - * @return Pointer to the first string in the list. - */ -SNPE_API -const char** Snpe_StringList_Begin(Snpe_StringList_Handle_t stringListHandle); - -/** - * Pointer to one after the last string in the list. - * Can be used to iterate through the list. - * - * @param[in] stringListHandle : Handle to access the stringList - * - * @return Pointer to one after the last string in the list - */ -SNPE_API -const char** Snpe_StringList_End(Snpe_StringList_Handle_t stringListHandle); - -/** - * Return the number of valid string pointers held by this list. 
- * - * @param[in] stringListHandle : Handle to access the stringList - * - * @return The size of the StringList - */ -SNPE_API -size_t Snpe_StringList_Size(Snpe_StringList_Handle_t stringListHandle); - -/** - * Copy-assigns the contents of src into dst - * - * @param src Source StringList handle - * @param dst Destination StringList handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_StringList_Assign(Snpe_StringList_Handle_t src, Snpe_StringList_Handle_t dst); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_STRING_LIST_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/StringList.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/StringList.hpp deleted file mode 100644 index 2fd84bf1..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/StringList.hpp +++ /dev/null @@ -1,73 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "DlSystem/DlError.hpp" - -#include "DlSystem/StringList.h" - - -namespace DlSystem { - -class StringList : public Wrapper{ - friend BaseType; - using BaseType::BaseType; - static constexpr DeleteFunctionType DeleteFunction = Snpe_StringList_Delete; - -public: - StringList() - : BaseType(Snpe_StringList_Create()) - { } - explicit StringList(size_t length) - : BaseType(Snpe_StringList_CreateSize(length)) - { } - StringList(const StringList& other) - : BaseType(Snpe_StringList_CreateCopy(other.handle())) - { } - StringList(StringList&& other) noexcept - : BaseType(std::move(other)) - { } - - - StringList& operator=(const StringList& other){ - if(this != &other){ - Snpe_StringList_Assign(other.handle(), handle()); - } - return *this; - } - StringList& operator=(StringList&& other) noexcept{ - return moveAssign(std::move(other)); - } - - - DlSystem::ErrorCode append(const char* str){ - return static_cast(Snpe_StringList_Append(handle(), str)); - } - - const char* at(size_t idx) const noexcept{ - return Snpe_StringList_At(handle(), idx); - } - - const char** begin() const noexcept{ - return Snpe_StringList_Begin(handle()); - } - const char** end() const noexcept{ - return Snpe_StringList_End(handle()); - } - - size_t size() const noexcept{ - return Snpe_StringList_Size(handle()); - } - -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, StringList) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorMap.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorMap.h deleted file mode 100644 index aa367eda..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorMap.h +++ /dev/null @@ -1,154 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
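For reference, a minimal sketch of the StringList wrapper deleted above; begin()/end() return a const char** range, so a range-based for loop works directly. The entry names are made up for illustration.

```cpp
// Illustrative sketch: building and iterating a DlSystem::StringList,
// the container used throughout these headers to return lists of names.
#include <iostream>
#include "DlSystem/StringList.hpp"

int main()
{
    DlSystem::StringList names;
    names.append("lowres_input");     // append() returns a DlSystem::ErrorCode
    names.append("highres_output");

    std::cout << names.size() << " entries:\n";
    for (const char* name : names) {  // begin()/end() expose a const char** range
        std::cout << "  " << name << '\n';
    }
    return 0;
}
```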
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_TENSORMAP_H -#define DL_SYSTEM_TENSORMAP_H - -#include "DlSystem/ITensor.h" -#include "DlSystem/StringList.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * A typedef to indicate a SNPE Tensor Map handle - */ -typedef void* Snpe_TensorMap_Handle_t; - - -/** - * Constructs a TensorMap and returns a handle to it - * - * @return the handle to the created TensorMap - */ -SNPE_API -Snpe_TensorMap_Handle_t Snpe_TensorMap_Create(); - - -/** - * Copy-Constructs a TensorMap and returns a handle to it - * - * @param tensorMapHandle the other TensorMap to copy - * - * @return the handle to the created TensorMap - */ -SNPE_API -Snpe_TensorMap_Handle_t Snpe_TensorMap_CreateCopy(Snpe_TensorMap_Handle_t tensorMapHandle); - -/** - * Copy-assigns the contents of srcHandle into dstHandle - * - * @param src Source TensorMap handle - * - * @param dst Destination TensorMap handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorMap_Assign(Snpe_TensorMap_Handle_t srcHandle, Snpe_TensorMap_Handle_t dstHandle); - - -/** - * Destroys/frees Tensor Map - * - * @param[in] handle : handle to tensorMap - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorMap_Delete(Snpe_TensorMap_Handle_t handle); - -/** - * @brief Adds a name and the corresponding tensor pointer - * to the map - * - * @param[in] handle : Handle to tensorMap - * @param[in] name : The name of the tensor - * @param[in] tensorHandle : Handle to access ITensor - * - * @note If a tensor with the same name already exists, the - * tensor is replaced with the existing tensor. - */ -SNPE_API -void Snpe_TensorMap_Add(Snpe_TensorMap_Handle_t handle, const char *name, Snpe_ITensor_Handle_t tensorHandle); - -/** - * @brief Removes a mapping of tensor and its name by its name - * - * @param[in] handle : Handle to tensorMap - * @param[in] name : The name of tensor to be removed - * - * @note If no tensor with the specified name is found, nothing - * is done. - */ -SNPE_API -void Snpe_TensorMap_Remove(Snpe_TensorMap_Handle_t handle, const char *name); - -/** - * @brief Returns the number of tensors in the map - * - * @param[in] handle : Handle to tensorMap - * - * @return Number of tensors in the map - */ -SNPE_API -size_t Snpe_TensorMap_Size(Snpe_TensorMap_Handle_t handle); - -/** - * @brief . - * - * @param[in] handle : Handle to tensorMap - * Removes all tensors from the map - */ -SNPE_API -void Snpe_TensorMap_Clear(Snpe_TensorMap_Handle_t handle); - -/** - * @brief Returns the tensor given its name. - * - * @param[in] handle : Handle to tensorMap - * @param[in] name : The name of the tensor to get. - * - * @return nullptr if no tensor with the specified name is - * found; otherwise, a valid pointer to the tensor. - */ -SNPE_API -Snpe_ITensor_Handle_t Snpe_TensorMap_GetTensor_Ref(Snpe_TensorMap_Handle_t handle, const char *name); - -/** - * @brief . 
- * - * @param[in] handle : Handle to tensorMap - * - * @return A StringList of the names of all tensors - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_TensorMap_GetTensorNames(Snpe_TensorMap_Handle_t handle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_TENSOR_MAP_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorMap.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorMap.hpp deleted file mode 100644 index 20a6c21f..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorMap.hpp +++ /dev/null @@ -1,81 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "DlSystem/ITensor.hpp" -#include "DlSystem/StringList.hpp" -#include "DlSystem/DlError.hpp" - -#include "DlSystem/TensorMap.h" - -namespace DlSystem { - -class TensorMap : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_TensorMap_Delete}; -public: - - TensorMap() - : BaseType(Snpe_TensorMap_Create()) - { } - - TensorMap(const TensorMap& other) - : BaseType(Snpe_TensorMap_CreateCopy(other.handle())) - { } - - TensorMap(TensorMap&& other) noexcept - : BaseType(std::move(other)) - { } - - TensorMap& operator=(const TensorMap& other){ - if(this != &other){ - Snpe_TensorMap_Assign(other.handle(), handle()); - } - return *this; - } - TensorMap& operator=(TensorMap&& other) noexcept{ - return moveAssign(std::move(other)); - } - - DlSystem::ErrorCode add(const char* name, ITensor* tensor){ - if(!tensor) return DlSystem::ErrorCode::SNPE_CAPI_BAD_ARGUMENT; - Snpe_TensorMap_Add(handle(), name, getHandle(*tensor)); - return DlSystem::ErrorCode::NONE; - } - - void remove(const char* name) noexcept{ - Snpe_TensorMap_Remove(handle(), name); - } - - size_t size() const noexcept{ - return Snpe_TensorMap_Size(handle()); - } - - void clear() noexcept{ - Snpe_TensorMap_Clear(handle()); - } - - - ITensor* getTensor(const char* name) const noexcept{ - return makeReference(Snpe_TensorMap_GetTensor_Ref(handle(), name)); - } - - StringList getTensorNames() const{ - return moveHandle(Snpe_TensorMap_GetTensorNames(handle())); - } - -}; - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, TensorMap) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorShape.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorShape.h deleted file mode 100644 index 1fde628c..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorShape.h +++ /dev/null @@ -1,174 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
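A hedged sketch of the TensorMap wrapper deleted above. Creating the ITensor itself happens elsewhere in the SDK (through a tensor factory not included in this diff), so the example simply takes an already-created pointer; the tensor name is hypothetical.

```cpp
// Illustrative sketch: associating named tensors with a DlSystem::TensorMap.
// The ITensor is assumed to have been created elsewhere (factory APIs are not
// part of this diff), so it is passed in as a pointer.
#include <iostream>
#include "DlSystem/TensorMap.hpp"
#include "DlSystem/ITensor.hpp"

void registerInput(DlSystem::TensorMap& inputs, DlSystem::ITensor* inputTensor)
{
    // add() rejects null pointers with SNPE_CAPI_BAD_ARGUMENT, as the wrapper shows
    if (inputs.add("lowres_input", inputTensor) != DlSystem::ErrorCode::NONE) {
        std::cerr << "failed to add tensor\n";
        return;
    }

    for (const char* name : inputs.getTensorNames()) {
        std::cout << "mapped tensor: " << name
                  << (inputs.getTensor(name) ? " (valid)" : " (null)") << '\n';
    }
}
```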
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_TENSOR_SHAPE_H -#define DL_SYSTEM_TENSOR_SHAPE_H - -#include - -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE TensorShape handle - */ -typedef void* Snpe_TensorShape_Handle_t; - - -/** - * @brief . - * - * Creates a new shape with a list of dims specified in array - * - * @param[in] dims The dimensions are specified in which the last - * element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying, etc. - * - * @param[in] size Size of the array. - * - * @return the handle to the created TensorShape - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_TensorShape_CreateDimsSize(const size_t *dims, size_t size); - -/** - * Constructs a TensorShape and returns a handle to it - * - * @return the handle to the created TensorShape - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_TensorShape_Create(); - -/** - * @brief . - * - * copy constructor. - * @param[in] other object to copy. - * - * @return the handle to the created TensorShape. - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_TensorShape_CreateCopy(Snpe_TensorShape_Handle_t other); - -/** - * Destroys/frees Tensor Shape - * - * @param[in] handle : handle to tensorShape - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShape_Delete(Snpe_TensorShape_Handle_t tensorShapeHandle); - -/** - * Copy-assigns the contents of srcHandle into dstHandle - * - * @param srcHandle Source TensorShape handle - * @param dstHandle Destination TensorShape handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShape_Assign(Snpe_TensorShape_Handle_t srcHandle, Snpe_TensorShape_Handle_t dstHandle); - -/** - * @brief . - * - * Concatenates additional dimensions specified in - * the array to the existing dimensions. - * - * @param[in] handle : handle to tensorShape - * @param[in] dims The dimensions are specified in which the last - * element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying, etc. - * - * @param[in] size Size of the array. - * - */ -SNPE_API -void Snpe_TensorShape_Concatenate(Snpe_TensorShape_Handle_t tensorShape, const size_t *dims, size_t size); - -/** - * @brief . - * - * @param[in] handle : handle to tensorShape - * - * Retrieves the rank i.e. number of dimensions. - * - * @return The rank - */ -SNPE_API -size_t Snpe_TensorShape_Rank(Snpe_TensorShape_Handle_t tensorShape); - -/** - * @brief . - * - * @param[in] handle : handle to tensorShape - * - * @param[in] index : Position in the dimension array. 
- * - * @return The dimension value in tensor shape - */ -SNPE_API -size_t Snpe_TensorShape_At(Snpe_TensorShape_Handle_t tensorShapeHandle, size_t index); - -/** - * @brief Set a value in a TensorShape at the provided index - * - * @param[in] handle : handle to tensorShape - * - * @param[in] index : Position in the dimension array. - * - * @param[in] value : Dimension value to set - * - * @return SNPE_SUCCESS on success - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShape_Set(Snpe_TensorShape_Handle_t tensorShapeHandle, size_t index, size_t value); - -/** - * @brief . - * - * Retrieves a pointer to the first dimension of shape - * - * @param[in] handle : handle to tensorShape - * - * @return nullptr if no dimension exists; otherwise, points to - * the first dimension. - * - */ -SNPE_API -const size_t* Snpe_TensorShape_GetDimensions(Snpe_TensorShape_Handle_t tensorShape); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_TENSOR_SHAPE_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorShape.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorShape.hpp deleted file mode 100644 index 776637c7..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorShape.hpp +++ /dev/null @@ -1,104 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include -#include -#include - -#include "Wrapper.hpp" - -#include "DlSystem/TensorShape.h" - -namespace DlSystem { - - -using Dimension = size_t; - - - -class TensorShape : public Wrapper { - friend BaseType; - using BaseType::BaseType; - -protected: - static constexpr DeleteFunctionType DeleteFunction{Snpe_TensorShape_Delete}; - -private: - using DimensionReference = WrapperDetail::MemberIndexedReference; - friend DimensionReference; - -public: - - TensorShape() - : BaseType(Snpe_TensorShape_Create()) - { } - - TensorShape(const TensorShape& other) - : BaseType(Snpe_TensorShape_CreateCopy(other.handle())) - { } - - TensorShape(TensorShape&& other) noexcept - : BaseType(std::move(other)) - { } - - TensorShape(std::initializer_list dims) - : BaseType(Snpe_TensorShape_CreateDimsSize(dims.begin(), dims.size())) - { } - - TensorShape& operator=(const TensorShape& other) noexcept{ - if(this != &other){ - Snpe_TensorShape_Assign(other.handle(), handle()); - } - return *this; - } - - TensorShape& operator=(TensorShape&& other) noexcept{ - return moveAssign(std::move(other)); - } - - TensorShape(const size_t *dims, size_t size) - : BaseType(Snpe_TensorShape_CreateDimsSize(dims, size)) - { } - - TensorShape(const std::vector& dims) - : TensorShape(dims.data(), dims.size()) - { } - - - void concatenate(const size_t *dims, size_t size){ - Snpe_TensorShape_Concatenate(handle(), dims, size); - } - - void concatenate(const size_t &dim){ - return concatenate(&dim, 1); - } - - size_t operator[](size_t idx) const{ - return Snpe_TensorShape_At(handle(), idx); - } - - DimensionReference operator[](size_t idx){ - return {*this, idx}; - } - - size_t rank() const{ - return Snpe_TensorShape_Rank(handle()); - } - - const size_t* getDimensions() const{ - return Snpe_TensorShape_GetDimensions(handle()); - } - - -}; - -} // ns 
DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, Dimension) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, TensorShape) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.h deleted file mode 100644 index 520fa5ab..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.h +++ /dev/null @@ -1,163 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - - -/** - * @file - */ - -#ifndef _SNPE_TENSOR_SHAPE_MAP_H_ -#define _SNPE_TENSOR_SHAPE_MAP_H_ - - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" - -#include "DlSystem/TensorShape.h" -#include "DlSystem/StringList.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE TensorShapeMap handle - */ -typedef void* Snpe_TensorShapeMap_Handle_t; - -/** - * Constructs a TensorShapeMap and returns a handle to it - * - * @return the handle to the created TensorShapeMap - */ -SNPE_API -Snpe_TensorShapeMap_Handle_t Snpe_TensorShapeMap_Create(); - -/** - * @brief . - * - * copy constructor. - * - * @param[in] tsmHandle : Handle to the other object to copy. - * @return the handle to the created TensorShapeMap - */ -SNPE_API -Snpe_TensorShapeMap_Handle_t Snpe_TensorShapeMap_CreateCopy(Snpe_TensorShapeMap_Handle_t tsmHandle); - -/** - * Destroys/frees Tensor Shape Map - * - * @param[in] tsmhandle : handle to access Tensor Shape Map - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShapeMap_Delete(Snpe_TensorShapeMap_Handle_t tsmHandle); - -/** - * @brief . - * - * assignment operator. Copy-assigns from srcHandle to dstHandle - * @param[in] srcHandle : handle to source Tensor Shape Map object - * @param[out] dstHandle : handle to destination Tensor Shape Map object - * - * @return Returns SNPE_SUCCESS if Assignment successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShapeMap_Assign(Snpe_TensorShapeMap_Handle_t srcHandle, Snpe_TensorShapeMap_Handle_t dstHandle); - -/** - * @brief Adds a name and the corresponding tensor pointer - * to the map - * - * @param[in] tsmhandle : handle to access Tensor Shape Map - * @param[in] name The name of the tensor - * @param[in] tsHandle : Handle to access Tensor Shape - * - * @return Returns SNPE_SUCCESS if Add operation successful - * @note If a tensor with the same name already exists, no new - * tensor is added. 
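A minimal sketch of the TensorShape wrapper deleted above, using only members visible in the header (the initializer-list constructor, rank(), getDimensions() and concatenate()); the NHWC dimensions are made up for illustration.

```cpp
// Illustrative sketch: building and inspecting a DlSystem::TensorShape,
// e.g. an NHWC shape for a 128x128 RGB input.
#include <iostream>
#include "DlSystem/TensorShape.hpp"

int main()
{
    DlSystem::TensorShape shape = {1, 128, 128, 3};   // initializer-list constructor

    std::cout << "rank = " << shape.rank() << '\n';
    const size_t* dims = shape.getDimensions();        // pointer to the first dimension
    for (size_t i = 0; i < shape.rank(); ++i) {
        std::cout << "dim[" << i << "] = " << dims[i] << '\n';
    }

    // Append a trailing dimension; concatenate() mirrors Snpe_TensorShape_Concatenate
    size_t extra = 2;
    shape.concatenate(extra);
    std::cout << "rank after concatenate = " << shape.rank() << '\n';
    return 0;
}
```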
- */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShapeMap_Add(Snpe_TensorShapeMap_Handle_t tsmHandle, const char* name, Snpe_TensorShape_Handle_t tsHandle); - -/** - * @brief Removes a mapping of tensor and its name by its name - * - * @param[in] tsmhandle : handle to access Tensor Shape Map - * @param[in] name The name of tensor to be removed - * @return Returns SNPE_SUCCESS if Remove operation successful - * - * @note If no tensor with the specified name is found, nothing - * is done. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShapeMap_Remove(Snpe_TensorShapeMap_Handle_t tsmHandle, const char* name); - -/** - * @brief Returns the number of tensors in the map - * @param[in] tsmhandle : handle to access Tensor Shape Map - * @return Returns number entries in TensorShapeMap - */ -SNPE_API -size_t Snpe_TensorShapeMap_Size(Snpe_TensorShapeMap_Handle_t tsmHandle); - -/** - * @brief . - * - * Removes all tensors from the map - * @param[in] tsmhandle : handle to access Tensor Shape Map - * @return Returns SNPE_SUCCESS if Clear operation successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShapeMap_Clear(Snpe_TensorShapeMap_Handle_t tsmHandle); - -/** - * @brief Returns the tensor given its name. - * - * @param[in] tsmhandle : handle to access Tensor Shape Map - * @param[in] name The name of the tensor to get. - * - * @return nullptr if no tensor with the specified name is - * found; otherwise, a valid Tensor Shape Handle. - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_TensorShapeMap_GetTensorShape(Snpe_TensorShapeMap_Handle_t tsmHandle, const char* name); - -/** - * @brief . - * - * @param[in] tsmHandle : handle to access Tensor Shape Map - * @return A stringList Handle to access names of all tensor shapes - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_TensorShapeMap_GetTensorShapeNames(Snpe_TensorShapeMap_Handle_t tsmHandle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_TENSOR_SHAPE_MAP_H_ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.hpp deleted file mode 100644 index 8b79a6e2..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.hpp +++ /dev/null @@ -1,77 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include "DlSystem/StringList.hpp" -#include "DlSystem/TensorShape.hpp" -#include "DlSystem/DlError.hpp" - -#include "DlSystem/TensorShapeMap.h" - -namespace DlSystem { - -class TensorShapeMap : public Wrapper { - friend BaseType; - using BaseType::BaseType; - static constexpr DeleteFunctionType DeleteFunction{Snpe_TensorShapeMap_Delete}; - -public: - TensorShapeMap() - : BaseType(Snpe_TensorShapeMap_Create()) - { } - TensorShapeMap(const TensorShapeMap& other) - : BaseType(Snpe_TensorShapeMap_CreateCopy(other.handle())) - { } - TensorShapeMap(TensorShapeMap&& other) noexcept - : BaseType(std::move(other)) - { } - - TensorShapeMap& operator=(const TensorShapeMap& other){ - if(this != &other){ - Snpe_TensorShapeMap_Assign(other.handle(), handle()); - } - return *this; - } - TensorShapeMap& operator=(TensorShapeMap&& other) noexcept{ - return moveAssign(std::move(other)); - } - - DlSystem::ErrorCode add(const char *name, const TensorShape& tensorShape){ - return static_cast( - Snpe_TensorShapeMap_Add(handle(), name, getHandle(tensorShape)) - ); - } - - DlSystem::ErrorCode remove(const char* name) noexcept{ - return static_cast(Snpe_TensorShapeMap_Remove(handle(), name)); - } - - size_t size() const noexcept{ - return Snpe_TensorShapeMap_Size(handle()); - } - - DlSystem::ErrorCode clear() noexcept{ - return static_cast(Snpe_TensorShapeMap_Clear(handle())); - } - - TensorShape getTensorShape(const char* name) const noexcept{ - return moveHandle(Snpe_TensorShapeMap_GetTensorShape(handle(), name)); - } - - StringList getTensorShapeNames() const{ - return moveHandle(Snpe_TensorShapeMap_GetTensorShapeNames(handle())); - } - -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, TensorShapeMap) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.h deleted file mode 100644 index 2da1c792..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.h +++ /dev/null @@ -1,151 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_USER_BUFFER_MAP_H -#define DL_SYSTEM_USER_BUFFER_MAP_H - -#include "DlSystem/StringList.h" -#include "DlSystem/IUserBuffer.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE UserBufferMap handle - */ -typedef void* Snpe_UserBufferMap_Handle_t; - -/** - * @brief . - * - * Creates a new empty UserBuffer map - */ -SNPE_API -Snpe_UserBufferMap_Handle_t Snpe_UserBufferMap_Create(); - -/** - * copy constructor. 
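A short sketch of the TensorShapeMap wrapper deleted above; in SNPE this map is typically handed to the network builder to override input dimensions, but that builder API is outside this diff, so only the map itself is shown. The tensor name and shape are hypothetical.

```cpp
// Illustrative sketch: keeping a named input-shape override in a TensorShapeMap.
#include <iostream>
#include "DlSystem/TensorShape.hpp"
#include "DlSystem/TensorShapeMap.hpp"

int main()
{
    DlSystem::TensorShapeMap shapeMap;
    DlSystem::TensorShape inputShape = {1, 256, 256, 3};

    if (shapeMap.add("lowres_input", inputShape) != DlSystem::ErrorCode::NONE) {
        std::cerr << "add failed\n";
        return 1;
    }

    DlSystem::TensorShape stored = shapeMap.getTensorShape("lowres_input");
    std::cout << "stored rank: " << stored.rank()
              << ", entries in map: " << shapeMap.size() << '\n';
    return 0;
}
```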
- * @param[in] other : Handle to the other userBufferMap to be copied from. - */ -SNPE_API -Snpe_UserBufferMap_Handle_t Snpe_UserBufferMap_CreateCopy(Snpe_UserBufferMap_Handle_t other); - - -/** - * @brief Adds a name and the corresponding UserBuffer pointer - * to the map - * - * @param[in] handle : Handle to access UserBufferMap - * @param[in] name : The name of the UserBuffer - * @param[in] bufferHandle : Handle to access UserBuffer - * - * @note If a UserBuffer with the same name already exists, the new - * UserBuffer pointer would be updated. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferMap_Add(Snpe_UserBufferMap_Handle_t handle, const char *name, Snpe_IUserBuffer_Handle_t bufferHandle); - -/** - * @brief Removes a mapping of one UserBuffer and its name by its name - * - * @param[in] handle : Handle to access UserBufferMap - * - * @param[in] name : The name of UserBuffer to be removed - * - * @note If no UserBuffer with the specified name is found, nothing - * is done. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferMap_Remove(Snpe_UserBufferMap_Handle_t handle, const char *name); - -/** - * @brief Returns the number of UserBuffers in the map - * @param[in] handle : Handle to access UserBufferMap - */ -SNPE_API -size_t Snpe_UserBufferMap_Size(Snpe_UserBufferMap_Handle_t handle); - -/** - * @brief . - * - * @param[in] handle : Handle to access UserBufferMap - * Removes all UserBuffers from the map - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferMap_Clear(Snpe_UserBufferMap_Handle_t handle); - -/** - * @brief Returns the UserBuffer given its name. - * - * @param[in] handle : Handle to access UserBufferMap - * - * @param[in] name : The name of the UserBuffer to get. - * - * @return nullptr if no UserBuffer with the specified name is - * found; otherwise, a valid pointer to the UserBuffer. - */ -SNPE_API -Snpe_IUserBuffer_Handle_t Snpe_UserBufferMap_GetUserBuffer_Ref(Snpe_UserBufferMap_Handle_t handle , const char *name); - -/** - * @brief . - * - * Returns the names of all UserBuffers - * - * @param[in] handle : Handle to access UserBufferMap - * - * @return A list of UserBuffer names. - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_UserBufferMap_GetUserBufferNames(Snpe_UserBufferMap_Handle_t handle); - -/** - * Copy-assigns the contents of srcHandle into dstHandle - * - * @param src Source UserBufferMap handle - * @param dst Destination UserBufferMap handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferMap_Assign(Snpe_UserBufferMap_Handle_t srcHandle, Snpe_UserBufferMap_Handle_t dstHandle); - -/** - * Destroys/frees UserBuffer Map - * - * @param[in] handle : Handle to access UserBuffer Map - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferMap_Delete(Snpe_UserBufferMap_Handle_t handle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_USER_BUFFER_MAP_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.hpp deleted file mode 100644 index acf3207c..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.hpp +++ /dev/null @@ -1,80 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. 
-// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include - -#include "Wrapper.hpp" -#include "DlSystem/DlError.hpp" -#include "DlSystem/StringList.hpp" -#include "DlSystem/IUserBuffer.hpp" - -#include "DlSystem/UserBufferMap.h" - -namespace DlSystem { - -class UserBufferMap : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserBufferMap_Delete}; - -public: - UserBufferMap() - : BaseType(Snpe_UserBufferMap_Create()) - { } - - UserBufferMap(const UserBufferMap& other) - : BaseType(Snpe_UserBufferMap_CreateCopy(other.handle())) - { } - UserBufferMap(UserBufferMap&& other) noexcept - : BaseType(std::move(other)) - { } - - UserBufferMap& operator=(const UserBufferMap& other){ - if(this != &other){ - Snpe_UserBufferMap_Assign(other.handle(), handle()); - } - return *this; - } - UserBufferMap& operator=(UserBufferMap&& other) noexcept{ - return moveAssign(std::move(other)); - } - - DlSystem::ErrorCode add(const char* name, IUserBuffer* buffer){ - if(!buffer) return ErrorCode::SNPE_CAPI_BAD_ARGUMENT; - return static_cast(Snpe_UserBufferMap_Add(handle(), name, getHandle(*buffer))); - } - - DlSystem::ErrorCode remove(const char* name) noexcept{ - return static_cast(Snpe_UserBufferMap_Remove(handle(), name)); - } - - size_t size() const noexcept{ - return Snpe_UserBufferMap_Size(handle()); - } - - DlSystem::ErrorCode clear() noexcept{ - return static_cast(Snpe_UserBufferMap_Clear(handle())); - } - - IUserBuffer* getUserBuffer(const char* name) const noexcept{ - return makeReference(Snpe_UserBufferMap_GetUserBuffer_Ref(handle(), name)); - } - - StringList getUserBufferNames() const{ - return moveHandle(Snpe_UserBufferMap_GetUserBufferNames(handle())); - } - -}; - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferMap) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.h deleted file mode 100644 index c927d33e..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.h +++ /dev/null @@ -1,156 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
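A hedged sketch of the UserBufferMap wrapper deleted above. Constructing the IUserBuffer (via the SDK's user-buffer factory) is outside the headers in this diff, so the example takes an existing pointer; the buffer name is hypothetical.

```cpp
// Illustrative sketch: registering user-backed buffers by name with UserBufferMap.
#include <iostream>
#include "DlSystem/UserBufferMap.hpp"
#include "DlSystem/IUserBuffer.hpp"

void attachOutput(DlSystem::UserBufferMap& outputs, DlSystem::IUserBuffer* buffer)
{
    // add() rejects null pointers with SNPE_CAPI_BAD_ARGUMENT, as the wrapper shows
    if (outputs.add("highres_output", buffer) != DlSystem::ErrorCode::NONE) {
        std::cerr << "failed to register user buffer\n";
        return;
    }
    for (const char* name : outputs.getUserBufferNames()) {
        std::cout << "user buffer: " << name << '\n';
    }
}
```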
-// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_USER_MEMORY_MAP_H -#define DL_SYSTEM_USER_MEMORY_MAP_H - -#include "DlSystem/StringList.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE User Memory handle - */ -typedef void* Snpe_UserMemoryMap_Handle_t; - -/** - * @brief . - * - * Creates a new empty UserMemory map - */ -SNPE_API -Snpe_UserMemoryMap_Handle_t Snpe_UserMemoryMap_Create(); - -/** - * copy constructor. - * @param[in] other : Handle to the other object to copy. - */ -SNPE_API -Snpe_UserMemoryMap_Handle_t Snpe_UserMemoryMap_Copy(Snpe_UserMemoryMap_Handle_t other); - -/** - * Copy-assigns the contents of srcHandle into dstHandle - * - * @param[in] srcHandle Source UserMemoryMap handle - * - * @param[out] dstHandle Destination UserMemoryMap handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserMemoryMap_Assign(Snpe_UserMemoryMap_Handle_t srcHandle, Snpe_UserMemoryMap_Handle_t dstHandle); - -/** - * Destroys/frees UserMemory Map - * - * @param[in] handle : Handle to access UserMemory Map - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserMemoryMap_Delete(Snpe_UserMemoryMap_Handle_t handle); - -/** - * @brief Adds a name and the corresponding buffer address - * to the map - * - * @param[in] handle : Handle to access UserMemory Map - * @param[in] name : The name of the UserMemory - * @param[in] address : The pointer to the Buffer Memory - * - * @note If a UserBuffer with the same name already exists, the new - * address would be updated. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserMemoryMap_Add(Snpe_UserMemoryMap_Handle_t handle, const char *name, void *address); - -/** - * @brief Removes a mapping of one Buffer address and its name by its name - * - * @param[in] handle : Handle to access UserMemory Map - * @param[in] name : The name of Memory address to be removed - * - * @note If no UserBuffer with the specified name is found, nothing - * is done. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserMemoryMap_Remove(Snpe_UserMemoryMap_Handle_t handle, const char *name); - -/** - * @brief Returns the number of User Memory addresses in the map - * @param[in] handle : Handle to access UserMemory Map - */ -SNPE_API -size_t Snpe_UserMemoryMap_Size(Snpe_UserMemoryMap_Handle_t handle); - -/** - * @brief . - * - * Removes all User Memory from the map - * @param[in] handle : Handle to access UserMemory Map - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserMemoryMap_Clear(Snpe_UserMemoryMap_Handle_t handle); - -/** - * @brief . - * Returns the names of all User Memory - * - * @param[in] handle : Handle to access UserMemory Map - * - * @return Returns a handle to the stringList. 
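A minimal sketch of the C handle API documented above: register caller-owned memory under a name, enumerate the registered names, and release the handles. Explicitly deleting the returned StringList handle follows the ownership convention suggested by the C++ wrappers elsewhere in this diff; the buffer name and size are hypothetical.

```cpp
// Illustrative sketch of the UserMemoryMap C API: the application keeps
// ownership of the memory, SNPE only records the name -> address mapping.
#include <cstdio>
#include <vector>
#include "DlSystem/UserMemoryMap.h"
#include "DlSystem/StringList.h"

int main()
{
    std::vector<unsigned char> frame(256 * 256 * 3);   // caller-owned allocation

    Snpe_UserMemoryMap_Handle_t memMap = Snpe_UserMemoryMap_Create();
    Snpe_UserMemoryMap_Add(memMap, "lowres_input", frame.data());

    Snpe_StringList_Handle_t names = Snpe_UserMemoryMap_GetUserBufferNames(memMap);
    for (size_t i = 0; i < Snpe_StringList_Size(names); ++i) {
        std::printf("registered: %s\n", Snpe_StringList_At(names, i));
    }

    // Handles obtained from Create/Get calls are released explicitly in the C API
    Snpe_StringList_Delete(names);
    Snpe_UserMemoryMap_Delete(memMap);
    return 0;
}
```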
- */ -SNPE_API -Snpe_StringList_Handle_t Snpe_UserMemoryMap_GetUserBufferNames(Snpe_UserMemoryMap_Handle_t handle); - -/** - * @brief Returns the no of UserMemory addresses mapped to the buffer - * - * @param[in] handle : Handle to access UserMemory Map - * @param[in] name : The name of the UserMemory - * - */ -SNPE_API -size_t Snpe_UserMemoryMap_GetUserMemoryAddressCount(Snpe_UserMemoryMap_Handle_t handle, const char *name); - -/** - * @brief Returns address at a specified index corresponding to a UserMemory buffer name - * - * @param[in] handle : Handle to access UserMemory Map - * @param[in] name : The name of the buffer - * @param[in] index : The index in the list of addresses - * - */ -SNPE_API -void* Snpe_UserMemoryMap_GetUserMemoryAddressAtIndex(Snpe_UserMemoryMap_Handle_t handle, const char *name, uint32_t index); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_USER_MEMORY_MAP_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.hpp deleted file mode 100644 index 36e9cd37..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.hpp +++ /dev/null @@ -1,76 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "DlSystem/DlError.hpp" -#include "DlSystem/StringList.hpp" - -#include "DlSystem/UserMemoryMap.h" - -namespace DlSystem { - -class UserMemoryMap : public Wrapper { - friend BaseType; -// Use this to get free move Ctor and move assignment operator, provided this class does not specify -// as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserMemoryMap_Delete}; -public: - UserMemoryMap() - : BaseType(Snpe_UserMemoryMap_Create()) - { } - UserMemoryMap(const UserMemoryMap& other) - : BaseType(Snpe_UserMemoryMap_Copy(other.handle())) - { } - UserMemoryMap(UserMemoryMap&& other) noexcept - : BaseType(std::move(other)) - { } - - UserMemoryMap& operator=(const UserMemoryMap& other){ - if(this != &other){ - Snpe_UserMemoryMap_Assign(handle(), other.handle()); - } - return *this; - } - - DlSystem::ErrorCode add(const char* name, void* address) noexcept{ - return static_cast(Snpe_UserMemoryMap_Add(handle(), name, address)); - } - - DlSystem::ErrorCode remove(const char* name){ - return static_cast(Snpe_UserMemoryMap_Remove(handle(), name)); - } - - size_t size() const noexcept{ - return Snpe_UserMemoryMap_Size(handle()); - } - - DlSystem::ErrorCode clear() noexcept{ - return static_cast(Snpe_UserMemoryMap_Clear(handle())); - } - - StringList getUserBufferNames() const{ - return moveHandle(Snpe_UserMemoryMap_GetUserBufferNames(handle())); - } - - size_t getUserMemoryAddressCount(const char* name) const noexcept{ - return Snpe_UserMemoryMap_GetUserMemoryAddressCount(handle(), name); - } - - void* getUserMemoryAddressAtIndex(const char* name, uint32_t index) const noexcept{ - return Snpe_UserMemoryMap_GetUserMemoryAddressAtIndex(handle(), name, index); - } - -}; - - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserMemoryMap) diff --git 
a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.h deleted file mode 100644 index 282ee547..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.h +++ /dev/null @@ -1,107 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _PLATFORM_VALIDATOR_H_ -#define _PLATFORM_VALIDATOR_H_ - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" -#include "DlSystem/DlEnums.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE PlatformValidator handle - */ -typedef void* Snpe_PlatformValidator_Handle_t; - -/** - * @brief . - * - * Creates a new Platform Validator - * - */ -SNPE_API -Snpe_PlatformValidator_Handle_t Snpe_PlatformValidator_Create(); - - -/** - * Destroys/frees Platform Validator - * - * @param[in] handle : Handle to access Platform Validator - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PlatformValidator_Delete(Snpe_PlatformValidator_Handle_t handle); - -/** - * @brief Sets the runtime processor for compatibility check - * - * @return Void - */ -SNPE_API -void Snpe_PlatformValidator_SetRuntime(Snpe_PlatformValidator_Handle_t handle, - Snpe_Runtime_t runtime, - bool unsignedPD=true); - -/** - * @brief Checks if the Runtime prerequisites for SNPE are available. - * - * @return 1 if the Runtime prerequisites are available, else 0. - */ -SNPE_API -int Snpe_PlatformValidator_IsRuntimeAvailable(Snpe_PlatformValidator_Handle_t handle, - bool unsignedPD=true); - -/** - * @brief Returns the core version for the Runtime selected. - * - * @return char* which contains the actual core version value - */ -SNPE_API -const char* Snpe_PlatformValidator_GetCoreVersion(Snpe_PlatformValidator_Handle_t handle); - -/** - * @brief Returns the library version for the Runtime selected. - * - * @return char* which contains the actual lib version value - */ -SNPE_API -const char* Snpe_PlatformValidator_GetLibVersion(Snpe_PlatformValidator_Handle_t handle); - -/** - * @brief Runs a small program on the runtime and Checks if SNPE is supported for Runtime. - * - * @return If 1, the device is ready for SNPE execution, else return 0. 
- */ -SNPE_API -int Snpe_PlatformValidator_RuntimeCheck(Snpe_PlatformValidator_Handle_t handle, - bool unsignedPD=true); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _PLATFORM_VALIDATOR_H_ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.hpp deleted file mode 100644 index de52635c..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.hpp +++ /dev/null @@ -1,57 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include - -#include "Wrapper.hpp" - -#include "DlSystem/DlEnums.hpp" - - -#include "PlatformValidator/PlatformValidator.h" - - -namespace SNPE { - -class PlatformValidator : public Wrapper { - friend BaseType; - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_PlatformValidator_Delete}; - -public: - PlatformValidator() - : BaseType(Snpe_PlatformValidator_Create()) - { } - - void setRuntime(DlSystem::Runtime_t runtime, bool unsignedPD=true){ - Snpe_PlatformValidator_SetRuntime(handle(), static_cast(runtime), unsignedPD); - } - - bool isRuntimeAvailable(bool unsignedPD=true){ - return Snpe_PlatformValidator_IsRuntimeAvailable(handle(), unsignedPD); - } - - std::string getCoreVersion(){ - return Snpe_PlatformValidator_GetCoreVersion(handle()); - } - - std::string getLibVersion(){ - return Snpe_PlatformValidator_GetLibVersion(handle()); - } - - bool runtimeCheck(bool unsignedPD=true){ - return Snpe_PlatformValidator_RuntimeCheck(handle(), unsignedPD); - } - -}; - -} // ns SNPE - -ALIAS_IN_ZDL_NAMESPACE(SNPE, PlatformValidator) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.h deleted file mode 100644 index 8a2bb7d2..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.h +++ /dev/null @@ -1,85 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
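A minimal sketch of the PlatformValidator wrapper deleted above, probing DSP support before choosing a runtime. The Runtime_t::DSP enumerator comes from DlEnums.hpp and is assumed here, not taken from this diff.

```cpp
// Illustrative sketch: check whether the DSP runtime is usable on this device
// before committing to it, using the SNPE::PlatformValidator wrapper.
#include <iostream>
#include "PlatformValidator/PlatformValidator.hpp"
#include "DlSystem/DlEnums.hpp"   // assumed to define DlSystem::Runtime_t::DSP

int main()
{
    SNPE::PlatformValidator validator;
    validator.setRuntime(DlSystem::Runtime_t::DSP);

    // isRuntimeAvailable() checks prerequisites; runtimeCheck() runs a small test program
    if (validator.isRuntimeAvailable() && validator.runtimeCheck()) {
        std::cout << "DSP ready, core version: " << validator.getCoreVersion()
                  << ", lib version: " << validator.getLibVersion() << '\n';
    } else {
        std::cout << "DSP not available, falling back to CPU\n";
    }
    return 0;
}
```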
-// -//============================================================================== - -#ifndef _SNPE_APPLICATION_BUFFER_MAP_H_ -#define _SNPE_APPLICATION_BUFFER_MAP_H_ - - -#ifdef __cplusplus -#include -#else -#include -#endif - - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" -#include "DlSystem/StringList.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -typedef void* Snpe_ApplicationBufferMap_Handle_t; - -SNPE_API -Snpe_ApplicationBufferMap_Handle_t Snpe_ApplicationBufferMap_Create(); - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_Delete(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle); - - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_Add(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle, - const char* name, - const uint8_t* buff, - size_t size); - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_AddFloat(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle, - const char* name, - const float* buff, - size_t size); - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_Remove(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle, - const char* name); - -SNPE_API -size_t Snpe_ApplicationBufferMap_Size(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_Clear(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle); - - -SNPE_API -Snpe_StringList_Handle_t Snpe_ApplicationBufferMap_GetUserBufferNames(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle); - - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_GetUserBuffer(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle, - const char* name, - size_t* size, - const uint8_t** data); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_APPLICATION_BUFFER_MAP_H_ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.hpp deleted file mode 100644 index 6ad745bb..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.hpp +++ /dev/null @@ -1,90 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include -#include -#include -#include - -#include "Wrapper.hpp" -#include "DlSystem/StringList.hpp" - -#include "SNPE/ApplicationBufferMap.h" - -namespace PSNPE { - -class ApplicationBufferMap : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_ApplicationBufferMap_Delete}; -public: - ApplicationBufferMap() - : BaseType(Snpe_ApplicationBufferMap_Create()){} - - explicit ApplicationBufferMap(const std::unordered_map> &buffer) - : ApplicationBufferMap(){ - for(const auto &kv: buffer){ - add(kv.first.c_str(), kv.second); - } - } - - void add(const char *name, const std::vector &buff){ - Snpe_ApplicationBufferMap_Add(handle(), name, buff.data(), buff.size()); - } - - void add(const char *name, const std::vector &buff){ - Snpe_ApplicationBufferMap_Add(handle(), name, reinterpret_cast(buff.data()), buff.size()*sizeof(float)); - } - - void remove(const char *name) noexcept{ - Snpe_ApplicationBufferMap_Remove(handle(), name); - } - - size_t size() const noexcept{ - return Snpe_ApplicationBufferMap_Size(handle()); - } - - void clear() noexcept{ - Snpe_ApplicationBufferMap_Clear(handle()); - } - - std::vector getUserBuffer(const char *name) const{ - size_t size{}; - const uint8_t *data{}; - Snpe_ApplicationBufferMap_GetUserBuffer(handle(), name, &size, &data); - - return std::vector(data, data + size); - } - - std::vector operator[](const char *name) const{ - return getUserBuffer(name); - } - - DlSystem::StringList getUserBufferNames() const{ - return moveHandle(Snpe_ApplicationBufferMap_GetUserBufferNames(handle())); - } - - std::unordered_map> getUserBuffer() const{ - std::unordered_map> toret; - for(auto name: getUserBufferNames()){ - toret.emplace(name, getUserBuffer(name)); - } - - return toret; - } - -}; - -} // ns PSNPE - - -ALIAS_IN_ZDL_NAMESPACE(PSNPE, ApplicationBufferMap) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/PSNPE.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/PSNPE.h deleted file mode 100644 index 2358d535..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/PSNPE.h +++ /dev/null @@ -1,898 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022,2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
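// A minimal usage sketch for PSNPE::ApplicationBufferMap above (not from the original
// header). The template arguments dropped during extraction are assumed to be
// std::vector<uint8_t> and std::vector<float> for the byte- and float-oriented add()
// overloads; "input:0" is a placeholder tensor name.
#include <cstdint>
#include <vector>
#include "SNPE/ApplicationBufferMap.hpp"

void stageInput(const std::vector<uint8_t>& pixels)
{
    PSNPE::ApplicationBufferMap buffers;                   // wraps Snpe_ApplicationBufferMap_Create()
    buffers.add("input:0", pixels);                        // registers the raw bytes under a name

    for (auto name : buffers.getUserBufferNames()) {       // StringList is iterable, as used above
        std::vector<uint8_t> copy = buffers.getUserBuffer(name);
        (void)copy;                                        // getUserBuffer() copies the bytes back out
    }
}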
-// -//============================================================================== - -/** - * @file - */ - -#ifndef _SNPE_PSNPE_H_ -#define _SNPE_PSNPE_H_ - - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "DlContainer/DlContainer.h" -#include "SNPE/ApplicationBufferMap.h" -#include "SNPE/RuntimeConfigList.h" -#include "SNPE/UserBufferList.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/IBufferAttributes.h" - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" - -#include "DlSystem/UserMemoryMap.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * A typedef to indicate the callback PSNPE handle of Async Output mode - */ -typedef void* Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t; - -//SNPE_API -//Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t Snpe_PSNPE_OutputAsyncCallbackParam_Create(size_t index, -// int status, -// const char* errorMsg); -// -//SNPE_API -//Snpe_ErrorCode_t Snpe_PSNPE_OutputAsyncCallbackParam_Delete(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t oacpHandle); - -// NOTE: we don't need _{Create,Delete} functions because the user does not create or delete these handles -// They're passed in to the callback functions they created - -/** - * @brief Get the data index of an output async PSNPE object - * - * @param[in] oacpHandle Handle to access the PSNPE object of output async mode - * - * @return The data idx for output async mode - */ -SNPE_API -size_t Snpe_PSNPE_OutputAsyncCallbackParam_GetDataIdx(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t oacpHandle); - -/** - * @brief Execute an output async PSNPE object - * - * @param[in] oacpHandle Handle to access the PSNPE object of output async mode - * - * @return True if executed successfully with outputAsync mode - */ -SNPE_API -int Snpe_PSNPE_OutputAsyncCallbackParam_GetExecuteStatus(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t oacpHandle); - -/** - * @brief Get the error message during the execution of PSNPE output async mode - * - * @param[in] oacpHandle Handle to access the PSNPE object of output async mode - * - * @return Error message - */ -SNPE_API -const char* Snpe_PSNPE_OutputAsyncCallbackParam_GetErrorMsg(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t oacpHandle); - -/** - * @brief Get the ID of an output async PSNPE object - * - * @param[in] oacpHandle Handle to access the PSNPE object of output async mode - * - * @return The id of an PSNPE object for output async mode - */ -SNPE_API -size_t Snpe_PSNPE_OutputAsyncCallbackParam_GetID(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t oacpHandle); - - - -/** - * A typedef to indicate the output callback of PSNPE handle of input-output async mode - */ -typedef void* Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t; - -/** - * @brief Get the data index of an input-output async PSNPE object - * - * @param[in] oacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return The data index for input-output async mode - */ -SNPE_API -size_t Snpe_PSNPE_InputOutputAsyncCallbackParam_GetDataIdx(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * @brief Execute an input-output async PSNPE object - * - * @param[in] oacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return True if executed successfully with input-output async mode - */ -SNPE_API -int Snpe_PSNPE_InputOutputAsyncCallbackParam_GetExecuteStatus(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * @brief Get the error message during the execution of PSNPE input-output async 
mode - * - * @param[in] oacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return error message - */ -SNPE_API -const char* Snpe_PSNPE_InputOutputAsyncCallbackParam_GetErrorMsg(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * @brief Get the names of output buffers to the network - * - * @param[in] ioacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return Handle of output buffer name list - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_PSNPE_InputOutputAsyncCallbackParam_GetUserBufferNames(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * @brief Get the output buffer map of PSNPE object for input-output async mode - * - * @param[in] ioacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return The reference handle of output ApplicationBufferMap - */ -SNPE_API -Snpe_ApplicationBufferMap_Handle_t Snpe_PSNPE_InputOutputAsyncCallbackParam_GetOutputMap_Ref(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * @brief Get the id of the output callback for input-output async mode - * - * @param[in] oacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return The id for output callback for input-output async mode - */ -SNPE_API -size_t Snpe_PSNPE_InputOutputAsyncCallbackParam_GetID(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * A typedef to indicate the input callback of PSNPE handle of input-output async mode - */ -typedef void* Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t; - -/** - * @brief Get the input list for input callback of input-output async mode - * - * @param[in] ioacpHandle Handle to access the object of input callback of input-output async mode - * - * @return List the inputs - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_PSNPE_InputOutputInputAsyncCallbackParam_GetInputs(Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t ioiacpHandle); - -/** - * @brief Get the input names for input callback of input-output async mode - * - * @param[in] ioacpHandle Handle to access the object of input callback of input-output async mode - * - * @return List the names of input - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_PSNPE_InputOutputInputAsyncCallbackParam_GetInputNames(Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t ioiacpHandle); - -/** - * @brief Get the id of the input callback for input-output async mode - * - * @param[in] oacpHandle Handle to access the object of input-output async mode - * - * @return The id of input callback for input-output async mode - */ -SNPE_API -size_t Snpe_PSNPE_InputOutputInputAsyncCallbackParam_GetID(Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t ioiacpHandle); - -/** - * @brief A struct to indicate userbuffer data type in output callback of input-output async mode - */ -typedef struct{ - /// data for the one output - const uint8_t* data; - /// the data size of this output - size_t size; -} Snpe_UserBufferData_t; - -/** - * @brief Get the output data of the output callback for input-output async mode - * - * @param[in] oacpHandle Handle to access the object of output callback of input-output async mode - * - * @param[in] name The output name of output callback of input-output async mode - * - * @return The output data of output callback for input-output async mode - */ -SNPE_API -Snpe_UserBufferData_t Snpe_PSNPE_InputOutputAsyncCallbackParam_GetUserBuffer(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle, - const 
char* name); -/** - * A typedef to indicate build configuration - */ -typedef void* Snpe_BuildConfig_Handle_t; - -/** - * A typedef to indicate a PSNPE object - */ -typedef void* Snpe_PSNPE_Handle_t; - -/** - * A typedef to indicate if PSNPE object is built in serial or parallel, default = 0 - */ -typedef enum SNPE_API { - SNPE_PSNPE_BUILDMODE_SERIAL = 0, - SNPE_PSNPE_BUILDMODE_PARALLEL = 1 -} Snpe_PSNPE_BuildMode_t; - -/** - * A typedef to indicate if PSNPE objects are executed in sync mode or output async mode or input-output async mode, default = 0 - */ -typedef enum SNPE_API { - SNPE_PSNPE_INPUTOUTPUTTRANSMISSIONMODE_SYNC = 0, - SNPE_PSNPE_INPUTOUTPUTTRANSMISSIONMODE_OUTPUTASYNC = 1, - SNPE_PSNPE_INPUTOUTPUTTRANSMISSIONMODE_INPUTOUTPUTASYNC = 2 -} Snpe_PSNPE_InputOutputTransmissionMode_t; - -// BuildConfig -/** - * @brief Create the object of snpe build config - * - * @return the SNPE build handle - */ -SNPE_API -Snpe_BuildConfig_Handle_t Snpe_BuildConfig_Create(); - -/** - * @brief Release the object of snpe build config - * - * @param[in] buildConfigHandle Handle to access the object of snpe buid config - * - * @return The error of build config result - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_Delete(Snpe_BuildConfig_Handle_t buildConfigHandle); - -/** - * @brief Get the mode of build snpe object, serial or parallel - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The value of Snpe_PSNPE_BuildMode_t - */ -SNPE_API -Snpe_PSNPE_BuildMode_t Snpe_BuildConfig_GetBuildMode(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set the mode of build snpe object, serial or parallel - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] buildMode A typedef of Snpe_PSNPE_BuildMode_t - * - * @return The result of setting mode - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetBuildMode(Snpe_BuildConfig_Handle_t bcHandle, Snpe_PSNPE_BuildMode_t buildMode); - -/** - * @brief Set the dlc model - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] dlcHandle A handle of snpe DLC container - * - * @return The result of setting dlc model - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetContainer(Snpe_BuildConfig_Handle_t bcHandle, Snpe_DlContainer_Handle_t dlcHandle); - -/** - * @brief Get dlc container in snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The reference handle of DLC container - */ -SNPE_API -Snpe_DlContainer_Handle_t Snpe_BuildConfig_GetContainer_Ref(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set output buffer names in snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] slHandle A handle of the output layer name list - * - * @return The result of setting output names - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetOutputBufferNames(Snpe_BuildConfig_Handle_t bcHandle, Snpe_StringList_Handle_t slHandle); - -/** - * @brief Get output buffer names in snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The reference handle of output buffer name list. - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_BuildConfig_GetOutputBufferNames_Ref(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set output buffer names in snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] slHandle List of tensor names to output. 
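// A minimal sketch (not from the original header) of an output-async callback built on the
// getters above. The parameter handle is owned and passed in by PSNPE for the duration of
// the call, so the callback only reads from it and never deletes it.
#include <stdio.h>
#include "SNPE/PSNPE.h"

void onOutputReady(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t param)
{
    size_t dataIdx = Snpe_PSNPE_OutputAsyncCallbackParam_GetDataIdx(param);

    if (!Snpe_PSNPE_OutputAsyncCallbackParam_GetExecuteStatus(param)) {
        fprintf(stderr, "inference %zu failed: %s\n", dataIdx,
                Snpe_PSNPE_OutputAsyncCallbackParam_GetErrorMsg(param));
        return;
    }
    printf("inference %zu done (callback id %zu)\n", dataIdx,
           Snpe_PSNPE_OutputAsyncCallbackParam_GetID(param));
}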
An empty list will result in producing output for the final output tensor of the model. The list will be copied - * - * @return The result of setting output tensors - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetOutputTensors(Snpe_BuildConfig_Handle_t bcHandle, Snpe_StringList_Handle_t slHandle); - -/** - * @brief Get output tensors in snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The reference handle of output tensor list - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_BuildConfig_GetOutputTensors_Ref(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set runtime config list for snpe buildConfig - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] rclHandle Handle to access the object of runtime config list - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetRuntimeConfigList(Snpe_BuildConfig_Handle_t bcHandle, Snpe_RuntimeConfigList_Handle_t rclHandle); - -/** - * @brief Get runtime config list for snpe buildConfig - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The reference handle of runtime config list - */ -SNPE_API -Snpe_RuntimeConfigList_Handle_t Snpe_BuildConfig_GetRuntimeConfigList_Ref(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Get input thread number of input data for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The number of input thread - */ -SNPE_API -size_t Snpe_BuildConfig_GetInputThreadNumbers(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set input thread number of input data for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] threadNumbers The number of input thread for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputThreadNumbers(Snpe_BuildConfig_Handle_t bcHandle, size_t threadNumbers); - -/** - * @brief Get output thread number of output data for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The number of output thread - */ -SNPE_API -size_t Snpe_BuildConfig_GetOutputThreadNumbers(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set output thread number of output data for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] threadNumbers The number of output thread for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetOutputThreadNumbers(Snpe_BuildConfig_Handle_t bcHandle, size_t threadNumbers); - -/** - * @brief Set output callback for output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] callbackFunc The ouutput callback function for output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetOutputCallback(Snpe_BuildConfig_Handle_t bcHandle, - void (*callbackFunc)(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t)); -/** - * @brief Set the id of output callback function for output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] id The id of output callback function - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t 
Snpe_BuildConfig_SetOutputCallbackID(Snpe_BuildConfig_Handle_t bcHandle, size_t id); - -/** - * @brief Set the inside output callback handle to NULL for output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_ClearOutputCallback(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set output callback for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] callbackFunc The output callback function for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputOutputCallback(Snpe_BuildConfig_Handle_t bcHandle, - void (*callbackFunc)(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t)); - -/** - * @brief Set the id of output callback function for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] id The id of output callback function for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputOutputCallbackID(Snpe_BuildConfig_Handle_t bcHandle, size_t id); - -/** - * @brief Set the inside output callback handle to NULL for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_ClearInputOutputCallback(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set input callback for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] callbackFunc The input callback function for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputOutputInputCallback(Snpe_BuildConfig_Handle_t bcHandle, - Snpe_ApplicationBufferMap_Handle_t (*callbackFunc)( - Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t - ) - ); - -/** - * @brief Set the id of input callback function for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] id The id of input callback function for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputOutputInputCallbackID(Snpe_BuildConfig_Handle_t bcHandle, size_t id); - -/** - * @brief Set the inside input callback handle to NULL for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_ClearInputOutputInputCallback(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set the input and output transmission mode including sync mode, output async mode and input-output async mode, defult is sync mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] iotMode The typedef of Snpe_PSNPE_InputOutputTransmissionMode_t - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputOutputTransmissionMode(Snpe_BuildConfig_Handle_t bcHandle, - Snpe_PSNPE_InputOutputTransmissionMode_t iotMode); - -/** - * @brief Get the input and output transmission mode including sync mode, output async mode and input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - 
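// A minimal sketch (not from the original header) that wires the build-config setters above
// together for output-async execution. onOutputReady is a user-defined handler matching the
// callback signature expected by Snpe_BuildConfig_SetOutputCallback; the container and
// runtime-config-list handles are assumed to have been created already.
#include "SNPE/PSNPE.h"

void onOutputReady(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t param);   // user-defined handler

Snpe_BuildConfig_Handle_t makeOutputAsyncConfig(Snpe_DlContainer_Handle_t container,
                                                Snpe_RuntimeConfigList_Handle_t runtimeConfigs)
{
    Snpe_BuildConfig_Handle_t config = Snpe_BuildConfig_Create();

    Snpe_BuildConfig_SetBuildMode(config, SNPE_PSNPE_BUILDMODE_PARALLEL);
    Snpe_BuildConfig_SetContainer(config, container);
    Snpe_BuildConfig_SetRuntimeConfigList(config, runtimeConfigs);
    Snpe_BuildConfig_SetOutputThreadNumbers(config, 2);        // output threads for async delivery

    Snpe_BuildConfig_SetOutputCallback(config, onOutputReady);
    Snpe_BuildConfig_SetOutputCallbackID(config, 1);
    Snpe_BuildConfig_SetInputOutputTransmissionMode(
        config, SNPE_PSNPE_INPUTOUTPUTTRANSMISSIONMODE_OUTPUTASYNC);

    return config;    // released later with Snpe_BuildConfig_Delete()
}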
* @return The typedef of Snpe_PSNPE_InputOutputTransmissionMode_t - */ -SNPE_API -Snpe_PSNPE_InputOutputTransmissionMode_t Snpe_BuildConfig_GetInputOutputTransmissionMode(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set the profiling level for PSNPE build config, default is SNPE_PROFILING_LEVEL_OFF - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] profilingLevel The typedef of Snpe_ProfilingLevel_t - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetProfilingLevel(Snpe_BuildConfig_Handle_t bcHandle, Snpe_ProfilingLevel_t profilingLevel); - -/** - * @brief Get the profiling level for PSNPE build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The typedef of Snpe_ProfilingLevel_t - */ -SNPE_API -Snpe_ProfilingLevel_t Snpe_BuildConfig_GetProfilingLevel(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief To be deprecated, set the encode value when you want to divide one image into 2 or 4 parts to run, default is 0 which means the input don't need dividing. - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] encode0 The uint64 value of encode0 - * - * @param[in] encode1 The uint64 value of encode1 - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetEncode(Snpe_BuildConfig_Handle_t bcHandle, uint64_t encode0, uint64_t encode1); - -/** - * @brief To be deprecated, set the encode0 value for snpe build config which is a special feature used in SM8250 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] encode0 The uint64 value of encode0 - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetEncode0(Snpe_BuildConfig_Handle_t bcHandle, uint64_t encode0); - -/** - * @brief To be deprecated, set the encode1 value for snpe build config which is a special feature used in SM8250 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] encode1 The uint64 value of encode1 - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetEncode1(Snpe_BuildConfig_Handle_t bcHandle, uint64_t encode1); - -/** - * @brief To be deprecated, get the encode0 and encode1 value for snpe build config which is a special feature used in SM8250 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The uint64 value of encode - */ -SNPE_API -uint64_t* Snpe_BuildConfig_GetEncode(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief To be deprecated, get the encode0 value for snpe build config which is a special feature used in SM8250 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The uint64 value of encode0 - */ -SNPE_API -uint64_t Snpe_BuildConfig_GetEncode0(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief To be deprecated, get the encode1 value for snpe build config which is a special feature used in SM8250 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The uint64 value of encode1 - */ -SNPE_API -uint64_t Snpe_BuildConfig_GetEncode1(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set true or false for enabling init cache for snpe build config, enabling init cache = 1 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] enableInitCache True for enabing init cache - * 
- * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetEnableInitCache(Snpe_BuildConfig_Handle_t bcHandle, int enableInitCache); - -/** - * @brief Get the satus of enabling init cache for snpe build config, enabling init cache = 1. - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] enableInitCache True for enabing init cache - * - * @return 1 or 0 for enabling init cache - */ -SNPE_API -int Snpe_BuildConfig_GetEnableInitCache(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Handle needed to access the platformConfig. - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] platformOptions Options as a const char* - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetPlatformOptions(Snpe_BuildConfig_Handle_t bcHandle, const char* platformOptions); - -/** - * @brief Get the optional platform features for snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return Options as a const char* - */ -SNPE_API -const char* Snpe_BuildConfig_GetPlatformOptions(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set the path directory of output diag log you want to save - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] diaglogOutputDir The string directory - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetDiaglogOutputDir(Snpe_BuildConfig_Handle_t bcHandle, const char* diaglogOutputDir); - -/** - * @brief Get the path of output diag log - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The string directory - */ -SNPE_API -const char* Snpe_BuildConfig_GetDiaglogOutputDir(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Create the handle of PSNPE object - * - * @return The handle of PSNPE object - */ -SNPE_API -Snpe_PSNPE_Handle_t Snpe_PSNPE_Create(); - -/** - * @brief Release the handle of PSNPE object - * - * @param[in] psnpeHandle Handle to access the PSNPE object - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_Delete(Snpe_PSNPE_Handle_t psnpeHandle); - -/** - * @brief Build the instance of PSNPE object accorading of snpe build config - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_Build(Snpe_PSNPE_Handle_t psnpeHandle, Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Execute PSNPE object for sync mode. 
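// A minimal sketch (not from the original header) of the build step using the functions above:
// finish the config, build the PSNPE instance, and release the config. Deleting the config
// right after Snpe_PSNPE_Build mirrors what the C++ wrapper's build() does.
#include <stddef.h>
#include "SNPE/PSNPE.h"

Snpe_PSNPE_Handle_t buildNetwork(Snpe_BuildConfig_Handle_t config)
{
    Snpe_BuildConfig_SetEnableInitCache(config, 1);               // 1 enables the init cache
    Snpe_BuildConfig_SetDiaglogOutputDir(config, "./diaglogs/");  // where DiagLog files are written

    Snpe_PSNPE_Handle_t psnpe = Snpe_PSNPE_Create();
    Snpe_ErrorCode_t status = Snpe_PSNPE_Build(psnpe, config);
    Snpe_BuildConfig_Delete(config);

    if (status != SNPE_SUCCESS) {
        Snpe_PSNPE_Delete(psnpe);
        return NULL;
    }
    return psnpe;
}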
- * - * @param[in] psnpeHandle Handle to access the PSNPE object - * - * @param[in] inputBufferListHandle Handle to access the input user buffer list - * - * @param[in] outputBufferListHandle Handle to access the output user buffer list - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_Execute(Snpe_PSNPE_Handle_t psnpeHandle, - Snpe_UserBufferList_Handle_t inputBufferListHandle, - Snpe_UserBufferList_Handle_t outputBufferListHandle); - -/** - * @brief Execute PSNPE object for input-output async mode - * - * @param[in] psnpeHandle Handle to access the PSNPE object - * - * @param[in] inputMapHandle Handle to access the input buffer map - * - * @param[in] dataIndex The index of input data - * - * @param[in] isTF8buff If the input buffer is TF8 - * - * @param[in] isTF8Outputbuff If the output buffer is TF8 - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_ExecuteInputOutputAsync(Snpe_PSNPE_Handle_t psnpeHandle, - Snpe_StringList_Handle_t inputMapHandle, - size_t dataIndex, - int isTF8buff, - int isTF8Outputbuff); - -/** - * @brief Get the input tensor names for PSNPE object. - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @return The string list of input tensor names - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_PSNPE_GetInputTensorNames(Snpe_PSNPE_Handle_t psnpeHandle); - -/** - * @brief Get the output tensor names for PSNPE object - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @return The string list of output tensor names - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_PSNPE_GetOutputTensorNames(Snpe_PSNPE_Handle_t psnpeHandle); - -/** - * @brief Get the input dimension shape for PSNPE object - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @return The tensor shape of input dimension - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_PSNPE_GetInputDimensions(Snpe_PSNPE_Handle_t psnpeHandle); - -/** - * @brief Get the input dimension shape for the specific input name for PSNPE object - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @param[in] name The name of input data - * - * @return The tensor shape of a specific input name - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_PSNPE_GetInputDimensions_Name(Snpe_PSNPE_Handle_t psnpeHandle, const char* name); - -/** - * @brief Get the number of elements in each dimension for input and output buffer - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @param[in] name The name of input and output buffer - * - * @return Dimension size - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_PSNPE_GetBufferAttributesDims(Snpe_PSNPE_Handle_t psnpeHandle, const char* name); - -/* To be deprecated, please use new api Snpe_PSNPE_RegisterUserMemoryMappedBuffers */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_RegisterIonBuffers(Snpe_PSNPE_Handle_t psnpeHandle, Snpe_UserMemoryMap_Handle_t ionBufferMapHandle); - -/* To be deprecated, please use new api Snpe_PSNPE_DeregisterUserMemoryMappedBuffers */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_DeregisterIonBuffers(Snpe_PSNPE_Handle_t psnpeHandle, Snpe_StringList_Handle_t ionBufferNames); - -/** - * @brief Register Client Memory-Mapped Buffers (Example ION buffers in Android) - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] bufferMapHandle A UserMemoryMap of virtual addresses - * - * @note UserBuffer type passed for registration must match the data type of the tensor in the dlc - * For regular UserBuffers SNPE performs an online data 
conversion (quantization or - * dequantization etc). This is not possible for memory mapped buffers hence can lead to - * issues during execution or accuracy degradation - * - * @return SNPE_SUCCESS upon successful memory mapped buffer registration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_RegisterUserMemoryMappedBuffers(Snpe_PSNPE_Handle_t psnpeHandle, Snpe_UserMemoryMap_Handle_t bufferMapHandle); - -/** - * @brief Deregister Client Memory-Mapped Buffers (Example ION buffers in Android) - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] bufferNamesHandle A StringList of memory mapped buffer names - * - * @return SNPE_SUCCESS upon successful memory mapped buffer deregistration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_DeregisterUserMemoryMappedBuffers(Snpe_PSNPE_Handle_t psnpeHandle, Snpe_StringList_Handle_t bufferNamesHandle); - -/** - * @brief Get the error message during the failed execution - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @return The error message - */ -SNPE_API -const char* Snpe_PSNPE_GetLastErrorString(Snpe_PSNPE_Handle_t psnpeHandle); - -/** - * @brief Get the handle of IBufferAttributes - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @param[in] name The name of attribute buffer - * - * @return Handle to access IBufferAttributes - */ -SNPE_API -Snpe_IBufferAttributes_Handle_t Snpe_PSNPE_GetInputOutputBufferAttributes(Snpe_PSNPE_Handle_t psnpeHandle, const char *name); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_PSNPE_H_ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/PSNPE.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/PSNPE.hpp deleted file mode 100644 index bd3af1ac..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/PSNPE.hpp +++ /dev/null @@ -1,537 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
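// A minimal sketch (not from the original header) of a synchronous run using Snpe_PSNPE_Execute
// and Snpe_PSNPE_GetLastErrorString above. The two UserBufferList handles are assumed to have
// been populated beforehand through the UserBufferList APIs included by this header.
#include <stdio.h>
#include "SNPE/PSNPE.h"

int runBatch(Snpe_PSNPE_Handle_t psnpe,
             Snpe_UserBufferList_Handle_t inputs,
             Snpe_UserBufferList_Handle_t outputs)
{
    if (Snpe_PSNPE_Execute(psnpe, inputs, outputs) != SNPE_SUCCESS) {
        fprintf(stderr, "PSNPE execute failed: %s\n", Snpe_PSNPE_GetLastErrorString(psnpe));
        return -1;
    }
    return 0;
}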
-// -//============================================================================= -#pragma once - -#include -#include -#include -#include -#include - - -#include "Wrapper.hpp" - - -#include "DlSystem/DlEnums.hpp" -#include "DlSystem/DlVersion.hpp" -#include "DlSystem/StringList.hpp" -#include "DlSystem/DlOptional.hpp" -#include "DlSystem/IBufferAttributes.hpp" -#include "DlSystem/UserMemoryMap.hpp" - -#include "SNPE/UserBufferList.hpp" -#include "SNPE/ApplicationBufferMap.hpp" -#include "SNPE/RuntimeConfigList.hpp" -#include "DlContainer/IDlContainer.hpp" - -#include "SNPE/RuntimeConfigList.hpp" - - -#include "SNPE/PSNPE.h" - -namespace PSNPE{ - -enum BuildMode { - SERIAL = 0, - PARALLEL = 1 -}; -/** - * @brief Input and output transmission mode - */ -enum InputOutputTransmissionMode { - sync = 0, - outputAsync = 1, - inputOutputAsync = 2 -}; - - -struct OutputAsyncCallbackParam : public Wrapper { -private: - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{NoOpDeleter}; - - - template - using DataIndexReference = WrapperDetail::GenericConstMemberReference - ; - - - template - using ExecuteStatusReference = WrapperDetail::GenericConstMemberReference - >; - - - static std::string ErrMsgGetter(Snpe_DlVersion_Handle_t handle){ - return Snpe_PSNPE_OutputAsyncCallbackParam_GetErrorMsg(handle); - } - template - using ErrorMsgReference = WrapperDetail::GenericConstMemberReference - ; - - template - using CallbackIDReference = WrapperDetail::GenericConstMemberReference - ; - - - - -public: - OutputAsyncCallbackParam() = delete; - OutputAsyncCallbackParam(OutputAsyncCallbackParam&& other) noexcept - : BaseType(std::move(other)) - { } - - DataIndexReference dataIndex{*this}; - ExecuteStatusReference executeStatus{*this}; - ErrorMsgReference errorMsg{*this}; - - CallbackIDReference callbackID{*this}; -}; - - - -struct InputOutputInputAsyncCallbackParam : public Wrapper { -private: - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{NoOpDeleter}; - - - static std::vector GetInputs(HandleType handle){ - DlSystem::StringList inputs(moveHandle(Snpe_PSNPE_InputOutputInputAsyncCallbackParam_GetInputs(handle))); - - return std::vector(inputs.begin(), inputs.end()); - } - - template - using InputsReference = WrapperDetail::GenericConstMemberReference - ; - - - static DlSystem::StringList GetInputNames(HandleType handle){ - return moveHandle(Snpe_PSNPE_InputOutputInputAsyncCallbackParam_GetInputNames(handle)); - } - template - using InputNamesReference = WrapperDetail::GenericConstMemberReference - ; - - template - using CallbackIDReference = WrapperDetail::GenericConstMemberReference - ; - - -public: - InputOutputInputAsyncCallbackParam() = delete; - InputOutputInputAsyncCallbackParam(InputOutputInputAsyncCallbackParam&& other) noexcept - : BaseType(std::move(other)) - { } - - InputsReference> inputs{*this}; - InputNamesReference inputNames{*this}; - CallbackIDReference callbackID{*this}; - -}; - - - - - -struct InputOutputAsyncCallbackParam : public Wrapper { -private: - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment 
operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{NoOpDeleter}; - - template - using DataIndexReference = WrapperDetail::GenericConstMemberReference - ; - - static bool GetExecuteStatus(HandleType handle){ - return Snpe_PSNPE_InputOutputAsyncCallbackParam_GetExecuteStatus(handle); - } - template - using ExecuteStatusReference = WrapperDetail::GenericConstMemberReference - ; - - static std::string ErrMsgGetter(Snpe_DlVersion_Handle_t handle){ - return Snpe_PSNPE_OutputAsyncCallbackParam_GetErrorMsg(handle); - } - template - using ErrorMsgReference = WrapperDetail::GenericConstMemberReference - ; - - - - // This should work - static ApplicationBufferMap GetOutputMap(HandleType handle){ - return moveHandle(Snpe_PSNPE_InputOutputAsyncCallbackParam_GetOutputMap_Ref(handle), true); - } - - template - using OutputMapReference = WrapperDetail::GenericConstMemberReference - ; - - template - using CallbackIDReference = WrapperDetail::GenericConstMemberReference - ; - -public: - - InputOutputAsyncCallbackParam(InputOutputAsyncCallbackParam&& other) noexcept - : BaseType(std::move(other)) - { } - - DataIndexReference dataIndex{*this}; - OutputMapReference outputMap{*this}; /// OOOH, this will be super tricky to not have a copy every time - ExecuteStatusReference executeStatus{*this}; - ErrorMsgReference errorMsg{*this}; - CallbackIDReference callbackID{*this}; -}; - -/** - * @brief This callback is called when the output data is ready, only use for Output Async mode - */ -using OutputAsyncCallbackFunc = std::function; -/** - * @brief This callback is called when the output data is ready, only use for Output-Input Async mode - */ -using InputOutputAsyncCallbackFunc = std::function; -/** - * @brief This callback is called when the input data is ready,only use for Output-Input Async mode - */ -using InputOutputAsyncInputCallback = std::function(InputOutputInputAsyncCallbackParam)>; - - -struct BuildConfig final { - BuildMode buildMode = BuildMode::SERIAL; ///< Specify build in serial mode or parallel mode - zdl::DlContainer::IDlContainer* container;///< The opened container ptr - zdl::DlSystem::StringList outputBufferNames;///< Specify the output layer name - zdl::DlSystem::StringList outputTensors;///< Specify the output layer name - RuntimeConfigList runtimeConfigList;///< The runtime config list for PSNPE, @see RuntimeConfig - size_t inputThreadNumbers = 1;///< Specify the number of threads used in the execution phase to process input data, only used in inputOutputAsync mode - size_t outputThreadNumbers = 1;///< Specify the number of threads used in the execution phase to process output data, only used in inputOutputAsync and outputAsync mode - OutputAsyncCallbackFunc outputCallback;///< The callback to deal with output data ,only used in outputAsync mode - InputOutputAsyncCallbackFunc inputOutputCallback;///< The callback to deal with output data ,only used in inputOutputAsync mode - InputOutputAsyncInputCallback inputOutputInputCallback;///< The callback to deal with input data ,only used in inputOutputAsync mode - InputOutputTransmissionMode inputOutputTransmissionMode = InputOutputTransmissionMode::sync;///< Specify execution mode - zdl::DlSystem::ProfilingLevel_t profilingLevel = zdl::DlSystem::ProfilingLevel_t::OFF;///< Specify profiling level for Diaglog - uint64_t encode[2] = {0, 0}; - bool enableInitCache = false; - std::string platformOptions; - std::string diaglogOutputDir = "./diaglogs/"; ///< Specify a diaglog output directory to save 
the generated Diaglog files. - - size_t callbackID{}; -}; - - - - - -class PSNPE : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_PSNPE_Delete}; -// struct BuildConfigInternal : public Wrapper{ -// -// }; -public: - PSNPE() - : BaseType(Snpe_PSNPE_Create()) - { } - -private: - - template - static std::unordered_map& getCallbackMap(){ - static std::unordered_map toret; - return toret; - } - template - static std::mutex& getCallbackMapMutex(){ - static std::mutex mtx; - return mtx; - } - - static void outputCallbackTrampoline(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t paramHandle){ - OutputAsyncCallbackParam param(moveHandle(paramHandle)); - std::function callback; - { - std::lock_guard lk(getCallbackMapMutex()); - callback = getCallbackMap()[param.callbackID]; - } - callback(std::move(param)); - } - static void inputOutputCallbackTrampoline(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t paramHandle){ - InputOutputAsyncCallbackParam param(moveHandle(paramHandle)); - std::function callback; - { - std::lock_guard lk(getCallbackMapMutex()); - callback = getCallbackMap()[param.callbackID]; - } - callback(std::move(param)); - } - - static Snpe_ApplicationBufferMap_Handle_t inputOutputInputCallbackTrampoline( - Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t paramHandle - ){ - InputOutputInputAsyncCallbackParam param(moveHandle(paramHandle)); - - std::function(InputOutputInputAsyncCallbackParam)> callback; - { - std::lock_guard lk(getCallbackMapMutex()); - callback = getCallbackMap()[param.callbackID]; - } - auto abm = callback(std::move(param)); - return WrapperDetail::HandleReleaser::release(*abm); - } - - template - class CallbackIdManager{ - public: - ~CallbackIdManager(){ - clear(); - } - std::pair registerCallback(WrapperCallbackType func){ - size_t id = get(); - - std::lock_guard lk(getCallbackMapMutex()); - getCallbackMap()[id] = std::move(func); - return {id, CapiCallback}; - } - private: - size_t m_CallbackId{}; - - void clear(){ - if(m_CallbackId){ - std::lock_guard lk(getCallbackMapMutex()); - getCallbackMap().erase(m_CallbackId); - } - } - - size_t get(){ - static std::atomic id{0}; - clear(); - m_CallbackId = ++id; - return m_CallbackId; - } - - }; - CallbackIdManager outputCallbackIdManager; - - CallbackIdManager inputOutputCallbackIdManager; - - CallbackIdManager inputOutputInputCallbackIdManager; - - -public: - - - - bool build(BuildConfig& buildConfig) noexcept{ - // Copy the BuildConfig across the CAPI boundary - - Snpe_BuildConfig_Handle_t bcHandle = Snpe_BuildConfig_Create(); - - Snpe_BuildConfig_SetBuildMode(bcHandle, static_cast(buildConfig.buildMode)); - Snpe_BuildConfig_SetContainer(bcHandle, getHandle(buildConfig.container)); - Snpe_BuildConfig_SetOutputBufferNames(bcHandle, getHandle(buildConfig.outputBufferNames)); - Snpe_BuildConfig_SetOutputTensors(bcHandle, getHandle(buildConfig.outputTensors)); - Snpe_BuildConfig_SetRuntimeConfigList(bcHandle, getHandle(buildConfig.runtimeConfigList)); - - Snpe_BuildConfig_SetInputThreadNumbers(bcHandle, buildConfig.inputThreadNumbers); - Snpe_BuildConfig_SetOutputThreadNumbers(bcHandle, buildConfig.outputThreadNumbers); - - - if(buildConfig.outputCallback){ - auto id_callback = outputCallbackIdManager.registerCallback(buildConfig.outputCallback); - Snpe_BuildConfig_SetOutputCallbackID(bcHandle, 
id_callback.first); - Snpe_BuildConfig_SetOutputCallback(bcHandle, id_callback.second); - } - - if(buildConfig.inputOutputCallback){ - auto id_callback = inputOutputCallbackIdManager.registerCallback(buildConfig.inputOutputCallback); - Snpe_BuildConfig_SetInputOutputCallbackID(bcHandle, id_callback.first); - Snpe_BuildConfig_SetInputOutputCallback(bcHandle, id_callback.second); - } - - if(buildConfig.inputOutputInputCallback){ - auto id_callback = inputOutputInputCallbackIdManager.registerCallback(buildConfig.inputOutputInputCallback); - Snpe_BuildConfig_SetInputOutputInputCallbackID(bcHandle, id_callback.first); - Snpe_BuildConfig_SetInputOutputInputCallback(bcHandle, id_callback.second); - } - - - Snpe_BuildConfig_SetInputOutputTransmissionMode(bcHandle, - static_cast(buildConfig.inputOutputTransmissionMode)); - - Snpe_BuildConfig_SetProfilingLevel(bcHandle, static_cast(buildConfig.profilingLevel)); - Snpe_BuildConfig_SetEncode(bcHandle, buildConfig.encode[0], buildConfig.encode[1]); - Snpe_BuildConfig_SetEnableInitCache(bcHandle, buildConfig.enableInitCache); - Snpe_BuildConfig_SetPlatformOptions(bcHandle, buildConfig.platformOptions.c_str()); - Snpe_BuildConfig_SetDiaglogOutputDir(bcHandle, buildConfig.diaglogOutputDir.c_str()); - - - auto status = Snpe_PSNPE_Build(handle(), bcHandle); - Snpe_BuildConfig_Delete(bcHandle); - - - return status == SNPE_SUCCESS; - } - - /** - * @brief Execute snpe instances in Async Output mode and Sync mode - * - * @param[in] inputBufferList A list of user buffers that contains the input data - * - * @param[in,out] outputBufferList A list of user buffers that will hold the output data - * - */ - bool execute(UserBufferList& inputBufferList, UserBufferList& outputBufferList) noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_Execute(handle(), getHandle(inputBufferList), getHandle(outputBufferList)); - } - - /** - * @brief Execute snpe instances in Async Input/Output mode - * - * @param[in]inputMap A map of input buffers that contains input data. The names of buffers - * need to be matched with names retrived through getInputTensorNames() - * - * @param dataIndex Index of the input data - * - * @param isTF8buff Whether prefer to using 8 bit quantized element for inference - * - * @return True if executed successfully; flase, otherwise. - */ - bool executeInputOutputAsync(const DlSystem::StringList& inputMap, size_t dataIndex, bool isTF8buff, bool isTF8Outputbuff) noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_ExecuteInputOutputAsync(handle(), getHandle(inputMap), dataIndex, isTF8buff, isTF8Outputbuff); - } - bool executeInputOutputAsync(const std::vector& inputMap, size_t dataIndex, bool isTF8buff, bool isTF8Outputbuff) noexcept{ - DlSystem::StringList sl(inputMap.size()); - for(auto&& e : inputMap) sl.append(e.c_str()); - return executeInputOutputAsync(sl, dataIndex, isTF8buff, isTF8Outputbuff); - } - - bool executeInputOutputAsync(const DlSystem::StringList& inputMap, size_t dataIndex, bool isTF8buff) noexcept{ - return executeInputOutputAsync(inputMap, dataIndex, isTF8buff, isTF8buff); - } - bool executeInputOutputAsync(const std::vector& inputMap, size_t dataIndex, bool isTF8buff) noexcept{ - return executeInputOutputAsync(inputMap, dataIndex, isTF8buff, isTF8buff); - } - - - - /** - * @brief Returns the input layer names of the network. 
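// A minimal sketch (not from the original header) of the C++ wrapper path above: fill in a
// BuildConfig, build, and execute synchronously. The container is assumed to be an
// already-opened DLC, and UserBufferList is assumed to be the PSNPE wrapper type pulled in
// via SNPE/UserBufferList.hpp.
#include <iostream>
#include "SNPE/PSNPE.hpp"

bool runSync(zdl::DlContainer::IDlContainer* container,
             const PSNPE::RuntimeConfigList& runtimeConfigs,
             PSNPE::UserBufferList& inputs,
             PSNPE::UserBufferList& outputs)
{
    PSNPE::BuildConfig config;                         // defaults: SERIAL build, sync transmission
    config.container = container;                      // the opened container pointer
    config.runtimeConfigList = runtimeConfigs;         // one RuntimeConfig per snpe instance
    config.enableInitCache = true;

    PSNPE::PSNPE psnpe;
    if (!psnpe.build(config)) {
        std::cerr << psnpe.getLastErrorString() << '\n';
        return false;
    }
    return psnpe.execute(inputs, outputs);
}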
- * - * @return StringList which contains the input layer names - */ - const DlSystem::StringList getInputTensorNames() const noexcept{ - return moveHandle(Snpe_PSNPE_GetInputTensorNames(handle())); - } - - /** - * @brief Returns the output layer names of the network. - * - * @return StringList which contains the output layer names - */ - const DlSystem::StringList getOutputTensorNames() const noexcept{ - return moveHandle(Snpe_PSNPE_GetOutputTensorNames(handle())); - } - - /** - * @brief Returns the input tensor dimensions of the network. - * - * @return TensorShape which contains the dimensions. - */ - const DlSystem::TensorShape getInputDimensions() const noexcept{ - return moveHandle(Snpe_PSNPE_GetInputDimensions(handle())); - } - - const zdl::DlSystem::TensorShape getInputDimensions(const char *name) const noexcept{ - return moveHandle(Snpe_PSNPE_GetInputDimensions_Name(handle(), name)); - } - - /** - * @brief Returns attributes of buffers. - * - * @see zdl::SNPE - * - * @return BufferAttributes of input/output tensor named. - */ - zdl::DlSystem::TensorShape getBufferAttributesDims(const char *name) const noexcept{ - return moveHandle(Snpe_PSNPE_GetBufferAttributesDims(handle(), name)); - } - - DlSystem::Optional getInputOutputBufferAttributes(const char *name) const noexcept{ - return { - new DlSystem::IBufferAttributes(moveHandle(Snpe_PSNPE_GetInputOutputBufferAttributes(handle(), name))), - DlSystem::Optional::LIFECYCLE::POINTER_OWNED - }; - } - /* To be deprecated, please use new api registerMemoryMappedBuffers */ - bool registerIonBuffers(const DlSystem::UserMemoryMap& ionBufferMap) const noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_RegisterIonBuffers(handle(), getHandle(ionBufferMap)); - } - /* To be deprecated, please use new api deregisterMemoryMappedBuffers */ - bool deregisterIonBuffers(const DlSystem::StringList& ionBufferNames) const noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_DeregisterIonBuffers(handle(), getHandle(ionBufferNames)); - } - - bool registerMemoryMappedBuffers(const DlSystem::UserMemoryMap& memoryMappedBufferMap) noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_RegisterUserMemoryMappedBuffers(handle(), getHandle(memoryMappedBufferMap)); - } - - bool deregisterMemoryMappedBuffers(const DlSystem::StringList& bufferNames) noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_DeregisterUserMemoryMappedBuffers(handle(), getHandle(bufferNames)); - } - - const char* getLastErrorString(){ - return Snpe_PSNPE_GetLastErrorString(handle()); - } - -private: - PSNPE(const PSNPE&) = delete; - PSNPE& operator=(const PSNPE&) = delete; - -}; - -} // ns PSNPE - - - -ALIAS_IN_ZDL_NAMESPACE(PSNPE, BuildMode) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, InputOutputTransmissionMode) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, OutputAsyncCallbackParam) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, InputOutputAsyncCallbackParam) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, InputOutputInputAsyncCallbackParam) - -ALIAS_IN_ZDL_NAMESPACE(PSNPE, OutputAsyncCallbackFunc) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, InputOutputAsyncCallbackFunc) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, InputOutputAsyncInputCallback) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, BuildConfig) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, PSNPE) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.h deleted file mode 100644 index 59295d59..00000000 --- 
a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.h +++ /dev/null @@ -1,118 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef _SNPE_RUNTIME_CONFIG_LIST_H_ -#define _SNPE_RUNTIME_CONFIG_LIST_H_ - - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" - -#include "DlSystem/DlEnums.h" -#include "DlSystem/RuntimeList.h" -#include "DlSystem/TensorShapeMap.h" - - -#ifdef __cplusplus -extern "C" { -#endif - -typedef void* Snpe_RuntimeConfig_Handle_t; - -SNPE_API -Snpe_RuntimeConfig_Handle_t Snpe_RuntimeConfig_Create(); - -SNPE_API -Snpe_RuntimeConfig_Handle_t Snpe_RuntimeConfig_CreateCopy(Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_Delete(Snpe_RuntimeConfig_Handle_t rcHandle); - - -SNPE_API -Snpe_Runtime_t Snpe_RuntimeConfig_GetRuntime(Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_SetRuntime(Snpe_RuntimeConfig_Handle_t rcHandle, Snpe_Runtime_t runtime); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_SetRuntimeList(Snpe_RuntimeConfig_Handle_t rcHandle, Snpe_RuntimeList_Handle_t rlHandle); - -SNPE_API -Snpe_RuntimeList_Handle_t Snpe_RuntimeConfig_GetRuntimeList_Ref(Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_PerformanceProfile_t Snpe_RuntimeConfig_GetPerformanceProfile(Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_SetPerformanceProfile(Snpe_RuntimeConfig_Handle_t rcHandle, Snpe_PerformanceProfile_t perfProfile); - -SNPE_API -int Snpe_RuntimeConfig_GetEnableCPUFallback(Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_SetEnableCPUFallback(Snpe_RuntimeConfig_Handle_t rcHandle, int enableCpuFallback); - - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_SetInputDimensionsMap(Snpe_RuntimeConfig_Handle_t rcHandle, Snpe_TensorShapeMap_Handle_t tsmHandle); - -SNPE_API -Snpe_TensorShapeMap_Handle_t Snpe_RuntimeConfig_GetInputDimensionsMap_Ref(Snpe_RuntimeConfig_Handle_t rcHandle); - - - -typedef void* Snpe_RuntimeConfigList_Handle_t; - -SNPE_API -Snpe_RuntimeConfigList_Handle_t Snpe_RuntimeConfigList_Create(); - -SNPE_API -Snpe_RuntimeConfigList_Handle_t Snpe_RuntimeConfigList_CreateSize(size_t size); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfigList_Delete(Snpe_RuntimeConfigList_Handle_t rclHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfigList_PushBack(Snpe_RuntimeConfigList_Handle_t rclHandle, Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_RuntimeConfig_Handle_t Snpe_RuntimeConfigList_At_Ref(Snpe_RuntimeConfigList_Handle_t rclHandle, size_t idx); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfigList_Assign(Snpe_RuntimeConfigList_Handle_t rclSrcHandle, Snpe_RuntimeConfigList_Handle_t rclDstHandle); - -SNPE_API -size_t 
Snpe_RuntimeConfigList_Size(Snpe_RuntimeConfigList_Handle_t rclHandle); - -SNPE_API -size_t Snpe_RuntimeConfigList_Capacity(Snpe_RuntimeConfigList_Handle_t rclHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfigList_Clear(Snpe_RuntimeConfigList_Handle_t rclHandle); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_RUNTIME_CONFIG_LIST_H_ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.hpp deleted file mode 100644 index faf052c5..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.hpp +++ /dev/null @@ -1,153 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - - -#include "DlSystem/DlEnums.hpp" -#include "DlSystem/RuntimeList.hpp" -#include "DlSystem/TensorShapeMap.hpp" - - -#include "SNPE/RuntimeConfigList.h" - -namespace PSNPE { - - - -struct RuntimeConfig : public Wrapper { -private: - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_RuntimeConfig_Delete}; - - template - using RuntimeReference = WrapperDetail::GenericMemberReference - , - CastingSetter >; - - - template - using RuntimeListReference = WrapperMemberReference< - RuntimeListType, - Snpe_RuntimeList_Handle_t, - Snpe_RuntimeConfig_GetRuntimeList_Ref, - Snpe_RuntimeConfig_SetRuntimeList - >; - - template - using InputDimensionsMapReference = WrapperMemberReference< - InputDimensionsMapType, - Snpe_TensorShapeMap_Handle_t, - Snpe_RuntimeConfig_GetInputDimensionsMap_Ref, - Snpe_RuntimeConfig_SetInputDimensionsMap - >; - - template - using PerfProfileReference = WrapperDetail::GenericMemberReference - , - CastingSetter >; - - template - using EnableCPUFallbackReference = WrapperDetail::GenericMemberReference - , - CastingSetter >; - -public: - RuntimeConfig() - : BaseType(Snpe_RuntimeConfig_Create()) - { } - RuntimeConfig(const RuntimeConfig& other) - : BaseType(Snpe_RuntimeConfig_CreateCopy(other.handle())) - { } - - RuntimeConfig(RuntimeConfig&& other) noexcept - : BaseType(std::move(other)) - { } - - RuntimeConfig& operator=(RuntimeConfig&& other) noexcept{ - return moveAssign(std::move(other)); - } - - - RuntimeReference runtime{*this, DlSystem::Runtime_t::CPU_FLOAT32}; - RuntimeListReference runtimeList{*this}; - PerfProfileReference perfProfile{*this, DlSystem::PerformanceProfile_t::HIGH_PERFORMANCE}; - InputDimensionsMapReference inputDimensionsMap{*this}; - EnableCPUFallbackReference enableCPUFallback{*this, false}; - -}; - - -class RuntimeConfigList : public Wrapper { -private: - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_RuntimeConfigList_Delete}; - -public: - RuntimeConfigList() - : BaseType(Snpe_RuntimeConfigList_Create()) - { } - 
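// A minimal sketch (not from the original header) that builds a one-entry runtime config list
// with the C API above. SNPE_RUNTIME_DSP and SNPE_PERFORMANCE_PROFILE_BURST are assumed
// enumerators from DlSystem/DlEnums.h; deleting the config after PushBack mirrors the C++
// wrapper, whose local RuntimeConfig goes out of scope after push_back().
#include "SNPE/RuntimeConfigList.h"

Snpe_RuntimeConfigList_Handle_t makeRuntimeConfigListC(void)
{
    Snpe_RuntimeConfig_Handle_t dspConfig = Snpe_RuntimeConfig_Create();
    Snpe_RuntimeConfig_SetRuntime(dspConfig, SNPE_RUNTIME_DSP);                          /* assumed */
    Snpe_RuntimeConfig_SetPerformanceProfile(dspConfig, SNPE_PERFORMANCE_PROFILE_BURST); /* assumed */
    Snpe_RuntimeConfig_SetEnableCPUFallback(dspConfig, 1);

    Snpe_RuntimeConfigList_Handle_t list = Snpe_RuntimeConfigList_Create();
    Snpe_RuntimeConfigList_PushBack(list, dspConfig);
    Snpe_RuntimeConfig_Delete(dspConfig);

    return list;    /* released later with Snpe_RuntimeConfigList_Delete() */
}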
RuntimeConfigList(size_t size) - : BaseType(Snpe_RuntimeConfigList_CreateSize(size)) - { } - - RuntimeConfigList(RuntimeConfigList&& other) noexcept - : BaseType(std::move(other)) - { } - - RuntimeConfigList& operator=(RuntimeConfigList&& other) noexcept{ - return moveAssign(std::move(other)); - } - RuntimeConfigList& operator=(const RuntimeConfigList& other){ - Snpe_RuntimeConfigList_Assign(other.handle(), handle()); - return *this; - } - - - - void push_back(const RuntimeConfig& runtimeConfig){ - Snpe_RuntimeConfigList_PushBack(handle(), getHandle(runtimeConfig)); - } - - RuntimeConfig& operator[](size_t index){ - return *makeReference(Snpe_RuntimeConfigList_At_Ref(handle(), index)); - } - const RuntimeConfig& operator[](size_t index) const{ - return *makeReference(Snpe_RuntimeConfigList_At_Ref(handle(), index)); - } - - size_t size() const noexcept{ - return Snpe_RuntimeConfigList_Size(handle()); - } - size_t capacity() const noexcept{ - return Snpe_RuntimeConfigList_Capacity(handle()); - } - - void clear() noexcept{ - Snpe_RuntimeConfigList_Clear(handle()); - } - -}; - -} // ns PSNPE - - -ALIAS_IN_ZDL_NAMESPACE(PSNPE, RuntimeConfig) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, RuntimeConfigList) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPE.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPE.h deleted file mode 100644 index eb05473a..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPE.h +++ /dev/null @@ -1,336 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef _SNPE_SNPE_H_ -#define _SNPE_SNPE_H_ - - -#include "DlSystem/IBufferAttributes.h" -#include "DlSystem/ITensor.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/TensorMap.h" -#include "DlSystem/StringList.h" -#include "DlSystem/IUserBuffer.h" -#include "DlSystem/UserBufferMap.h" -#include "DlSystem/UserMemoryMap.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - -#include "DiagLog/IDiagLog.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * A typedef to indicate a SNPE handle - */ -typedef void* Snpe_SNPE_Handle_t; - -/** - * Destroys/frees a SNPE object - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_Delete(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Gets the names of input tensors to the network - * - * To support multiple input scenarios, where multiple tensors are - * passed through execute() in a TensorMap, each tensor needs to - * be uniquely named. The names of tensors can be retrieved - * through this function. - * - * In the case of a single input, one name will be returned. 
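For readers tracking what this deletion removes: a minimal C++ sketch of how the PSNPE RuntimeConfig / RuntimeConfigList wrappers above were typically populated. It assumes the deleted zdl include tree is still on an include path and that the member references (runtime, perfProfile, enableCPUFallback) accept plain assignment, as their GenericMemberReference plumbing suggests; CPU_FLOAT32 and HIGH_PERFORMANCE are the only enum values this diff shows, so only those are used.

```cpp
#include "SNPE/RuntimeConfigList.hpp"
#include "DlSystem/DlEnums.hpp"

// Build an ordered list of per-runtime configurations (order encodes precedence).
static zdl::PSNPE::RuntimeConfigList makeRuntimeConfigs()
{
    zdl::PSNPE::RuntimeConfig config;   // defaults: CPU_FLOAT32, HIGH_PERFORMANCE, no CPU fallback
    config.runtime           = zdl::DlSystem::Runtime_t::CPU_FLOAT32;  // swap in other runtime enums from DlEnums.hpp as needed
    config.perfProfile       = zdl::DlSystem::PerformanceProfile_t::HIGH_PERFORMANCE;
    config.enableCPUFallback = true;    // fall back to CPU if the chosen runtime rejects the network

    zdl::PSNPE::RuntimeConfigList configs;
    configs.push_back(config);          // copies the config; push more entries for more runtimes
    return configs;
}
```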
- * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @return A StringList of input tensor names. - * - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_SNPE_GetInputTensorNames(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Gets the names of output tensors to the network - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @return List of output tensor names. - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_SNPE_GetOutputTensorNames(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Gets the names of output tensor from the input layer name - * - * @param[in] snpeHandle Handle to access the SNPE object - * @param[in] name Layer name - * - * @return Output tensor names. - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_SNPE_GetOutputTensorNamesByLayerName(Snpe_SNPE_Handle_t snpeHandle, const char* name); - - -/** - * @brief Processes the input data and returns the output - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] inputHandle A map of tensors that contains the input data for - * each input. The names of tensors needs to be - * matched with names retrieved through - * getInputTensorNames() - * - * @param[in,out] outputHandle An empty map of tensors that will contain the output - * data of potentially multiple layers (the key - * in the map is the layer name) upon return - * - * @note output TensorMap has to be empty. To forward propagate - * and get results in user-supplied tensors, use - * Snpe_SNPE_ExecuteUserBuffers(). - * - * @return SNPE_SUCCESS upon successful execution - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_ExecuteITensors(Snpe_SNPE_Handle_t snpeHandle, Snpe_TensorMap_Handle_t inputHandle, Snpe_TensorMap_Handle_t outputHandle); - -/** - * @brief Processes the input data and returns the output - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] inputHandle A single tensor contains the input data. - * - * @param[in,out] outputHandle An empty map of tensors that will contain the output - * data of potentially multiple layers (the key - * in the map is the layer name) upon return - * - * @note output TensorMap has to be empty. To forward propagate - * and get results in user-supplied tensors, use - * Snpe_SNPE_ExecuteUserBuffers. - * - * @return SNPE_SUCCESS upon successful execution - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_ExecuteITensor(Snpe_SNPE_Handle_t snpeHandle, Snpe_ITensor_Handle_t inputHandle, Snpe_TensorMap_Handle_t outputHandle); - -/** - * @brief Processes the input data and returns the output, using - * user-supplied buffers - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] inputHandle A map of UserBuffers that contains the input data for - * each input. The names of UserBuffers needs to be - * matched with names retrieved through - * getInputTensorNames() - * - * @param[in,out] outputHandle A map of UserBuffers that will hold the output - * data of potentially multiple layers (the key - * in the map is the UserBuffer name) - * - * @note input and output UserBuffer maps must be fully pre-populated. with - * dimensions matching what the network expects. - * For example, if there are 5 output UserBuffers they all have to be - * present in map. - * - * Caller must guarantee that for the duration of execute(), the buffer - * stored in UserBuffer would remain valid. For more detail on buffer - * ownership and lifetime requirements, please refer to zdl::DlSystem::UserBuffer - * documentation. 
- * - * @return SNPE_SUCCESS upon successful execution - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_ExecuteUserBuffers(Snpe_SNPE_Handle_t snpeHandle, Snpe_UserBufferMap_Handle_t inputHandle, Snpe_UserBufferMap_Handle_t outputHandle); - - -/** - * @brief Register Client ION Buffers - * - * @note To be deprecated, please use new api Snpe_SNPE_RegisterUserMemoryMappedBuffers - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] ionBufferMapHandle A UserMemoryMap of virtual addresses - * - * @return SNPE_SUCCESS upon successful ION Buffer registration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_RegisterIonBuffers(Snpe_SNPE_Handle_t snpeHandle, Snpe_UserMemoryMap_Handle_t ionBufferMapHandle); - -/** - * @brief Deregister Client ION Buffers - * - * @note To be deprecated, please use new api Snpe_SNPE_DeregisterUserMemoryMappedBuffers - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] ionBufferNamesHandle A StringList of ION Buffer names - * - * @return SNPE_SUCCESS upon successful ION Buffer deregistration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_DeregisterIonBuffers(Snpe_SNPE_Handle_t snpeHandle, Snpe_StringList_Handle_t ionBufferNamesHandle); - -/** - * @brief Register Client Memory-Mapped Buffers (Example ION buffers in Android) - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] bufferMapHandle A UserMemoryMap of virtual addresses - * - * @note UserBuffer type passed for registration must match the data type of the tensor in the dlc - * For regular UserBuffers SNPE performs an online data conversion (quantization or - * dequantization etc). This is not possible for memory mapped buffers hence can lead to - * issues during execution or accuracy degradation - * - * @return SNPE_SUCCESS upon successful memory mapped buffer registration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_RegisterUserMemoryMappedBuffers(Snpe_SNPE_Handle_t snpeHandle, Snpe_UserMemoryMap_Handle_t bufferMapHandle); - -/** - * @brief Deregister Client Memory-Mapped Buffers (Example ION buffers in Android) - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] bufferNamesHandle A StringList of memory mapped buffer names - * - * @return SNPE_SUCCESS upon successful memory mapped buffer deregistration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_DeregisterUserMemoryMappedBuffers(Snpe_SNPE_Handle_t snpeHandle, Snpe_StringList_Handle_t bufferNamesHandle); - -/** - * @brief Returns the version string embedded at model conversion - * time. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @return Model version string, which is a free-form string - * supplied at the time of the conversion - * - */ -SNPE_API -const char* Snpe_SNPE_GetModelVersion(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Returns the dimensions of the input data to the model in the - * form of TensorShape. The dimensions in TensorShape corresponds to - * what the tensor dimensions would need to be for an input tensor to - * the model. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] name input name. - * - * @note Note that this function only makes sense for networks - * that have a fixed input size. For networks in which the - * input size varies with each call of Execute(), this - * function should not be used. - * - * @return a TensorShape that maintains dimensions, - * matching the tensor dimensions for input to the model, - * where the last entry is the fastest varying dimension, etc. 
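As a usage note for the execute entry points documented above, a minimal sketch of the single-ITensor execute flow. The Snpe_TensorMap_* helpers come from DlSystem/TensorMap.h (included by SNPE.h but outside this diff), so their exact names are an assumption; the SNPE calls themselves are the ones declared above.

```cpp
#include "SNPE/SNPE.h"
#include "DlSystem/TensorMap.h"

// Run one inference with a single input tensor and an initially empty output map.
static bool runOnce(Snpe_SNPE_Handle_t snpe, Snpe_ITensor_Handle_t inputTensor)
{
    Snpe_TensorMap_Handle_t outputs = Snpe_TensorMap_Create();   // assumed DlSystem helper

    // The output map must be empty; SNPE fills it with one tensor per output layer.
    Snpe_ErrorCode_t rc = Snpe_SNPE_ExecuteITensor(snpe, inputTensor, outputs);

    // ... read results out of `outputs`, keyed by the names from Snpe_SNPE_GetOutputTensorNames() ...

    Snpe_TensorMap_Delete(outputs);                              // assumed DlSystem helper
    return rc == SNPE_SUCCESS;
}
```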
- * - * @see Snpe_ITensor_Handle_t - * @see Snpe_TensorShape_Handle_t - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_SNPE_GetInputDimensions(Snpe_SNPE_Handle_t snpeHandle, const char* name); - -/** - * @brief Returns the dimensions of the first input's data to the model in the - * form of TensorShape. The dimensions in TensorShape corresponds to - * what the tensor dimensions would need to be for an input tensor to - * the model. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @note Note that this function only makes sense for networks - * that have a fixed input size. For networks in which the - * input size varies with each call of Execute(), this - * function should not be used. - * - * @return a TensorShape that maintains dimensions, - * matching the tensor dimensions for first input to the model, - * where the last entry is the fastest varying dimension, etc. - * - * @see Snpe_ITensor_Handle_t - * @see Snpe_TensorShape_Handle_t - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_SNPE_GetInputDimensionsOfFirstTensor(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Gets the output layer(s) for the network. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @note The output layers returned by this function may be - * different than those specified when the network was created - * via the @ref CAPI_SNPEBuilder "SNPEBuilder". For example, if the - * network was created in debug mode with no explicit output - * layers specified, this will contain all layers. - * - * - * @return A StringList of output layer names. - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_SNPE_GetOutputLayerNames(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Returns attributes of buffers used to feed input tensors and receive result from output tensors. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] name Tensor name. - * - * @return BufferAttributes of input/output tensor named - */ -SNPE_API -Snpe_IBufferAttributes_Handle_t Snpe_SNPE_GetInputOutputBufferAttributes(Snpe_SNPE_Handle_t snpeHandle, const char *name); - -/** - * @brief . - * - * Get the diagnostic logging interface - * - * @param[in] snpeHandle Handle to access the SNPE object - * - */ -SNPE_API -Snpe_IDiagLog_Handle_t Snpe_SNPE_GetDiagLogInterface_Ref(Snpe_SNPE_Handle_t snpeHandle); - - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPE.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPE.hpp deleted file mode 100644 index d4ad18df..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPE.hpp +++ /dev/null @@ -1,125 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "DlSystem/StringList.hpp" -#include "DlSystem/TensorMap.hpp" -#include "DlSystem/UserBufferMap.hpp" -#include "DlSystem/UserMemoryMap.hpp" -#include "DlSystem/IBufferAttributes.hpp" -#include "DiagLog/IDiagLog.hpp" - -#include "DlSystem/DlOptional.hpp" - - -#include "SNPE/SNPE.h" - -namespace SNPE{ - -class SNPE : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_SNPE_Delete}; - - template - static DlSystem::Optional makeOptional(H handle){ - return DlSystem::Optional(T(moveHandle(handle))); - } -public: - - DlSystem::Optional getInputTensorNames() const noexcept{ - return makeOptional(Snpe_SNPE_GetInputTensorNames(handle())); - } - - DlSystem::Optional getOutputTensorNames() const noexcept{ - return makeOptional(Snpe_SNPE_GetOutputTensorNames(handle())); - } - - DlSystem::StringList getOutputTensorNamesByLayerName(const char *name) const noexcept{ - return moveHandle(Snpe_SNPE_GetOutputTensorNamesByLayerName(handle(), name)); - } - - bool execute(const DlSystem::TensorMap& input, DlSystem::TensorMap& output) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_ExecuteITensors(handle(), getHandle(input), getHandle(output)); - } - - - bool execute(const DlSystem::ITensor* input, DlSystem::TensorMap& output) noexcept{ - if(!input) return false; - return SNPE_SUCCESS == Snpe_SNPE_ExecuteITensor(handle(), getHandle(*input), getHandle(output)); - } - - bool execute(const DlSystem::UserBufferMap& input, const DlSystem::UserBufferMap& output) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_ExecuteUserBuffers(handle(), getHandle(input), getHandle(output)); - } - - - /* To be deprecated, please use new api registerMemoryMappedBuffers */ - bool registerIonBuffers(const DlSystem::UserMemoryMap& ionBufferMap) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_RegisterUserMemoryMappedBuffers(handle(), getHandle(ionBufferMap)); - } - - /* To be deprecated, please use new api deregisterMemoryMappedBuffers */ - bool deregisterIonBuffers(const DlSystem::StringList& ionBufferNames) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_DeregisterUserMemoryMappedBuffers(handle(), getHandle(ionBufferNames)); - } - - bool registerMemoryMappedBuffers(const DlSystem::UserMemoryMap& memoryMappedBufferMap) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_RegisterUserMemoryMappedBuffers(handle(), getHandle(memoryMappedBufferMap)); - } - - bool deregisterMemoryMappedBuffers(const DlSystem::StringList& bufferNames) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_DeregisterUserMemoryMappedBuffers(handle(), getHandle(bufferNames)); - } - - std::string getModelVersion() const{ - auto str = Snpe_SNPE_GetModelVersion(handle()); - return str ? 
str : ""; - } - - DlSystem::Optional getInputDimensions() const noexcept{ - return makeOptional(Snpe_SNPE_GetInputDimensionsOfFirstTensor(handle())); - } - - DlSystem::Optional getInputDimensions(const char* name) const noexcept{ - return makeOptional(Snpe_SNPE_GetInputDimensions(handle(), name)); - } - - DlSystem::Optional getOutputLayerNames() const noexcept{ - return makeOptional(Snpe_SNPE_GetOutputLayerNames(handle())); - } - - - DlSystem::Optional getInputOutputBufferAttributes(const char* name) const noexcept{ - return DlSystem::Optional( - new DlSystem::IBufferAttributes(moveHandle(Snpe_SNPE_GetInputOutputBufferAttributes(handle(), name))), - DlSystem::Optional::LIFECYCLE::POINTER_OWNED - ); - } - - DlSystem::Optional getDiagLogInterface() noexcept{ - auto diagLogHandle = Snpe_SNPE_GetDiagLogInterface_Ref(handle()); - if(!diagLogHandle) return {}; - // Bind lifespan of this reference to this object - auto toret = makeReference(diagLogHandle); - return {toret, DlSystem::Optional::LIFECYCLE::POINTER_NOT_OWNED}; - } - -private: - SNPE(const SNPE&) = delete; - SNPE& operator=(const SNPE&) = delete; - -}; - -} // ns SNPE - -ALIAS_IN_ZDL_NAMESPACE(SNPE, SNPE) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.h deleted file mode 100644 index 6adcebad..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.h +++ /dev/null @@ -1,334 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _SNPE_BUILDER_H_ -#define _SNPE_BUILDER_H_ - -#include "SNPE/SNPE.h" -#include "DlSystem/DlEnums.h" -#include "DlSystem/DlError.h" -#include "DlSystem/IOBufferDataTypeMap.h" -#include "DlSystem/TensorShapeMap.h" -#include "DlSystem/RuntimeList.h" -#include "DlSystem/PlatformConfig.h" -#include "DlContainer/DlContainer.h" - -#ifdef __cplusplus -extern "C" { -#endif - - - -/** - * A typedef to indicate a SNPEBuilder handle - */ -typedef void* Snpe_SNPEBuilder_Handle_t; - -/** - * The builder class for creating SNPE objects. - * Not meant to be extended. - */ - - -/** - * @brief Constructor of NeuralNetwork Builder ith a supplied model. - * - * @param[in] containerHandle A DlContainer holding the model. - * - * @return A new instance of a SNPEBuilder object - * that can be used to configure and build - * an instance of SNPE. - * - */ -SNPE_API -Snpe_SNPEBuilder_Handle_t Snpe_SNPEBuilder_Create(Snpe_DlContainer_Handle_t containerHandle); - -/** - * Destroys/frees a SNPEBuilder object - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @return SNPE_SUCCESS if Delete operation successful. 
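The SNPE.hpp wrapper above compresses that handle juggling into member calls. A minimal sketch, assuming the zdl::DlSystem::TensorMap wrapper from the included DlSystem/TensorMap.hpp (not part of this diff) is a default-constructible map:

```cpp
#include "SNPE/SNPE.hpp"
#include "DlSystem/TensorMap.hpp"
#include <string>

// Execute a prepared input map and query model metadata through the wrapper.
static bool runInference(zdl::SNPE::SNPE& snpe, zdl::DlSystem::TensorMap& inputs)
{
    zdl::DlSystem::TensorMap outputs;   // must be empty before execute()
    if (!snpe.execute(inputs, outputs)) {
        return false;                   // details available via the DlError / last-error facilities
    }

    const std::string modelVersion = snpe.getModelVersion();  // free-form string set at conversion time
    (void)modelVersion;                 // e.g. log it alongside the results in `outputs`
    return true;
}
```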
- */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_Delete(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle); - -/** - * @brief Requests a performance profile. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] performanceProfile The target performance profile. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetPerformanceProfile(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_PerformanceProfile_t performanceProfile); - -/** - * @brief Sets the profiling level. Default profiling level for - * SNPEBuilder is off. Off and basic only applies to DSP runtime. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] profilingLevel The target profiling level. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetProfilingLevel(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_ProfilingLevel_t profilingLevel); - -/** - * @brief Sets a preference for execution priority. - * - * This allows the caller to give coarse hint to SNPE runtime - * about the priority of the network. SNPE runtime is free to use - * this information to co-ordinate between different workloads - * that may or may not extend beyond SNPE. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] priority The target performance profile. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetExecutionPriorityHint(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_ExecutionPriorityHint_t priority); - -/** - * @brief Sets the layers that will generate output. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] outputLayerNames List of layer names to - * output. An empty list will - * result in only the final - * layer of the model being - * the output layer. The list - * will be copied. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetOutputLayers(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_StringList_Handle_t outputLayerNames); - -/** - * @brief Sets the output tensor names. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] outputTensorNames List of tensor names to - * output. An empty list will - * result in producing output for the final - * output tensor of the model. - * The list will be copied. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetOutputTensors(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_StringList_Handle_t outputTensorNames); - -/** - * @brief Sets whether this neural network will perform inference with - * input from user-supplied buffers, and write output to user-supplied - * buffers. Default behaviour is to use tensors created by - * ITensorFactory. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] bufferMode Boolean whether to use user-supplied buffer or not. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetUseUserSuppliedBuffers(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, int bufferMode); - -/** - * @brief Sets the debug mode of the runtime. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] debugMode This enables debug mode for the runtime. It - * does two things. For an empty - * outputLayerNames list, all layers will be - * output. It might also disable some internal - * runtime optimizations (e.g., some networks - * might be optimized by combining layers, - * etc.). 
- * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetDebugMode(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, int debugMode); - - - -/** - * @brief Sets network's input dimensions to enable resizing of - * the spatial dimensions of each layer for fully convolutional networks, - * and the batch dimension for all networks. - * - * @param[in] tensorShapeMapHandle : Handle to the map of input names and their new dimensions. - * The new dimensions overwrite the input dimensions - * embedded in the model and then resize each layer - * of the model. If the model contains - * layers whose dimensions cannot be resized e.g FullyConnected, - * exception will be thrown when SNPE instance is actually built. - * In general the batch dimension is always resizable. - * After resizing of layers' dimensions in model based - * on new input dimensions, the new model is revalidated - * against all runtime constraints, whose failures may - * result in cpu fallback situation. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetInputDimensions(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_TensorShapeMap_Handle_t inputDimensionsMapHandle); - -/** - * @brief Sets the mode of init caching functionality. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] mode Boolean. This flag enables/disables the functionality of init caching. - * When init caching functionality is enabled, a set of init caches - * will be created during network building/initialization process - * and will be added to DLC container. If such DLC container is saved - * by the user, in subsequent network building/initialization processes - * these init caches will be loaded from the DLC so as to reduce initialization time. - * In disable mode, no init caches will be added to DLC container. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetInitCacheMode(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, int cacheMode); - -/** - * @brief Returns an instance of SNPE based on the current parameters. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @return A new instance of a @ref CAPI_SNPE "SNPE" object that can be used - * to execute models or null if any errors occur. - */ -SNPE_API -Snpe_SNPE_Handle_t Snpe_SNPEBuilder_Build(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle); - -/** - * @brief Sets the platform configuration. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] platformConfig The platform configuration. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetPlatformConfig(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_PlatformConfig_Handle_t platformConfigHandle); - -/** - * @brief Sets network's runtime order of precedence. Example: - * CPU_FLOAT32, GPU_FLOAT16, AIP_FIXED8_TF - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] runtimeListHandle The list of runtime in order of precedence - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetRuntimeProcessorOrder(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_RuntimeList_Handle_t runtimeListHandle); - -/** - * @brief Sets the unconsumed tensors as output - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] setOutput Boolean. 
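Putting the builder entry points above together, a minimal C-style sketch (compiled as C++) of configuring and building a SNPE instance. The DLC container and runtime list are passed in pre-built because their creation APIs live in DlContainer and DlSystem headers outside this diff.

```cpp
#include "SNPE/SNPEBuilder.h"

// Configure and build a SNPE instance; returns NULL/nullptr on failure.
static Snpe_SNPE_Handle_t buildFromContainer(Snpe_DlContainer_Handle_t container,
                                             Snpe_RuntimeList_Handle_t runtimes,
                                             Snpe_PerformanceProfile_t perfProfile)
{
    Snpe_SNPEBuilder_Handle_t builder = Snpe_SNPEBuilder_Create(container);
    if (!builder) return nullptr;

    Snpe_SNPEBuilder_SetRuntimeProcessorOrder(builder, runtimes);  // runtime precedence, e.g. DSP before CPU
    Snpe_SNPEBuilder_SetPerformanceProfile(builder, perfProfile);
    Snpe_SNPEBuilder_SetInitCacheMode(builder, 1);                 // add init caches to the DLC to speed up reloads
    Snpe_SNPEBuilder_SetUseUserSuppliedBuffers(builder, 0);        // use ITensors rather than user buffers

    Snpe_SNPE_Handle_t snpe = Snpe_SNPEBuilder_Build(builder);     // NULL if any error occurs
    Snpe_SNPEBuilder_Delete(builder);                              // the built SNPE handle outlives its builder
    return snpe;
}
```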
This enables unconsumed tensors (i.e) - * outputs which are not inputs to any - * layer (basically dead ends) to be marked - * for output - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetUnconsumedTensorsAsOutputs(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, int setOutput); - -/** - * @brief Execution terminated when exceeding time limit. - * Only valid for dsp runtime currently. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] timeout Time limit value in microseconds - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetTimeOut(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, uint64_t timeoutMicroSec); - - -/** - * @brief Sets the datatype of the buffer. - * Only valid for dsp runtime currently. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] dataTypeMapHandle Map of the buffer names and the datatype that needs to be set. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetBufferDataType(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_IOBufferDataTypeMap_Handle_t dataTypeMapHandle); - -/** - * @brief Sets up the entire initialization callflow to - * happen on the user's thread - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] singleThreadedInit Flag indicating user's intent to perform initialization callflow - * on caller's thread. - * When set to 1, initialization will happen on the user's thread - * When set to 0, initialization will happen on a new thread. This is the default - * behavior (analogous to not calling this API) -*/ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetSingleThreadedInit(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, int singleThreadedInit); - -/** - * @brief Sets the fixed point execution mode for CPU runtime. - * If a floating point DLC is executed with this option set, the program will be terminated with an exception. - * If a quantized DLC is executed without this option set, the execution will be in floating point mode in CPU. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] cpuFxpMode Boolean If set to true, enables the fixed point mode. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetCpuFixedPointMode( - Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, bool cpuFxpMode); - -/** - * @brief Sets model name for logging - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] modelName String Model name for logging. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetModelName( - Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, const char *modelName); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_BUILDER_H_ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.hpp deleted file mode 100644 index 37995f4e..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.hpp +++ /dev/null @@ -1,136 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include - - -#include "Wrapper.hpp" -#include "SNPE.hpp" -#include "DlSystem/RuntimeList.hpp" -#include "DlContainer/IDlContainer.hpp" -#include "DlSystem/PlatformConfig.hpp" -#include "DlSystem/TensorShapeMap.hpp" - -#include "DlSystem/DlEnums.hpp" - -#include "DlSystem/IOBufferDataTypeMap.hpp" - -#include "SNPE/SNPEBuilder.h" - - -namespace SNPE { - -class SNPEBuilder : public Wrapper { - friend BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_SNPEBuilder_Delete}; -public: - - explicit SNPEBuilder(DlContainer::IDlContainer *container) - : BaseType(Snpe_SNPEBuilder_Create(getHandle(container))) - { } - - - SNPEBuilder& setPerformanceProfile(DlSystem::PerformanceProfile_t performanceProfile){ - Snpe_SNPEBuilder_SetPerformanceProfile(handle(), static_cast(performanceProfile)); - return *this; - } - - SNPEBuilder& setProfilingLevel(DlSystem::ProfilingLevel_t profilingLevel){ - Snpe_SNPEBuilder_SetProfilingLevel(handle(), static_cast(profilingLevel)); - return *this; - } - - SNPEBuilder& setExecutionPriorityHint(DlSystem::ExecutionPriorityHint_t priority){ - Snpe_SNPEBuilder_SetExecutionPriorityHint(handle(), static_cast(priority)); - return *this; - } - - SNPEBuilder& setOutputLayers(const DlSystem::StringList& outputLayerNames){ - Snpe_SNPEBuilder_SetOutputLayers(handle(), getHandle(outputLayerNames)); - return *this; - } - - SNPEBuilder& setOutputTensors(const DlSystem::StringList& outputTensorNames){ - Snpe_SNPEBuilder_SetOutputTensors(handle(), getHandle(outputTensorNames)); - return *this; - } - - SNPEBuilder& setUseUserSuppliedBuffers(int bufferMode){ - Snpe_SNPEBuilder_SetUseUserSuppliedBuffers(handle(), bufferMode); - return *this; - } - - SNPEBuilder& setDebugMode(int debugMode){ - Snpe_SNPEBuilder_SetDebugMode(handle(), debugMode); - return *this; - } - - SNPEBuilder& setInputDimensions(const DlSystem::TensorShapeMap& inputDimensionsMap){ - Snpe_SNPEBuilder_SetInputDimensions(handle(), getHandle(inputDimensionsMap)); - return *this; - } - - SNPEBuilder& setInitCacheMode(int cacheMode){ - Snpe_SNPEBuilder_SetInitCacheMode(handle(), cacheMode); - return *this; - } - - SNPEBuilder& setPlatformConfig(const DlSystem::PlatformConfig& platformConfigHandle){ - Snpe_SNPEBuilder_SetPlatformConfig(handle(), getHandle(platformConfigHandle)); - return *this; - } - - SNPEBuilder& setRuntimeProcessorOrder(const DlSystem::RuntimeList& runtimeList){ - Snpe_SNPEBuilder_SetRuntimeProcessorOrder(handle(), getHandle(runtimeList)); - return *this; - } - - SNPEBuilder& setUnconsumedTensorsAsOutputs(int setOutput){ - Snpe_SNPEBuilder_SetUnconsumedTensorsAsOutputs(handle(), setOutput); - return *this; - } - - SNPEBuilder& setTimeOut(uint64_t timeoutMicroSec){ - Snpe_SNPEBuilder_SetTimeOut(handle(), timeoutMicroSec); - return *this; - } - - - SNPEBuilder& setBufferDataType(const DlSystem::IOBufferDataTypeMap& dataTypeMap){ - Snpe_SNPEBuilder_SetBufferDataType(handle(), getHandle(dataTypeMap)); - return *this; - } - - SNPEBuilder& setSingleThreadedInit(int singleThreadedInit){ - Snpe_SNPEBuilder_SetSingleThreadedInit(handle(), singleThreadedInit); - return *this; - } - - SNPEBuilder& setCpuFixedPointMode(bool cpuFxpMode){ - Snpe_SNPEBuilder_SetCpuFixedPointMode(handle(), cpuFxpMode); - return *this; - } - - SNPEBuilder& setModelName(DlSystem::String modelName){ - Snpe_SNPEBuilder_SetModelName(handle(), modelName.c_str()); - return *this; - } - - std::unique_ptr build() noexcept{ - auto 
h = Snpe_SNPEBuilder_Build(handle()); - return h ? makeUnique(h) : nullptr; - } - -}; - -} // ns SNPE - - -ALIAS_IN_ZDL_NAMESPACE(SNPE, SNPEBuilder) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPEFactory.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPEFactory.hpp deleted file mode 100644 index 6c2486ee..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPEFactory.hpp +++ /dev/null @@ -1,88 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include "DlSystem/DlEnums.hpp" -#include "DlSystem/DlVersion.hpp" -#include "DlSystem/ITensorFactory.hpp" -#include "DlSystem/IUserBufferFactory.hpp" - - -#include "SNPE/SNPEUtil.h" -#include "DlSystem/DlEnums.h" - -namespace SNPE { - - -class SNPEFactory { -public: - - - static bool isRuntimeAvailable(DlSystem::Runtime_t runtime){ - return Snpe_Util_IsRuntimeAvailable(static_cast(runtime)); - } - - static bool isRuntimeAvailable(DlSystem::Runtime_t runtime, DlSystem::RuntimeCheckOption_t option){ - return Snpe_Util_IsRuntimeAvailableCheckOption(static_cast(runtime), - static_cast(option)); - } - - static DlSystem::ITensorFactory& getTensorFactory(){ - static DlSystem::ITensorFactory iTensorFactory; - return iTensorFactory; - } - - static DlSystem::IUserBufferFactory& getUserBufferFactory(){ - static DlSystem::IUserBufferFactory iUserBufferFactory; - return iUserBufferFactory; - } - - static DlSystem::Version_t getLibraryVersion(){ - return WrapperDetail::moveHandle(Snpe_Util_GetLibraryVersion()); - } - - static bool setSNPEStorageLocation(const char* storagePath){ - return SNPE_SUCCESS == Snpe_Util_SetSNPEStorageLocation(storagePath); - } - - static bool addOpPackage(const std::string& regLibraryPath){ - return SNPE_SUCCESS == Snpe_Util_AddOpPackage(regLibraryPath.c_str()); - } - - static bool isGLCLInteropSupported(){ - return Snpe_Util_IsGLCLInteropSupported(); - } - - static const char* getLastError(){ - return Snpe_Util_GetLastError(); - } - - static bool initializeLogging(const DlSystem::LogLevel_t& level){ - return Snpe_Util_InitializeLogging(static_cast(level)); - } - - static bool initializeLogging(const DlSystem::LogLevel_t& level, const std::string& logPath){ - return Snpe_Util_InitializeLoggingPath(static_cast(level), logPath.c_str()); - } - - static bool setLogLevel(const DlSystem::LogLevel_t& level){ - return Snpe_Util_SetLogLevel(static_cast(level)); - } - - static bool terminateLogging(){ - return Snpe_Util_TerminateLogging(); - } -}; - - -} // ns SNPE - - -ALIAS_IN_ZDL_NAMESPACE(SNPE, SNPEFactory) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPEUtil.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPEUtil.h deleted file mode 100644 index a3e1d1e1..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/SNPEUtil.h +++ /dev/null @@ -1,354 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. 
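The equivalent flow through the C++ SNPEBuilder and SNPEFactory wrappers deleted above, as a minimal sketch. The zdl::DlContainer::IDlContainer and zdl::DlSystem::RuntimeList types come from headers outside this diff and are passed in pre-built; the runtime-availability check uses the only runtime enum value this diff shows (CPU_FLOAT32).

```cpp
#include "SNPE/SNPEBuilder.hpp"
#include "SNPE/SNPEFactory.hpp"
#include <memory>

// Build a SNPE instance with an explicit runtime order and performance profile.
static std::unique_ptr<zdl::SNPE::SNPE> buildSnpe(zdl::DlContainer::IDlContainer* container,
                                                  const zdl::DlSystem::RuntimeList& runtimes)
{
    if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(zdl::DlSystem::Runtime_t::CPU_FLOAT32)) {
        return nullptr;   // even the CPU runtime is unavailable; nothing to build on
    }

    zdl::SNPE::SNPEBuilder builder(container);
    return builder.setRuntimeProcessorOrder(runtimes)
                  .setPerformanceProfile(zdl::DlSystem::PerformanceProfile_t::HIGH_PERFORMANCE)
                  .setInitCacheMode(1)           // cache init blobs into the container
                  .setUseUserSuppliedBuffers(0)  // ITensor path
                  .build();                      // nullptr on failure
}
```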
All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _SNPE_UTIL_H_ -#define _SNPE_UTIL_H_ - -#include "SNPE/SNPE.h" -#include "DlSystem/DlEnums.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/IUserBuffer.h" -#include "DlSystem/ITensor.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/DlVersion.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * @brief Creates a UserBuffer - * - * @param[in] buffer Pointer to the buffer that the caller supplies - * - * @param[in] bufSize Buffer size, in bytes - * - * @param[in] stridesHandle Total number of bytes between elements in each dimension. - * E.g. A tightly packed tensor of floats with dimensions [4, 3, 2] would have strides of [24, 8, 4]. - * - * @param[in] userBufferEncodingHandle Handle to a UserBufferEncoding object - * - * @note Caller has to ensure that memory pointed to by buffer stays accessible - * for the lifetime of the object created - * - * @return Handle to the created UserBuffer - */ -SNPE_API -Snpe_IUserBuffer_Handle_t Snpe_Util_CreateUserBuffer(void *buffer, - size_t bufSize, - Snpe_TensorShape_Handle_t stridesHandle, - Snpe_IUserBuffer_Handle_t userBufferEncodingHandle); - -/** - * @brief Creates a UserBuffer with a provided UserBufferSource - * - * @param[in] buffer Pointer to the buffer that the caller supplies - * - * @param[in] bufSize Buffer size, in bytes - * - * @param[in] stridesHandle Total number of bytes between elements in each dimension. - * E.g. A tightly packed tensor of floats with dimensions [4, 3, 2] would have strides of [24, 8, 4]. - * - * @param[in] userBufferEncodingHandle Handle to a UserBufferEncoding object - * - * @param[in] userBufferSourceHandle Handle to a UserBufferSource object - * - * @return Handle to the created UserBuffer - */ -SNPE_API -Snpe_IUserBuffer_Handle_t Snpe_Util_CreateUserBufferFromSource(void *buffer, - size_t bufSize, - Snpe_TensorShape_Handle_t stridesHandle, - Snpe_IUserBuffer_Handle_t userBufferEncodingHandle, - Snpe_UserBufferSource_Handle_t userBufferSourceHandle); - -/** - * @brief Creates a UserBuffer - * - * @param[in] buffer Pointer to the buffer that the caller supplies - * - * @param[in] bufSize Buffer size, in bytes - * - * @param[in] stridesHandle Total number of bytes between elements in each dimension. - * E.g. A tightly packed tensor of floats with dimensions [4, 3, 2] would have strides of [24, 8, 4]. - * - * @param[in] userBufferEncodingHandle Reference to an UserBufferEncoding object - * - * @param[in] userBufferSourceHandle Reference to an UserBufferSource object - * - * @note Caller has to ensure that memory pointed to by buffer stays accessible - * for the lifetime of the object created - * - * @return the created UserBuffer - * - */ -SNPE_API -Snpe_IUserBuffer_Handle_t Snpe_Util_CreateUserGlBuffer(void *buffer, - size_t bufSize, - Snpe_TensorShape_Handle_t stridesHandle, - Snpe_IUserBuffer_Handle_t userBufferEncodingHandle, - Snpe_IUserBuffer_Handle_t userBufferSourceHandle); - -/** - * Creates a new ITensor with uninitialized data. 
- * - * ITensor buffer size assumes float32 encoding for each element. - * (i.e., a tensor with dimensions (2,3) will be represented by (2 * 3) * 4 = 24 bytes in memory) - * - * The strides for the tensor will match the tensor dimensions - * (i.e., the tensor data is contiguous in memory). - * - * @param[in] shapeHandle The dimensions for the tensor in which the last - * element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying, etc. - * - * @return The created tensor - */ -SNPE_API -Snpe_ITensor_Handle_t Snpe_Util_CreateITensor(Snpe_TensorShape_Handle_t shapeHandle); - -/** - * Create a new ITensor with specific data. - * (i.e. the tensor data is contiguous in memory). This tensor is - * primarily used to create a tensor where tensor size can't be - * computed directly from dimension. One such example is - * NV21-formatted image, or any YUV formatted image - * - * @param[in] shapeHandle The dimensions for the tensor in which the last - * element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying, etc. - * - * @param[in] data The actual data with which the Tensor object is filled. - * - * @param[in] dataSize The size of data - * - * @return A handle to the created tensor - */ -SNPE_API -Snpe_ITensor_Handle_t Snpe_Util_CreateITensorDataSize(Snpe_TensorShape_Handle_t shapeHandle, const uint8_t* data, size_t dataSize); - -/** - * Create a new ITensor with specific data. - * (i.e. the tensor data is contiguous in memory). This tensor is - * primarily used to create a tensor where tensor size can't be - * computed directly from dimension. One such example is - * NV21-formatted image, or any YUV formatted image - * - * @param[in] shapeHandle The dimensions for the tensor in which the last - * element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying, etc. - * - * @param[in] data The actual data with which the Tensor object is filled. - * - * @param[in] dataSize The size of data - * - * @return the created tensor - */ -SNPE_API -Snpe_ITensor_Handle_t Snpe_Util_CreateITensor_NV21(Snpe_TensorShape_Handle_t shapeHandle, unsigned char *data, size_t dataSize); - -/** - * Indicates whether the supplied runtime is available on the - * current platform. - * - * @param[in] runtime The target runtime to check. - * - * @return Boolean: Non-zero if the supplied runtime is available; 0 otherwise - * - */ -SNPE_API -int Snpe_Util_IsRuntimeAvailable(Snpe_Runtime_t runtime); - -/** - * Indicates whether the supplied runtime is available on the - * current platform. - * - * @param[in] runtime The target runtime to check. - * - * @param[in] runtimeCheckOption Extent to perform runtime available check. - * - * @return Boolean: Non-zero if the supplied runtime is available; 0 otherwise - * - */ -SNPE_API -int Snpe_Util_IsRuntimeAvailableCheckOption(Snpe_Runtime_t runtime, Snpe_RuntimeCheckOption_t runtimeCheckOption); - - -/** - * Gets the version of the SNPE library. - * - * @return Version of the SNPE library. - * - */ -SNPE_API -Snpe_DlVersion_Handle_t Snpe_Util_GetLibraryVersion(); - -/** - * Set the SNPE storage location for all SNPE instances in this - * process. Note that this may only be called once, and if so - * must be called before creating any SNPE instances. - * - * @param[in] storagePath Absolute path to a directory which SNPE may - * use for caching and other storage purposes. 
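A minimal sketch of creating an ITensor with the utility API above. The Snpe_TensorShape_* helpers are assumed to come from DlSystem/TensorShape.h (included by SNPEUtil.h but not shown in this diff), so their exact names here are illustrative.

```cpp
#include "SNPE/SNPEUtil.h"
#include "DlSystem/TensorShape.h"

// Create an uninitialized NHWC float32 input tensor of shape 1x128x128x3.
// ITensors assume float32 elements, so SNPE allocates 1*128*128*3*4 bytes, tightly packed.
static Snpe_ITensor_Handle_t makeInputTensor(void)
{
    const size_t dims[] = {1, 128, 128, 3};
    Snpe_TensorShape_Handle_t shape =
        Snpe_TensorShape_CreateDimsSize(dims, 4);   // assumed TensorShape helper

    Snpe_ITensor_Handle_t tensor = Snpe_Util_CreateITensor(shape);

    Snpe_TensorShape_Delete(shape);                 // assumed TensorShape helper
    return tensor;                                  // caller fills the data and later deletes the tensor
}
```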
- * - * @return Boolean: Non-zero if the supplied path was succesfully set as - * the SNPE storage location, 0 otherwise. - * - */ -SNPE_API -int Snpe_Util_SetSNPEStorageLocation(const char* storagePath); - -/** - * @brief Register a user-defined op package with SNPE. - * - * @param[in] regLibraryPath Path to the registration library - * that allows clients to register a set of operations that are - * part of the package, and share op info with SNPE - * - * @return Boolean: Non-zero if successful, 0 otherwise. - */ -SNPE_API -int Snpe_Util_AddOpPackage(const char* regLibraryPath ); - -/** - * Indicates whether the OpenGL and OpenCL interoperability is supported - * on GPU platform. - * - * @return Boolean: Non-zero if the OpenGL and OpenCl interop is supported; 0 otherwise - * - */ -SNPE_API -int Snpe_Util_IsGLCLInteropSupported(); - -/** - * @return A string description of the last error - */ -SNPE_API -const char* Snpe_Util_GetLastError(); - -/** - * Initializes logging with the specified log level. - * initializeLogging with level, is used on Android platforms - * and after successful initialization, SNPE - * logs are printed in android logcat logs. - * - * It is recommended to initializeLogging before creating any - * SNPE instances, in order to capture information related to - * core initialization. If this is called again after first - * time initialization, subsequent calls are ignored. - * Also, Logging can be re-initialized after a call to - * terminateLogging API by calling initializeLogging again. - * - * A typical usage of Logging life cycle can be - * initializeLogging() - * any other SNPE API like isRuntimeAvailable() - * * setLogLevel() - optional - can be called anytime - * between initializeLogging & terminateLogging - * SNPE instance creation, inference, destroy - * terminateLogging(). - * - * Please note, enabling logging can have performance impact. - * - * @param[in] level Log level (LOG_INFO, LOG_WARN, etc.). - * - * @return Boolean: non-zero if successful, 0 otherwise. - */ -SNPE_API -int Snpe_Util_InitializeLogging(Snpe_LogLevel_t level); - -/** - * Initializes logging with the specified log level and log path. - * initializeLogging with level & log path, is used on non Android - * platforms and after successful initialization, SNPE - * logs are printed in std output & into log files created in the - * log path. - * - * It is recommended to initializeLogging before creating any - * SNPE instances, in order to capture information related to - * core initialization. If this is called again after first - * time initialization, subsequent calls are ignored. - * Also, Logging can be re-initialized after a call to - * terminateLogging API by calling initializeLogging again. - * - * A typical usage of Logging life cycle can be - * initializeLogging() - * any other SNPE API like isRuntimeAvailable() - * * setLogLevel() - optional - can be called anytime - * between initializeLogging & terminateLogging - * SNPE instance creation, inference, destroy - * terminateLogging() - * - * Please note, enabling logging can have performance impact - * - * @param[in] level Log level (LOG_INFO, LOG_WARN, etc.). - * - * @param[in] logPath of directory to store logs. - * If path is empty, the default path is "./Log". - * For android, the log path is ignored. - * - * @return Boolean: non-zero if successful, 0 otherwise. - */ -SNPE_API -int Snpe_Util_InitializeLoggingPath(Snpe_LogLevel_t level, const char* logPath); - -/** - * Updates the current logging level with the specified level. 
- * setLogLevel is optional, called anytime after initializeLogging - * and before terminateLogging, to update the log level set. - * Log levels can be updated multiple times by calling setLogLevel - * A call to setLogLevel() is ignored if it is made before - * initializeLogging() or after terminateLogging() - * - * @param[in] level Log level (LOG_INFO, LOG_WARN, etc.). - * - * @return Boolean: non-zero if successful, 0 otherwise. - */ -SNPE_API -int Snpe_Util_SetLogLevel(Snpe_LogLevel_t level); - -/** - * Terminates logging. - * - * It is recommended to terminateLogging after initializeLogging - * in order to disable logging information. - * If this is called before initialization or after first time termination, - * calls are ignored. - * - * @warning Snpe_Util_TerminateLogging() must not be called while another thread is executing. - * In a multi-threaded use case, the individual threads must have a cooperative life cycle - * management strategy for the logger. - * - * @return Boolean: non-zero if successful, 0 otherwise. - */ -SNPE_API -int Snpe_Util_TerminateLogging(); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_UTIL_H_ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/UserBufferList.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/UserBufferList.h deleted file mode 100644 index e6a42ddb..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/UserBufferList.h +++ /dev/null @@ -1,77 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022,2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
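The logging lifecycle spelled out above (initialize, optionally adjust the level, terminate) looks like this in practice; a minimal sketch using only the calls declared in SNPEUtil.h:

```cpp
#include "SNPE/SNPEUtil.h"

// Bracket a batch of SNPE work with logger initialization and teardown.
static void runWithLogging(Snpe_LogLevel_t level, const char* logDir)
{
    // On non-Android platforms logs go to stdout and to files under logDir ("./Log" if empty);
    // on Android the path is ignored and logs go to logcat.
    if (!Snpe_Util_InitializeLoggingPath(level, logDir)) {
        return;   // logging unavailable; SNPE itself still works, just unlogged
    }

    // ... create SNPE instances, run inference ...

    Snpe_Util_SetLogLevel(level);    // may be called any time between initialize and terminate

    // Must not race with other threads still executing SNPE (see the warning above).
    Snpe_Util_TerminateLogging();
}
```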
-// -//============================================================================== - -#ifndef _SNPE_USER_BUFFER_LIST_H_ -#define _SNPE_USER_BUFFER_LIST_H_ - - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" - -#include "DlSystem/UserBufferMap.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef void* Snpe_UserBufferList_Handle_t; - -SNPE_API -Snpe_UserBufferList_Handle_t Snpe_UserBufferList_Create(); - -SNPE_API -Snpe_UserBufferList_Handle_t Snpe_UserBufferList_CreateCopy(Snpe_UserBufferList_Handle_t userBufferListHandle); - -SNPE_API -Snpe_UserBufferList_Handle_t Snpe_UserBufferList_CreateSize(size_t size); - -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferList_Delete(Snpe_UserBufferList_Handle_t userBufferListHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferList_PushBack(Snpe_UserBufferList_Handle_t userBufferListHandle, - Snpe_UserBufferMap_Handle_t userBufferMapHandle); - -SNPE_API -Snpe_UserBufferMap_Handle_t Snpe_UserBufferList_At_Ref(Snpe_UserBufferList_Handle_t userBufferListHandle, - size_t idx); - -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferList_Assign(Snpe_UserBufferList_Handle_t srcUserBufferListHandle, - Snpe_UserBufferList_Handle_t dstUserBufferListHandle); - -SNPE_API -size_t Snpe_UserBufferList_Size(Snpe_UserBufferList_Handle_t userBufferListHandle); - -SNPE_API -size_t Snpe_UserBufferList_Capacity(Snpe_UserBufferList_Handle_t userBufferListHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferList_Clear(Snpe_UserBufferList_Handle_t userBufferListHandle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_USER_BUFFER_LIST_H_ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/UserBufferList.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/UserBufferList.hpp deleted file mode 100644 index fec82dbc..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SNPE/UserBufferList.hpp +++ /dev/null @@ -1,76 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "DlSystem/UserBufferMap.hpp" - -#include "SNPE/UserBufferList.h" - - -namespace PSNPE { - -class UserBufferList : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserBufferList_Delete}; - -public: - UserBufferList() - : BaseType(Snpe_UserBufferList_Create()) - { } - explicit UserBufferList(size_t size) - : BaseType(Snpe_UserBufferList_CreateSize(size)) - { } - - UserBufferList(const UserBufferList& other) - : BaseType(Snpe_UserBufferList_CreateCopy(other.handle())) - { } - UserBufferList(UserBufferList&& other) noexcept - : BaseType(std::move(other)) - { } - - UserBufferList& operator=(const UserBufferList& other){ - if(this != &other){ - Snpe_UserBufferList_Assign(other.handle(), handle()); - } - return *this; - } - UserBufferList& operator=(UserBufferList&& other){ - return moveAssign(std::move(other)); - } - - - void push_back(const DlSystem::UserBufferMap& userBufferMap){ - Snpe_UserBufferList_PushBack(handle(), getHandle(userBufferMap)); - } - - DlSystem::UserBufferMap& operator[](size_t idx){ - return *makeReference(Snpe_UserBufferList_At_Ref(handle(), idx)); - } - - size_t size() const noexcept{ - return Snpe_UserBufferList_Size(handle()); - } - - size_t capacity() const noexcept{ - return Snpe_UserBufferList_Capacity(handle()); - } - - void clear() noexcept{ - Snpe_UserBufferList_Clear(handle()); - } -}; - - -} // ns PSNPE - -ALIAS_IN_ZDL_NAMESPACE(PSNPE, UserBufferList) diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SnpeUdo/UdoBase.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SnpeUdo/UdoBase.h deleted file mode 100644 index f7af604a..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SnpeUdo/UdoBase.h +++ /dev/null @@ -1,546 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2019-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef SNPE_UDO_BASE_H -#define SNPE_UDO_BASE_H - -#include - -// Provide values to use for API version. -#define API_VERSION_MAJOR 1 -#define API_VERSION_MINOR 6 -#define API_VERSION_TEENY 0 - -/** @addtogroup c_plus_plus_apis C++ -@{ */ - -// Defines a bitmask of enum values. -typedef uint32_t SnpeUdo_Bitmask_t; -typedef SnpeUdo_Bitmask_t Udo_Bitmask_t; - -// A string of characters, rather than an array of bytes. -// Assumed to be UTF-8. -typedef char* SnpeUdo_String_t; -typedef SnpeUdo_String_t Udo_String_t; - -// The maximum allowable length of a SnpeUdo_String_t in bytes, -// including null terminator. SNPE will truncate strings longer -// than this. 
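For the UserBufferList wrapper above, a minimal sketch of batching one UserBufferMap per inference; zdl::DlSystem::UserBufferMap comes from the included DlSystem/UserBufferMap.hpp, which is outside this diff.

```cpp
#include "SNPE/UserBufferList.hpp"
#include <vector>

// Collect per-frame input buffer maps into a UserBufferList for batched execution.
static zdl::PSNPE::UserBufferList gatherInputs(const std::vector<zdl::DlSystem::UserBufferMap>& perFrameInputs)
{
    zdl::PSNPE::UserBufferList list;             // UserBufferList(size_t) can pre-size it instead
    for (const auto& bufferMap : perFrameInputs) {
        list.push_back(bufferMap);               // copies the map into the list
    }
    return list;
}
```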
-#define SNPE_UDO_MAX_STRING_SIZE 1024 - -/** - * An enum which holds the various error types. - * The error types are divided to classes : - * 0 - 99 : generic errors - * 100 - 200 : errors related to configuration - * - */ -typedef enum -{ - /// No Error - SNPE_UDO_NO_ERROR = 0, UDO_NO_ERROR = 0, - /// Unsupported value for core type - SNPE_UDO_WRONG_CORE = 1, UDO_WRONG_CORE = 1, - /// Invalid attribute/argument passed into UDO API - SNPE_UDO_INVALID_ARGUMENT = 2, UDO_INVALID_ARGUMENT = 2, - /// Unsupported feature error - SNPE_UDO_UNSUPPORTED_FEATURE = 3, UDO_UNSUPPORTED_FEATURE = 3, - /// Error relating to memory allocation - SNPE_UDO_MEM_ALLOC_ERROR = 4, UDO_MEM_ALLOC_ERROR = 4, - /* Configuration Specific errors */ - /// No op with given attributes available in library - SNPE_UDO_WRONG_OPERATION = 100, UDO_WRONG_OPERATION = 100, - /// Unsupported value for core type in UDO configuration - SNPE_UDO_WRONG_CORE_TYPE = 101, UDO_WRONG_CORE_TYPE = 101, - /// Wrong number of params in UDO definition - SNPE_UDO_WRONG_NUM_OF_PARAMS = 102, UDO_WRONG_NUM_OF_PARAMS = 102, - /// Wrong number of dimensions for tensor(s) in UDO definition - SNPE_UDO_WRONG_NUM_OF_DIMENSIONS = 103, UDO_WRONG_NUM_OF_DIMENSIONS = 103, - /// Wrong number of input tensors in UDO definition - SNPE_UDO_WRONG_NUM_OF_INPUTS = 104, UDO_WRONG_NUM_OF_INPUTS = 104, - /// Wrong number of output tensors in UDO definition - SNPE_UDO_WRONG_NUM_OF_OUTPUTS = 105, UDO_WRONG_NUM_OF_OUTPUTS = 105, - SNPE_UDO_PROGRAM_CACHE_NOT_FOUND = 106, UDO_PROGRAM_CACHE_NOT_FOUND = 106, - SNPE_UDO_UNKNOWN_ERROR = 0xFFFFFFFF, UDO_UNKNOWN_ERROR = 0xFFFFFFFF -} SnpeUdo_ErrorType_t; - -typedef SnpeUdo_ErrorType_t Udo_ErrorType_t; - -/** - * An enum which holds the various data types. - * Designed to be used as single values or combined into a bitfield parameter - * (0x1, 0x2, 0x4, etc) - * \n FIXED_XX types are targeted for data in tensors. - * \n UINT / INT types are targeted for scalar params - */ -typedef enum -{ - /// data type: 16-bit floating point - SNPE_UDO_DATATYPE_FLOAT_16 = 0x01, UDO_DATATYPE_FLOAT_16 = 0x01, - /// data type: 32-bit floating point - SNPE_UDO_DATATYPE_FLOAT_32 = 0x02, UDO_DATATYPE_FLOAT_32 = 0x02, - /// data type: 4-bit fixed point - SNPE_UDO_DATATYPE_FIXED_4 = 0x04, UDO_DATATYPE_FIXED_4 = 0x04, - /// data type: 8-bit fixed point - SNPE_UDO_DATATYPE_FIXED_8 = 0x08, UDO_DATATYPE_FIXED_8 = 0x08, - /// data type: 16-bit fixed point - SNPE_UDO_DATATYPE_FIXED_16 = 0x10, UDO_DATATYPE_FIXED_16 = 0x10, - /// data type: 32-bit fixed point - SNPE_UDO_DATATYPE_FIXED_32 = 0x20, UDO_DATATYPE_FIXED_32 = 0x20, - /// data type: 8-bit unsigned integer - SNPE_UDO_DATATYPE_UINT_8 = 0x100, UDO_DATATYPE_UINT_8 = 0x100, - /// data type: 16-bit unsigned integer - SNPE_UDO_DATATYPE_UINT_16 = 0x200, UDO_DATATYPE_UINT_16 = 0x200, - /// data type: 32-bit unsigned integer - SNPE_UDO_DATATYPE_UINT_32 = 0x400, UDO_DATATYPE_UINT_32 = 0x400, - /// data type: 8-bit signed integer - SNPE_UDO_DATATYPE_INT_8 = 0x1000, UDO_DATATYPE_INT_8 = 0x1000, - /// data type: 16-bit signed integer - SNPE_UDO_DATATYPE_INT_16 = 0x2000, UDO_DATATYPE_INT_16 = 0x2000, - /// data type: 32-bit signed integer - SNPE_UDO_DATATYPE_INT_32 = 0x4000, UDO_DATATYPE_INT_32 = 0x4000, - SNPE_UDO_DATATYPE_LAST = 0xFFFFFFFF, UDO_DATATYPE_LAST = 0xFFFFFFFF -} SnpeUdo_DataType_t; - -typedef SnpeUdo_DataType_t Udo_DataType_t; - -/** - * An enum which holds the various layouts. 
- * Designed to be used as single values or combined into a bitfield parameter - * (0x1, 0x2, 0x4, etc) - */ -typedef enum -{ - /// data layout (4D): NHWC (batch-height-width-channel) - SNPE_UDO_LAYOUT_NHWC = 0x01, UDO_LAYOUT_NHWC = 0x01, - /// data layout (4D): NCHW (batch-channel-height-width) - SNPE_UDO_LAYOUT_NCHW = 0x02, UDO_LAYOUT_NCHW = 0x02, - /// data layout (5D): NDHWC (batch-depth-height-width-channel) - SNPE_UDO_LAYOUT_NDHWC = 0x04, UDO_LAYOUT_NDHWC = 0x04, - SNPE_UDO_LAYOUT_GPU_OPTIMAL1 = 0x08, UDO_LAYOUT_GPU_OPTIMAL1 = 0x08, - SNPE_UDO_LAYOUT_GPU_OPTIMAL2 = 0x10, UDO_LAYOUT_GPU_OPTIMAL2 = 0x10, - SNPE_UDO_LAYOUT_DSP_OPTIMAL1 = 0x11, UDO_LAYOUT_DSP_OPTIMAL1 = 0x11, - SNPE_UDO_LAYOUT_DSP_OPTIMAL2 = 0x12, UDO_LAYOUT_DSP_OPTIMAL2 = 0x12, - // Indicates no data will be allocated for this tensor. - // Used to specify optional inputs/outputs positionally. - SNPE_UDO_LAYOUT_NULL = 0x13, UDO_LAYOUT_NULL = 0x13, - SNPE_UDO_LAYOUT_LAST = 0xFFFFFFFF, UDO_LAYOUT_LAST = 0xFFFFFFFF -} SnpeUdo_TensorLayout_t; - -typedef SnpeUdo_TensorLayout_t Udo_TensorLayout_t; - -/** - * An enum which holds the UDO library Core type . - * Designed to be used as single values or combined into a bitfield parameter - * (0x1, 0x2, 0x4, etc) - */ -typedef enum -{ - /// Library target IP Core is undefined - SNPE_UDO_CORETYPE_UNDEFINED = 0x00, UDO_CORETYPE_UNDEFINED = 0x00, - /// Library target IP Core is CPU - SNPE_UDO_CORETYPE_CPU = 0x01, UDO_CORETYPE_CPU = 0x01, - /// Library target IP Core is GPU - SNPE_UDO_CORETYPE_GPU = 0x02, UDO_CORETYPE_GPU = 0x02, - /// Library target IP Core is DSP - SNPE_UDO_CORETYPE_DSP = 0x04, UDO_CORETYPE_DSP = 0x04, - SNPE_UDO_CORETYPE_LAST = 0xFFFFFFFF, UDO_CORETYPE_LAST = 0xFFFFFFFF -} SnpeUdo_CoreType_t; - -typedef SnpeUdo_CoreType_t Udo_CoreType_t; - -/** - * An enum to specify the parameter type : Scalar or Tensor - */ -typedef enum -{ - /// UDO static param type: scalar - SNPE_UDO_PARAMTYPE_SCALAR = 0x00, UDO_PARAMTYPE_SCALAR = 0x00, - /// UDO static param type: string - SNPE_UDO_PARAMTYPE_STRING = 0x01, UDO_PARAMTYPE_STRING = 0x01, - /// UDO static param type: tensor - SNPE_UDO_PARAMTYPE_TENSOR = 0x02, UDO_PARAMTYPE_TENSOR = 0x02, - SNPE_UDO_PARAMTYPE_LAST = 0xFFFFFFFF, UDO_PARAMTYPE_LAST = 0xFFFFFFFF -} SnpeUdo_ParamType_t; - -typedef SnpeUdo_ParamType_t Udo_ParamType_t; - -/** - * An enum to specify quantization type - */ -typedef enum -{ - /// Tensor Quantization type: NONE. Signifies unquantized tensor data - SNPE_UDO_QUANTIZATION_NONE = 0x00, UDO_QUANTIZATION_NONE = 0x00, - /// Tensor Quantization type: Tensorflow-style - SNPE_UDO_QUANTIZATION_TF = 0x01, UDO_QUANTIZATION_TF = 0x01, - SNPE_UDO_QUANTIZATION_QMN = 0x02, UDO_QUANTIZATION_QMN = 0x02, - SNPE_UDO_QUANTIZATION_LAST = 0xFFFFFFFF, UDO_QUANTIZATION_LAST = 0xFFFFFFFF -} SnpeUdo_QuantizationType_t; - -typedef SnpeUdo_QuantizationType_t Udo_QuantizationType_t; - -/** - * @brief A struct which is used to provide a version number using 3 values : major, minor, teeny - * - */ -typedef struct -{ - /// version field: major - for backward-incompatible changes - uint32_t major; - /// version field: minor - for backward-compatible feature updates - uint32_t minor; - /// version field: teeny - for minor bug-fixes and clean-up - uint32_t teeny; -} SnpeUdo_Version_t; - -typedef SnpeUdo_Version_t Udo_Version_t; - -/** - * @brief A struct returned from version query, contains the Library version and API version - * - */ -typedef struct -{ - /// Version of UDO library. 
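Because the core-type values above are distinct powers of two, a single SnpeUdo_Bitmask_t can advertise several cores at once and be queried with a plain bitwise AND. A brief illustration, not SDK code:

bool opSupportsDsp(SnpeUdo_Bitmask_t supportedCores)
{
    // e.g. supportedCores = SNPE_UDO_CORETYPE_CPU | SNPE_UDO_CORETYPE_DSP (0x01 | 0x04)
    return (supportedCores & SNPE_UDO_CORETYPE_DSP) != 0;
}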
Controlled by users - SnpeUdo_Version_t libVersion; - /// Version of SNPE UDO API used in compiling library. Determined by SNPE - SnpeUdo_Version_t apiVersion; -} SnpeUdo_LibVersion_t; - -/** - * @brief A struct returned from version query, contains the package version - * - */ -typedef struct -{ - /// Version of UDO API used in package. - Udo_Version_t apiVersion; -} Udo_PkgVersion_t; - -/** - * @brief A union to hold the value of a generic type. Allows defining a parameter struct - * in a generic way, with a "value" location that holds the data regardless of the type. - * - */ -typedef union -{ - /// value type: float - float floatValue; - /// value type: unsigned 32-bit integer - uint32_t uint32Value; - /// value type: signed 32-bit integer - int32_t int32Value; - /// value type: unsigned 16-bit integer - uint16_t uint16Value; - /// value type: signed 16-bit integer - int16_t int16Value; - /// value type: unsigned 8-bit integer - uint8_t uint8Value; - /// value type: signed 8-bit integer - int8_t int8Value; -} SnpeUdo_Value_t; - -typedef SnpeUdo_Value_t Udo_Value_t; - -/** - * @brief A struct which defines a scalar parameter : name, data type, and union of values - * - */ -typedef struct -{ - /// The parameter data type : float, int, etc. - SnpeUdo_DataType_t dataType; - /// a union of specified type which holds the data - SnpeUdo_Value_t dataValue; -} SnpeUdo_ScalarParam_t; - -typedef SnpeUdo_ScalarParam_t Udo_ScalarParam_t; - -/** - * @brief A struct which defines the quantization parameters in case of Tensorflow style quantization - * - */ -typedef struct -{ - /// minimum value of the quantization range of data - float minValue; - /// maximum value of the quantization range of data - float maxValue; -} SnpeUdo_TFQuantize_t; - -typedef SnpeUdo_TFQuantize_t Udo_TFQuantize_t; - -/** - * @brief A struct which defines the quantization type, and union of supported quantization structs - * - */ -typedef struct -{ - /// quantization type (only TF-style currently supported) - SnpeUdo_QuantizationType_t quantizeType; - union - { - /// TF-style min-max quantization ranges - SnpeUdo_TFQuantize_t TFParams; - }; -} SnpeUdo_QuantizeParams_t; - -typedef SnpeUdo_QuantizeParams_t Udo_QuantizeParams_t; - -/** - * @brief A struct which defines the datatype associated with a specified core-type - * This should be used to denote the datatypes for a single tensor info, depending - * on the intended execution core. - * - */ -typedef struct -{ - /// The IP Core - SnpeUdo_CoreType_t coreType; - /// The associated datatype for this coreType - SnpeUdo_DataType_t dataType; -} SnpeUdo_PerCoreDatatype_t; - -typedef SnpeUdo_PerCoreDatatype_t Udo_PerCoreDatatype_t; - -/** - * @brief A struct which defines a tensor parameter : name, data type, layout, quantization, more. - * Also holds a pointer to the tensor data. - * - */ -typedef struct -{ - /// The maximum allowable dimensions of the tensor. The memory held in - /// _tensorData_ is guaranteed to be large enough for this. - uint32_t* maxDimensions; - /// The current dimensions of the tensor. An operation may modify the current - /// dimensions of its output, to indicate cases where the output has been - /// "resized". - /// Note that for static parameters, the current and max dimensions must - /// match. - uint32_t* currDimensions; - /// Quantization params applicable to the tensor. Currently only supports - /// Tensorflow quantization style. - SnpeUdo_QuantizeParams_t quantizeParams; - /// Number of dimensions to the tensor: 3D, 4D, etc. 
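The TF-style struct above carries only a (minValue, maxValue) range. One common convention for deriving an 8-bit scale and zero point from such a range is sketched below; the exact rounding used inside SNPE is not specified in this header, so treat the helper as illustrative only.

#include <algorithm>
#include <cmath>

struct QuantEncoding { float scale; int zeroPoint; };

QuantEncoding toUint8Encoding(const SnpeUdo_TFQuantize_t& q)
{
    const float scale = (q.maxValue - q.minValue) / 255.0f;   // step size per quantized level
    if (scale == 0.0f) return {1.0f, 0};                      // degenerate range
    int zeroPoint = static_cast<int>(std::lround(-q.minValue / scale)); // level mapping to 0.0
    zeroPoint = std::min(255, std::max(0, zeroPoint));        // clamp to the representable range
    return {scale, zeroPoint};
}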
- uint32_t tensorRank; - /// The parameter data type: float, int, etc. - SnpeUdo_DataType_t dataType; - /// The tensor layout type: NCHW, NHWC, etc. - SnpeUdo_TensorLayout_t layout; - /// Opaque pointer to tensor data. User may be required to re-interpret the pointer - /// based on core-specific definitions. - void* tensorData; -} SnpeUdo_TensorParam_t; - -typedef SnpeUdo_TensorParam_t Udo_TensorParam_t; - -/** - * @brief A struct which defines tensor information for activation tensors only - * - * It describes an activation tensor object using its name, the intended layout and the datatype - * it will take depending on the intended runtime core. The repeated field indicates that - * that the tensor info describes several input/output activation tensors, which all share the - * aforementioned properties. - */ -typedef struct -{ - /// The tensor name - SnpeUdo_String_t tensorName; - /// The tensor layout type: NCHW, NHWC, etc. - SnpeUdo_TensorLayout_t layout; - /// The per core datatype: {SNPE_UDO_DATATYPE, SNPE_UDO_CORE_TYPE} - SnpeUdo_PerCoreDatatype_t* perCoreDatatype; - /// A boolean field indicating that this tensorinfo will be repeated e.x for ops such as Concat or Split - bool repeated; - /// A boolean field indicating whether input is static or not. - bool isStatic; -} SnpeUdo_TensorInfo_t; - -typedef SnpeUdo_TensorInfo_t Udo_TensorInfo_t; - -/** - * @brief struct which defines a UDO parameter - a union of scalar, tensor and string parameters - * - */ -typedef struct -{ - /// Type is scalar or tensor - SnpeUdo_ParamType_t paramType; - /// The param name, for example : "offset", "activation_type" - SnpeUdo_String_t paramName; - union - { - /// scalar param value - SnpeUdo_ScalarParam_t scalarParam; - /// tensor param value - SnpeUdo_TensorParam_t tensorParam; - /// string param value - SnpeUdo_String_t stringParam; - }; -} SnpeUdo_Param_t; - -typedef SnpeUdo_Param_t Udo_Param_t; - -/** - * @brief A struct which defines Operation information which is specific for IP core (CPU, GPU, DSP ...) - * - */ -typedef struct -{ - /// The IP Core - SnpeUdo_CoreType_t udoCoreType; - /// Bitmask, defines supported internal calculation types (like FLOAT_32, etc) - /// Based on SnpeUdo_DataType - SnpeUdo_Bitmask_t operationCalculationTypes; -} SnpeUdo_OpCoreInfo_t; - -typedef SnpeUdo_OpCoreInfo_t Udo_OpCoreInfo_t; - -/** - * @brief A struct which defines the common and core-specific Operation information - * - */ -typedef struct -{ - /// Operation type - SnpeUdo_String_t operationType; - /// A bitmask describing which IP Cores (CPU, GPU, DSP ...) support this operation - /// Translated based on SnpeUdo_CoreType - SnpeUdo_Bitmask_t supportedByCores; - /// Number of static parameters defined by the op - uint32_t numOfStaticParams; - /// Array of static parameters. 
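A UDO implementation receives these structs as plain C data. The sketch below (illustrative, not SDK code) shows how the tagged union in SnpeUdo_Param_t and the rank/dimension fields of SnpeUdo_TensorParam_t fit together; the helper names are hypothetical.

#include <cstdint>
#include <cstdio>

static uint64_t elementCount(const SnpeUdo_TensorParam_t& t)
{
    uint64_t count = 1;
    for (uint32_t d = 0; d < t.tensorRank; ++d)
        count *= t.currDimensions[d];       // product of current dimensions
    return count;
}

static void dumpStaticParams(const SnpeUdo_Param_t* params, uint32_t numParams)
{
    for (uint32_t i = 0; i < numParams; ++i) {
        const SnpeUdo_Param_t& p = params[i];
        switch (p.paramType) {
        case SNPE_UDO_PARAMTYPE_SCALAR:
            std::printf("%s: scalar (dataType 0x%x)\n", p.paramName, (unsigned)p.scalarParam.dataType);
            break;
        case SNPE_UDO_PARAMTYPE_STRING:
            std::printf("%s: string \"%s\"\n", p.paramName, p.stringParam);
            break;
        case SNPE_UDO_PARAMTYPE_TENSOR:
            std::printf("%s: tensor with %llu elements\n", p.paramName,
                        (unsigned long long)elementCount(p.tensorParam));
            break;
        default:
            break;
        }
    }
}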
Can be scalar or tensor params - SnpeUdo_Param_t* staticParams; - /// Number of input tensors this op receives - uint32_t numOfInputs; - /// Array of input tensor names to this operation - SnpeUdo_String_t* inputNames; - /// Number of output tensors this op receives - uint32_t numOfOutputs; - /// Array of output tensor names to this operation - SnpeUdo_String_t* outputNames; - /// Number of cores that the op can execute on - uint32_t numOfCoreInfo; - /// Array of per-core information entries - SnpeUdo_OpCoreInfo_t* opPerCoreInfo; - /// Array of input tensor infos for this operation - SnpeUdo_TensorInfo_t* inputInfos; - /// Array of output tensor infos for this operation - SnpeUdo_TensorInfo_t* outputInfos; -} SnpeUdo_OperationInfo_t; - -typedef SnpeUdo_OperationInfo_t Udo_OperationInfo_t; - -/** - * @brief A struct which provides the implementation library info : type, name - * - */ -typedef struct -{ - /// Defines the IP Core that this implementation library is targeting - SnpeUdo_CoreType_t udoCoreType; - /// library name. will be looked at in the standard library path - SnpeUdo_String_t libraryName; -} SnpeUdo_LibraryInfo_t; - -typedef SnpeUdo_LibraryInfo_t Udo_LibraryInfo_t; - -/** - * @brief A struct returned by the registration library and contains information on the UDO package : - * name, operations, libraries, etc. - * - */ -typedef struct -{ - /// A string containing the package name - SnpeUdo_String_t packageName; - /// A bitmask describing supported IP cores (CPU, GPU, DSP ...) - /// Translated based on SnpeUdo_CoreType - SnpeUdo_Bitmask_t supportedCoreTypes; - /// The number of implementation libraries in the package - uint32_t numOfImplementationLib; - /// Array of implementation libraries names/types - SnpeUdo_LibraryInfo_t* implementationLib; - /// A string containing all operation types separated by space - SnpeUdo_String_t operationsString; - /// Number of supported operations - uint32_t numOfOperations; - /// Array of Operation info structs. Each entry describes one - /// Operation (name, params, inputs, outputs) - SnpeUdo_OperationInfo_t* operationsInfo; -} SnpeUdo_RegInfo_t; - -typedef SnpeUdo_RegInfo_t Udo_RegInfo_t; - -/** -* @brief A struct returned by the implementation library and contains information on the -* specific library: name, IP Core, operations, etc. -* -*/ -typedef struct -{ - /// Defines the IP Core that this implementation library is targeting - SnpeUdo_CoreType_t udoCoreType; - /// A string containing the package name - SnpeUdo_String_t packageName; - /// A string containing all operation types separated by space - SnpeUdo_String_t operationsString; - /// Number of supported operations - uint32_t numOfOperations; -} SnpeUdo_ImpInfo_t; - -typedef SnpeUdo_ImpInfo_t Udo_ImpInfo_t; - -/** - * @brief This struct defines an operation. It is used for validation - * or creation of an operation. - * In case of using it for creation, the static params which are tensors - * contain pointers to the real data (weights, for example), and input/output - * tensors also include pointers to the buffers used. - */ -typedef struct -{ - /// The IP Core that the operation is defined for - CPU, GPU, DSP... - SnpeUdo_CoreType_t udoCoreType; - /// Operation type - SnpeUdo_String_t operationType; - /// The number of static parameters provided in the staticParams array. 
- /// this number has to match the number provided by the UDO Registration library information - uint32_t numOfStaticParams; - /// Array of static parameters - SnpeUdo_Param_t* staticParams; - /// The number of input parameters provided in inputs array. - /// this number has to match the number provided by the UDO Registration library information - uint32_t numOfInputs; - /// Array of input tensors, providing layout, data type, sizes, etc - /// When used to create an operation, also contains the initial location of the data - SnpeUdo_TensorParam_t* inputs; - /// The number of output parameters provided in inputs array. - /// this number has to match the number provided by the UDO Registration library information - uint32_t numOfOutputs; - /// Array of output tensors, providing layout, data type, sizes, etc - /// When used to create an operation, also contains the initial location of the data - SnpeUdo_TensorParam_t* outputs; -} SnpeUdo_OpDefinition_t; - -typedef SnpeUdo_OpDefinition_t Udo_OpDefinition_t; - -/** @} */ /* end_addtogroup c_plus_plus_apis C++ */ - -#endif //SNPE_UDO_BASE_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SnpeUdo/UdoReg.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SnpeUdo/UdoReg.h deleted file mode 100644 index 2166be59..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SnpeUdo/UdoReg.h +++ /dev/null @@ -1,117 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2019-2020 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef SNPE_UDO_REG_H -#define SNPE_UDO_REG_H - -#include "SnpeUdo/UdoShared.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** @addtogroup c_plus_plus_apis C++ -@{ */ - -/** - * @brief Initialize the shared library's data structures. Calling any other - * library function before this one will result in an error being returned. - * - * @return Error code - */ -SnpeUdo_ErrorType_t -SnpeUdo_initRegLibrary(void); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_InitRegLibraryFunction_t)(void); - -/** - * @brief A function to query the API version of the UDO registration library. - * The function populates a SnpeUdo_LibVersion_t struct, which contains a SnpeUdo_Version_t - * struct for API version and library version. - * - * @param[in, out] version A pointer to struct which contains major, minor, teeny information for - * library and api versions. - * - * @return Error code - */ -SnpeUdo_ErrorType_t -SnpeUdo_getRegLibraryVersion(SnpeUdo_LibVersion_t** version); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_getRegLibraryVersion_t)(SnpeUdo_LibVersion_t** version); - -/** - * @brief Release the shared library's data structures, and invalidate any - * handles returned by the library. The behavior of any outstanding - * asynchronous calls made to this library when this function is called - * are undefined. 
All library functions (except SnpeUdo_InitRegLibrary) will - * return an error after this function has been successfully called. - * - * It should be possible to call SnpeUdo_InitRegLibrary after calling this - * function, and re-initialize the library. - * - * @return Error code - */ -SnpeUdo_ErrorType_t -SnpeUdo_terminateRegLibrary(void); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_TerminateRegLibraryFunction_t)(void); - - -/** - * @brief A function to query the info on the UDO set. - * The function populates a structure which contains information about - * the package and operations contained in it. - * - * @param[in, out] registrationInfo A struct which contains information on the set of UDOs - * - * @return Error code - * - */ -SnpeUdo_ErrorType_t -SnpeUdo_getRegInfo(SnpeUdo_RegInfo_t** registrationInfo); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_GetRegInfoFunction_t)(SnpeUdo_RegInfo_t** registrationInfo); - -/** - * @brief A function to validate that a set of params is supported by an operation - * The function receives an operation definition struct, and returns if this configuration is - * supported (e.g. if an operation can be created using this configuration) - * - * @param[in] opDefinition A struct of SnpeUdo_OpDefinition type, containing the information needed to - * validate that an operation can be created with this configuration. - * - * @return Error code, indicating is the operation can be created on this set or not. - * - */ -SnpeUdo_ErrorType_t -SnpeUdo_validateOperation(SnpeUdo_OpDefinition_t* opDefinition); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_ValidateOperationFunction_t)(SnpeUdo_OpDefinition_t* opDefinition); - -/** @} */ /* end_addtogroup c_plus_plus_apis C++ */ - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif //SNPE_UDO_REG_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SnpeUdo/UdoShared.h b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SnpeUdo/UdoShared.h deleted file mode 100644 index 816a8a74..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/SnpeUdo/UdoShared.h +++ /dev/null @@ -1,57 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2019-2021 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef SNPE_UDO_SHARED_H -#define SNPE_UDO_SHARED_H - -#include "SnpeUdo/UdoBase.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** @addtogroup c_plus_plus_apis C++ -@{ */ - -/** - * @brief A function to return the various versions as they relate to the UDO - * The function returns a struct containing the the following: - * libVersion: the version of the implementation library compiled for the UDO. Set by user - * apiVersion: the version of the UDO API used in compiling the implementation library. 
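The function-pointer typedefs above exist so a host process can load a registration library at runtime. A hedged sketch of that flow using POSIX dlopen/dlsym follows; the library path comes from the caller, error handling is minimal, and the helper name is hypothetical.

#include <dlfcn.h>
#include <cstdio>

bool queryUdoPackage(const char* regLibPath)
{
    void* lib = dlopen(regLibPath, RTLD_NOW);
    if (!lib) return false;

    auto init      = reinterpret_cast<SnpeUdo_InitRegLibraryFunction_t>(dlsym(lib, "SnpeUdo_initRegLibrary"));
    auto getInfo   = reinterpret_cast<SnpeUdo_GetRegInfoFunction_t>(dlsym(lib, "SnpeUdo_getRegInfo"));
    auto terminate = reinterpret_cast<SnpeUdo_TerminateRegLibraryFunction_t>(dlsym(lib, "SnpeUdo_terminateRegLibrary"));
    if (!init || !getInfo || !terminate) { dlclose(lib); return false; }

    bool ok = false;
    if (init() == SNPE_UDO_NO_ERROR) {
        SnpeUdo_RegInfo_t* info = nullptr;
        if (getInfo(&info) == SNPE_UDO_NO_ERROR && info) {
            std::printf("UDO package %s exposes %u operation(s)\n",
                        info->packageName, info->numOfOperations);
            ok = true;
        }
        terminate();                        // invalidates handles returned by the library
    }
    dlclose(lib);
    return ok;
}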
- * Set by SNPE - * - * @param[in, out] version A pointer to Version struct of type SnpeUdo_LibVersion_t - * - * @return Error code - * - */ -SnpeUdo_ErrorType_t -SnpeUdo_getVersion (SnpeUdo_LibVersion_t** version); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_GetVersionFunction_t) (SnpeUdo_LibVersion_t** version); - -typedef SnpeUdo_GetVersionFunction_t Udo_GetVersionFunction_t; - -#ifdef __cplusplus -} // extern "C" -#endif - -/** @} */ /* end_addtogroup c_plus_plus_apis C++ */ - -#endif // SNPE_UDO_SHARED_H diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/Wrapper.hpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/Wrapper.hpp deleted file mode 100644 index 5f908f15..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inc/zdl/Wrapper.hpp +++ /dev/null @@ -1,449 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#define SNPE_WRAPPER_TYPES - -#include -#include -#include -#include - -#include - -#include - - -#include "DlSystem/DlError.h" - -// Put type aliases in zdl::namespace -#define ALIAS_IN_ZDL_NAMESPACE(ns, type) namespace zdl{ namespace ns { using type = ::ns::type; }} - - -// Uncomment to print info from the Wrapper base class -//#define WRAPPER_DEBUG_PRINTS - - -#ifdef WRAPPER_DEBUG_PRINTS - -#ifdef _MSC_VER -#define WRAPPER_FUNCTION_NAME __FUNCTION__ -#define WRAPPER_TRACE() std::cout << __LINE__ << ":\t" << WRAPPER_FUNCTION_NAME << std::endl -#define WRAPPER_ETRACE() std::cout << __LINE__ << ":\t" << WRAPPER_FUNCTION_NAME << std::endl -#else -#define WRAPPER_FUNCTION_NAME __PRETTY_FUNCTION__ -#define WRAPPER_TRACE() std::cout << "\e[33m" << __LINE__ << ":\t" << WRAPPER_FUNCTION_NAME << "\e[0m" << std::endl -#define WRAPPER_ETRACE() std::cout << "\e[31m" << __LINE__ << ":\t" << WRAPPER_FUNCTION_NAME << "\e[0m" << std::endl -#endif - -#include -#else -#define WRAPPER_TRACE() do{}while(0) -#define WRAPPER_ETRACE() do{}while(0) -#endif - - -namespace WrapperDetail { - - -template -using GetterFuncType = MemberType(*)(HandleType); - -template -using SetterFuncType = Snpe_ErrorCode_t(*)(HandleType, MemberType); - - - -// Allow Wrappers to have members that require CAPI calls for access -template GetterFunc, - SetterFuncType SetterFunc -> -class GenericMemberReference{ - OwnerType& owner; -public: - - - ~GenericMemberReference() = default; - GenericMemberReference() = delete; - - GenericMemberReference(const GenericMemberReference&) = delete; - GenericMemberReference(GenericMemberReference&&) noexcept = default; - - GenericMemberReference(OwnerType& owner) - : owner{owner} - { } - explicit GenericMemberReference(OwnerType& owner, MemberType member) - : owner{owner} - { - operator=(member); - } - GenericMemberReference& operator=(MemberType member){ - SetterFunc(owner.handle(), member); - return *this; - } - - operator MemberType() const{ - return GetterFunc(owner.handle()); - } - - GenericMemberReference& - operator=(const GenericMemberReference& other){ - return operator=(other.operator MemberType()); - } - - MemberType operator()() const{ - return operator MemberType(); - } - -}; - -// Allow Wrappers to have members that require CAPI calls for access -template GetterFunc -> -class 
GenericConstMemberReference{ - - OwnerType& owner; - -public: - ~GenericConstMemberReference() = default; - GenericConstMemberReference() = delete; - - GenericConstMemberReference(const GenericConstMemberReference&) = delete; - GenericConstMemberReference(GenericConstMemberReference&&) noexcept = default; - - GenericConstMemberReference(OwnerType& owner) - : owner{owner} - { } - - operator MemberType() const{ - return GetterFunc(owner.handle()); - } - - - template::value,int>::Type=0> - operator const char*() const{ - thread_local std::string tlss; - tlss = operator MemberType(); - return tlss.c_str(); - } - - MemberType operator()() const{ - return operator MemberType(); - } - -}; - - - -// Allows returning references to literals through the CAPI's _Get and _Set functions -template -using GetterIndexedFuncType = MemberType(*)(HandleType, IndexType); - -template -using SetterIndexedFuncType = Snpe_ErrorCode_t(*)(HandleType, IndexType, MemberType); - -template GetterFunc, - SetterIndexedFuncType SetterFunc -> -class MemberIndexedReference{ - OwnerType& owner; - IndexType idx; - -public: - MemberIndexedReference(OwnerType& owner, IndexType idx) - : owner{owner}, - idx{idx} - { } - MemberIndexedReference(const MemberIndexedReference&) noexcept = default; - MemberIndexedReference(MemberIndexedReference&&) noexcept = default; - - MemberIndexedReference& operator=(const MemberIndexedReference&) noexcept = default; - MemberIndexedReference& operator=(MemberIndexedReference&&) noexcept = default; - - MemberIndexedReference operator=(MemberType member){ - SetterFunc(owner.handle(), idx, member); - return *this; - } - - operator MemberType() const{ - return GetterFunc(owner.handle(), idx); - } - -}; - - - -// Allow moving ownership of handles -template -struct HandleMover { - Handle handle; - bool isReference; -}; - -template -HandleMover moveHandle(Handle handle, bool isReference = false){ - return {handle, isReference}; -} - -// Virtual base class to allow for WrapperStorage to hold pointers to any Wrapper type -class WrapperBase{ -public: - virtual ~WrapperBase() = default; -}; - -// Storage type for Wrappers. 
Will have a set if the CAPI type is capable of creating reference handles -template -struct WrapperStorage{ - Handle handle; - bool isReference; - constexpr WrapperStorage(Handle handle = {}, bool isReference = false) noexcept - : handle{handle}, - isReference{isReference} - { } -}; - -template -struct WrapperStorage{ - Handle handle; - bool isReference; - mutable std::set> referencedObjects; - WrapperStorage(Handle handle = {}, bool isReference = false) noexcept - : handle{handle}, - isReference{isReference} - { } -}; - -// Allow a handle to be unbound from a Wrapper -struct HandleReleaser{ - template - static typename WrapperType::HandleType release(WrapperType& wrapper){ - auto toret = wrapper.m_Storage.handle; - wrapper.m_Storage.handle = {}; - return toret; - } -}; - -} // ns WrapperDetail - - - -// The base class for all Wrappers around the CAPI -// NOTE: This Wrapper class leverages the Curiously Recurring Template Pattern (CRTP) -template -class Wrapper : public WrapperDetail::WrapperBase{ - friend struct WrapperDetail::HandleReleaser; - // Allow certain types to access getHandle() and handle() - template - friend class Wrapper; - - template, - WrapperDetail::SetterIndexedFuncType> - friend class WrapperDetail::MemberIndexedReference; - - template> - friend class WrapperDetail::GenericConstMemberReference; - - template, WrapperDetail::SetterFuncType> - friend class WrapperDetail::GenericMemberReference; - - - -protected: - using HandleType = Handle; - using BaseType = Wrapper; - using DeleteFunctionType = Snpe_ErrorCode_t(*)(Handle); - - using StorageType = WrapperDetail::WrapperStorage; - - - template Getter> - static WrapperValueType CastingGetter(HandleType handle){ - return static_cast(Getter(handle)); - } - template Setter> - static Snpe_ErrorCode_t CastingSetter(HandleType handle, WrapperValueType value){ - return Setter(handle,static_cast(value)); - } - - - template - struct WrapperMemberReference{ - Derived& owner; - - WrapperMemberReference(Derived& owner) - : owner{owner} - { } - WrapperMemberReference(Derived& owner, const RlType& other) - : owner{owner} - { - operator=(other); - } - - WrapperMemberReference& operator=(const RlType& rl){ - Setter(getHandle(owner), getHandle(rl)); - return *this; - } - - operator RlType&() { - return *owner.template makeReference( Getter(getHandle(owner)) ); - } - operator RlType&() const { - return *owner.template makeReference( Getter(getHandle(owner)) ); - } - - RlType& operator()(){ - return operator RlType&(); - } - const RlType& operator()() const{ - return operator RlType&(); - } - }; - - // For Factory/Singleton types, we need a way for the deleter to do nothing - static Snpe_ErrorCode_t NoOpDeleter(Handle){ - return SNPE_SUCCESS; - } - - // Simplify calls to WrapperDetail::moveHandle. 
Can be removed, but will require updating all calls to moveHandle - template - static WrapperDetail::HandleMover moveHandle(H handle, bool isReference = false){ - return WrapperDetail::moveHandle(handle, isReference); - } - - - HandleType& handle() noexcept{ return m_Storage.handle; } - const HandleType& handle() const noexcept{ return m_Storage.handle; } - - bool isReference() const noexcept{ return m_Storage.isReference; } - - void Dtor(){ - if(!isReference() && !handle()){ - if(Derived::DeleteFunction != NoOpDeleter){ - WRAPPER_ETRACE(); - } - } - if(!isReference() && handle()){ - WRAPPER_TRACE(); -#ifdef WRAPPER_DEBUG_PRINTS - auto status = Derived::DeleteFunction(handle()); - if(status != SNPE_SUCCESS){ - WRAPPER_ETRACE(); - } -#else - Derived::DeleteFunction(handle()); -#endif - - handle() = nullptr; - } else { - WRAPPER_TRACE(); - } - } - -protected: - - // Only compile these if the class creates references. This will save memory and time - template::type=0> - void addReference(WrapperBase* wrapperBase) const{ // accesses mutable member - if(!wrapperBase){ - WRAPPER_ETRACE(); - } - m_Storage.referencedObjects.insert(std::unique_ptr(wrapperBase)); - } - - template::type=0> - T* makeReference(H referenceHandle) const{ - if(!referenceHandle){ - WRAPPER_ETRACE(); - return nullptr; - } - auto refObj = new T(moveHandle(referenceHandle, true)); - addReference(refObj); - return refObj; - } - - // This will be used to access another Wrapped type's handles once handle() is made protected - template - static OtherHandle getHandle(const Wrapper& otherObject){ - return otherObject.handle(); - } - - template - static OtherHandle getHandle(const Wrapper* otherObject){ - if(!otherObject) return {}; - return getHandle(*otherObject); - } - - template - static std::unique_ptr makeUnique(H handle){ - if(!handle) return {}; - return std::unique_ptr(new T(moveHandle(handle))); - } - - -public: - ~Wrapper(){ - Dtor(); - } -protected: - // Only derived types should have access to this - Wrapper(HandleType handle, bool isReference = false) - : m_Storage{handle, isReference} - { WRAPPER_TRACE(); } - -public: - // We should never have an empty wrapper - Wrapper() = delete; - - // Move semantics are essentially free for all wrapper types - Wrapper(Wrapper&& other) noexcept - : m_Storage{std::move(other.m_Storage)} - { - WRAPPER_TRACE(); - other.handle() = nullptr; - } - Wrapper(const Wrapper&) = delete; - - - Wrapper& operator=(Wrapper&& other) noexcept{ - WRAPPER_TRACE(); - if(this != &other){ - std::swap(m_Storage, other.m_Storage); - other.Dtor(); - } - return *this; - } - Wrapper& operator=(const Wrapper&) = delete; - - - // Allow a CAPI handle to be taken over by a Wrapper - Wrapper(WrapperDetail::HandleMover handleMover) noexcept - : Wrapper(handleMover.handle, handleMover.isReference) - { WRAPPER_TRACE(); } - -protected: - // Simplify Derived's move assignment operators - Derived& moveAssign(Derived&& other) noexcept{ WRAPPER_TRACE(); - return static_cast(operator=(std::move(other))); - } - - -private: - StorageType m_Storage; - -}; diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inference.cpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inference.cpp deleted file mode 100644 index 46a5916d..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inference.cpp +++ /dev/null @@ -1,193 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// 
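The Wrapper<> base above relies on the Curiously Recurring Template Pattern: each derived wrapper supplies its C-API delete function, and the base deletes owned handles exactly once while leaving reference handles untouched. A stripped-down illustration of that idea only; all names here are hypothetical and it is not the SDK code.

#include <utility>

typedef void* Example_Handle_t;                               // stand-in C handle type
static int Example_Delete(Example_Handle_t) { return 0; }     // stand-in for a C-API delete function

template <typename Derived, typename Handle>
class TinyWrapper {
    Handle m_handle{};
    bool   m_owned{false};
protected:
    explicit TinyWrapper(Handle h, bool owned = true) : m_handle(h), m_owned(owned) {}
    Handle handle() const { return m_handle; }
public:
    TinyWrapper(const TinyWrapper&) = delete;
    TinyWrapper(TinyWrapper&& o) noexcept : m_handle(o.m_handle), m_owned(o.m_owned) {
        o.m_handle = Handle{}; o.m_owned = false;             // moved-from object no longer owns
    }
    ~TinyWrapper() {
        if (m_owned && m_handle) Derived::DeleteFunction(m_handle);  // release owned handles once
    }
};

class ExampleList : public TinyWrapper<ExampleList, Example_Handle_t> {
public:
    static constexpr auto DeleteFunction = Example_Delete;    // picked up by the base destructor
    explicit ExampleList(Example_Handle_t h) : TinyWrapper(h) {}
};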
@@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "android/log.h" - -#include "hpp/CheckRuntime.hpp" -#include "hpp/SetBuilderOptions.hpp" -#include "hpp/Util.hpp" -#include "LoadContainer.hpp" -#include "CreateUserBuffer.hpp" -#include "LoadInputTensor.hpp" - -#include -#include -#include - -std::unique_ptr snpe; - -std::mutex mtx; -static zdl::DlSystem::Runtime_t runtime = zdl::DlSystem::Runtime_t::CPU; -static zdl::DlSystem::RuntimeList runtimeList; -bool useUserSuppliedBuffers = true; -bool useIntBuffer = false; - -bool execStatus_thread = false; -zdl::DlSystem::UserBufferMap inputMap, outputMap; -std::vector > snpeUserBackedInputBuffers, snpeUserBackedOutputBuffers; -std::unordered_map > applicationOutputBuffers; -std::unordered_map > applicationInputBuffers; -int bitWidth = 32; - - -#include -#include -#include - -std::string build_network(const uint8_t * dlc_buffer, const size_t dlc_size, const char runtime_arg) -{ - std::string outputLogger; - bool usingInitCaching = false; //shubham: TODO check with true - - std::unique_ptr container_snpe = nullptr ; - - container_snpe = loadContainerFromBuffer(dlc_buffer, dlc_size); - - if (container_snpe == nullptr) { - LOGE("Error while opening the container file."); - return "Error while opening the container file.\n"; - } - - runtimeList.clear(); - LOGI("runtime arg %c",runtime_arg); - zdl::DlSystem::Runtime_t runtime = zdl::DlSystem::Runtime_t::CPU; - if (runtime_arg == 'D'){ - runtime = zdl::DlSystem::Runtime_t::DSP; - LOGI("Added DSP"); - } - else if (runtime_arg == 'G') - { - runtime = zdl::DlSystem::Runtime_t::GPU_FLOAT32_16_HYBRID; //can be written as GPU - LOGI("Added GPU"); - } - - if(runtime != zdl::DlSystem::Runtime_t::UNSET) - { - bool ret = runtimeList.add(checkRuntime(runtime)); - if(ret == false){ - LOGE("Cannot set runtime"); - return outputLogger + "\nCannot set runtime"; - } - } else { - return outputLogger + "\nCannot set runtime"; - } - - - mtx.lock(); - snpe = setBuilderOptions(container_snpe, runtime, runtimeList, useUserSuppliedBuffers, usingInitCaching); - mtx.unlock(); - - if (snpe == nullptr) { - LOGE("SNPE Prepare failed: Builder option failed"); - outputLogger += "Model Prepare failed"; - return outputLogger + "SNPE Prepare failed"; - } - - outputLogger += "\nModel Network Prepare success !!!\n"; - - //Creating Buffer - createInputBufferMap(inputMap, applicationInputBuffers, snpeUserBackedInputBuffers, snpe, useIntBuffer, bitWidth); - createOutputBufferMap(outputMap, applicationOutputBuffers, snpeUserBackedOutputBuffers, snpe, useIntBuffer, bitWidth); - return outputLogger; -} - -void executeonthread() -{ - LOGI("shubham thread is running"); - if(snpe== nullptr) - LOGE("SNPE IS NULL"); - execStatus_thread = snpe->execute(inputMap, outputMap); -} - -bool executeDLC(cv::Mat &inputimg, cv::Mat &outputimg, float &milli_time, Model *modelobj) { - - LOGI("execute_DLC"); - ATrace_beginSection("preprocessing"); - - struct timeval start_time, end_time; - float seconds, useconds; - - mtx.lock(); - assert(snpe != nullptr); - - if(!loadInputUserBuffer(applicationInputBuffers, snpe, inputimg, inputMap, bitWidth, modelobj)) - { - LOGE("Failed to load Input UserBuffer"); - mtx.unlock(); - return false; - 
} - - ATrace_endSection(); - gettimeofday(&start_time, NULL); - ATrace_beginSection("inference time"); - - std::thread t1(executeonthread); - LOGI("shubham waiting"); - t1.join(); - bool execStatus = execStatus_thread; -// bool execStatus = snpe->execute(inputMap, outputMap); - ATrace_endSection(); - ATrace_beginSection("postprocessing time"); - gettimeofday(&end_time, NULL); - seconds = end_time.tv_sec - start_time.tv_sec; //seconds - useconds = end_time.tv_usec - start_time.tv_usec; //milliseconds - milli_time = ((seconds) * 1000 + useconds/1000.0); - //LOGI("Inference time %f ms", milli_time); - - if(execStatus== true){ - LOGI("Exec status is true"); - } - else{ - LOGE("Exec status is false"); - mtx.unlock(); - return false; - } - - const auto& outputNamesOpt = snpe->getOutputTensorNames(); - const zdl::DlSystem::StringList& outputNames = *outputNamesOpt; - - const char* name = outputNames.at(0); - - LOGI("outbut buffers: %s", name); - std::vector databuffer = applicationOutputBuffers.at(name); - std::vector dims; - auto bufferAttributesOpt = snpe->getInputOutputBufferAttributes(name); - if (!bufferAttributesOpt) throw std::runtime_error(std::string("Error obtaining attributes for input tensor ") + name); - - const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); - int num_dims = bufferShape.rank(); - for(int i=0;ipostprocess(outputimg); - - ATrace_endSection(); - mtx.unlock(); - return true; -} - diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inference_helper.cpp b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inference_helper.cpp deleted file mode 100644 index 0f8527d1..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/cpp/inference_helper.cpp +++ /dev/null @@ -1,290 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
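executeDLC() above measures latency with gettimeofday(). For reference, a minimal sketch of the same measurement with std::chrono::steady_clock, which is monotonic and therefore unaffected by wall-clock adjustments:

#include <chrono>

template <typename Fn>
float timeMs(Fn&& fn)
{
    const auto start = std::chrono::steady_clock::now();
    fn();   // e.g. the snpe->execute(inputMap, outputMap) call
    const auto end = std::chrono::steady_clock::now();
    return std::chrono::duration<float, std::milli>(end - start).count();
}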
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -#include -#include -#include -#include -#include -#include "android/log.h" - -#include "zdl/SNPE/SNPE.hpp" -#include "zdl/SNPE/SNPEFactory.hpp" -#include "zdl/DlSystem/DlVersion.hpp" -#include "zdl/DlSystem/DlEnums.hpp" -#include "zdl/DlSystem/String.hpp" -#include "zdl/DlContainer/IDlContainer.hpp" -#include "zdl/SNPE/SNPEBuilder.hpp" -#include "zdl/DlSystem/ITensor.hpp" -#include "zdl/DlSystem/StringList.hpp" -#include "zdl/DlSystem/TensorMap.hpp" -#include "zdl/DlSystem/TensorShape.hpp" -#include "DlSystem/ITensorFactory.hpp" - -#include "hpp/LoadInputTensor.hpp" -#include "hpp/Util.hpp" -#include "inference.h" - -bool SetAdspLibraryPath(std::string nativeLibPath) { - nativeLibPath += ";/data/local/tmp/mv_dlc;/vendor/lib/rfsa/adsp;/vendor/dsp/cdsp;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp"; - - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "ADSP Lib Path = %s \n", nativeLibPath.c_str()); - std::cout << "ADSP Lib Path = " << nativeLibPath << std::endl; - - return setenv("ADSP_LIBRARY_PATH", nativeLibPath.c_str(), 1 /*override*/) == 0; -} - - -std::unique_ptr loadContainerFromBuffer(const uint8_t * buffer, const size_t size) -{ - std::unique_ptr container; - container = zdl::DlContainer::IDlContainer::open(buffer, size); - return container; -} - - -zdl::DlSystem::Runtime_t checkRuntime(zdl::DlSystem::Runtime_t runtime) -{ - static zdl::DlSystem::Version_t Version = zdl::SNPE::SNPEFactory::getLibraryVersion(); - - LOGI("SNPE Version = %s", Version.asString().c_str()); //Print Version number - - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(runtime)) { - LOGE("Selected runtime not present. Falling back to GPU."); - runtime = zdl::DlSystem::Runtime_t::GPU; - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(runtime)){ - LOGE("Selected runtime not present. Falling back to CPU."); - runtime = zdl::DlSystem::Runtime_t::CPU; - } - } - - return runtime; -} - -std::unique_ptr setBuilderOptions(std::unique_ptr & container, - zdl::DlSystem::Runtime_t runtime, - zdl::DlSystem::RuntimeList runtimeList, - bool useUserSuppliedBuffers, - bool useCaching) -{ - std::unique_ptr snpe; - zdl::SNPE::SNPEBuilder snpeBuilder(container.get()); - - if(runtimeList.empty()) - { - runtimeList.add(runtime); - } - - std::string platformOptionStr = "useAdaptivePD:ON"; -// if (isSignedStatus == UNSIGNED_PD) { -// //use unsignedPD feature for untrusted app. 
-// platformOptionStr += "unsignedPD:ON"; -// } - zdl::DlSystem::PlatformConfig platformConfig; - bool setSuccess = platformConfig.setPlatformOptions(platformOptionStr); - if (!setSuccess) - LOGE("=========> failed to set platformconfig: %s", platformOptionStr.c_str()); - else - LOGI("=========> platformconfig set: %s", platformOptionStr.c_str()); - - bool isValid = platformConfig.isOptionsValid(); - if (!isValid) - LOGE("=========> platformconfig option is invalid"); - else - LOGI("=========> platformconfig option: valid"); - - - zdl::DlSystem::StringList stringruntime = runtimeList.getRuntimeListNames(); - for (const char *name : stringruntime) - LOGI("runtime sh %s", name); - - snpe = snpeBuilder.setOutputLayers({}) - .setPerformanceProfile(zdl::DlSystem::PerformanceProfile_t::BURST) - .setExecutionPriorityHint( - zdl::DlSystem::ExecutionPriorityHint_t::HIGH) - .setRuntimeProcessorOrder(runtimeList) - .setUseUserSuppliedBuffers(useUserSuppliedBuffers) - .setPlatformConfig(platformConfig) - .setInitCacheMode(useCaching) - .build(); - - return snpe; -} - -// ==============================User Buffer func=================================== // -// ================================================================================= // - - -//CreateUserbuffer INPUT/OUTPUT -void createUserBuffer(zdl::DlSystem::UserBufferMap& userBufferMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - const char * name, - const bool isTfNBuffer, - int bitWidth) -{ - - auto bufferAttributesOpt = snpe->getInputOutputBufferAttributes(name); - if (!bufferAttributesOpt) throw std::runtime_error(std::string("Error obtaining attributes for input tensor ") + name); - - // calculate the size of buffer required by the input tensor - const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); - - size_t bufferElementSize = 0; - if (isTfNBuffer) { - bufferElementSize = bitWidth / 8; - } - else { - bufferElementSize = sizeof(float); - } - - // Calculate the stride based on buffer strides. - // Note: Strides = Number of bytes to advance to the next element in each dimension. - // For example, if a float tensor of dimension 2x4x3 is tightly packed in a buffer of 96 bytes, then the strides would be (48,12,4) - // Note: Buffer stride is usually known and does not need to be calculated. 
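For reference, the tight-packing stride rule described in the note above, and applied by the loop that follows, can be written as a standalone helper: the innermost stride equals the element size and each outer stride is the next inner stride times that dimension, so dims 1x128x128x3 with 4-byte floats give strides {196608, 1536, 12, 4}. Illustrative only; the function name is hypothetical.

#include <cstddef>
#include <vector>

std::vector<size_t> tightStrides(const std::vector<size_t>& dims, size_t elementSize)
{
    std::vector<size_t> strides(dims.size());
    size_t stride = elementSize;                 // innermost stride = bytes per element
    for (size_t i = dims.size(); i-- > 0; ) {
        strides[i] = stride;
        stride *= dims[i];                       // next outer stride
    }
    return strides;
}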
- -// 1x128x128x3 -// [196608,1536,12,4] - int num_dims = bufferShape.rank(); //bufferShape rank is generally 1 more than expected, as it add 1 for batchSize, so 320x320x3 will look like 1x320x320x3 - LOGI("num_dims %d",num_dims); - std::vector strides(num_dims); - - //stride [196608 1536 12 4] - //buffershape [ 1 128 128 3] - //stride 4*3*128 - strides[strides.size() - 1] = bufferElementSize; - size_t stride = strides[strides.size() - 1]; - for (size_t i = num_dims - 1; i > 0; i--) { - stride *= bufferShape[i]; - strides[i - 1] = stride; - // LOGI("\nstrides[%d]: %d",i-1,stride); - // LOGI("\nbuffershape[%d]: %d",i,bufferShape[i]); - } - - size_t bufSize=bufferElementSize; - for(int i=0;i userBufferEncoding; - if (isTfNBuffer) - userBufferEncoding = std::unique_ptr( - new zdl::DlSystem::UserBufferEncodingTfN(0, 1.0, bitWidth)); - else - userBufferEncoding = std::unique_ptr( - new zdl::DlSystem::UserBufferEncodingFloat()); - - // create user-backed storage to load input data onto it - applicationBuffers.emplace(name, std::vector(bufSize)); - - // create SNPE user buffer from the user-backed buffer - zdl::DlSystem::IUserBufferFactory &ubFactory = zdl::SNPE::SNPEFactory::getUserBufferFactory(); - snpeUserBackedBuffers.push_back( - ubFactory.createUserBuffer(applicationBuffers.at(name).data(), - bufSize, - strides, - userBufferEncoding.get())); - if (snpeUserBackedBuffers.back() == nullptr) - throw std::runtime_error(std::string("Error while creating user buffer.")); - - // add the user-backed buffer to the inputMap, which is later on fed to the network for execution - userBufferMap.add(name, snpeUserBackedBuffers.back().get()); - -} - -/* - Cretae OutPut Buffer Map - */ -void createOutputBufferMap(zdl::DlSystem::UserBufferMap& outputMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - bool isTfNBuffer, - int bitWidth) -{ - //LOGI("Creating Output Buffer"); - const auto& outputNamesOpt = snpe->getOutputTensorNames(); - if (!outputNamesOpt) throw std::runtime_error("Error obtaining output tensor names"); - const zdl::DlSystem::StringList& outputNames = *outputNamesOpt; - - // create SNPE user buffers for each application storage buffer - for (const char *name : outputNames) { - LOGI("Creating output buffer %s", name); - createUserBuffer(outputMap, applicationBuffers, snpeUserBackedBuffers, snpe, name, isTfNBuffer, bitWidth); - } -} -/* - * Create Input Buffer Map - */ -void createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - bool isTfNBuffer, - int bitWidth) { - //LOGI("Creating Input Buffer"); - const auto &inputNamesOpt = snpe->getInputTensorNames(); - if (!inputNamesOpt) throw std::runtime_error("Error obtaining input tensor names"); - const zdl::DlSystem::StringList &inputNames = *inputNamesOpt; - assert(inputNames.size() > 0); - - // create SNPE user buffers for each application storage buffer - for (const char *name: inputNames) { - LOGI("Creating Input Buffer = %s", name); - createUserBuffer(inputMap, applicationBuffers, snpeUserBackedBuffers, snpe, name, - isTfNBuffer, bitWidth); - } -} - -//Preprocessing and loading in application Input Buffer -bool loadInputUserBuffer(std::unordered_map>& applicationBuffers, - std::unique_ptr& snpe, - cv::Mat &img, - zdl::DlSystem::UserBufferMap& inputMap, - int bitWidth, Model *modelobj) { - - // get input tensor names of the network that need to be populated - const auto 
&inputNamesOpt = snpe->getInputTensorNames(); - if (!inputNamesOpt) throw std::runtime_error("Error obtaining input tensor names"); - const zdl::DlSystem::StringList &inputNames = *inputNamesOpt; - assert(inputNames.size() > 0); - - if (inputNames.size()) LOGI("Preprocessing and loading in application Input Buffer"); - - - for (size_t j = 0; j < inputNames.size(); j++) { - const char *name = inputNames.at(j); - LOGI("Filling %s buffer ", name); - - if(bitWidth == 8 || bitWidth == 16) { - LOGE("bitwidth 8 and 16 are NOT DEFINED"); - return false; - } else { - - std::vector dims; - auto bufferAttributesOpt = snpe->getInputOutputBufferAttributes(name); - if (!bufferAttributesOpt) throw std::runtime_error(std::string("Error obtaining attributes for input tensor ") + name); - - const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); - int num_dims = bufferShape.rank(); - for(int i=0;ipreprocess(applicationBuffers.at(name), img, dims); //functions loads data in applicationBuffer - } - } - return true; -} diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/ic_launcher-playstore.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/ic_launcher-playstore.png deleted file mode 100644 index 3ac7521b..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/ic_launcher-playstore.png and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/java/com/qcom/aistack_superres/SNPEActivity.java b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/java/com/qcom/aistack_superres/SNPEActivity.java deleted file mode 100644 index 2771081b..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/java/com/qcom/aistack_superres/SNPEActivity.java +++ /dev/null @@ -1,328 +0,0 @@ -// -*- mode: java -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -package com.qcom.aistack_superres; - -import android.graphics.Bitmap; -import android.graphics.BitmapFactory; -import android.os.Bundle; -import android.view.MotionEvent; -import android.view.View; -import android.view.WindowManager; -import android.widget.AdapterView; -import android.widget.ArrayAdapter; -import android.widget.ImageView; -import android.widget.ProgressBar; -import android.widget.RadioButton; -import android.widget.RadioGroup; -import android.widget.Spinner; -import android.widget.TextView; -import android.widget.Toast; - -import androidx.appcompat.app.AppCompatActivity; - -import org.opencv.android.OpenCVLoader; - -import java.io.IOException; -import java.io.InputStream; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; - -public class SNPEActivity extends AppCompatActivity { - - static { - System.loadLibrary("ImageSuperResolution"); - OpenCVLoader.initDebug(); - } - - SNPEHelper mSnpeHelper; - Boolean mNetworkLoaded; - float infer_time=0.0f; - public static InputStream originalFile = null; - - //creating objects for UI element used in layout files (activity_snpe.xml) - TextView txt_stat, tx_pr, tx_out, tx_sug; - ImageView imageView, imageView2; - RadioGroup radioGroup; - Bitmap bmps = null; - Bitmap outbmps = null; - Spinner inputImageSpin; - Spinner modelspin; - String[] options = {"No Selection","Sample1.jpg","Sample2.jpg"}; //Image filenames on which model inference is made - String[] modeloptions = { "No Selection", "SESR", "ESRGAN", "XLSR", "quickSR_large", "quickSR_medium", "quickSR_small"}; - String[] modeldlcname = { "None", "sesr_quant_128_4.dlc", "esrgan_quant_128_4.dlc", "xlsr_quant_128_4.dlc", "quicksrnet_large_quant_128_4.dlc", "quicksrnet_medium_quant_128_4.dlc","quicksrnet_small_quant_128_4.dlc"}; - protected void executeRadioButton(int checkedId) { - ProgressBar progressBar; - progressBar = findViewById(R.id.indeterminateBar); - ExecutorService service = Executors.newSingleThreadExecutor(); - progressBar.setVisibility(View.VISIBLE); - getWindow().setFlags(WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE, - WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE); - service.execute(new Runnable() { - @Override - public void run() { - try { - boolean status = false; - String timestr = null; - switch (checkedId) { - case R.id.rb1: - // set text for your textview here - System.out.println("CPU instance running"); - - status = process(bmps,'C',modeldlcname[modelspin.getSelectedItemPosition()]); - timestr = "CPU inference time : " + infer_time + "milli sec"; - - break; - case R.id.rb2: - // set text for your textview here - System.out.println("GPU instance running"); - - status = process(bmps,'G',modeldlcname[modelspin.getSelectedItemPosition()]); - timestr = "GPU inference time : " + infer_time + "milli sec"; - - break; - case R.id.rb3: - System.out.println("DSP instance running"); - - status = process(bmps,'D',modeldlcname[modelspin.getSelectedItemPosition()]); - timestr = "DSP Inference time : " + infer_time + "milli sec"; - break; - default: - System.out.println("Do Nothing"); - break; - } - boolean final_status = status; - final String final_timestr = timestr; - runOnUiThread(new Runnable() { - @Override - public void run() { - txt_stat.setText(final_timestr); - progressBar.setVisibility(View.INVISIBLE); - getWindow().clearFlags(WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE); - if 
(final_status == true) { - imageView2.setImageBitmap(outbmps); - imageView2.setVisibility(View.VISIBLE); - txt_stat.setVisibility(View.VISIBLE); - tx_pr.setVisibility(View.INVISIBLE); - tx_out.setVisibility(View.VISIBLE); - tx_sug.setVisibility(View.VISIBLE); - } - } - }); - } - catch(Exception e) - { - getWindow().clearFlags(WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE); - e.printStackTrace(); - } - } - }); - } - @Override - protected void onCreate(Bundle savedInstanceState) { - super.onCreate(savedInstanceState); - setContentView(R.layout.activity_snpe); - txt_stat = findViewById(R.id.textView4); - imageView = findViewById(R.id.im1); - imageView2 = findViewById(R.id.im2); - radioGroup = findViewById(R.id.rg1); - inputImageSpin = findViewById((R.id.spinner)); - modelspin = findViewById((R.id.spinner7)); - tx_pr = findViewById(R.id.textView); - tx_out = findViewById(R.id.textView2); - tx_sug = findViewById(R.id.textView_suggest); - imageView2.setVisibility(View.INVISIBLE); - tx_out.setVisibility(View.INVISIBLE); - tx_sug.setVisibility(View.INVISIBLE); - - - imageView2.setOnTouchListener((view, motionEvent) -> { - switch (motionEvent.getAction()) { - case MotionEvent.ACTION_DOWN: { - imageView2.setVisibility(view.INVISIBLE); - System.out.println("MotionEvent.ACTION_DOWN"); - tx_out.setVisibility(view.INVISIBLE); - tx_pr.setVisibility(view.VISIBLE); - break; - } - case MotionEvent.ACTION_UP: { - imageView2.setVisibility(view.VISIBLE); - System.out.println("MotionEvent.ACTION_UP"); - tx_out.setVisibility(view.VISIBLE); - tx_pr.setVisibility(view.INVISIBLE); - break; - } - } - return false; - }); - - ArrayAdapter ad = new ArrayAdapter(this, android.R.layout.simple_spinner_item, options); - ad.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item); - inputImageSpin.setAdapter(ad); - - ArrayAdapter ad7 = new ArrayAdapter(this, android.R.layout.simple_spinner_item, modeloptions); - ad7.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item); - modelspin.setAdapter(ad7); - - // Listener to check the change in HW accelerator input in APP UI - radioGroup.setOnCheckedChangeListener(new RadioGroup.OnCheckedChangeListener() { - @Override - public void onCheckedChanged(RadioGroup group, int checkedId) { - if (!inputImageSpin.getSelectedItem().toString().equals("No Selection") && !modelspin.getSelectedItem().toString().equals("No Selection")){ - executeRadioButton(checkedId); - } - else if (checkedId!=-1 && inputImageSpin.getSelectedItem().toString().equals("No Selection") && modelspin.getSelectedItem().toString().equals("No Selection")){ - Toast.makeText(getApplicationContext(), "Please select model and image", Toast.LENGTH_SHORT).show(); - } - else if (checkedId!=-1 && inputImageSpin.getSelectedItem().toString().equals("No Selection")) - { - Toast.makeText(getApplicationContext(), "Please select image to model ", Toast.LENGTH_SHORT).show(); - } - else if(checkedId!=-1 && modelspin.getSelectedItem().toString().equals("No Selection")) - { - Toast.makeText(getApplicationContext(), "Please select appropriate model ", Toast.LENGTH_SHORT).show(); - } - } - }); - - modelspin.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() { - @Override - public void onItemSelected(AdapterView parent, View view, int position, long id) { - - // loading picture from assets... 
- if (!parent.getItemAtPosition(position).equals("No Selection") && !inputImageSpin.getSelectedItem().toString().equals("No Selection")) {//if no selection of image - txt_stat.setText("Stats"); - try { - originalFile = getAssets().open(inputImageSpin.getSelectedItem().toString()); - } catch (IOException e) { - e.printStackTrace(); - } - - // Convert input image to Bitmap - bmps = BitmapFactory.decodeStream(originalFile); - try { - // Set the input image in UI view - imageView.setImageBitmap(bmps); - System.out.println("modelspin: INPUT wxh:"+bmps.getWidth()+"-----"+bmps.getHeight()); - } catch (Exception e) { - e.printStackTrace(); - } - int checkedID_RB = radioGroup.getCheckedRadioButtonId(); - if (originalFile!=null && bmps!=null && checkedID_RB !=-1){ - executeRadioButton(checkedID_RB); - } - } - else if (!inputImageSpin.getSelectedItem().toString().equals("No Selection")) { - - try { - originalFile = getAssets().open(inputImageSpin.getSelectedItem().toString()); - // Set the input image in UI view - imageView.setImageBitmap(BitmapFactory.decodeStream(originalFile)); - imageView2.setImageResource(R.drawable.ic_launcher_background); - imageView2.setVisibility(view.INVISIBLE); - - } catch (Exception e) { - e.printStackTrace(); - } - - } - else{ - originalFile=null; - bmps=null; - imageView.setImageResource(R.drawable.ic_launcher_background); - imageView2.setImageResource(R.drawable.ic_launcher_background); - imageView2.setVisibility(view.INVISIBLE); - txt_stat.setText("Stats"); - radioGroup.clearCheck(); - } - } - @Override - public void onNothingSelected(AdapterView parent) { - System.out.println("Nothing"); - } - }); - - inputImageSpin.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() { - @Override - public void onItemSelected(AdapterView parent, View view, int position, long id) { - - // loading picture from assets... - if (!parent.getItemAtPosition(position).equals("No Selection") && !modelspin.getSelectedItem().toString().equals("No Selection")) {//if no selection of image - txt_stat.setText("Stats"); - try { - // loading picture from assets... 
- originalFile = getAssets().open((String) parent.getItemAtPosition(position)); - } catch (IOException e) { - e.printStackTrace(); - } - - // Convert input image to Bitmap - bmps = BitmapFactory.decodeStream(originalFile); - try { - // Set the input image in UI view - imageView.setImageBitmap(bmps); - System.out.println("INPUT wxh: "+bmps.getWidth()+"-----"+bmps.getHeight()); - } catch (Exception e) { - e.printStackTrace(); - } - int checkedID_RB = radioGroup.getCheckedRadioButtonId(); - if (originalFile!=null && bmps!=null && checkedID_RB !=-1){ - executeRadioButton(checkedID_RB); - } - } - //if only input image is selected - else if (!inputImageSpin.getSelectedItem().toString().equals("No Selection")) { - try { - originalFile = getAssets().open(inputImageSpin.getSelectedItem().toString()); - imageView.setImageBitmap(BitmapFactory.decodeStream(originalFile)); - imageView2.setImageResource(R.drawable.ic_launcher_background); - imageView2.setVisibility(view.INVISIBLE); - } catch (Exception e) { - e.printStackTrace(); - } - } - else{ - originalFile=null; - bmps=null; - imageView.setImageResource(R.drawable.ic_launcher_background); - imageView2.setImageResource(R.drawable.ic_launcher_background); - imageView2.setVisibility(view.INVISIBLE); - txt_stat.setText("Stats"); - radioGroup.clearCheck(); - } - } - @Override - public void onNothingSelected(AdapterView parent) { - System.out.println("Nothing"); - } - }); - } - - //Function to load model and get inference from it - public boolean process(Bitmap bmps, char runtime_var, String dlc_name) { - - mSnpeHelper = new SNPEHelper(getApplication()); - - mNetworkLoaded = mSnpeHelper.loadingMODELS(runtime_var, dlc_name); - - if (mNetworkLoaded == true) - { - outbmps = mSnpeHelper.snpeInference(bmps); - infer_time = mSnpeHelper.getInfer_time(); - } - - if (outbmps == null) - { - System.out.println("outbmps is null"); - return false; - } - return true; - } -} - diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/java/com/qcom/aistack_superres/SNPEHelper.java b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/java/com/qcom/aistack_superres/SNPEHelper.java deleted file mode 100644 index 7d20c1cc..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/java/com/qcom/aistack_superres/SNPEHelper.java +++ /dev/null @@ -1,92 +0,0 @@ -// -*- mode: java -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -package com.qcom.aistack_superres; - -import static android.graphics.Color.rgb; - -import android.app.Application; -import android.content.res.AssetManager; -import android.graphics.Bitmap; - -import org.opencv.android.Utils; -import org.opencv.core.Mat; - -public class SNPEHelper { - private final Application mApplication; - private AssetManager assetManager; - - private float infer_time=0; - - // Constructor - public SNPEHelper(Application application) { - mApplication = application; - } - public float getInfer_time() - {return infer_time;} - - //Native functions - public native String queryRuntimes(String a); - public native String initSNPE(AssetManager assetManager, char a, String dlc_name); - public native float inferSNPE(long inputmataddress, long outputmataddress); - - /** - * This method loads ML models on selected runtime - */ - public boolean loadingMODELS(char runtime_var, String dlc_name) { - - assetManager = mApplication.getAssets(); - String nativeDirPath = mApplication.getApplicationInfo().nativeLibraryDir; - String res_query = queryRuntimes(nativeDirPath); - System.out.println(res_query); - String init_str = initSNPE(assetManager, runtime_var, dlc_name); - System.out.println("RESULT:"+init_str); - - int success_count = init_str.split("success", -1).length -1; - - if(success_count==1) - { - System.out.println("Model built successfully"); - return true; - } - - return false; - } - - /* - This method makes inference on bitmap. - */ - public Bitmap snpeInference(Bitmap modelInputBitmap) { - - try{ - - Mat inputMat = new Mat(); - Utils.bitmapToMat(modelInputBitmap, inputMat); - - Mat outputMat = new Mat(); - - infer_time = inferSNPE(inputMat.getNativeObjAddr(), outputMat.getNativeObjAddr()); - - - if(infer_time==0.0) - System.out.println("ERROR"); - else - { - Bitmap outputBitmap = Bitmap.createBitmap(outputMat.cols(), outputMat.rows(), Bitmap.Config.ARGB_8888); - Utils.matToBitmap(outputMat,outputBitmap); - return outputBitmap; - } - }catch (Exception e) { - e.printStackTrace(); - } - return null; - } - - -} \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/jniLibs/arm64-v8a/ReadMe.txt b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/jniLibs/arm64-v8a/ReadMe.txt deleted file mode 100644 index b1b7342e..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/jniLibs/arm64-v8a/ReadMe.txt +++ /dev/null @@ -1,2 +0,0 @@ -User needs to place Qualcomm Neural Processing SDK files here. 
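The SNPEHelper removed above exposes a small surface: loadingMODELS() builds the network on the chosen runtime, snpeInference() round-trips a Bitmap through OpenCV Mats into the native inferSNPE() call, and getInfer_time() reports the measured latency. A minimal usage sketch follows, assuming the same signatures as in the deleted file; the wrapper class name and any DLC file name you pass in are placeholders, not part of the original sources.

```java
import android.app.Application;
import android.graphics.Bitmap;
import android.util.Log;

// Sketch only: drives the removed SNPEHelper API (loadingMODELS / snpeInference / getInfer_time).
public final class EnhancementRunner {
    private final SNPEHelper helper;

    public EnhancementRunner(Application app) {
        helper = new SNPEHelper(app);
    }

    /** Returns the model output bitmap, or null if loading or inference failed. */
    public Bitmap run(Bitmap input, char runtimeCode, String dlcName) {
        // runtimeCode follows the app's convention: 'C' = CPU, 'G' = GPU, 'D' = DSP
        if (!helper.loadingMODELS(runtimeCode, dlcName)) {
            return null;
        }
        Bitmap output = helper.snpeInference(input);   // Bitmap -> cv::Mat -> native inferSNPE -> Bitmap
        if (output != null) {
            Log.i("EnhancementRunner", "Inference time (ms): " + helper.getInfer_time());
        }
        return output;
    }
}
```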
-Please refer to resolveDependencies.sh \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/drawable/ic_launcher_background.xml b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/drawable/ic_launcher_background.xml deleted file mode 100644 index a4f78de5..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/drawable/ic_launcher_background.xml +++ /dev/null @@ -1,170 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/layout/activity_snpe.xml b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/layout/activity_snpe.xml deleted file mode 100644 index 2e5ea83b..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/layout/activity_snpe.xml +++ /dev/null @@ -1,163 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-anydpi-v26/ic_launcher.xml b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-anydpi-v26/ic_launcher.xml deleted file mode 100644 index 67820c56..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-anydpi-v26/ic_launcher.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml deleted file mode 100644 index 67820c56..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-hdpi/ic_launcher.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-hdpi/ic_launcher.png deleted file mode 100644 index 13b569e0..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-hdpi/ic_launcher.png and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-hdpi/ic_launcher_foreground.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-hdpi/ic_launcher_foreground.png deleted file mode 100644 index 0598d7fb..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-hdpi/ic_launcher_foreground.png and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-hdpi/ic_launcher_round.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-hdpi/ic_launcher_round.png deleted file mode 100644 index f2e19f15..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-hdpi/ic_launcher_round.png and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-mdpi/ic_launcher.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-mdpi/ic_launcher.png deleted file mode 100644 index 87c49946..00000000 Binary files 
a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-mdpi/ic_launcher.png and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-mdpi/ic_launcher_foreground.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-mdpi/ic_launcher_foreground.png deleted file mode 100644 index 6bedb4e0..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-mdpi/ic_launcher_foreground.png and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-mdpi/ic_launcher_round.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-mdpi/ic_launcher_round.png deleted file mode 100644 index 819346e2..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-mdpi/ic_launcher_round.png and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xhdpi/ic_launcher.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xhdpi/ic_launcher.png deleted file mode 100644 index 2ef975c2..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xhdpi/ic_launcher.png and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xhdpi/ic_launcher_foreground.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xhdpi/ic_launcher_foreground.png deleted file mode 100644 index 42cea5a1..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xhdpi/ic_launcher_foreground.png and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xhdpi/ic_launcher_round.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xhdpi/ic_launcher_round.png deleted file mode 100644 index 283e4b57..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xhdpi/ic_launcher_round.png and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxhdpi/ic_launcher.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxhdpi/ic_launcher.png deleted file mode 100644 index c77f3938..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxhdpi/ic_launcher.png and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxhdpi/ic_launcher_foreground.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxhdpi/ic_launcher_foreground.png deleted file mode 100644 index a31d0b89..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxhdpi/ic_launcher_foreground.png and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxhdpi/ic_launcher_round.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxhdpi/ic_launcher_round.png deleted file mode 100644 index a3c9baa8..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxhdpi/ic_launcher_round.png and /dev/null 
differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxxhdpi/ic_launcher.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxxhdpi/ic_launcher.png deleted file mode 100644 index 63fefe7c..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxxhdpi/ic_launcher.png and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxxhdpi/ic_launcher_foreground.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxxhdpi/ic_launcher_foreground.png deleted file mode 100644 index be33ca91..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxxhdpi/ic_launcher_foreground.png and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png deleted file mode 100644 index 5467f894..00000000 Binary files a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png and /dev/null differ diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/values-night/themes.xml b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/values-night/themes.xml deleted file mode 100644 index cc4fe14e..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/values-night/themes.xml +++ /dev/null @@ -1,17 +0,0 @@ - - - - - \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/values/colors.xml b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/values/colors.xml deleted file mode 100644 index 742b3a6a..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/values/colors.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - #FFBB86FC - #FF6200EE - #FF3700B3 - #FF03DAC5 - #FF018786 - #FF000000 - #FFFFFFFF - \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/values/ic_launcher_background.xml b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/values/ic_launcher_background.xml deleted file mode 100644 index cfa9be08..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/values/ic_launcher_background.xml +++ /dev/null @@ -1,4 +0,0 @@ - - - #FFFFFF - \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/values/strings.xml b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/values/strings.xml deleted file mode 100644 index 1969d10c..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/values/strings.xml +++ /dev/null @@ -1,4 +0,0 @@ - - - Image Super Resolution - \ No newline at end of file diff --git a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/values/themes.xml b/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/values/themes.xml deleted file mode 100644 index cbd4ed94..00000000 --- a/ai-solutions/android/01-ImageSuperResolution/superresolution/src/main/res/values/themes.xml +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - \ No newline at 
end of file diff --git a/ai-solutions/android/02-ImageEnhancement/README.md b/ai-solutions/android/02-ImageEnhancement/README.md deleted file mode 100644 index 9ccaa6ad..00000000 --- a/ai-solutions/android/02-ImageEnhancement/README.md +++ /dev/null @@ -1,131 +0,0 @@ -# Table of Contents - -- [Table of Contents](#table-of-contents) -- [Introduction](#introduction) - + [About "Image Enhancement"](#about--image-enhancement-) - + [Pre-Requisites](#pre-requisites) -- [Model Selection and DLC conversion](#model-selection-and-dlc-conversion) - + [Model Overview](#model-overview) - + [Steps to convert model to DLC](#steps-to-convert-model-to-dlc) -- [Source Overview](#source-overview) - + [Source Organization](#source-organization) - + [Code Implementation](#code-implementation) -- [Build APK file with Android Studio](#build-apk-file-with-android-studio) -- [Results](#results) - -# Introduction - -### About "Image Enhancement" - -- This project is a sample Android application for AI-based low-light image enhancement using the [Qualcomm® Neural Processing SDK for AI](https://developer.qualcomm.com/sites/default/files/docs/snpe/index.html) framework. -- We have used 4 models in this solution. -- This sample enhances a low-light image to make it brighter. -- DLC models accept only a fixed input size. -- If users intend to use a different model in this demo framework, **the image pre/post-processing will need to be changed**. -- The current pre/post-processing is specific to the models used. - -### Pre-Requisites - -- Qualcomm® Neural Processing SDK for AI setup should be completed by following the guide here: https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html -- Android Studio to import the sample project -- Android NDK to build the native code -- Install OpenCV using ```pip install opencv-python``` - -# Model Selection and DLC conversion - -### Model Overview - -Please refer to the Models repository for the model overview - Add public link - -### Steps to convert model to DLC -Please refer to the Models repository for the DLC conversion steps - Add public link - -# Source Overview - -### Source Organization - -- demo : Contains the demo video and GIF -- enhancement : Contains source files in standard Android app format. -- app\src\main\assets : Contains the model DLC binaries -- enhancement\src\main\java\com\qcom\enhancement : Application Java source code -- enhancement\src\main\cpp : Application C++ (native) source code -- sdk : Contains the OpenCV SDK (generated using _resolveDependencies.sh_) - -### Code Implementation - -- Model Initialization - - `public boolean loadingMODELS(char runtime_var, String dlc_name)` - - runtime_var: Possible options are D, G, C. - - dlc_name: Name of the DLC. - -- Running Model - - - The following Java function handles model execution. It internally calls sub-functions to handle pre-processing and post-processing. - - `inferSNPE(inputMat.getNativeObjAddr(), outputMat.getNativeObjAddr())` - - inputMat is an OpenCV matrix that contains the input image. - - outputMat is the destination for the output image. - - - C++ function that handles preprocessing of the input image. - - `preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) ` - - - C++ function that handles postprocessing after we receive the output from the model. - - `postprocess(cv::Mat &outputimg)` - - - SNPE API function that runs the network and gives the result. - - `snpe->execute(inputMap, outputMap);` - - -# Build APK file with Android Studio - -1. Clone this repo. -2. Generate DLC using the steps mentioned. -3. 
Run the script below, from the directory where it is present, to resolve this project's dependencies. - - `bash resolveDependencies.sh` - - * This script will download OpenCV and copy it to the sdk directory, to enable OpenCV for Android Java. - * This script will copy the snpe-release.aar file from $SNPE_ROOT to the "snpe-release" directory in the Android project. - - **NOTE - If you are using SNPE version 2.11 or greater, please change the following line in resolveDependencies.sh.** - ``` - From: cp $SNPE_ROOT/android/snpe-release.aar snpe-release - To : cp $SNPE_ROOT/lib/android/snpe-release.aar snpe-release - ``` - -4. Import the folder VisionSolution3-ImageEnhancement as a project in Android Studio -5. Do a Gradle sync -6. Compile the project. -7. The output APK file should be generated: enhancement-debug.apk -8. Prepare the Qualcomm Innovators Development Kit (QIDK) to install the application (do not run the APK on an emulator) -9. Install and test the application: enhancement-debug.apk - -``` -adb install -r -t enhancement-debug.apk -``` - -10. Launch the application - -The following is the basic workflow of the "Image Enhancement" Android app: - -1. Select one of the models -2. Select one of the given images from the drop-down list -3. Select the run-time to run the model (CPU, GPU or DSP) -4. Observe the result of the model on screen -5. Also note the performance indicator for the particular run-time, in msec - -Sample results for the application are shown below. - -# Results - -- Demo video and performance details are shown below: - -![Demo video.](demo/EnhancementDemo.gif) - -###### *Qualcomm Neural Processing SDK and Snapdragon are products of Qualcomm Technologies, Inc. and/or its subsidiaries. AIMET Model Zoo is a product of Qualcomm Innovation Center, Inc.* diff --git a/ai-solutions/android/02-ImageEnhancement/build.gradle b/ai-solutions/android/02-ImageEnhancement/build.gradle deleted file mode 100644 index 292efdaf..00000000 --- a/ai-solutions/android/02-ImageEnhancement/build.gradle +++ /dev/null @@ -1,26 +0,0 @@ -// Top-level build file where you can add configuration options common to all sub-projects/modules.
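One detail worth illustrating from the deleted README above: the UI exposes the run-times as CPU, GPU or DSP, while loadingMODELS() expects the single-character codes 'C', 'G' and 'D'. A small hypothetical mapping helper (the method name is illustrative, not from the original sources):

```java
// Maps the UI runtime label to the single-character code expected by loadingMODELS().
public static char runtimeCode(String uiLabel) {
    switch (uiLabel) {
        case "GPU": return 'G';
        case "DSP": return 'D';
        case "CPU":
        default:    return 'C';   // fall back to CPU for anything unexpected
    }
}
```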
-buildscript { - ext.kotlin_version = '1.6.10' - repositories { - google() - jcenter() - } - dependencies { - classpath 'com.android.tools.build:gradle:7.2.1' - classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version" - // NOTE: Do not place your application dependencies here; they belong - // in the individual module build.gradle files - } -} - -allprojects { - repositories { - google() - jcenter() - } - -} - -task clean(type: Delete) { - delete rootProject.buildDir -} \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/demo/EnhancementDemo.gif b/ai-solutions/android/02-ImageEnhancement/demo/EnhancementDemo.gif deleted file mode 100644 index e4484caf..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/demo/EnhancementDemo.gif and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/demo/EnhancementDemo.mp4 b/ai-solutions/android/02-ImageEnhancement/demo/EnhancementDemo.mp4 deleted file mode 100644 index e95a627d..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/demo/EnhancementDemo.mp4 and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/.gitignore b/ai-solutions/android/02-ImageEnhancement/enhancement/.gitignore deleted file mode 100644 index 42afabfd..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/build \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/build.gradle b/ai-solutions/android/02-ImageEnhancement/enhancement/build.gradle deleted file mode 100644 index 8f29d3e6..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/build.gradle +++ /dev/null @@ -1,66 +0,0 @@ -apply plugin: 'com.android.application' - -android { - compileSdkVersion 30 - buildToolsVersion "30.0.3" - - defaultConfig { - applicationId "com.qcom.aistack_lowlightenhance" - minSdkVersion 26 - targetSdkVersion 30 - versionCode 1 - versionName "1.0" - - testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner" - externalNativeBuild { - cmake { - cppFlags "-std=c++11 -frtti -fexceptions" - arguments "-DOpenCV_DIR=" + project(':sdk').projectDir + "/native/jni", - "-DANDROID_TOOLCHAIN=clang" - targets "ImageEnhancement" - } - ndk { - abiFilters 'arm64-v8a' - } - } - } - - packagingOptions { - pickFirst 'lib/x86/libc++_shared.so' - pickFirst 'lib/x86_64/libc++_shared.so' - pickFirst 'lib/arm64-v8a/libc++_shared.so' - pickFirst 'lib/armeabi-v7a/libc++_shared.so' - } - - buildTypes { - release { - minifyEnabled false - proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro' - } - } - - compileOptions { - sourceCompatibility JavaVersion.VERSION_1_8 - targetCompatibility JavaVersion.VERSION_1_8 - } - ndkVersion '21.4.7075529' - externalNativeBuild { - cmake { - path file('src/main/cpp/CMakeLists.txt') - } - } -} - -dependencies { - implementation fileTree(dir: 'libs', include: ['*.jar']) - implementation project(path: ':sdk') - implementation 'androidx.appcompat:appcompat:1.2.0' - testImplementation 'junit:junit:4.12' - androidTestImplementation 'androidx.test.ext:junit:1.1.1' - androidTestImplementation 'androidx.test.espresso:espresso-core:3.2.0' - implementation 'com.google.android.material:material:1.2.1' - implementation 'androidx.constraintlayout:constraintlayout:2.0.4' - androidTestImplementation 'androidx.test.ext:junit:1.1.2' - androidTestImplementation 'com.android.support.test:rules:1.0.2' - androidTestImplementation 
'androidx.test.espresso:espresso-core:3.3.0' -} \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/proguard-rules.pro b/ai-solutions/android/02-ImageEnhancement/enhancement/proguard-rules.pro deleted file mode 100644 index 64b4a059..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/proguard-rules.pro +++ /dev/null @@ -1,21 +0,0 @@ -# Add project specific ProGuard rules here. -# You can control the set of applied configuration files using the -# proguardFiles setting in build.gradle. -# -# For more details, see -# http://developer.android.com/guide/developing/tools/proguard.html - -# If your project uses WebView with JS, uncomment the following -# and specify the fully qualified class name to the JavaScript interface -# class: -#-keepclassmembers class fqcn.of.javascript.interface.for.webview { -# public *; -#} - -# Uncomment this to preserve the line number information for -# debugging stack traces. -#-keepattributes SourceFile,LineNumberTable - -# If you keep the line number information, uncomment this to -# hide the original source file name. -#-renamesourcefileattribute SourceFile \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/AndroidManifest.xml b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/AndroidManifest.xml deleted file mode 100644 index c0b58218..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/AndroidManifest.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - - - - - - - - \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/assets/Sample1.jpg b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/assets/Sample1.jpg deleted file mode 100644 index 583f4dbd..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/assets/Sample1.jpg and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/assets/Sample2.jpg b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/assets/Sample2.jpg deleted file mode 100644 index 7fae3ae6..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/assets/Sample2.jpg and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/CMakeLists.txt b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/CMakeLists.txt deleted file mode 100644 index 4a021d35..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/CMakeLists.txt +++ /dev/null @@ -1,68 +0,0 @@ - -# For more information about using CMake with Android Studio, read the -# documentation: https://d.android.com/studio/projects/add-native-code.html - -# Sets the minimum version of CMake required to build the native library. - -cmake_minimum_required(VERSION 3.18.1) - -# Declares and names the project. - -project("ImageEnhancement") - -# Creates and names a library, sets it as either STATIC -# or SHARED, and provides the relative paths to its source code. -# You can define multiple libraries, and CMake builds them for you. -# Gradle automatically packages shared libraries with your APK. 
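# Note on the removed build wiring: the find_package(OpenCV) call below is resolved through
# the OpenCV_DIR argument that the enhancement module's build.gradle passes to CMake
# ("-DOpenCV_DIR=" + project(':sdk').projectDir + "/native/jni"), i.e. the sdk directory
# populated by resolveDependencies.sh.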
- -###OPENCV -#find_package(OpenCV REQUIRED) ##FAILED, cannot find libcpufeatures.so -#set(OpenCV_STATIC on) -#set(OpenCV_DIR C:/Users/shubgoya/Desktop/SNPEworkspace/github_workspace/HRNET_posenet/opencv45/native/jni) -find_package(OpenCV REQUIRED) -#INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS}) - - -###INCLUDE_DIRECTORIES -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/inc/) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/inc/zdl) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/inc/hpp) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}) - - -add_library( # Sets the name of the library. - ImageEnhancement - - # Sets the library as a shared library. - SHARED - - # Provides a relative path to your source file(s). - inference.cpp inference_helper.cpp Model.h ImageEnhancement.cpp - MBLLEN.h MBLLEN.cpp RUAS.h RUAS.cpp SCI.cpp SCI.h StableLLVE.h StableLLVE.cpp ZeroDCE.h ZeroDCE.cpp - ) - -# Searches for a specified prebuilt library and stores the path as a -# variable. Because CMake includes system libraries in the search path by -# default, you only need to specify the name of the public NDK library -# you want to add. CMake verifies that the library exists before -# completing its build. - -find_library( # Sets the name of the path variable. - log-lib - - # Specifies the name of the NDK library that - # you want CMake to locate. - log ) - -# Specifies libraries CMake should link to your target library. You -# can link multiple libraries, such as libraries you define in this -# build script, prebuilt third-party libraries, or system libraries. - -target_link_libraries( # Specifies the target library. - ImageEnhancement - - # Links the target library to the log library - # included in the NDK. - ${CMAKE_CURRENT_SOURCE_DIR}/../jniLibs/arm64-v8a/libSNPE.so - - ${log-lib} ${OpenCV_LIBS}) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/MBLLEN.cpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/MBLLEN.cpp deleted file mode 100644 index cb80e69b..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/MBLLEN.cpp +++ /dev/null @@ -1,42 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shivmahe on 9/5/2023. 
-// - -#include "MBLLEN.h" -void MBLLEN::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) -{ - LOGI("MBLLEN PREPROCESS is called"); - float * accumulator = reinterpret_cast (&dest_buffer[0]); - cv::Mat resized_img; - cv::resize(img,resized_img,cv::Size(dims[2],dims[1]),cv::INTER_CUBIC); - LOGI("input image SIZE width%d::%d height%d::%d",dims[1],resized_img.cols, dims[2],resized_img.rows); - - //opencv read in BGRA by default, converting to BGR - cvtColor(resized_img, resized_img, CV_BGRA2RGB); - LOGI("num of channels: %d",resized_img.channels()); - int lim = resized_img.rows*resized_img.cols*3; - for(int idx = 0; idx &dest_buffer, cv::Mat &img, std::vector dims); - void postprocess(cv::Mat &outputimg); - void msg(); - -}; - -#endif //SUPERRESOLUTION_MBLLEN_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/Model.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/Model.h deleted file mode 100644 index 9b9f86b3..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/Model.h +++ /dev/null @@ -1,51 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shubgoya on 8/2/2023. -// - - - -#ifndef SUPERRESOLUTION_MODEL_H -#define SUPERRESOLUTION_MODEL_H - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "android/log.h" - - -#include -#include -#include -#define LOG_TAG "SNPE_INF" -#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__) -#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__) - - -class Model { - -public: - virtual void preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) = 0; - virtual void postprocess(cv::Mat &outputimg) = 0; - virtual void msg() = 0; - -}; - - -#endif //SUPERRESOLUTION_MODEL_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/RUAS.cpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/RUAS.cpp deleted file mode 100644 index 9e26326f..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/RUAS.cpp +++ /dev/null @@ -1,41 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shivmahe on 9/5/2023. 
-// - -#include "RUAS.h" -void RUAS::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) -{ - LOGI("RUAS PREPROCESS is called"); - float * accumulator = reinterpret_cast (&dest_buffer[0]); - cv::Mat resized_img; - cv::resize(img,resized_img,cv::Size(dims[2],dims[1]),cv::INTER_CUBIC); - LOGI("input image SIZE width%d::%d height%d::%d",dims[1],resized_img.cols, dims[2],resized_img.rows); - - //opencv read in BGRA by default, converting to BGR - cvtColor(resized_img, resized_img, CV_BGRA2RGB); - LOGI("num of channels: %d",resized_img.channels()); - int lim = resized_img.rows*resized_img.cols*3; - for(int idx = 0; idx &dest_buffer, cv::Mat &img, std::vector dims); - void postprocess(cv::Mat &outputimg); - void msg(); - - -}; - - -#endif //SUPERRESOLUTION_RUAS_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/SCI.cpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/SCI.cpp deleted file mode 100644 index e6a95469..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/SCI.cpp +++ /dev/null @@ -1,39 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shivmahe on 9/5/2023. -// - -#include "SCI.h" -void SCI::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) -{ - LOGI("SCI PREPROCESS is called"); - float * accumulator = reinterpret_cast (&dest_buffer[0]); - cv::Mat resized_img; - cv::resize(img,resized_img,cv::Size(dims[2],dims[1]),cv::INTER_CUBIC); - - cvtColor(resized_img, resized_img, CV_BGRA2RGB); - LOGI("num of channels: %d",resized_img.channels()); - int lim = resized_img.rows*resized_img.cols*3; - for(int idx = 0; idx &dest_buffer, cv::Mat &img, std::vector dims); - void postprocess(cv::Mat &outputimg); - void msg(); - - -}; - - -#endif //SUPERRESOLUTION_SCI_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/StableLLVE.cpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/StableLLVE.cpp deleted file mode 100644 index 5ae5803a..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/StableLLVE.cpp +++ /dev/null @@ -1,41 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shivmahe on 9/5/2023. 
-// - -#include "StableLLVE.h" -void StableLLVE::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) -{ - LOGI("StableLLVE PREPROCESS is called"); - float * accumulator = reinterpret_cast (&dest_buffer[0]); - cv::Mat resized_img; - cv::resize(img,resized_img,cv::Size(dims[2],dims[1]),cv::INTER_LINEAR); - LOGI("input image SIZE width%d::%d height%d::%d",dims[1],resized_img.cols, dims[2],resized_img.rows); - - cvtColor(resized_img, resized_img, CV_BGRA2RGB); - LOGI("num of channels: %d",resized_img.channels()); - int lim = resized_img.rows*resized_img.cols*3; - for(int idx = 0; idx &dest_buffer, cv::Mat &img, std::vector dims); - void postprocess(cv::Mat &outputimg); - void msg(); - - -}; - - -#endif //SUPERRESOLUTION_STABLELLVE_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/ZeroDCE.cpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/ZeroDCE.cpp deleted file mode 100644 index 27a2155a..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/ZeroDCE.cpp +++ /dev/null @@ -1,39 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shivmahe on 9/5/2023. -// - -#include "ZeroDCE.h" -void ZeroDCE::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) -{ - LOGI("ZeroDCE PREPROCESS is called"); - float * accumulator = reinterpret_cast (&dest_buffer[0]); - cv::Mat resized_img; - cv::resize(img,resized_img,cv::Size(dims[2],dims[1]),cv::INTER_CUBIC); - LOGI("input image SIZE width%d::%d height%d::%d",dims[1],resized_img.cols, dims[2],resized_img.rows); - cvtColor(resized_img, resized_img, CV_BGRA2RGB); - LOGI("num of channels: %d",resized_img.channels()); - int lim = resized_img.rows*resized_img.cols*3; - for(int idx = 0; idx &dest_buffer, cv::Mat &img, std::vector dims); - void postprocess(cv::Mat &outputimg); - void msg(); - - -}; - - -#endif //SUPERRESOLUTION_ZERODCE_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/imageEnhancement.cpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/imageEnhancement.cpp deleted file mode 100644 index 5f770de4..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/imageEnhancement.cpp +++ /dev/null @@ -1,194 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -using namespace cv; -#include -#include -#include -#include -#include - -#include "hpp/inference.h" -#include "hpp/Util.hpp" - -#include "zdl/SNPE/SNPE.hpp" -#include "zdl/SNPE/SNPEFactory.hpp" -#include "MBLLEN.h" -#include "RUAS.h" -#include "SCI.h" -#include "StableLLVE.h" -#include "ZeroDCE.h" -#include - -Model *modelobj; - -extern "C" JNIEXPORT jstring JNICALL -Java_com_qcom_aistack_1lowlightenhance_SNPEHelper_queryRuntimes( - JNIEnv* env, - jobject /* this */, - jstring native_dir_path) { - const char *cstr = env->GetStringUTFChars(native_dir_path, nullptr); - env->ReleaseStringUTFChars(native_dir_path, cstr); - - std::string runT_Status; - std::string nativeLibPath = std::string(cstr); - if (!SetAdspLibraryPath(nativeLibPath)) { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "Failed to set ADSP Library Path\n"); - - runT_Status += "\nFailed to set ADSP Library Path\nTerminating"; - return env->NewStringUTF(runT_Status.c_str()); - } - - // ====================================================================================== // - runT_Status = "Querying Runtimes : \n\n"; - // DSP unsignedPD check - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(zdl::DlSystem::Runtime_t::DSP,zdl::DlSystem::RuntimeCheckOption_t::UNSIGNEDPD_CHECK)) { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "UnsignedPD DSP runtime : Absent\n"); - runT_Status += "UnsignedPD DSP runtime : Absent\n"; - } - else { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "UnsignedPD DSP runtime : Present\n"); - runT_Status += "UnsignedPD DSP runtime : Present\n"; - } - // DSP signedPD check - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(zdl::DlSystem::Runtime_t::DSP)) { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "DSP runtime : Absent\n"); - runT_Status += "DSP runtime : Absent\n"; - } - else { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "DSP runtime : Present\n"); - runT_Status += "DSP runtime : Present\n"; - } - // GPU check - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(zdl::DlSystem::Runtime_t::GPU)) { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "GPU runtime : Absent\n"); - runT_Status += "GPU runtime : Absent\n"; - } - else { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "GPU runtime : Present\n"); - runT_Status += "GPU runtime : Present\n"; - } - // CPU check - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(zdl::DlSystem::Runtime_t::CPU)) { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "CPU runtime : Absent\n"); - runT_Status += "CPU runtime : Absent\n"; - } - else { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "CPU runtime : Present\n"); - runT_Status += "CPU runtime : Present\n"; - } - - return env->NewStringUTF(runT_Status.c_str()); -} - - -//initializing network -extern "C" -JNIEXPORT jstring JNICALL -Java_com_qcom_aistack_1lowlightenhance_SNPEHelper_initSNPE(JNIEnv *env, jobject thiz, jobject asset_manager, jchar runtime, jstring jdlc_name) { - - LOGI("Reading SNPE DLC ..."); - std::string result; - - const char *cstr = env->GetStringUTFChars(jdlc_name, 0); - AAssetManager* mgr = AAssetManager_fromJava(env, asset_manager); - AAsset* asset_model = AAssetManager_open(mgr, cstr, AASSET_MODE_UNKNOWN); - - //Changing PrePost for MBLLEN - if(strcmp(cstr,"quant_mbllen_640_480_212_8550.dlc")==0){ - LOGI("mbllen_Q dlc"); - modelobj = new MBLLEN(); - } - - //Changing PrePost for RUAS - else if(strcmp(cstr,"ruas_Q.dlc")==0){ - 
LOGI("ruas_Q dlc"); - modelobj = new RUAS(); - } - - //Changing PrePost for SCI - else if(strcmp(cstr,"sci_difficult_Q.dlc")==0){ - LOGI("sci_difficult_Q dlc"); - modelobj = new SCI(); - } - - //Changing PrePost for StableLLVE - else if(strcmp(cstr,"StableLLVE_Q.dlc")==0){ - LOGI("StableLLVE_Q dlc"); - modelobj = new StableLLVE(); - } - - //Changing PrePost for ZeroDCE - else if(strcmp(cstr,"quant_zeroDCE_640_480_212_8550_out80.dlc")==0){ - LOGI("zero_dce_Q dlc"); - modelobj = new ZeroDCE(); - } - - else - { - LOGE("Model pre and post is not defined"); - return NULL; - } - - modelobj->msg(); - env->ReleaseStringUTFChars(jdlc_name, cstr); - - if (NULL == asset_model) { - LOGE("Failed to load ASSET, needed to load DLC\n"); - result = "Failed to load ASSET, needed to load DLC\n"; - return env->NewStringUTF(result.c_str()); - } - - long dlc_size = AAsset_getLength(asset_model); - LOGI("DLC Size = %ld MB\n", dlc_size / (1024 * 1024)); - result += "DLC Size = " + std::to_string(dlc_size); - char* dlc_buffer = (char*) malloc(sizeof(char) * dlc_size); - AAsset_read(asset_model, dlc_buffer, dlc_size); - - result += "\n\nBuilding Models DLC Network:\n"; - result += build_network(reinterpret_cast(dlc_buffer), dlc_size, runtime); - - return env->NewStringUTF(result.c_str()); -} - -//inference -extern "C" -JNIEXPORT jfloat JNICALL -Java_com_qcom_aistack_1lowlightenhance_SNPEHelper_inferSNPE(JNIEnv *env, jobject thiz, jlong inputMat, - jlong outputMat) { - - LOGI("infer SNPE S"); - - cv::Mat &inputimg = *(cv::Mat*) inputMat; - - //BGR -> RGB - cvtColor(inputimg,inputimg,CV_BGR2RGB); - - cv::Mat &outputimg = *(cv::Mat*) outputMat; - - float milli_time; - - bool status = executeDLC(inputimg, outputimg, milli_time, modelobj); - - if(status == false) - { - LOGE("fatal ERROR"); - return 0; - } - else { - LOGI("status is TRUE"); - LOGI("rows: %d cols: %d", outputimg.rows, outputimg.cols); - } - - LOGI("infer SNPE E"); - LOGI("milli_time: %f",milli_time); - return milli_time; - -} \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/CheckRuntime.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/CheckRuntime.hpp deleted file mode 100644 index 07538cd0..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/CheckRuntime.hpp +++ /dev/null @@ -1,17 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2017-2019 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef CHECKRUNTIME_H -#define CHECKRUNTIME_H - -#include "SNPE/SNPEFactory.hpp" - -zdl::DlSystem::Runtime_t checkRuntime(zdl::DlSystem::Runtime_t runtime); -bool checkGLCLInteropSupport(); - -#endif diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/CreateUserBuffer.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/CreateUserBuffer.hpp deleted file mode 100644 index b880b033..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/CreateUserBuffer.hpp +++ /dev/null @@ -1,59 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2017-2020 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -#include"inference.h" -#include -#include -#include -#include "SNPE/SNPE.hpp" -#include "DlSystem/IUserBuffer.hpp" -#include "DlSystem/UserBufferMap.hpp" - -typedef unsigned int GLuint; - -// Helper function to fill a single entry of the UserBufferMap with the given user-backed buffer -void createUserBuffer(zdl::DlSystem::UserBufferMap& userBufferMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - const char * name, - const bool isTfNBuffer, - int bitWidth); - - -// Create a UserBufferMap of the SNPE network outputs -void createOutputBufferMap(zdl::DlSystem::UserBufferMap& outputMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - const bool isTfNBuffer, - int bitWidth); - -// Create a UserBufferMap of the SNPE network inputs -void createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - const bool isTfNBuffer, - int bitWidth); - -//// Create a UserBufferMap of the SNPE network outputs -//void createOutputBufferMap(zdl::DlSystem::UserBufferMap& outputMap, -// std::unordered_map>& applicationBuffers, -// std::vector>& snpeUserBackedBuffers, -// std::unique_ptr& snpe, -// const bool isTfNBuffer, -// int bitWidth); -// -//// Create a UserBufferMap of the SNPE network inputs -//void createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, -// std::unordered_map>& applicationBuffers, -// std::vector>& snpeUserBackedBuffers, -// std::unique_ptr& snpe, -// const bool isTfNBuffer, -// int bitWidth); diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/LoadContainer.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/LoadContainer.hpp deleted file mode 100644 index 85bf622a..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/LoadContainer.hpp +++ /dev/null @@ -1,19 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2019 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef LOADCONTAINER_H -#define LOADCONTAINER_H - -#include - -#include "DlContainer/IDlContainer.hpp" - -std::unique_ptr loadContainerFromFile(std::string containerPath); -std::unique_ptr loadContainerFromBuffer(const uint8_t * buffer, const size_t size); - -#endif diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/LoadInputTensor.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/LoadInputTensor.hpp deleted file mode 100644 index 7aec3b24..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/LoadInputTensor.hpp +++ /dev/null @@ -1,27 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2017-2019 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -#ifndef LOADINPUTTENSOR_H -#define LOADINPUTTENSOR_H - -#include -#include -#include - -#include "SNPE/SNPE.hpp" -#include "DlSystem/ITensorFactory.hpp" -#include "DlSystem/TensorMap.hpp" -#include "../../Model.h" - - -bool loadInputUserBuffer(std::unordered_map>& applicationBuffers, - std::unique_ptr& snpe, - cv::Mat &model_input, - zdl::DlSystem::UserBufferMap& inputMap, - int bitWidth, Model *modelobj); -#endif diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/SetBuilderOptions.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/SetBuilderOptions.hpp deleted file mode 100644 index 3b760147..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/SetBuilderOptions.hpp +++ /dev/null @@ -1,25 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2017-2019 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef SETBUILDEROPTIONS_H -#define SETBUILDEROPTIONS_H - -#include "DlSystem/RuntimeList.hpp" -#include "SNPE/SNPE.hpp" -#include "DlSystem/DlEnums.hpp" -//#include "DlSystem/UDLFunc.hpp" -#include "DlContainer/IDlContainer.hpp" -#include "DlSystem/PlatformConfig.hpp" - -std::unique_ptr setBuilderOptions(std::unique_ptr & container, - zdl::DlSystem::Runtime_t runtime, - zdl::DlSystem::RuntimeList runtimeList, - bool useUserSuppliedBuffers, - bool useCaching); - -#endif //SETBUILDEROPTIONS_H \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/Util.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/Util.hpp deleted file mode 100644 index 346e7ac0..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/Util.hpp +++ /dev/null @@ -1,41 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2017-2019 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -#ifndef UTIL_H -#define UTIL_H - -#include -#include -#include -#include - -#include "DlSystem/ITensorFactory.hpp" -#include "DlSystem/TensorShape.hpp" - -template Container& split(Container& result, const typename Container::value_type & s, typename Container::value_type::value_type delimiter ) -{ - result.clear(); - std::istringstream ss( s ); - while (!ss.eof()) - { - typename Container::value_type field; - getline( ss, field, delimiter ); - if (field.empty()) continue; - result.push_back( field ); - } - return result; -} - - -cv::Mat get_affine_transform(int dst_w, int dst_h, int inv, double center[], double scale[]); -//void getcenterscale(int image_width, int image_height, double center[2], double scale[2]); -void getcenterscale(int image_width, int image_height, double center[2], double scale[2],float bottom, float left, float top, float right); -float** getCoords(std::vector buff, double center[], double scale[]); - -#endif - diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/inference.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/inference.h deleted file mode 100644 index 1903cc74..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/hpp/inference.h +++ /dev/null @@ -1,54 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shubpate on 12/11/2021. -// - -#ifndef NATIVEINFERENCE_INFERENCE_H -#define NATIVEINFERENCE_INFERENCE_H - -#include "zdl/DlSystem/TensorShape.hpp" -#include "zdl/DlSystem/TensorMap.hpp" -#include "zdl/DlSystem/TensorShapeMap.hpp" -#include "zdl/DlSystem/IUserBufferFactory.hpp" -#include "zdl/DlSystem/IUserBuffer.hpp" -#include "zdl/DlSystem/UserBufferMap.hpp" -#include "zdl/DlSystem/IBufferAttributes.hpp" - -#include "zdl/DlSystem/StringList.hpp" - -#include "zdl/SNPE/SNPE.hpp" -#include "zdl/SNPE/SNPEFactory.hpp" -#include "zdl/DlSystem/DlVersion.hpp" -#include "zdl/DlSystem/DlEnums.hpp" -#include "zdl/DlSystem/String.hpp" -#include "zdl/DlContainer/IDlContainer.hpp" -#include "zdl/SNPE/SNPEBuilder.hpp" - -#include "zdl/DlSystem/ITensor.hpp" -#include "zdl/DlSystem/ITensorFactory.hpp" - -#include -#include "android/log.h" - -#include - -#include "../../Model.h" - -#define LOG_TAG "SNPE_INF" -#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__) -#define LOGE(...) 
__android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__) - -std::string build_network(const uint8_t * dlc_buffer, const size_t dlc_size, const char runtime_arg); -bool SetAdspLibraryPath(std::string nativeLibPath); - -bool executeDLC(cv::Mat &inputimg, cv::Mat &outputimg, float &milli_time, Model *modelobj); - -#endif //NATIVEINFERENCE_INFERENCE_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DiagLog/IDiagLog.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DiagLog/IDiagLog.h deleted file mode 100644 index 9a084071..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DiagLog/IDiagLog.h +++ /dev/null @@ -1,102 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef _DIAGLOG_IDIAGLOG_H_ -#define _DIAGLOG_IDIAGLOG_H_ - -#include "DiagLog/Options.h" -#include "DlSystem/SnpeApiExportDefine.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE IDiagLog handle - */ -typedef void* Snpe_IDiagLog_Handle_t; - -/** - * @brief . - * - * Sets the options after initialization occurs. - * - * @param[in] handle : Handle to access IDiagLog - * @param[in] loggingOptions : The options to set up diagnostic logging. - * - * @return Error code if the options could not be set. Ensure logging is not started/ - * SNPE_SUCCESS otherwise - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IDiagLog_SetOptions(Snpe_IDiagLog_Handle_t handle, Snpe_Options_Handle_t loggingOptionsHandle); - -/** - * @brief . - * - * Gets the curent options for the diag logger. - * - * @param[in] handle : Handle to access IDiagLog - * @return Handle to access DiagLog options. - */ -SNPE_API -Snpe_Options_Handle_t Snpe_IDiagLog_GetOptions(Snpe_IDiagLog_Handle_t handle); - -/** - * @brief . - * - * @param[in] handle : Handle to access IDiagLog - * @param[in] mask : Allows for setting the log mask once diag logging has started - * @return SNPE_SUCCESS if the level was set successfully. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IDiagLog_SetDiagLogMask(Snpe_IDiagLog_Handle_t handle, const char* mask) ; - -/** - * @brief . - * - * Enables logging. - * - * Logging should be started prior to the instantiation of other SNPE_APIs - * to ensure all events are captured. - * - * @param[in] handle : Handle to access IDiagLog - * @return SNPE_SUCCESS if diagnostic logging started successfully. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IDiagLog_Start(Snpe_IDiagLog_Handle_t handle); - -/** - * @brief Disables logging. - * - * @param[in] handle : Handle to access IDiagLog - * - * @return SNPE_SUCCESS if logging stopped successfully. Error code otherwise. 
- */ -SNPE_API -Snpe_ErrorCode_t Snpe_IDiagLog_Stop(Snpe_IDiagLog_Handle_t handle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _DIAGLOG_IDIAGLOG_H_ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DiagLog/IDiagLog.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DiagLog/IDiagLog.hpp deleted file mode 100644 index 64b81eba..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DiagLog/IDiagLog.hpp +++ /dev/null @@ -1,133 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include -#include - -#include "Options.hpp" -#include "DlSystem/String.hpp" - -#include "DiagLog/IDiagLog.h" - - -namespace DiagLog{ -class IDiagLog : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static Snpe_ErrorCode_t InvalidDeleteCall(Snpe_IDiagLog_Handle_t ){ - return SNPE_ERRORCODE_CAPI_DELETE_FAILURE; - } - - static constexpr DeleteFunctionType DeleteFunction{InvalidDeleteCall}; - - class OptionsInternal : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_Options_Delete}; - public: - OptionsInternal() - : BaseType(Snpe_Options_Create()) - { } - - explicit OptionsInternal(const Options& options) - : BaseType(Snpe_Options_Create()) - { - setDiagLogMask(options.DiagLogMask.c_str()); - setLogFileDirectory(options.LogFileDirectory.c_str()); - setLogFileName(options.LogFileName.c_str()); - setLogFileRotateCount(options.LogFileRotateCount); - setLogFileReplace(options.LogFileReplace); - } - - const char* getDiagLogMask() const{ - return Snpe_Options_GetDiagLogMask(handle()); - } - void setDiagLogMask(const char* diagLogMask){ - Snpe_Options_SetDiagLogMask(handle(), diagLogMask); - } - - const char* getLogFileDirectory() const{ - return Snpe_Options_GetLogFileDirectory(handle()); - } - void setLogFileDirectory(const char* logFileDirectory){ - Snpe_Options_SetLogFileDirectory(handle(), logFileDirectory); - } - - const char* getLogFileName() const{ - return Snpe_Options_GetLogFileName(handle()); - } - void setLogFileName(const char* logFileName){ - Snpe_Options_SetLogFileName(handle(), logFileName); - } - - uint32_t getLogFileRotateCount() const{ - return Snpe_Options_GetLogFileRotateCount(handle()); - } - void setLogFileRotateCount(uint32_t logFileRotateCount){ - Snpe_Options_SetLogFileRotateCount(handle(), logFileRotateCount); - } - - bool getLogFileReplace() const{ - return Snpe_Options_GetLogFileReplace(handle()); - } - void setLogFileReplace(bool logFileReplace){ - Snpe_Options_SetLogFileReplace(handle(), logFileReplace); - } - - explicit operator Options() const{ - return { - getDiagLogMask(), - getLogFileDirectory(), - getLogFileName(), - getLogFileRotateCount(), - getLogFileReplace() - }; - } - - }; - - - -public: - bool setOptions(const Options& loggingOptions){ - OptionsInternal 
optionsInternal(loggingOptions); - return SNPE_SUCCESS == Snpe_IDiagLog_SetOptions(handle(), getHandle(optionsInternal)); - } - Options getOptions() const{ - OptionsInternal optionsInternal(moveHandle(Snpe_IDiagLog_GetOptions(handle()))); - return Options(optionsInternal); - } - - bool setDiagLogMask(const std::string& mask){ - return SNPE_SUCCESS == Snpe_IDiagLog_SetDiagLogMask(handle(), mask.c_str()); - } - bool setDiagLogMask(const DlSystem::String& mask){ - return setDiagLogMask(static_cast(mask)); - } - - bool start(void){ - return SNPE_SUCCESS == Snpe_IDiagLog_Start(handle()); - } - bool stop(void){ - return SNPE_SUCCESS == Snpe_IDiagLog_Stop(handle()); - } - -}; - -} // ns DiagLog - -ALIAS_IN_ZDL_NAMESPACE(DiagLog, IDiagLog) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DiagLog/Options.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DiagLog/Options.h deleted file mode 100644 index ad641cca..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DiagLog/Options.h +++ /dev/null @@ -1,164 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef _DIAGLOG_OPTIONS_H_ -#define _DIAGLOG_OPTIONS_H_ - -#include - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE Options handle - */ -typedef void* Snpe_Options_Handle_t; - - -SNPE_API -Snpe_Options_Handle_t Snpe_Options_Create(); - -/** - * Destroys/frees a Options - * - * @param[in] handle : Handle to access Options object - * @return indication of success/failures - */ -SNPE_API -Snpe_ErrorCode_t Snpe_Options_Delete(Snpe_Options_Handle_t handle); - -/** - * Gets DiagLogMask - * diagLogMask: Enables diag logging only on the specified area mask - * - * @param[in] handle : Handle to access Options object - * @return diagLogMask as a const char* - */ -SNPE_API -const char* Snpe_Options_GetDiagLogMask(Snpe_Options_Handle_t handle); - -/** - * Sets DiagLogMask - * diagLogMask: Enables diag logging only on the specified area mask - * - * @param[in] handle : Handle to access Options object - * @param[in] diagLogMask : specific area where logging needs to be enabed - */ -SNPE_API -void Snpe_Options_SetDiagLogMask(Snpe_Options_Handle_t handle, const char* diagLogMask); - -/** - * Gets logFileDirectory - * logFileDirectory: The path to the directory where log files will be written. - * The path may be relative or absolute. Relative paths are interpreted - * - * @param[in] handle : Handle to access Options object - * @return logFileDirectory as a const char* - */ -SNPE_API -const char* Snpe_Options_GetLogFileDirectory(Snpe_Options_Handle_t handle); - -/** - * Sets logFileDirectory - * logFileDirectory: The path to the directory where log files will be written. 
- * The path may be relative or absolute. Relative paths are interpreted - * - * @param[in] handle : Handle to access Options object - * @param[in] logFileDirectory : path for saving the log files - */ -SNPE_API -void Snpe_Options_SetLogFileDirectory(Snpe_Options_Handle_t handle, const char* logFileDirectory); - - -/** - * Gets logFileName - * logFileName: The name used for log files. If this value is empty then BaseName will be - * used as the default file name. - * - * @param[in] handle : Handle to access Options object - * @return logFileName as a const char* - */ -SNPE_API -const char* Snpe_Options_GetLogFileName(Snpe_Options_Handle_t handle); - -/** - * Sets logFileName - * logFileName: The name used for log files. If this value is empty then BaseName will be - * used as the default file name. - * - * @param[in] handle : Handle to access Options object - * @param[in] logFileName : name of log file - */ -SNPE_API -void Snpe_Options_SetLogFileName(Snpe_Options_Handle_t handle, const char* logFileName); - -/** - * Gets the maximum number of log files to create. If set to 0 no log rotation - * will be used and the log file name specified will be used each time, overwriting - * any existing log file that may exist. - * - * @param[in] handle : Handle to access options object. - * @return max log files to create - */ -SNPE_API -uint32_t Snpe_Options_GetLogFileRotateCount(Snpe_Options_Handle_t handle); - -/** - * Sets the maximum number of log files to create. If set to 0 no log rotation - * will be used and the log file name specified will be used each time, overwriting - * any existing log file that may exist. - * - * @param[in] handle : Handle to access options object. - * @param[in] logFileRotateCount : max log files to create - */ -SNPE_API -void Snpe_Options_SetLogFileRotateCount(Snpe_Options_Handle_t handle, uint32_t logFileRotateCount); - -/** - * If the log file already exists, control whether it will be replaced - * - * @param[in] handle : Handle to access options object - * @return 1 if log file will be replaced, 0 otherwise - */ -SNPE_API -int Snpe_Options_GetLogFileReplace(Snpe_Options_Handle_t handle); - -/** - * If the log file already exists, control whether it will be replaced - * - * @param[in] handle : Handle to access options object - * @param[in] logFileReplace : 1 if log file to be replaced, 0 otherwise - */ -SNPE_API -void Snpe_Options_SetLogFileReplace(Snpe_Options_Handle_t handle, int logFileReplace); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _DIAGLOG_OPTIONS_H_ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DiagLog/Options.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DiagLog/Options.hpp deleted file mode 100644 index c9ad48b6..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DiagLog/Options.hpp +++ /dev/null @@ -1,50 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
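// Usage sketch, illustrative only: configuring and starting SNPE diagnostic logging with
// the Snpe_Options_* and Snpe_IDiagLog_* C APIs declared above. How a valid
// Snpe_IDiagLog_Handle_t is obtained from a SNPE instance is outside these headers, so the
// handle and the log directory are assumed to be supplied by the caller.
#include "DiagLog/IDiagLog.h"
#include "DiagLog/Options.h"
#include "DlSystem/DlError.h"

static bool startDiagLogging(Snpe_IDiagLog_Handle_t diagLogHandle, const char* logDir)
{
    Snpe_Options_Handle_t options = Snpe_Options_Create();
    Snpe_Options_SetDiagLogMask(options, "");             // area mask; empty by default
    Snpe_Options_SetLogFileDirectory(options, logDir);    // relative or absolute path
    Snpe_Options_SetLogFileName(options, "DiagLog");
    Snpe_Options_SetLogFileRotateCount(options, 20);      // 0 would disable rotation
    Snpe_Options_SetLogFileReplace(options, 1);           // 1: replace an existing log file

    bool ok = Snpe_IDiagLog_SetOptions(diagLogHandle, options) == SNPE_SUCCESS
           && Snpe_IDiagLog_Start(diagLogHandle) == SNPE_SUCCESS;

    Snpe_Options_Delete(options);   // assumed safe to free once the options have been applied
    return ok;
}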
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include -#include - -#include "DiagLog/IDiagLog.h" - - -namespace DiagLog { - -class Options -{ -public: - Options( - std::string diagLogMask = "", - std::string logFileDirectory = "diaglogs", - std::string logFileName = "DiagLog", - uint32_t logFileRotateCount = 20, - bool logFileReplace = true - ) - : DiagLogMask(std::move(diagLogMask)), - LogFileDirectory(std::move(logFileDirectory)), - LogFileName(std::move(logFileName)), - LogFileRotateCount(logFileRotateCount), - LogFileReplace(logFileReplace) - { - // Solves the empty string problem with multiple std libs - DiagLogMask.reserve(1); - } - - std::string DiagLogMask; - std::string LogFileDirectory; - std::string LogFileName; - uint32_t LogFileRotateCount; - - bool LogFileReplace; -}; - -} // ns DiagLog - -ALIAS_IN_ZDL_NAMESPACE(DiagLog, Options) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlContainer/DlContainer.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlContainer/DlContainer.h deleted file mode 100644 index 6ce7cd25..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlContainer/DlContainer.h +++ /dev/null @@ -1,185 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
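// Usage sketch for the C++ wrapper above: the same configuration expressed through
// DiagLog::Options and DiagLog::IDiagLog. Obtaining the IDiagLog instance (typically from
// the SNPE object) is not shown in these headers, so a reference is assumed to be passed in.
#include <string>
#include "DiagLog/IDiagLog.hpp"

static bool startDiagLogging(DiagLog::IDiagLog& diagLog, const std::string& logDir)
{
    DiagLog::Options options;           // defaults: "diaglogs"/"DiagLog", rotate count 20, replace on
    options.LogFileDirectory = logDir;  // the members are public and can be adjusted directly
    return diagLog.setOptions(options) && diagLog.start();
}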
-// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_CONTAINER_DLCONTAINER_H -#define DL_CONTAINER_DLCONTAINER_H - -#ifdef __cplusplus -#include // uint8_t -#include // size_t -#else -#include -#include -#endif - -#include "DlSystem/DlError.h" -#include "DlSystem/StringList.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE DlcRecord handle - */ -typedef void* Snpe_DlcRecord_Handle_t; - -/** - * Constructs a DlcRecord and returns a handle to it - * - * @return the handle to the created DlcRecord - */ -SNPE_API -Snpe_DlcRecord_Handle_t Snpe_DlcRecord_Create(); - -/** - * Constructs a DlcRecord with a provided name and returns a handle to it - * - * @param[in] name : the name of the record - * - * @return the handle to the created DlcRecord - */ -SNPE_API -Snpe_DlcRecord_Handle_t Snpe_DlcRecord_CreateName(const char* name); - - -/** - * Destroys/frees a DlcRecord - * - * @param[in] dlcRecordHandle : Handle to access DlcRecord - * - * @return indication of success/failures - */ -SNPE_API -Snpe_ErrorCode_t Snpe_DlcRecord_Delete(Snpe_DlcRecord_Handle_t dlcRecordHandle); - -/** - * Gets the size of a DlcRecord in bytes - * - * @param[in] dlcRecordHandle : Handle to access DlcRecord - * - * @return the size of the DlcRecord in bytes - */ -SNPE_API -size_t Snpe_DlcRecord_Size(Snpe_DlcRecord_Handle_t dlcRecordHandle); - -/** - * Gets a pointer to the start of the DlcRecord's data - * - * @param[in] dlcRecordHandle : Handle to access DlcRecord - * - * @return uint8_t pointer to the DlcRecord's data - */ -SNPE_API -uint8_t* Snpe_DlcRecord_Data(Snpe_DlcRecord_Handle_t dlcRecordHandle); - -/** - * Gets the name of the DlcRecord - * - * @param[in] dlcRecordHandle : Handle to access DlcRecord - * - * @return the record's name - */ -SNPE_API -const char* Snpe_DlcRecord_Name(Snpe_DlcRecord_Handle_t dlcRecordHandle); - -/** - * A typedef to indicate a SNPE DlContainer handle - */ -typedef void* Snpe_DlContainer_Handle_t; - -/** - * Destroys/frees a DlContainer - * - * @param[in] dlContainerHandle : Handle to access DlContainer - * - * @return indication of success/failures - */ -SNPE_API -Snpe_ErrorCode_t Snpe_DlContainer_Delete(Snpe_DlContainer_Handle_t dlContainerHandle); - - -/** - * Initializes a container from a container archive file. - * - * @param[in] filename Container archive file path. - * - * @return Status of container open call - */ -SNPE_API -Snpe_DlContainer_Handle_t Snpe_DlContainer_Open(const char* filename); - -/** - * Initializes a container from a byte buffer. - * - * @param[in] buffer Byte buffer holding the contents of an archive - * file. - * - * @param[in] size Size of the byte buffer. - * - * @return A Snpe_DlContainer_Handle_t to access the dlContainer - */ -SNPE_API -Snpe_DlContainer_Handle_t Snpe_DlContainer_OpenBuffer(const uint8_t* buffer, const size_t size); - -/** - * Get the record catalog for a container. - * - * @param[in] dlContainerHandle : Handle to access DlContainer - * - * @return A Snpe_StringListHandle_t that holds the record names of the DlContainer - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_DlContainer_GetCatalog(Snpe_DlContainer_Handle_t dlContainerHandle); - -/** - * Get a record from a container by name. - * - * @param[in] dlContainerHandle : Handle to access DlContainer - * @param[in] recordName : Name of the record to fetch. 
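// Usage sketch, illustrative only: opening a DLC archive with the C API declared above,
// either from a file path or from a byte buffer already held in memory (as inference.h
// does with its dlc_buffer/dlc_size arguments). Treating a null handle as failure is an
// assumption; the header does not spell out the failure signalling.
#include <cstdint>
#include <vector>
#include "DlContainer/DlContainer.h"

static Snpe_DlContainer_Handle_t openDlc(const char* dlcPath,
                                         const std::vector<uint8_t>& dlcBuffer)
{
    // Prefer the on-disk archive when a path is given; otherwise fall back to the buffer.
    Snpe_DlContainer_Handle_t container =
        dlcPath ? Snpe_DlContainer_Open(dlcPath)
                : Snpe_DlContainer_OpenBuffer(dlcBuffer.data(), dlcBuffer.size());
    return container;   // the caller releases the handle with Snpe_DlContainer_Delete()
}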
- * - * @return A Snpe_DlcRecordHandle_t that owns the record read from the DlContainer - */ -SNPE_API -Snpe_DlcRecord_Handle_t Snpe_DlContainer_GetRecord(Snpe_DlContainer_Handle_t dlContainerHandle, const char* recordName); - -/** - * Save the container to an archive on disk. This function will save the - * container if the filename is different from the file that it was opened - * from, or if at least one record was modified since the container was - * opened. - * - * It will truncate any existing file at the target path. - * - * @param[in] dlContainerHandle : Handle to access DlContainer - * @param[in] filename : Container archive file path. - * - * @return indication of success/failure - */ -SNPE_API -Snpe_ErrorCode_t Snpe_DlContainer_Save(Snpe_DlContainer_Handle_t dlContainerHandle, const char* filename); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_CONTAINER_DLCONTAINER_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlContainer/IDlContainer.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlContainer/IDlContainer.hpp deleted file mode 100644 index 482dbd02..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlContainer/IDlContainer.hpp +++ /dev/null @@ -1,146 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include -#include -#include -#include -#include - -#include "Wrapper.hpp" -#include "DlSystem/String.hpp" - -#include "DlContainer/DlContainer.h" -#include "DlSystem/StringList.hpp" - - - -namespace DlContainer { - -struct DlcRecord -{ - std::string name; - std::vector data; - - DlcRecord() - : name{}, - data{} - { } - - DlcRecord( DlcRecord&& other ) noexcept - : name(std::move(other.name)), - data(std::move(other.data)) - { } - DlcRecord(const std::string& new_name) - : name(new_name), - data() - { - if(name.empty()) { - name.reserve(1); - } - } - DlcRecord(const DlcRecord&) = delete; -}; - - -class IDlContainer : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_DlContainer_Delete}; - - template - void getCatalog_(std::set& catalog) const{ - DlSystem::StringList sl(moveHandle(Snpe_DlContainer_GetCatalog(handle()))); - for(auto s : sl){ - catalog.emplace(s); - } - } - - - class DlcRecordInternal : public Wrapper { - friend BaseType; - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_DlcRecord_Delete}; - public: - DlcRecordInternal() - : BaseType(Snpe_DlcRecord_Create()) - { } - explicit DlcRecordInternal(const std::string& name) - : BaseType(Snpe_DlcRecord_CreateName(name.c_str())) - { } - - uint8_t* getData(){ - return Snpe_DlcRecord_Data(handle()); - } - size_t size() const{ - return Snpe_DlcRecord_Size(handle()); - } - const char* getName(){ - return Snpe_DlcRecord_Name(handle()); - } - }; - - -public: - static std::unique_ptr open(const std::string& filename) noexcept{ - return makeUnique(Snpe_DlContainer_Open(filename.c_str())); - } - - static std::unique_ptr open(const uint8_t* buffer, const size_t size) 
noexcept{ - return makeUnique(Snpe_DlContainer_OpenBuffer(buffer, size)); - - } - static std::unique_ptr open(const std::vector& buffer) noexcept{ - return open(buffer.data(), buffer.size()); - } - static std::unique_ptr open(const DlSystem::String &filename) noexcept{ - return open(static_cast(filename)); - } - - - void getCatalog(std::set& catalog) const{ - return getCatalog_(catalog); - } - void getCatalog(std::set& catalog) const{ - return getCatalog_(catalog); - } - - bool getRecord(const std::string& name, DlcRecord& record) const{ - auto h = Snpe_DlContainer_GetRecord(handle(), name.c_str()); - if(!h) return false; - DlcRecordInternal internal(moveHandle(h)); - auto data = internal.getData(); - - record.name.assign(internal.getName()); - record.data.assign(data, data+internal.size()); - return true; - } - - bool getRecord(const DlSystem::String& name, DlcRecord& record) const{ - return getRecord(static_cast(name), record); - } - - bool save(const std::string& filename){ - return Snpe_DlContainer_Save(handle(), filename.c_str()); - } - - bool save(const DlSystem::String& filename){ - return save(static_cast(filename)); - } -}; - - -} // ns DlContainer - -ALIAS_IN_ZDL_NAMESPACE(DlContainer, DlcRecord) -ALIAS_IN_ZDL_NAMESPACE(DlContainer, IDlContainer) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlEnums.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlEnums.h deleted file mode 100644 index 85a0f4d3..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlEnums.h +++ /dev/null @@ -1,267 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _DL_ENUMS_H_ -#define _DL_ENUMS_H_ - -#include "DlSystem/SnpeApiExportDefine.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * Enumeration of supported target runtimes. - */ -typedef enum -{ - /// Special value indicating the property is unset. - SNPE_RUNTIME_UNSET = -1, - /// Run the processing on Snapdragon CPU. - /// Data: float 32bit - /// Math: float 32bit - SNPE_RUNTIME_CPU_FLOAT32 = 0, - /// Default legacy enum to retain backward compatibility. - /// CPU = CPU_FLOAT32 - SNPE_RUNTIME_CPU = SNPE_RUNTIME_CPU_FLOAT32, - - /// Run the processing on the Adreno GPU. - /// Data: float 16bit - /// Math: float 32bit - SNPE_RUNTIME_GPU_FLOAT32_16_HYBRID = 1, - /// Default legacy enum to retain backward compatibility. - /// GPU = GPU_FLOAT32_16_HYBRID - SNPE_RUNTIME_GPU = SNPE_RUNTIME_GPU_FLOAT32_16_HYBRID, - - /// Run the processing on the Hexagon DSP. - /// Data: 8bit fixed point Tensorflow style format - /// Math: 8bit fixed point Tensorflow style format - SNPE_RUNTIME_DSP_FIXED8_TF = 2, - /// Default legacy enum to retain backward compatibility. 
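// Usage sketch for the C++ wrapper above: open a container, list its record catalog and
// fetch one record by name. "model.dlc" is a placeholder path; the std::set<std::string>
// element type matches the string-based getCatalog/getRecord overloads.
#include <set>
#include <string>
#include "DlContainer/IDlContainer.hpp"

static bool inspectContainer(const std::string& dlcPath /* e.g. "model.dlc" */)
{
    auto container = DlContainer::IDlContainer::open(dlcPath);   // unique_ptr, null on failure
    if (!container)
        return false;

    std::set<std::string> catalog;
    container->getCatalog(catalog);          // names of the records stored in the archive

    DlContainer::DlcRecord record;
    for (const auto& name : catalog) {
        if (container->getRecord(name, record))
            break;                           // record.name / record.data now hold that record
    }
    return true;
}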
- /// DSP = DSP_FIXED8_TF - SNPE_RUNTIME_DSP = SNPE_RUNTIME_DSP_FIXED8_TF, - - /// Run the processing on the Adreno GPU. - /// Data: float 16bit - /// Math: float 16bit - SNPE_RUNTIME_GPU_FLOAT16 = 3, - - /// Run the processing on Snapdragon AIX+HVX. - /// Data: 8bit fixed point Tensorflow style format - /// Math: 8bit fixed point Tensorflow style format - SNPE_RUNTIME_AIP_FIXED8_TF = 5, - SNPE_RUNTIME_AIP_FIXED_TF = SNPE_RUNTIME_AIP_FIXED8_TF -} Snpe_Runtime_t; - -/** - * Enumeration of runtime available check options. - */ -typedef enum -{ - /// Perform standard runtime available check - SNPE_RUNTIME_CHECK_OPTION_DEFAULT = 2, - /// Perform standard runtime available check - SNPE_RUNTIME_CHECK_OPTION_NORMAL_CHECK = 0, - /// Perform basic runtime available check, may be runtime specific - SNPE_RUNTIME_CHECK_OPTION_BASIC_CHECK = 1, - /// Perform unsignedPD runtime available check - SNPE_RUNTIME_CHECK_OPTION_UNSIGNEDPD_CHECK = 2, -} Snpe_RuntimeCheckOption_t; - -/** - * Enumeration of various performance profiles that can be requested. - */ -typedef enum -{ - /// Run in a standard mode. - /// This mode will be deprecated in the future and replaced with BALANCED. - SNPE_PERFORMANCE_PROFILE_DEFAULT = 0, - /// Run in a balanced mode. - SNPE_PERFORMANCE_PROFILE_BALANCED = 0, - - /// Run in high performance mode - SNPE_PERFORMANCE_PROFILE_HIGH_PERFORMANCE = 1, - - /// Run in a power sensitive mode, at the expense of performance. - SNPE_PERFORMANCE_PROFILE_POWER_SAVER = 2, - - /// Use system settings. SNPE makes no calls to any performance related APIs. - SNPE_PERFORMANCE_PROFILE_SYSTEM_SETTINGS = 3, - - /// Run in sustained high performance mode - SNPE_PERFORMANCE_PROFILE_SUSTAINED_HIGH_PERFORMANCE = 4, - - /// Run in burst mode - SNPE_PERFORMANCE_PROFILE_BURST = 5, - - /// Run in lower clock than POWER_SAVER, at the expense of performance. - SNPE_PERFORMANCE_PROFILE_LOW_POWER_SAVER = 6, - - /// Run in higher clock and provides better performance than POWER_SAVER. - SNPE_PERFORMANCE_PROFILE_HIGH_POWER_SAVER = 7, - - /// Run in lower balanced mode - SNPE_PERFORMANCE_PROFILE_LOW_BALANCED = 8, - - /// Run in lowest clock at the expense of performance - SNPE_PERFORMANCE_PROFILE_EXTREME_POWER_SAVER = 9, - -} Snpe_PerformanceProfile_t; - -/** - * Enumeration of various profilngLevels that can be requested. - */ -typedef enum -{ - /// No profiling. - /// Collects no runtime stats in the DiagLog - SNPE_PROFILING_LEVEL_OFF = 0, - - /// Basic profiling - /// Collects some runtime stats in the DiagLog - SNPE_PROFILING_LEVEL_BASIC = 1, - - /// Detailed profiling - /// Collects more runtime stats in the DiagLog, including per-layer statistics - /// Performance may be impacted - SNPE_PROFILING_LEVEL_DETAILED = 2, - - /// Moderate profiling - /// Collects more runtime stats in the DiagLog, no per-layer statistics - SNPE_PROFILING_LEVEL_MODERATE = 3, - - /// Linting profiling - /// HTP exclusive profiling level that collects in-depth performance metrics - /// for each op in the graph including main thread execution time and time spent - /// on parallel background ops - SNPE_PROFILING_LEVEL_LINTING = 4 - -} Snpe_ProfilingLevel_t; - -/** - * Enumeration of various execution priority hints. 
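// Usage sketch, illustrative only: mapping a single-character runtime argument (as passed
// to build_network() in inference.h) onto the Snpe_Runtime_t values declared above. The
// 'C'/'G'/'D' letter convention is an assumption for illustration, not something these
// headers define.
#include "DlSystem/DlEnums.h"

static Snpe_Runtime_t runtimeFromArg(char runtime_arg)
{
    switch (runtime_arg) {
        case 'D': return SNPE_RUNTIME_DSP;    // Hexagon DSP, 8-bit fixed-point data and math
        case 'G': return SNPE_RUNTIME_GPU;    // Adreno GPU, FP16 data / FP32 math
        case 'C': return SNPE_RUNTIME_CPU;    // Snapdragon CPU, FP32
        default:  return SNPE_RUNTIME_UNSET;  // let the caller decide on a fallback
    }
}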
- */ -typedef enum -{ - /// Normal priority - SNPE_EXECUTION_PRIORITY_NORMAL = 0, - - /// Higher than normal priority - SNPE_EXECUTION_PRIORITY_HIGH = 1, - - /// Lower priority - SNPE_EXECUTION_PRIORITY_LOW = 2, - - /// Between Normal and High priority - SNPE_EXECUTION_PRIORITY_NORMAL_HIGH = 3 - -} Snpe_ExecutionPriorityHint_t; - -/** - * Enumeration that lists the supported image encoding formats. - */ -typedef enum -{ - /// For unknown image type. Also used as a default value for ImageEncoding_t. - SNPE_IMAGE_ENCODING_UNKNOWN = 0, - - /// The RGB format consists of 3 bytes per pixel: one byte for - /// Red, one for Green, and one for Blue. The byte ordering is - /// endian independent and is always in RGB byte order. - SNPE_IMAGE_ENCODING_RGB = 1, - - /// The ARGB32 format consists of 4 bytes per pixel: one byte for - /// Red, one for Green, one for Blue, and one for the alpha channel. - /// The alpha channel is ignored. The byte ordering depends on the - /// underlying CPU. For little endian CPUs, the byte order is BGRA. - /// For big endian CPUs, the byte order is ARGB. - SNPE_IMAGE_ENCODING_ARGB32 = 2, - - /// The RGBA format consists of 4 bytes per pixel: one byte for - /// Red, one for Green, one for Blue, and one for the alpha channel. - /// The alpha channel is ignored. The byte ordering is endian independent - /// and is always in RGBA byte order. - SNPE_IMAGE_ENCODING_RGBA = 3, - - /// The GRAYSCALE format is for 8-bit grayscale. - SNPE_IMAGE_ENCODING_GRAYSCALE = 4, - - /// NV21 is the Android version of YUV. The Chrominance is down - /// sampled and has a subsampling ratio of 4:2:0. Note that this - /// image format has 3 channels, but the U and V channels - /// are subsampled. For every four Y pixels there is one U and one V pixel. @newpage - SNPE_IMAGE_ENCODING_NV21 = 5, - - /// The BGR format consists of 3 bytes per pixel: one byte for - /// Red, one for Green and one for Blue. The byte ordering is - /// endian independent and is always BGR byte order. - SNPE_IMAGE_ENCODING_BGR = 6 -} Snpe_ImageEncoding_t; - -/** - * Enumeration that lists the supported LogLevels that can be set by users. - */ -typedef enum -{ - /// Enumeration variable to be used by user to set logging level to FATAL. - SNPE_LOG_LEVEL_FATAL = 0, - - /// Enumeration variable to be used by user to set logging level to ERROR. - SNPE_LOG_LEVEL_ERROR = 1, - - /// Enumeration variable to be used by user to set logging level to WARN. - SNPE_LOG_LEVEL_WARN = 2, - - /// Enumeration variable to be used by user to set logging level to INFO. - SNPE_LOG_LEVEL_INFO = 3, - - /// Enumeration variable to be used by user to set logging level to VERBOSE. 
- SNPE_LOG_LEVEL_VERBOSE = 4 -} Snpe_LogLevel_t; - -/** - * Enumeration that list the supported data types for buffers - */ -typedef enum -{ - /// Unspecified - SNPE_IO_BUFFER_DATATYPE_UNSPECIFIED = 0, - - /// 32-bit floating point - SNPE_IO_BUFFER_DATATYPE_FLOATING_POINT_32 = 1, - - /// 16-bit floating point - SNPE_IO_BUFFER_DATATYPE_FLOATING_POINT_16 = 2, - - /// 8-bit fixed point - SNPE_IO_BUFFER_DATATYPE_FIXED_POINT_8 = 3, - - /// 16-bit fixed point - SNPE_IO_BUFFER_DATATYPE_FIXED_POINT_16 = 4 -} Snpe_IOBufferDataType_t; - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _DL_ENUMS_H_ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlEnums.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlEnums.hpp deleted file mode 100644 index 9158f594..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlEnums.hpp +++ /dev/null @@ -1,266 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -namespace DlSystem { -/** @addtogroup c_plus_plus_apis C++ -@{ */ - -/** - * Enumeration of supported target runtimes. - */ -enum class Runtime_t -{ - /// Special value indicating the property is unset. - UNSET = -1, - /// Run the processing on Snapdragon CPU. - /// Data: float 32bit - /// Math: float 32bit - CPU_FLOAT32 = 0, - /// Default legacy enum to retain backward compatibility. - /// CPU = CPU_FLOAT32 - CPU = CPU_FLOAT32, - - /// Run the processing on the Adreno GPU. - /// Data: float 16bit - /// Math: float 32bit - GPU_FLOAT32_16_HYBRID = 1, - /// Default legacy enum to retain backward compatibility. - /// GPU = GPU_FLOAT32_16_HYBRID - GPU = GPU_FLOAT32_16_HYBRID, - - /// Run the processing on the Hexagon DSP. - /// Data: 8bit fixed point Tensorflow style format - /// Math: 8bit fixed point Tensorflow style format - DSP_FIXED8_TF = 2, - /// Default legacy enum to retain backward compatibility. - /// DSP = DSP_FIXED8_TF - DSP = DSP_FIXED8_TF, - - /// Run the processing on the Adreno GPU. - /// Data: float 16bit - /// Math: float 16bit - GPU_FLOAT16 = 3, - - /// Run the processing on Snapdragon AIX+HVX. - /// Data: 8bit fixed point Tensorflow style format - /// Math: 8bit fixed point Tensorflow style format - AIP_FIXED8_TF = 5, - AIP_FIXED_TF = AIP_FIXED8_TF, - - /// Any new enums should be added above this line - NUM_RUNTIME_TARGETS -}; - -/** - * Enumeration of runtime available check options. - */ -enum class RuntimeCheckOption_t -{ - /// Perform standard runtime available check - NORMAL_CHECK = 0, - /// Perform basic runtime available check, may be runtime specific - BASIC_CHECK = 1, - /// Perform unsignedPD runtime available check - UNSIGNEDPD_CHECK = 2, - /// Perform standard runtime available check - DEFAULT = 2, - /// Any new enums should be added above this line - NUM_RUNTIMECHECK_OPTIONS -}; - -/** - * Enumeration of various performance profiles that can be requested. - */ -enum class PerformanceProfile_t -{ - /// Run in a standard mode. - /// This mode will be deprecated in the future and replaced with BALANCED. - DEFAULT = 0, - /// Run in a balanced mode. 
- BALANCED = 0, - - /// Run in high performance mode - HIGH_PERFORMANCE = 1, - - /// Run in a power sensitive mode, at the expense of performance. - POWER_SAVER = 2, - - /// Use system settings. SNPE makes no calls to any performance related APIs. - SYSTEM_SETTINGS = 3, - - /// Run in sustained high performance mode - SUSTAINED_HIGH_PERFORMANCE = 4, - - /// Run in burst mode - BURST = 5, - - /// Run in lower clock than POWER_SAVER, at the expense of performance. - LOW_POWER_SAVER = 6, - - /// Run in higher clock and provides better performance than POWER_SAVER. - HIGH_POWER_SAVER = 7, - - /// Run in lower balanced mode - LOW_BALANCED = 8, - - /// Run in lowest clock at the expense of performance - EXTREME_POWER_SAVER = 9, - - /// Any new enums should be added above this line - NUM_PERF_PROFILES -}; - -/** - * Enumeration of various profilngLevels that can be requested. - */ -enum class ProfilingLevel_t -{ - /// No profiling. - /// Collects no runtime stats in the DiagLog - OFF = 0, - - /// Basic profiling - /// Collects some runtime stats in the DiagLog - BASIC = 1, - - /// Detailed profiling - /// Collects more runtime stats in the DiagLog, including per-layer statistics - /// Performance may be impacted - DETAILED = 2, - - /// Moderate profiling - /// Collects more runtime stats in the DiagLog, no per-layer statistics - MODERATE = 3, - - /// Linting profiling - /// HTP exclusive profiling level that collects in-depth performance metrics - /// for each op in the graph including main thread execution time and time spent - /// on parallel background ops - LINTING = 4 -}; - -/** - * Enumeration of various execution priority hints. - */ -enum class ExecutionPriorityHint_t -{ - /// Normal priority - NORMAL = 0, - - /// Higher than normal priority - HIGH = 1, - - /// Lower priority - LOW = 2, - - /// Between Normal and High priority - NORMAL_HIGH = 3, - - /// Any new enums should be added above this line - NUM_EXECUTION_PRIORITY_HINTS -}; - -/** @} */ /* end_addtogroup c_plus_plus_apis C++*/ - -/** - * Enumeration that lists the supported image encoding formats. - */ -enum class ImageEncoding_t -{ - /// For unknown image type. Also used as a default value for ImageEncoding_t. - UNKNOWN = 0, - - /// The RGB format consists of 3 bytes per pixel: one byte for - /// Red, one for Green, and one for Blue. The byte ordering is - /// endian independent and is always in RGB byte order. - RGB = 1, - - /// The ARGB32 format consists of 4 bytes per pixel: one byte for - /// Red, one for Green, one for Blue, and one for the alpha channel. - /// The alpha channel is ignored. The byte ordering depends on the - /// underlying CPU. For little endian CPUs, the byte order is BGRA. - /// For big endian CPUs, the byte order is ARGB. - ARGB32 = 2, - - /// The RGBA format consists of 4 bytes per pixel: one byte for - /// Red, one for Green, one for Blue, and one for the alpha channel. - /// The alpha channel is ignored. The byte ordering is endian independent - /// and is always in RGBA byte order. - RGBA = 3, - - /// The GRAYSCALE format is for 8-bit grayscale. - GRAYSCALE = 4, - - /// NV21 is the Android version of YUV. The Chrominance is down - /// sampled and has a subsampling ratio of 4:2:0. Note that this - /// image format has 3 channels, but the U and V channels - /// are subsampled. For every four Y pixels there is one U and one V pixel. @newpage - NV21 = 5, - - /// The BGR format consists of 3 bytes per pixel: one byte for - /// Red, one for Green and one for Blue. 
The byte ordering is - /// endian independent and is always BGR byte order. - BGR = 6 -}; - -/** - * Enumeration that lists the supported LogLevels that can be set by users. - */ -enum class LogLevel_t -{ - /// Enumeration variable to be used by user to set logging level to FATAL. - LOG_FATAL = 0, - - /// Enumeration variable to be used by user to set logging level to ERROR. - LOG_ERROR = 1, - - /// Enumeration variable to be used by user to set logging level to WARN. - LOG_WARN = 2, - - /// Enumeration variable to be used by user to set logging level to INFO. - LOG_INFO = 3, - - /// Enumeration variable to be used by user to set logging level to VERBOSE. - LOG_VERBOSE = 4, - - /// Any new enums should be added above this line - NUM_LOG_LEVELS -}; - -enum class IOBufferDataType_t : int -{ - UNSPECIFIED = 0, - FLOATING_POINT_32 = 1, - FLOATING_POINT_16 = 2, - FIXED_POINT_8 = 3, - FIXED_POINT_16 = 4, - INT_32 = 5, - UINT_32 = 6, - INT_8 = 7, - UINT_8 = 8, - INT_16 = 9, - UINT_16 = 10, - BOOL_8 = 11, - INT_64 = 12, - UINT_64 = 13 -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, Runtime_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, RuntimeCheckOption_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, PerformanceProfile_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ProfilingLevel_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ExecutionPriorityHint_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ImageEncoding_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, LogLevel_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, IOBufferDataType_t) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlError.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlError.h deleted file mode 100644 index f8c216ea..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlError.h +++ /dev/null @@ -1,299 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _DL_ERROR_H_ -#define _DL_ERROR_H_ - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "SnpeApiExportDefine.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * Enumeration of error codes - */ -typedef enum -{ - /// Indicate success: SNPE_SUCCESS = 0 - SNPE_SUCCESS = 0, - - // C API Error Codes - // This is a temporary place for them. 
We still have to figure out how to manage - // passing error codes from the C API to C++ if we want to use things like SetLastError - SNPE_ERRORCODE_CAPI_CREATE_FAILURE = 10, - SNPE_ERRORCODE_CAPI_HANDLEGEN_FAILURE = 11, - SNPE_ERRORCODE_CAPI_DELETE_FAILURE = 12, - SNPE_ERRORCODE_CAPI_BAD_HANDLE = 13, - SNPE_ERRORCODE_CAPI_BAD_ARGUMENT = 14, - SNPE_ERRORCODE_CAPI_BAD_ALLOC = 15, - - // System config errors - SNPE_ERRORCODE_CONFIG_MISSING_PARAM = 100, - SNPE_ERRORCODE_CONFIG_INVALID_PARAM = 101, - SNPE_ERRORCODE_CONFIG_MISSING_FILE = 102, - SNPE_ERRORCODE_CONFIG_NNCONFIG_NOT_SET = 103, - SNPE_ERRORCODE_CONFIG_NNCONFIG_INVALID = 104, - SNPE_ERRORCODE_CONFIG_WRONG_INPUT_NAME = 105, - SNPE_ERRORCODE_CONFIG_INCORRECT_INPUT_DIMENSIONS = 106, - SNPE_ERRORCODE_CONFIG_DIMENSIONS_MODIFICATION_NOT_SUPPORTED = 107, - SNPE_ERRORCODE_CONFIG_BOTH_OUTPUT_LAYER_TENSOR_NAMES_SET = 108, - - SNPE_ERRORCODE_CONFIG_NNCONFIG_ONLY_TENSOR_SUPPORTED = 120, - SNPE_ERRORCODE_CONFIG_NNCONFIG_ONLY_USER_BUFFER_SUPPORTED = 121, - - // DlSystem errors - SNPE_ERRORCODE_DLSYSTEM_MISSING_BUFFER = 200, - SNPE_ERRORCODE_DLSYSTEM_TENSOR_CAST_FAILED = 201, - SNPE_ERRORCODE_DLSYSTEM_FIXED_POINT_PARAM_INVALID = 202, - SNPE_ERRORCODE_DLSYSTEM_SIZE_MISMATCH = 203, - SNPE_ERRORCODE_DLSYSTEM_NAME_NOT_FOUND = 204, - SNPE_ERRORCODE_DLSYSTEM_VALUE_MISMATCH = 205, - SNPE_ERRORCODE_DLSYSTEM_INSERT_FAILED = 206, - SNPE_ERRORCODE_DLSYSTEM_TENSOR_FILE_READ_FAILED = 207, - SNPE_ERRORCODE_DLSYSTEM_DIAGLOG_FAILURE = 208, - SNPE_ERRORCODE_DLSYSTEM_LAYER_NOT_SET = 209, - SNPE_ERRORCODE_DLSYSTEM_WRONG_NUMBER_INPUT_BUFFERS = 210, - SNPE_ERRORCODE_DLSYSTEM_RUNTIME_TENSOR_SHAPE_MISMATCH = 211, - SNPE_ERRORCODE_DLSYSTEM_TENSOR_MISSING = 212, - SNPE_ERRORCODE_DLSYSTEM_TENSOR_ITERATION_UNSUPPORTED = 213, - SNPE_ERRORCODE_DLSYSTEM_BUFFER_MANAGER_MISSING = 214, - SNPE_ERRORCODE_DLSYSTEM_RUNTIME_BUFFER_SOURCE_UNSUPPORTED = 215, - SNPE_ERRORCODE_DLSYSTEM_BUFFER_CAST_FAILED = 216, - SNPE_ERRORCODE_DLSYSTEM_WRONG_TRANSITION_TYPE = 217, - SNPE_ERRORCODE_DLSYSTEM_LAYER_ALREADY_REGISTERED = 218, - SNPE_ERRORCODE_DLSYSTEM_TENSOR_DIM_INVALID = 219, - - SNPE_ERRORCODE_DLSYSTEM_BUFFERENCODING_UNKNOWN = 240, - SNPE_ERRORCODE_DLSYSTEM_BUFFER_INVALID_PARAM = 241, - - // DlContainer errors - SNPE_ERRORCODE_DLCONTAINER_MODEL_PARSING_FAILED = 300, - SNPE_ERRORCODE_DLCONTAINER_UNKNOWN_LAYER_CODE = 301, - SNPE_ERRORCODE_DLCONTAINER_MISSING_LAYER_PARAM = 302, - SNPE_ERRORCODE_DLCONTAINER_LAYER_PARAM_NOT_SUPPORTED = 303, - SNPE_ERRORCODE_DLCONTAINER_LAYER_PARAM_INVALID = 304, - SNPE_ERRORCODE_DLCONTAINER_TENSOR_DATA_MISSING = 305, - SNPE_ERRORCODE_DLCONTAINER_MODEL_LOAD_FAILED = 306, - SNPE_ERRORCODE_DLCONTAINER_MISSING_RECORDS = 307, - SNPE_ERRORCODE_DLCONTAINER_INVALID_RECORD = 308, - SNPE_ERRORCODE_DLCONTAINER_WRITE_FAILURE = 309, - SNPE_ERRORCODE_DLCONTAINER_READ_FAILURE = 310, - SNPE_ERRORCODE_DLCONTAINER_BAD_CONTAINER = 311, - SNPE_ERRORCODE_DLCONTAINER_BAD_DNN_FORMAT_VERSION = 312, - SNPE_ERRORCODE_DLCONTAINER_UNKNOWN_AXIS_ANNOTATION = 313, - SNPE_ERRORCODE_DLCONTAINER_UNKNOWN_SHUFFLE_TYPE = 314, - SNPE_ERRORCODE_DLCONTAINER_TEMP_FILE_FAILURE = 315, - - // Network errors - SNPE_ERRORCODE_NETWORK_EMPTY_NETWORK = 400, - SNPE_ERRORCODE_NETWORK_CREATION_FAILED = 401, - SNPE_ERRORCODE_NETWORK_PARTITION_FAILED = 402, - SNPE_ERRORCODE_NETWORK_NO_OUTPUT_DEFINED = 403, - SNPE_ERRORCODE_NETWORK_MISMATCH_BETWEEN_NAMES_AND_DIMS = 404, - SNPE_ERRORCODE_NETWORK_MISSING_INPUT_NAMES = 405, - SNPE_ERRORCODE_NETWORK_MISSING_OUTPUT_NAMES = 406, - SNPE_ERRORCODE_NETWORK_EXECUTION_FAILED = 
407, - - // Host runtime errors - SNPE_ERRORCODE_HOST_RUNTIME_TARGET_UNAVAILABLE = 500, - - // CPU runtime errors - SNPE_ERRORCODE_CPU_LAYER_NOT_SUPPORTED = 600, - SNPE_ERRORCODE_CPU_LAYER_PARAM_NOT_SUPPORTED = 601, - SNPE_ERRORCODE_CPU_LAYER_PARAM_INVALID = 602, - SNPE_ERRORCODE_CPU_LAYER_PARAM_COMBINATION_INVALID = 603, - SNPE_ERRORCODE_CPU_BUFFER_NOT_FOUND = 604, - SNPE_ERRORCODE_CPU_NETWORK_NOT_SUPPORTED = 605, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_ERRORCODE_CPU_UDO_OPERATION_FAILED = 606, -#endif //DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // CPU fixed-point runtime errors - SNPE_ERRORCODE_CPU_FXP_LAYER_NOT_SUPPORTED = 700, - SNPE_ERRORCODE_CPU_FXP_LAYER_PARAM_NOT_SUPPORTED = 701, - SNPE_ERRORCODE_CPU_FXP_LAYER_PARAM_INVALID = 702, - SNPE_ERRORCODE_CPU_FXP_OPTION_INVALID = 703, - - // GPU runtime errors - SNPE_ERRORCODE_GPU_LAYER_NOT_SUPPORTED = 800, - SNPE_ERRORCODE_GPU_LAYER_PARAM_NOT_SUPPORTED = 801, - SNPE_ERRORCODE_GPU_LAYER_PARAM_INVALID = 802, - SNPE_ERRORCODE_GPU_LAYER_PARAM_COMBINATION_INVALID = 803, - SNPE_ERRORCODE_GPU_KERNEL_COMPILATION_FAILED = 804, - SNPE_ERRORCODE_GPU_CONTEXT_NOT_SET = 805, - SNPE_ERRORCODE_GPU_KERNEL_NOT_SET = 806, - SNPE_ERRORCODE_GPU_KERNEL_PARAM_INVALID = 807, - SNPE_ERRORCODE_GPU_OPENCL_CHECK_FAILED = 808, - SNPE_ERRORCODE_GPU_OPENCL_FUNCTION_ERROR = 809, - SNPE_ERRORCODE_GPU_BUFFER_NOT_FOUND = 810, - SNPE_ERRORCODE_GPU_TENSOR_DIM_INVALID = 811, - SNPE_ERRORCODE_GPU_MEMORY_FLAGS_INVALID = 812, - SNPE_ERRORCODE_GPU_UNEXPECTED_NUMBER_OF_IO = 813, - SNPE_ERRORCODE_GPU_LAYER_PROXY_ERROR = 814, - SNPE_ERRORCODE_GPU_BUFFER_IN_USE = 815, - SNPE_ERRORCODE_GPU_BUFFER_MODIFICATION_ERROR = 816, - SNPE_ERRORCODE_GPU_DATA_ARRANGEMENT_INVALID = 817, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_ERRORCODE_GPU_UDO_OPERATION_FAILED = 818, -#endif //DNN_RUNTIME_HAVE_UDO_CAPABILITY - // DSP runtime errors - SNPE_ERRORCODE_DSP_LAYER_NOT_SUPPORTED = 900, - SNPE_ERRORCODE_DSP_LAYER_PARAM_NOT_SUPPORTED = 901, - SNPE_ERRORCODE_DSP_LAYER_PARAM_INVALID = 902, - SNPE_ERRORCODE_DSP_LAYER_PARAM_COMBINATION_INVALID = 903, - SNPE_ERRORCODE_DSP_STUB_NOT_PRESENT = 904, - SNPE_ERRORCODE_DSP_LAYER_NAME_TRUNCATED = 905, - SNPE_ERRORCODE_DSP_LAYER_INPUT_BUFFER_NAME_TRUNCATED = 906, - SNPE_ERRORCODE_DSP_LAYER_OUTPUT_BUFFER_NAME_TRUNCATED = 907, - SNPE_ERRORCODE_DSP_RUNTIME_COMMUNICATION_ERROR = 908, - SNPE_ERRORCODE_DSP_RUNTIME_INVALID_PARAM_ERROR = 909, - SNPE_ERRORCODE_DSP_RUNTIME_SYSTEM_ERROR = 910, - SNPE_ERRORCODE_DSP_RUNTIME_CRASHED_ERROR = 911, - SNPE_ERRORCODE_DSP_BUFFER_SIZE_ERROR = 912, - SNPE_ERRORCODE_DSP_UDO_EXECUTE_ERROR = 913, - SNPE_ERRORCODE_DSP_UDO_LIB_NOT_REGISTERED_ERROR = 914, - SNPE_ERRORCODE_DSP_UDO_INVALID_QUANTIZATION_TYPE_ERROR = 915, - - // Model validataion errors - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_NOT_SUPPORTED = 1000, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_PARAM_NOT_SUPPORTED = 1001, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_PARAM_INVALID = 1002, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_PARAM_MISSING = 1003, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_PARAM_COMBINATION_INVALID = 1004, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_ORDERING_INVALID = 1005, - SNPE_ERRORCODE_MODEL_VALIDATION_INVALID_CONSTRAINT = 1006, - SNPE_ERRORCODE_MODEL_VALIDATION_MISSING_BUFFER = 1007, - SNPE_ERRORCODE_MODEL_VALIDATION_BUFFER_REUSE_NOT_SUPPORTED = 1008, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_COULD_NOT_BE_ASSIGNED = 1009, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_ERRORCODE_MODEL_VALIDATION_UDO_LAYER_FAILED = 1010, -#endif // DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // UDL 
errors - SNPE_ERRORCODE_UDL_LAYER_EMPTY_UDL_NETWORK = 1100, - SNPE_ERRORCODE_UDL_LAYER_PARAM_INVALID = 1101, - SNPE_ERRORCODE_UDL_LAYER_INSTANCE_MISSING = 1102, - SNPE_ERRORCODE_UDL_LAYER_SETUP_FAILED = 1103, - SNPE_ERRORCODE_UDL_EXECUTE_FAILED = 1104, - SNPE_ERRORCODE_UDL_BUNDLE_INVALID = 1105, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_ERRORCODE_UDO_REGISTRATION_FAILED = 1106, - SNPE_ERRORCODE_UDO_GET_PACKAGE_FAILED = 1107, - SNPE_ERRORCODE_UDO_GET_IMPLEMENTATION_FAILED = 1108, -#endif // DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // Dependent library errors - SNPE_ERRORCODE_STD_LIBRARY_ERROR = 1200, - - // Unknown exception (catch (...)), Has no component attached to this - SNPE_ERRORCODE_UNKNOWN_EXCEPTION = 1210, - - // Storage Errors - SNPE_ERRORCODE_STORAGE_INVALID_KERNEL_REPO = 1300, - -#ifdef DNN_RUNTIME_HAVE_AIP_RUNTIME - // AIP runtime errors - SNPE_ERRORCODE_AIP_LAYER_NOT_SUPPORTED = 1400, - SNPE_ERRORCODE_AIP_LAYER_PARAM_NOT_SUPPORTED = 1401, - SNPE_ERRORCODE_AIP_LAYER_PARAM_INVALID = 1402, - SNPE_ERRORCODE_AIP_LAYER_PARAM_COMBINATION_INVALID = 1403, - SNPE_ERRORCODE_AIP_STUB_NOT_PRESENT = 1404, - SNPE_ERRORCODE_AIP_LAYER_NAME_TRUNCATED = 1405, - SNPE_ERRORCODE_AIP_LAYER_INPUT_BUFFER_NAME_TRUNCATED = 1406, - SNPE_ERRORCODE_AIP_LAYER_OUTPUT_BUFFER_NAME_TRUNCATED = 1407, - SNPE_ERRORCODE_AIP_RUNTIME_COMMUNICATION_ERROR = 1408, - SNPE_ERRORCODE_AIP_RUNTIME_INVALID_PARAM_ERROR = 1409, - SNPE_ERRORCODE_AIP_RUNTIME_SYSTEM_ERROR = 1410, - SNPE_ERRORCODE_AIP_RUNTIME_TENSOR_MISSING = 1411, - SNPE_ERRORCODE_AIP_RUNTIME_TENSOR_SHAPE_MISMATCH = 1412, - SNPE_ERRORCODE_AIP_RUNTIME_BAD_AIX_RECORD = 1413, -#endif // DNN_RUNTIME_HAVE_AIP_RUNTIME - - // DlCaching errors - SNPE_ERRORCODE_DLCACHING_INVALID_METADATA = 1500, - SNPE_ERRORCODE_DLCACHING_INVALID_INITBLOB = 1501, - - // Infrastructure Errors - SNPE_ERRORCODE_INFRA_CLUSTERMGR_INSTANCE_INVALID = 1600, - SNPE_ERRORCODE_INFRA_CLUSTERMGR_EXECUTE_SYNC_FAILED = 1601, - - // Memory Errors - SNPE_ERRORCODE_MEMORY_CORRUPTION_ERROR = 1700 - -} Snpe_ErrorCode_t; - - - -/** - * Clear the last error code - */ -SNPE_API void Snpe_ErrorCode_clearLastErrorCode(); - -/** -* Returns the error code of the last error encountered. -* -* @return The error code. -* -* @note The returned error code is significant only when the return -* value of the call indicated an error. -*/ -SNPE_API Snpe_ErrorCode_t Snpe_ErrorCode_getLastErrorCode(); - -/** -* Returns the error string of the last error encountered. -* -* @return The error string. -* -* @note The returned error string is significant only when the return -* value of the call indicated an error. -*/ -SNPE_API const char* Snpe_ErrorCode_GetLastErrorString(); - -/** - * Returns the info string of the last error encountered. - */ -SNPE_API const char* Snpe_ErrorCode_getLastInfoString(); - -/** - * Returns the uint32_t representation of the error code enum. - * - * @param[in] code The error code to be converted. - * - * @return uint32_t representation of the error code. 
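// Usage sketch, illustrative only: reading the last-error state declared above after an
// SNPE call fails. Snpe_DlContainer_Open (declared earlier in this diff) stands in for any
// failing call, and a null handle is assumed to indicate failure; the "SNPE_INF" tag reuses
// the LOG_TAG from inference.h.
#include <android/log.h>
#include "DlContainer/DlContainer.h"
#include "DlSystem/DlError.h"

static Snpe_DlContainer_Handle_t openOrLogError(const char* dlcPath)
{
    Snpe_ErrorCode_clearLastErrorCode();
    Snpe_DlContainer_Handle_t container = Snpe_DlContainer_Open(dlcPath);
    if (!container) {
        // The code/string are only meaningful because the call above reported failure.
        __android_log_print(ANDROID_LOG_ERROR, "SNPE_INF", "DLC open failed: %d (%s)",
                            static_cast<int>(Snpe_ErrorCode_getLastErrorCode()),
                            Snpe_ErrorCode_GetLastErrorString());
    }
    return container;
}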
- */ -SNPE_API uint32_t Snpe_ErrorCode_enumToUInt32(Snpe_ErrorCode_t code); - - -#ifdef __cplusplus -} // extern "C" -#endif - - -#endif // _DL_ERROR_H_ - diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlError.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlError.hpp deleted file mode 100644 index 55dc2140..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlError.hpp +++ /dev/null @@ -1,261 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include - -#include "DlSystem/DlError.h" - - -namespace DlSystem { - -enum class ErrorCode : uint32_t { - NONE = 0, - - // C API Error Codes - // This is a temporary place for them. We still have to figure out how to manage - // passing error codes from the C API to C++ if we want to use things like SetLastError - SNPE_CAPI_CREATE_FAILURE = 10, - SNPE_CAPI_HANDLEGEN_FAILURE = 11, - SNPE_CAPI_DELETE_FAILURE = 12, - SNPE_CAPI_BAD_HANDLE = 13, - SNPE_CAPI_BAD_ARGUMENT = 14, - SNPE_CAPI_BAD_ALLOC = 15, - - - // System config errors - SNPE_CONFIG_MISSING_PARAM = 100, - SNPE_CONFIG_INVALID_PARAM = 101, - SNPE_CONFIG_MISSING_FILE = 102, - SNPE_CONFIG_NNCONFIG_NOT_SET = 103, - SNPE_CONFIG_NNCONFIG_INVALID = 104, - SNPE_CONFIG_WRONG_INPUT_NAME = 105, - SNPE_CONFIG_INCORRECT_INPUT_DIMENSIONS = 106, - SNPE_CONFIG_DIMENSIONS_MODIFICATION_NOT_SUPPORTED = 107, - SNPE_CONFIG_BOTH_OUTPUT_LAYER_TENSOR_NAMES_SET = 108, - - SNPE_CONFIG_NNCONFIG_ONLY_TENSOR_SUPPORTED = 120, - SNPE_CONFIG_NNCONFIG_ONLY_USER_BUFFER_SUPPORTED = 121, - - // DlSystem errors - SNPE_DLSYSTEM_MISSING_BUFFER = 200, - SNPE_DLSYSTEM_TENSOR_CAST_FAILED = 201, - SNPE_DLSYSTEM_FIXED_POINT_PARAM_INVALID = 202, - SNPE_DLSYSTEM_SIZE_MISMATCH = 203, - SNPE_DLSYSTEM_NAME_NOT_FOUND = 204, - SNPE_DLSYSTEM_VALUE_MISMATCH = 205, - SNPE_DLSYSTEM_INSERT_FAILED = 206, - SNPE_DLSYSTEM_TENSOR_FILE_READ_FAILED = 207, - SNPE_DLSYSTEM_DIAGLOG_FAILURE = 208, - SNPE_DLSYSTEM_LAYER_NOT_SET = 209, - SNPE_DLSYSTEM_WRONG_NUMBER_INPUT_BUFFERS = 210, - SNPE_DLSYSTEM_RUNTIME_TENSOR_SHAPE_MISMATCH = 211, - SNPE_DLSYSTEM_TENSOR_MISSING = 212, - SNPE_DLSYSTEM_TENSOR_ITERATION_UNSUPPORTED = 213, - SNPE_DLSYSTEM_BUFFER_MANAGER_MISSING = 214, - SNPE_DLSYSTEM_RUNTIME_BUFFER_SOURCE_UNSUPPORTED = 215, - SNPE_DLSYSTEM_BUFFER_CAST_FAILED = 216, - SNPE_DLSYSTEM_WRONG_TRANSITION_TYPE = 217, - SNPE_DLSYSTEM_LAYER_ALREADY_REGISTERED = 218, - SNPE_DLSYSTEM_TENSOR_DIM_INVALID = 219, - - SNPE_DLSYSTEM_BUFFERENCODING_UNKNOWN = 240, - SNPE_DLSYSTEM_BUFFER_INVALID_PARAM = 241, - - // DlContainer errors - SNPE_DLCONTAINER_MODEL_PARSING_FAILED = 300, - SNPE_DLCONTAINER_UNKNOWN_LAYER_CODE = 301, - SNPE_DLCONTAINER_MISSING_LAYER_PARAM = 302, - SNPE_DLCONTAINER_LAYER_PARAM_NOT_SUPPORTED = 303, - SNPE_DLCONTAINER_LAYER_PARAM_INVALID = 304, - SNPE_DLCONTAINER_TENSOR_DATA_MISSING = 305, - SNPE_DLCONTAINER_MODEL_LOAD_FAILED = 306, - SNPE_DLCONTAINER_MISSING_RECORDS = 307, - SNPE_DLCONTAINER_INVALID_RECORD = 308, - SNPE_DLCONTAINER_WRITE_FAILURE = 309, - SNPE_DLCONTAINER_READ_FAILURE = 310, - SNPE_DLCONTAINER_BAD_CONTAINER = 311, - SNPE_DLCONTAINER_BAD_DNN_FORMAT_VERSION = 312, - 
SNPE_DLCONTAINER_UNKNOWN_AXIS_ANNOTATION = 313, - SNPE_DLCONTAINER_UNKNOWN_SHUFFLE_TYPE = 314, - SNPE_DLCONTAINER_TEMP_FILE_FAILURE = 315, - - // Network errors - SNPE_NETWORK_EMPTY_NETWORK = 400, - SNPE_NETWORK_CREATION_FAILED = 401, - SNPE_NETWORK_PARTITION_FAILED = 402, - SNPE_NETWORK_NO_OUTPUT_DEFINED = 403, - SNPE_NETWORK_MISMATCH_BETWEEN_NAMES_AND_DIMS = 404, - SNPE_NETWORK_MISSING_INPUT_NAMES = 405, - SNPE_NETWORK_MISSING_OUTPUT_NAMES = 406, - SNPE_NETWORK_EXECUTION_FAILED = 407, - - // Host runtime errors - SNPE_HOST_RUNTIME_TARGET_UNAVAILABLE = 500, - - // CPU runtime errors - SNPE_CPU_LAYER_NOT_SUPPORTED = 600, - SNPE_CPU_LAYER_PARAM_NOT_SUPPORTED = 601, - SNPE_CPU_LAYER_PARAM_INVALID = 602, - SNPE_CPU_LAYER_PARAM_COMBINATION_INVALID = 603, - SNPE_CPU_BUFFER_NOT_FOUND = 604, - SNPE_CPU_NETWORK_NOT_SUPPORTED = 605, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_CPU_UDO_OPERATION_FAILED = 606, -#endif //DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // CPU fixed-point runtime errors - SNPE_CPU_FXP_LAYER_NOT_SUPPORTED = 700, - SNPE_CPU_FXP_LAYER_PARAM_NOT_SUPPORTED = 701, - SNPE_CPU_FXP_LAYER_PARAM_INVALID = 702, - SNPE_CPU_FXP_OPTION_INVALID = 703, - - // GPU runtime errors - SNPE_GPU_LAYER_NOT_SUPPORTED = 800, - SNPE_GPU_LAYER_PARAM_NOT_SUPPORTED = 801, - SNPE_GPU_LAYER_PARAM_INVALID = 802, - SNPE_GPU_LAYER_PARAM_COMBINATION_INVALID = 803, - SNPE_GPU_KERNEL_COMPILATION_FAILED = 804, - SNPE_GPU_CONTEXT_NOT_SET = 805, - SNPE_GPU_KERNEL_NOT_SET = 806, - SNPE_GPU_KERNEL_PARAM_INVALID = 807, - SNPE_GPU_OPENCL_CHECK_FAILED = 808, - SNPE_GPU_OPENCL_FUNCTION_ERROR = 809, - SNPE_GPU_BUFFER_NOT_FOUND = 810, - SNPE_GPU_TENSOR_DIM_INVALID = 811, - SNPE_GPU_MEMORY_FLAGS_INVALID = 812, - SNPE_GPU_UNEXPECTED_NUMBER_OF_IO = 813, - SNPE_GPU_LAYER_PROXY_ERROR = 814, - SNPE_GPU_BUFFER_IN_USE = 815, - SNPE_GPU_BUFFER_MODIFICATION_ERROR = 816, - SNPE_GPU_DATA_ARRANGEMENT_INVALID = 817, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_GPU_UDO_OPERATION_FAILED = 818, -#endif //DNN_RUNTIME_HAVE_UDO_CAPABILITY - // DSP runtime errors - SNPE_DSP_LAYER_NOT_SUPPORTED = 900, - SNPE_DSP_LAYER_PARAM_NOT_SUPPORTED = 901, - SNPE_DSP_LAYER_PARAM_INVALID = 902, - SNPE_DSP_LAYER_PARAM_COMBINATION_INVALID = 903, - SNPE_DSP_STUB_NOT_PRESENT = 904, - SNPE_DSP_LAYER_NAME_TRUNCATED = 905, - SNPE_DSP_LAYER_INPUT_BUFFER_NAME_TRUNCATED = 906, - SNPE_DSP_LAYER_OUTPUT_BUFFER_NAME_TRUNCATED = 907, - SNPE_DSP_RUNTIME_COMMUNICATION_ERROR = 908, - SNPE_DSP_RUNTIME_INVALID_PARAM_ERROR = 909, - SNPE_DSP_RUNTIME_SYSTEM_ERROR = 910, - SNPE_DSP_RUNTIME_CRASHED_ERROR = 911, - SNPE_DSP_BUFFER_SIZE_ERROR = 912, - SNPE_DSP_UDO_EXECUTE_ERROR = 913, - SNPE_DSP_UDO_LIB_NOT_REGISTERED_ERROR = 914, - SNPE_DSP_UDO_INVALID_QUANTIZATION_TYPE_ERROR = 915, - SNPE_DSP_RUNTIME_INVALID_RPC_DRIVER = 916, - SNPE_DSP_RUNTIME_RPC_PERMISSION_ERROR = 917, - SNPE_DSP_RUNTIME_DSP_FILE_OPEN_ERROR = 918, - - // Model validataion errors - SNPE_MODEL_VALIDATION_LAYER_NOT_SUPPORTED = 1000, - SNPE_MODEL_VALIDATION_LAYER_PARAM_NOT_SUPPORTED = 1001, - SNPE_MODEL_VALIDATION_LAYER_PARAM_INVALID = 1002, - SNPE_MODEL_VALIDATION_LAYER_PARAM_MISSING = 1003, - SNPE_MODEL_VALIDATION_LAYER_PARAM_COMBINATION_INVALID = 1004, - SNPE_MODEL_VALIDATION_LAYER_ORDERING_INVALID = 1005, - SNPE_MODEL_VALIDATION_INVALID_CONSTRAINT = 1006, - SNPE_MODEL_VALIDATION_MISSING_BUFFER = 1007, - SNPE_MODEL_VALIDATION_BUFFER_REUSE_NOT_SUPPORTED = 1008, - SNPE_MODEL_VALIDATION_LAYER_COULD_NOT_BE_ASSIGNED = 1009, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_MODEL_VALIDATION_UDO_LAYER_FAILED = 1010, -#endif // 
DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // UDL errors - SNPE_UDL_LAYER_EMPTY_UDL_NETWORK = 1100, - SNPE_UDL_LAYER_PARAM_INVALID = 1101, - SNPE_UDL_LAYER_INSTANCE_MISSING = 1102, - SNPE_UDL_LAYER_SETUP_FAILED = 1103, - SNPE_UDL_EXECUTE_FAILED = 1104, - SNPE_UDL_BUNDLE_INVALID = 1105, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_UDO_REGISTRATION_FAILED = 1106, - SNPE_UDO_GET_PACKAGE_FAILED = 1107, - SNPE_UDO_GET_IMPLEMENTATION_FAILED = 1108, -#endif // DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // Dependent library errors - SNPE_STD_LIBRARY_ERROR = 1200, - - // Unknown exception (catch (...)), Has no component attached to this - SNPE_UNKNOWN_EXCEPTION = 1210, - - // Storage Errors - SNPE_STORAGE_INVALID_KERNEL_REPO = 1300, - -#ifdef DNN_RUNTIME_HAVE_AIP_RUNTIME - // AIP runtime errors - SNPE_AIP_LAYER_NOT_SUPPORTED = 1400, - SNPE_AIP_LAYER_PARAM_NOT_SUPPORTED = 1401, - SNPE_AIP_LAYER_PARAM_INVALID = 1402, - SNPE_AIP_LAYER_PARAM_COMBINATION_INVALID = 1403, - SNPE_AIP_STUB_NOT_PRESENT = 1404, - SNPE_AIP_LAYER_NAME_TRUNCATED = 1405, - SNPE_AIP_LAYER_INPUT_BUFFER_NAME_TRUNCATED = 1406, - SNPE_AIP_LAYER_OUTPUT_BUFFER_NAME_TRUNCATED = 1407, - SNPE_AIP_RUNTIME_COMMUNICATION_ERROR = 1408, - SNPE_AIP_RUNTIME_INVALID_PARAM_ERROR = 1409, - SNPE_AIP_RUNTIME_SYSTEM_ERROR = 1410, - SNPE_AIP_RUNTIME_TENSOR_MISSING = 1411, - SNPE_AIP_RUNTIME_TENSOR_SHAPE_MISMATCH = 1412, - SNPE_AIP_RUNTIME_BAD_AIX_RECORD = 1413, - SNPE_AIP_AXIS_QUANT_UNSUPPORTED = 1414, - -#endif // DNN_RUNTIME_HAVE_AIP_RUNTIME - - // DlCaching errors - SNPE_DLCACHING_INVALID_METADATA = 1500, - SNPE_DLCACHING_INVALID_INITBLOB = 1501, - - // Infrastructure Errors - SNPE_INFRA_CLUSTERMGR_INSTANCE_INVALID = 1600, - SNPE_INFRA_CLUSTERMGR_EXECUTE_SYNC_FAILED = 1601, - - // Memory Errors - SNPE_MEMORY_CORRUPTION_ERROR = 1700 - -}; - - -inline ErrorCode getLastErrorCode(){ - return static_cast(Snpe_ErrorCode_getLastErrorCode()); -} - -inline const char* getLastErrorString(){ - return Snpe_ErrorCode_GetLastErrorString(); -} - -inline const char* getLastInfoString(){ - return Snpe_ErrorCode_getLastInfoString(); -} - - -inline uint32_t enumToUInt32(ErrorCode code){ - return Snpe_ErrorCode_enumToUInt32(static_cast(code)); -} - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ErrorCode); - - -namespace zdl{ namespace DlSystem { - inline ErrorCode getLastErrorCode() { return ::DlSystem::getLastErrorCode() ; } - inline const char* getLastErrorString() { return ::DlSystem::getLastErrorString() ; } - inline const char* getLastInfoString() { return ::DlSystem::getLastInfoString() ; } - inline uint32_t enumToUInt32(ErrorCode code){ return ::DlSystem::enumToUInt32(code); } -}} // ns zdl::DlSystem diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlOptional.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlOptional.hpp deleted file mode 100644 index e7bbf666..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlOptional.hpp +++ /dev/null @@ -1,244 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
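// Usage sketch for the C++ helpers above: the enum class ErrorCode plus the free functions
// in DlSystem (also aliased into zdl::DlSystem for older code). The failing operation is
// left abstract; only the error-query pattern is shown.
#include <string>
#include "DlSystem/DlError.hpp"

static std::string describeLastError()
{
    if (DlSystem::getLastErrorCode() == DlSystem::ErrorCode::NONE)
        return "no error recorded";
    return std::to_string(DlSystem::enumToUInt32(DlSystem::getLastErrorCode()))
         + ": " + DlSystem::getLastErrorString();
}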
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include -#include - - -//============================================================================== -// -// Copyright (c) 2016, 2020 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -//#include -#include -//#include - - -namespace DlSystem { - - -/** @addtogroup c_plus_plus_apis C++ -@{ */ - -/** - * @brief . - * - * Class to manage a value that may or may not exist. The boolean value - * of the Optional class is true if the object contains a value and false - * if it does not contain a value. - * - * The class must be evaluated and confirmed as true (containing a value) - * before being dereferenced. - */ -template -class Optional { -public: - enum class LIFECYCLE { - NONE = 0, - REFERENCE_OWNED = 1, - POINTER_OWNED = 2, - POINTER_NOT_OWNED = 3 - }; - - struct ReferenceCount { - size_t count = 0; - - void increment() { count++; } - - size_t decrement() { - if (count > 0) { - count--; - } - return count; - } - }; - - using U = typename std::remove_pointer::type; - - /** - * The default constructor is set to not have any value, and is - * therefore evaluated as false. - */ - // Do not explicit it so we can return {} - Optional() { - m_Type = LIFECYCLE::NONE; - } - - /** - * Construct an Optional class using an object. - * @param[in] Reference to an object v - * @param[out] Optional instance of object v - */ - template - Optional (const T& v, typename std::enable_if::value>::type* = 0) - : m_Type(LIFECYCLE::REFERENCE_OWNED) { - try { - m_StoragePtr = new T(v); - } catch (...) { - m_StoragePtr = nullptr; - m_Type = LIFECYCLE::NONE; - } - } - - template - Optional(U* v, LIFECYCLE type, typename std::enable_if::value>::type* = 0) - : m_Type(type) { - switch (m_Type) { - case LIFECYCLE::POINTER_OWNED: - m_StoragePtr = v; - m_Count = new ReferenceCount(); - m_Count->increment(); - break; - case LIFECYCLE::POINTER_NOT_OWNED: - m_StoragePtr = v; - break; - case LIFECYCLE::REFERENCE_OWNED: - throw std::bad_exception(); - case LIFECYCLE::NONE: - break; - } - } - - Optional(const Optional &other) : m_Type(other.m_Type), m_Count(other.m_Count) { - if (isReference()) { - m_StoragePtr = new U(*other.m_StoragePtr); - } else if (isPointer()) { - m_StoragePtr = other.m_StoragePtr; - if (isOwned()) { - m_Count->increment(); - } - } - } - - Optional& operator=(const Optional& other) noexcept { - Optional tmp(other); - swap(std::move(tmp)); - return *this; - } - - Optional(Optional&& other) noexcept { - swap(std::move(other)); - } - - Optional& operator=(Optional&& other) noexcept { - swap(std::move(other)); - return *this; - } - - ~Optional() { - if (isOwned()) { - if (isReference() || (isPointer() && m_Count->decrement() == 0)) { - delete m_StoragePtr; - delete m_Count; - } - } - } - - /** - * Boolean value of Optional class is only true when there exists a value. - */ - operator bool() const noexcept { return isValid(); } - - bool operator!() const noexcept { return !isValid(); } - - /** - * Get reference of Optional object - * @warning User must validate Optional has value before. - */ - const T& operator*() { return this->GetReference(); } - - /** - * Get reference of Optional object - * @warning User must validate Optional has value before. 
- */ - const T& operator*() const { return this->GetReference(); } - - operator T&() { return this->GetReference(); } - - T operator->() { - T self = this->GetReference(); - return self; - } - - void release(){ - if(isOwned() && isPointer()){ - m_Type = LIFECYCLE::POINTER_NOT_OWNED; - if(m_Count && m_Count->decrement() == 0){ - delete m_Count; - m_Count = nullptr; - } - } - } -private: - void swap(Optional&& other) { - m_Type = other.m_Type; - m_StoragePtr = other.m_StoragePtr; - m_Count = other.m_Count; - - other.m_Type = LIFECYCLE::NONE; - other.m_StoragePtr = nullptr; - other.m_Count = nullptr; - } - - template - typename std::enable_if::value, const Q&>::type GetReference() const noexcept { - if (!isReference()) std::terminate(); - return *static_cast(m_StoragePtr); - } - - template - typename std::enable_if::value, const Q&>::type GetReference() const noexcept { - if (!isPointer()) std::terminate(); - return static_cast(m_StoragePtr); - } - - template - typename std::enable_if::value, Q&>::type GetReference() noexcept { - if (!isReference()) std::terminate(); - return *m_StoragePtr; - } - - template - typename std::enable_if::value, Q&>::type GetReference() noexcept { - if (!isPointer()) std::terminate(); - return m_StoragePtr; - } - - bool isPointer() const { - return m_Type == LIFECYCLE::POINTER_OWNED || m_Type == LIFECYCLE::POINTER_NOT_OWNED; - } - - bool isOwned() const { - return m_Type == LIFECYCLE::REFERENCE_OWNED || m_Type == LIFECYCLE::POINTER_OWNED; - } - - bool isReference() const { - return m_Type == LIFECYCLE::REFERENCE_OWNED; - } - - bool isValid() const { - return m_Type != LIFECYCLE::NONE; - } - - U* m_StoragePtr = nullptr; - LIFECYCLE m_Type; - ReferenceCount *m_Count = nullptr; -}; - -} // ns DlSystem - - - -namespace zdl { namespace DlSystem { template using Optional = ::DlSystem::Optional; }} diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlVersion.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlVersion.h deleted file mode 100644 index fac01d1c..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlVersion.h +++ /dev/null @@ -1,122 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - - -/** - * @file - */ - -#ifndef _DL_VERSION_H_ -#define _DL_VERSION_H_ - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" -#include - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A class that contains the different portions of a version number. 
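The Optional<T> wrapper deleted above guards values that may be absent; it converts to bool and must be checked before dereferencing. A minimal sketch of that contract, assuming the header's include path:

```cpp
// Hedged sketch of consuming DlSystem::Optional<T> as defined above.
// The include path is an assumption; the values are illustrative.
#include <cstdio>
#include "DlSystem/DlOptional.hpp"

void optionalExample()
{
    DlSystem::Optional<int> maybe(42);   // REFERENCE_OWNED copy of the value
    DlSystem::Optional<int> empty;       // default-constructed: evaluates to false

    if (maybe) {
        std::printf("value = %d\n", *maybe);   // safe: checked above
    }
    if (!empty) {
        std::printf("empty Optional holds no value\n");
    }
}
```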
- * A typedef to indicate a SNPE DlVersion handle - */ -typedef void* Snpe_DlVersion_Handle_t; - -/** - * Construct a DlVersion - * - * @return a handle to the created DlVersion - */ -SNPE_API -Snpe_DlVersion_Handle_t Snpe_DlVersion_Create(); - - -/** - * Destroys/frees DlVersion - * - * @param[in] handle : Handle to access DlVersion - * - * @return SNPE_SUCCESS if Delete operation successful. -*/ -SNPE_API -Snpe_ErrorCode_t Snpe_DlVersion_Delete(Snpe_DlVersion_Handle_t handle); - -/** - * Get the major version number. - * @param[in] handle : Handle to access DlVersion - * @return Major version - */ -SNPE_API -int32_t Snpe_DlVersion_GetMajor(Snpe_DlVersion_Handle_t handle); - -/** - * Get the minor version number. - * @param[in] handle : Handle to access DlVersion - * @return Minor version - */ -SNPE_API -int32_t Snpe_DlVersion_GetMinor(Snpe_DlVersion_Handle_t handle); - -/** - * Get the teeny version number. - * @param[in] handle : Handle to access DlVersion - * @return Teeny version - */ -SNPE_API -int32_t Snpe_DlVersion_GetTeeny(Snpe_DlVersion_Handle_t handle); - -/** - * Get the string holding information about the build version. - * - * @param[in] handle : Handle to access DlVersion - * @return Build information - */ -SNPE_API -const char* Snpe_DlVersion_GetBuild(Snpe_DlVersion_Handle_t handle); - -/** - * @brief Returns a string in the form Major.Minor.Teeny.Build - * - * @param[in] handle : Handle to access DlVersion - * @return A formatted char* holding the version information. - * - * @note the returned string will be invalidated by subsequent calls to this function - */ -SNPE_API -const char* Snpe_DlVersion_ToString(Snpe_DlVersion_Handle_t handle); - -/** - * @brief Create a DlVersion from a string - * - * @param stringValue The formatted DlVersion string - * - * @return A handle to the created DlVersion - */ -SNPE_API -Snpe_DlVersion_Handle_t Snpe_DlVersion_FromString(const char* stringValue); - - - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _DL_VERSION_H_ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlVersion.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlVersion.hpp deleted file mode 100644 index 7badab1f..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/DlVersion.hpp +++ /dev/null @@ -1,118 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
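A minimal sketch of the C-style DlVersion API being removed here: build a version handle from a string, read its parts, and free it. The version string, the defensive null check, and the include path are illustrative assumptions.

```cpp
// Hedged sketch against the DlVersion.h C API above; values are illustrative.
#include <cstdio>
#include "DlSystem/DlVersion.h"

void printVersionExample()
{
    Snpe_DlVersion_Handle_t v = Snpe_DlVersion_FromString("2.10.0.4541");  // illustrative string
    if (v == nullptr) {          // defensive check; failure behavior is not documented here
        return;
    }
    std::printf("major=%d minor=%d teeny=%d build=%s\n",
                static_cast<int>(Snpe_DlVersion_GetMajor(v)),
                static_cast<int>(Snpe_DlVersion_GetMinor(v)),
                static_cast<int>(Snpe_DlVersion_GetTeeny(v)),
                Snpe_DlVersion_GetBuild(v));
    // Note: the string below is invalidated by later ToString calls, per the header.
    std::printf("full: %s\n", Snpe_DlVersion_ToString(v));
    Snpe_DlVersion_Delete(v);
}
```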
-// -//============================================================================= -#pragma once - -#include -#include - -#include "Wrapper.hpp" -#include "String.hpp" - -#include "DlSystem/DlVersion.h" -#include "SNPE/SNPEUtil.h" - - -namespace DlSystem { - -class Version_t : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_DlVersion_Delete}; - - template - using MajorReference = WrapperDetail::GenericConstMemberReference; - - template - using MinorReference = WrapperDetail::GenericConstMemberReference; - - template - using TeenyReference = WrapperDetail::GenericConstMemberReference; - - - static std::string BuildGetter(Snpe_DlVersion_Handle_t handle){ - return Snpe_DlVersion_GetBuild(handle); - } - - template - using BuildReference = WrapperDetail::GenericConstMemberReference; - - - static const std::string& toString(int32_t Major, int32_t Minor, int32_t Teeny, const std::string& Build){ - thread_local std::string toret; - - toret = std::to_string(Major); - toret += '.'; - toret += std::to_string(Minor); - toret += '.'; - toret += std::to_string(Teeny); - if(!Build.empty()){ - toret += '.'; - toret += Build; - } - - return toret; - } - -public: - Version_t() - : BaseType(Snpe_DlVersion_Create()) - { } - - Version_t(int32_t Major, int32_t Minor, int32_t Teeny, const std::string& Build) - : BaseType(Snpe_DlVersion_FromString(toString(Major, Minor, Teeny, Build).c_str())) - { } - - - /// Holds the major version number. Changes in this value indicate - /// major changes that break backward compatibility. - MajorReference Major{*this}; - - /// Holds the minor version number. Changes in this value indicate - /// minor changes made to library that are backwards compatible - /// (such as additions to the interface). - MinorReference Minor{*this}; - - /// Holds the teeny version number. Changes in this value indicate - /// changes such as bug fixes and patches made to the library that - /// do not affect the interface. - TeenyReference Teeny{*this}; - - /// This string holds information about the build version. - BuildReference Build{*this}; - - - static Version_t fromString(const std::string& stringValue){ - return moveHandle(Snpe_DlVersion_FromString(stringValue.c_str())); - } - - /** - * @brief Returns a string in the form Major.Minor.Teeny.Build - * - * @return A formatted string holding the version information. - */ - std::string toString() const{ - return Snpe_DlVersion_ToString(handle()); - } - - /** - * @brief Returns a string in the form Major.Minor.Teeny.Build - * - * @return A formatted string holding the version information. - */ - String asString() const{ - return String(toString()); - } -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, Version_t) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.h deleted file mode 100644 index 96453ef9..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.h +++ /dev/null @@ -1,117 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. 
All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _IBUFFER_ATTRIBUTES_H -#define _IBUFFER_ATTRIBUTES_H - -#include "DlSystem/IUserBuffer.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE IBufferAttributes handle - */ -typedef void* Snpe_IBufferAttributes_Handle_t; - - -/** - * @brief Gets the buffer's element size, in bytes - * - * This can be used to compute the memory size required - * to back this buffer. - * - * @param[in] handle : Handle to access IBufferAttributes - * - * @return Element size, in bytes - */ -SNPE_API -size_t Snpe_IBufferAttributes_GetElementSize(Snpe_IBufferAttributes_Handle_t handle); - -/** - * @brief Gets the element's encoding type - * - * @param[in] handle : Handle to access IBufferAttributes - * - * @return encoding type - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_IBufferAttributes_GetEncodingType(Snpe_IBufferAttributes_Handle_t handle); - -/** - * @brief Gets the number of elements in each dimension - * - * @param[in] handle : Handle to access IBufferAttributes - * - * @return Dimension size, in terms of number of elements - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_IBufferAttributes_GetDims(Snpe_IBufferAttributes_Handle_t handle); - -/** - * @brief Gets the alignment requirement of each dimension - * - * Alignment per each dimension is expressed as an multiple, for - * example, if one particular dimension can accept multiples of 8, - * the alignment will be 8. - * - * @param[in] handle : Handle to access IBufferAttributes - * - * @return Alignment in each dimension, in terms of multiple of - * number of elements - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_IBufferAttributes_GetAlignments(Snpe_IBufferAttributes_Handle_t handle); - -/** - * @brief Gets the buffer encoding returned from the network responsible - * for generating this buffer. Depending on the encoding type, this will - * be an instance of an encoding type specific derived class. - * - * @param[in] handle : Handle to access IBufferAttributes - * - * @return Derived user buffer encoding object. - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_IBufferAttributes_GetEncoding_Ref(Snpe_IBufferAttributes_Handle_t handle); - -/** - * @brief Destroys the IBufferAttributes object - * - * @param[handle] handle : Handle to access IBufferAttributes - * - * @return Error code. 
Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IBufferAttributes_Delete(Snpe_IBufferAttributes_Handle_t handle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _IBUFFER_ATTRIBUTES_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.hpp deleted file mode 100644 index 2a86fcec..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.hpp +++ /dev/null @@ -1,85 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include -#include "TensorShape.hpp" - -#include "DlSystem/IBufferAttributes.h" -#include "IUserBuffer.hpp" - -namespace DlSystem { - - -class IBufferAttributes : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_IBufferAttributes_Delete}; -public: - - size_t getElementSize() const noexcept{ - return Snpe_IBufferAttributes_GetElementSize(handle()); - } - - UserBufferEncoding::ElementType_t getEncodingType() const noexcept{ - return static_cast(Snpe_IBufferAttributes_GetEncodingType(handle())); - } - - TensorShape getDims() const{ - return moveHandle(Snpe_IBufferAttributes_GetDims(handle())); - } - - TensorShape getAlignments() const{ - return moveHandle(Snpe_IBufferAttributes_GetAlignments(handle())); - } - - UserBufferEncoding* getEncoding() const{ - auto h = Snpe_IBufferAttributes_GetEncoding_Ref(handle()); - switch(Snpe_UserBufferEncoding_GetElementType(h)){ - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNSIGNED8BIT: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT32: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT32: - return makeReference(h); - - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT16: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF16: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_BOOL8: - return makeReference(h); - - default: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNKNOWN: - return makeReference(h); - } - } - -}; - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, IBufferAttributes) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.h deleted file mode 100644 index a3c3c623..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.h +++ /dev/null @@ -1,156 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// 
@@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_IOBUFFER_DATATYPE_MAP_H -#define DL_SYSTEM_IOBUFFER_DATATYPE_MAP_H - -#include - -#include "DlSystem/DlError.h" -#include "DlSystem/DlEnums.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE IOBufferDataTypeMap handle - */ -typedef void* Snpe_IOBufferDataTypeMap_Handle_t; - -/** - * @brief . - * - * Creates a new Buffer Data type map - * - */ -SNPE_API -Snpe_IOBufferDataTypeMap_Handle_t Snpe_IOBufferDataTypeMap_Create(); - -/** - * @brief Destroys the map - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IOBufferDataTypeMap_Delete(Snpe_IOBufferDataTypeMap_Handle_t handle); -/** - * @brief Adds a name and the corresponding buffer data type - * to the map - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @param[in] name : The name of the buffer - * - * @param[in] bufferDataType : data type of the buffer - * - * @note If a buffer with the same name already exists, no new - * buffer is added. - */ -SNPE_API -Snpe_ErrorCode_t -Snpe_IOBufferDataTypeMap_Add(Snpe_IOBufferDataTypeMap_Handle_t handle, const char* name, Snpe_IOBufferDataType_t bufferDataType); - -/** - * @brief Removes a buffer name from the map - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @param[in] name : The name of the buffer - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IOBufferDataTypeMap_Remove(Snpe_IOBufferDataTypeMap_Handle_t handle, const char* name); - -/** - * @brief Returns the type of the named buffer - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @param[in] name : The name of the buffer - * - * @return The type of the buffer, or UNSPECIFIED if the buffer does not exist - * - */ -SNPE_API -Snpe_IOBufferDataType_t Snpe_IOBufferDataTypeMap_GetBufferDataType(Snpe_IOBufferDataTypeMap_Handle_t handle, const char* name); - -/** - * @brief Returns the type of the first buffer - * - * @param handle : Handle to access the IOBufferDataType map - * - * @return The type of the first buffer, or SNPE_IO_BUFFER_DATATYPE_UNSPECIFIED if the map is empty. - */ -SNPE_API -Snpe_IOBufferDataType_t Snpe_IOBufferDataTypeMap_GetBufferDataTypeOfFirst(Snpe_IOBufferDataTypeMap_Handle_t handle); - -/** - * @brief Returns the size of the buffer type map. - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @return The size of the map - * - */ -SNPE_API -size_t Snpe_IOBufferDataTypeMap_Size(Snpe_IOBufferDataTypeMap_Handle_t handle); - -/** - * @brief Checks the existence of the named buffer in the map - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @param[in] name : The name of the buffer - * - * @return 1 if the named buffer exists, 0 otherwise. 
- * - */ -SNPE_API -int Snpe_IOBufferDataTypeMap_Find(Snpe_IOBufferDataTypeMap_Handle_t handle, const char* name); - -/** - * @brief Resets the map - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IOBufferDataTypeMap_Clear(Snpe_IOBufferDataTypeMap_Handle_t handle); - -/** - * @brief Checks whether the map is empty - * - * @return 1 if the map is empty, 0 otherwise. - * - */ -SNPE_API -int Snpe_IOBufferDataTypeMap_Empty(Snpe_IOBufferDataTypeMap_Handle_t handle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_IOBUFFER_DATATYPE_MAP_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.hpp deleted file mode 100644 index c39d3320..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.hpp +++ /dev/null @@ -1,69 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include - -#include "DlEnums.hpp" - - -#include "DlSystem/IOBufferDataTypeMap.h" - -namespace DlSystem { - -class IOBufferDataTypeMap : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_IOBufferDataTypeMap_Delete}; - -public: - - IOBufferDataTypeMap() - : BaseType(Snpe_IOBufferDataTypeMap_Create()) - { } - - void add(const char* name, IOBufferDataType_t bufferDataType){ - Snpe_IOBufferDataTypeMap_Add(handle(), name, static_cast(bufferDataType)); - } - - void remove(const char* name){ - Snpe_IOBufferDataTypeMap_Remove(handle(), name); - } - - IOBufferDataType_t getBufferDataType(const char* name){ - return static_cast(Snpe_IOBufferDataTypeMap_GetBufferDataType(handle(), name)); - } - - IOBufferDataType_t getBufferDataType(){ - return static_cast(Snpe_IOBufferDataTypeMap_GetBufferDataTypeOfFirst(handle())); - } - - size_t size() const{ - return Snpe_IOBufferDataTypeMap_Size(handle()); - } - - bool find(const char* name) const{ - return Snpe_IOBufferDataTypeMap_Find(handle(), name); - } - - void clear(){ - Snpe_IOBufferDataTypeMap_Clear(handle()); - } - - bool empty() const{ - return Snpe_IOBufferDataTypeMap_Empty(handle()); - } -}; - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, IOBufferDataTypeMap) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/ITensor.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/ITensor.h deleted file mode 100644 index 913f3bdc..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/ITensor.h +++ /dev/null @@ -1,118 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
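A minimal sketch of the C++ IOBufferDataTypeMap wrapper being removed here. The buffer name "input:0" and the enumerator IOBufferDataType_t::FLOATING_POINT_32 are assumptions (the enum lives in DlEnums.hpp, which is not part of this hunk).

```cpp
// Hedged sketch: mapping buffer names to data types with the wrapper above.
// "input:0" and FLOATING_POINT_32 are assumed names, not taken from this hunk.
#include <cstdio>
#include "DlSystem/IOBufferDataTypeMap.hpp"

void bufferTypeMapExample()
{
    DlSystem::IOBufferDataTypeMap typeMap;
    typeMap.add("input:0", DlSystem::IOBufferDataType_t::FLOATING_POINT_32);

    if (typeMap.find("input:0")) {
        const auto type = typeMap.getBufferDataType("input:0");
        std::printf("map holds %zu entry(ies); input:0 type id = %d\n",
                    typeMap.size(), static_cast<int>(type));
    }
    typeMap.clear();   // map is now empty()
}
```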
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef _DL_SYSTEM_ITENSOR_H_ -#define _DL_SYSTEM_ITENSOR_H_ - -#include - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/DlError.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * Represents a tensor which holds n-dimensional data. It is important to - * understand how the tensor data is represented in memory - * relative to the tensor dimensions. Tensors store data in - * memory in row-major order (i.e. the last tensor dimension is - * the fastest varying one). For example, if you have a two - * dimensional tensor with 3 rows and 2 columns (i.e. the tensor - * dimensions are 3,2 as returned in tensor dimension vectors) - * with the following data in terms rows and columns: - * - * | 1 2 |
- * | 3 4 |
- * | 5 6 |
- * - * This data would be stored in memory as 1,2,3,4,5,6. - */ -typedef void* Snpe_ITensor_Handle_t; - - -/** - * Destroys/frees an ITensor - * - * @param[in] userBufferHandle : Handle to access the IUserBuffer - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_ITensor_Delete(Snpe_ITensor_Handle_t iTensorHandle); - -/** - * Returns a tensor iterator pointing to the beginning - * of the data in the tensor. - * - * @param[in] tensorHandle : Handle to access ITensor - * - * @return The tensor data as a void pointer. - */ -SNPE_API -void* Snpe_ITensor_GetData(Snpe_ITensor_Handle_t tensorHandle); - -/** - * @brief Gets the shape of this tensor. - * - * The last element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying dimension, etc. - * - * @param[in] tensorHandle : Handle to access ITensor - * - * @return A TensorShape handle holding the tensor dimensions. - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_ITensor_GetShape(Snpe_ITensor_Handle_t tensorHandle); - -/** - * Returns the element size of the data in the tensor - * (discounting strides). This is how big a buffer would - * need to be to hold the tensor data contiguously in - * memory. - * - * @param[in] tensorHandle : Handle to access ITensor - * - * @return The size of the tensor (in elements). - */ -SNPE_API -size_t Snpe_ITensor_GetSize(Snpe_ITensor_Handle_t tensorHandle); - -SNPE_API -int Snpe_ITensor_IsQuantized(Snpe_ITensor_Handle_t tensorHandle); - -SNPE_API -float Snpe_ITensor_GetDelta(Snpe_ITensor_Handle_t tensorHandle); - -SNPE_API -float Snpe_ITensor_GetOffset(Snpe_ITensor_Handle_t tensorHandle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _DL_SYSTEM_ITENSOR_H_ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/ITensor.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/ITensor.hpp deleted file mode 100644 index 4785a39d..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/ITensor.hpp +++ /dev/null @@ -1,95 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
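The C ITensor API above exposes the tensor storage as a flat, row-major block. Below is a minimal sketch that zeroes a float tensor through that interface; it assumes the tensor was created elsewhere with float elements.

```cpp
// Hedged sketch against the ITensor.h C API above: treat the storage as a flat
// float array (row-major, as documented) and zero it. Assumes float elements.
#include <cstddef>
#include "DlSystem/ITensor.h"

void zeroFloatTensor(Snpe_ITensor_Handle_t tensor)
{
    float* data = static_cast<float*>(Snpe_ITensor_GetData(tensor));
    const size_t numElements = Snpe_ITensor_GetSize(tensor);   // element count, not bytes
    for (size_t i = 0; i < numElements; ++i) {
        data[i] = 0.0f;
    }
}
```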
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "TensorShape.hpp" -#include "ITensorItr.hpp" - -#include "DlSystem/ITensor.h" - - -namespace DlSystem { - - -class ITensor : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_ITensor_Delete}; - - template - T* getData(){ - return static_cast(Snpe_ITensor_GetData(handle())); - } - - template - const T* getData() const{ - return static_cast(Snpe_ITensor_GetData(handle())); - } - -public: - using iterator = DlSystem::ITensorItr; - using const_iterator = DlSystem::ITensorItr; - - - iterator begin(){ - return iterator(getData()); - } - - const_iterator begin() const{ - return const_iterator(getData()); - } - - const_iterator cbegin() const{ - return begin(); - } - - iterator end(){ - return begin() + getSize(); - } - - const_iterator end() const{ - return cbegin() + getSize(); - } - - const_iterator cend() const{ - return end(); - } - - TensorShape getShape() const{ - return moveHandle(Snpe_ITensor_GetShape(handle())); - } - - size_t getSize() const{ - return Snpe_ITensor_GetSize(handle()); - } - - // Serialize to std::ostream is no longer supported - void serialize(std::ostream &output) const = delete; - - bool isQuantized() const{ - return Snpe_ITensor_IsQuantized(handle()); - } - - float GetDelta() const{ - return Snpe_ITensor_GetDelta(handle()); - } - - float GetOffset() const{ - return Snpe_ITensor_GetOffset(handle()); - } -}; - - -} //ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ITensor) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/ITensorFactory.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/ITensorFactory.hpp deleted file mode 100644 index 5ef1e9d3..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/ITensorFactory.hpp +++ /dev/null @@ -1,52 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
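Putting the C++ ITensor wrapper above together with the ITensorFactory that appears a little further down in this diff, a sketch of creating and filling a tensor might look as follows. The TensorShape initializer-list constructor and the 1x3x8x8 shape are assumptions; TensorShape.hpp is not part of this hunk.

```cpp
// Hedged sketch: create an ITensor via ITensorFactory (shown below in this diff)
// and fill it through the iterators provided by ITensor.hpp above.
// Assumptions: TensorShape takes an initializer list; the shape is illustrative.
#include <algorithm>
#include "DlSystem/ITensorFactory.hpp"

void createAndFillTensor()
{
    DlSystem::ITensorFactory factory;              // default-constructible singleton wrapper
    DlSystem::TensorShape shape({1, 3, 8, 8});     // assumed constructor, illustrative dims
    auto tensor = factory.createTensor(shape);     // std::unique_ptr to an ITensor
    if (!tensor) {
        return;
    }
    std::fill(tensor->begin(), tensor->end(), 0.5f);   // iterators from ITensor.hpp
    // tensor->getSize() gives the element count; tensor->getShape() returns the dims back.
}
```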
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "ITensor.hpp" - -#include - - -#include "SNPE/SNPEUtil.h" - -namespace DlSystem{ -// NOTE: These factories use a different handle type because they are singletons -// Never copy this pattern unless you're also implementing a singleton -class ITensorFactory : public Wrapper{ - friend BaseType; - - using BaseType::BaseType; - static constexpr DeleteFunctionType DeleteFunction{NoOpDeleter}; - -public: - ITensorFactory() - : BaseType(nullptr) - { } - - - std::unique_ptr createTensor(const TensorShape &shape) noexcept{ - return makeUnique(Snpe_Util_CreateITensor(getHandle(shape))); - } - - // Create from std::istream is no longer supported - std::unique_ptr createTensor(std::istream &input) noexcept = delete; - - std::unique_ptr createTensor(const TensorShape &shape, - const unsigned char *data, - size_t dataSize) noexcept{ - auto handle = Snpe_Util_CreateITensorDataSize(getHandle(shape), data, dataSize); - return makeUnique(handle); - } - -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ITensorFactory) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/ITensorItr.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/ITensorItr.hpp deleted file mode 100644 index 801aa217..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/ITensorItr.hpp +++ /dev/null @@ -1,199 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include -#include -#include - -#include "Wrapper.hpp" -#include "ITensorItrImpl.hpp" - -namespace DlSystem{ - -template -class ITensorItr{ -public: - using iterator_category = std::bidirectional_iterator_tag; - using pointer = typename std::conditional::type; - using value_type = float; - using difference_type = std::ptrdiff_t; - using reference = typename std::conditional::type; - - - ITensorItr() = delete; - virtual ~ITensorItr() = default; - - explicit ITensorItr(pointer data) noexcept - : m_Impl{nullptr}, - m_IsTrivial{true}, - m_Data{data}, - m_DataStart{data} - { } - - ITensorItr(std::unique_ptr impl, - bool isTrivial = false, - float* data = nullptr) - : m_Impl(impl->clone()), - m_IsTrivial(isTrivial), - m_Data(data), - m_DataStart(data) - { } - - ITensorItr(const ITensorItr& itr) - : m_Impl(itr.m_Impl ? itr.m_Impl->clone() : nullptr), - m_IsTrivial(itr.m_IsTrivial), - m_Data(itr.m_Data), - m_DataStart(itr.m_DataStart) - { } - - ITensorItr(ITensorItr&& itr) noexcept - : m_Impl(std::move(itr.m_Impl)), - m_IsTrivial(itr.m_IsTrivial), - m_Data(itr.m_Data), - m_DataStart(itr.m_DataStart) - { } - - ITensorItr& operator=(const ITensorItr& other){ - if (this == &other) return *this; - - m_Impl = other.m_Impl ? 
other.m_Impl->clone() : nullptr; - m_IsTrivial = other.m_IsTrivial; - m_Data = other.m_Data; - m_DataStart = other.m_DataStart; - return *this; - } - ITensorItr& operator=(ITensorItr&& other) noexcept{ - if(this != &other){ - m_Impl = std::move(other.m_Impl); - m_IsTrivial = other.m_IsTrivial; - m_Data = other.m_Data; - m_DataStart = other.m_DataStart; - } - return *this; - } - - inline ITensorItr& operator++(){ - if (m_IsTrivial){ - m_Data++; - } else { - m_Impl->increment(); - } - return *this; - } - inline ITensorItr operator++(int){ - ITensorItr tmp(*this); - operator++(); - return tmp; - } - inline ITensorItr& operator--(){ - if (m_IsTrivial){ - m_Data--; - } else { - m_Impl->decrement(); - } - return *this; - } - inline ITensorItr operator--(int){ - ITensorItr tmp(*this); - operator--(); - return tmp; - } - inline ITensorItr& operator+=(int rhs){ - if (m_IsTrivial){ - m_Data += rhs; - } else { - m_Impl->increment(rhs); - } - return *this; - } - inline friend ITensorItr operator+(ITensorItr lhs, int rhs){ - lhs += rhs; - return lhs; - } - inline ITensorItr& operator-=(int rhs){ - if (m_IsTrivial){ - m_Data -= rhs; - } else { - m_Impl->decrement(rhs); - } - return *this; - } - inline friend ITensorItr operator-(ITensorItr lhs, int rhs){ - lhs -= rhs; - return lhs; - } - - inline size_t operator-(const ITensorItr& rhs){ - if (m_IsTrivial) return (m_Data - m_DataStart) - (rhs.m_Data - rhs.m_DataStart); - return m_Impl->getPosition() - rhs.m_Impl->getPosition(); - } - - inline friend bool operator<(const ITensorItr& lhs, const ITensorItr& rhs){ - if (lhs.m_IsTrivial) return lhs.m_Data < rhs.m_Data; - return lhs.m_Impl->dataPointer() < rhs.m_Impl->dataPointer(); - } - inline friend bool operator>(const ITensorItr& lhs, const ITensorItr& rhs){ - return rhs < lhs; - } - inline friend bool operator<=(const ITensorItr& lhs, const ITensorItr& rhs){ - return !(lhs > rhs); - } - inline friend bool operator>=(const ITensorItr& lhs, const ITensorItr& rhs){ - return !(lhs < rhs); - } - - inline bool operator==(const ITensorItr& rhs) const{ - if (m_IsTrivial) return m_Data == rhs.m_Data; - return m_Impl->dataPointer() == rhs.m_Impl->dataPointer(); - } - inline bool operator!=(const ITensorItr& rhs) const{ - return !operator==(rhs); - } - - inline reference operator[](size_t idx){ - if (m_IsTrivial) return *(m_DataStart + idx); - return m_Impl->getReferenceAt(idx); - } - inline reference operator*(){ - if (m_IsTrivial) return *m_Data; - return m_Impl->getReference(); - } - inline reference operator->(){ - return *(*this); - } - inline float* dataPointer() const{ - if (m_IsTrivial) return m_Data; - return m_Impl->dataPointer(); - } - - -protected: - std::unique_ptr<::DlSystem::ITensorItrImpl> m_Impl; - bool m_IsTrivial = false; - pointer m_Data = nullptr; - pointer m_DataStart = nullptr; -}; - - -inline void fill(ITensorItr first, ITensorItr end, float val){ - std::fill(first, end, val); -} -template -OutItr copy(InItr first, InItr last, OutItr result){ - return std::copy(first, last, result); -} - -} // ns DlSystem - - -// ALIAS_IN_ZDL_NAMESPACE -namespace zdl{ namespace DlSystem{ - template - using ITensorItr = ::DlSystem::ITensorItr; -}} diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/ITensorItrImpl.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/ITensorItrImpl.hpp deleted file mode 100644 index 6b9a497b..00000000 --- 
a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/ITensorItrImpl.hpp +++ /dev/null @@ -1,32 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once -#include "Wrapper.hpp" - -namespace DlSystem { - -class ITensorItrImpl { -public: - ITensorItrImpl() = default; - virtual ~ITensorItrImpl() = default; - - virtual float getValue() const = 0; - virtual float& getReference() = 0; - virtual float& getReferenceAt(size_t idx) = 0; - virtual float* dataPointer() const = 0; - virtual void increment(int incVal = 1) = 0; - virtual void decrement(int decVal = 1) = 0; - virtual size_t getPosition() = 0; - virtual std::unique_ptr clone() = 0; - -private: - ITensorItrImpl& operator=(const ITensorItrImpl& other) = delete; - ITensorItrImpl(const ITensorItrImpl& other) = delete; -}; - -} // ns DlSystem diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.h deleted file mode 100644 index fc4cc316..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.h +++ /dev/null @@ -1,714 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _IUSER_BUFFER_H -#define _IUSER_BUFFER_H - -#include -#include - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/DlError.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE UserByfferEncoding handle - */ -typedef void* Snpe_UserBufferEncoding_Handle_t; - -/** - * @brief . - * - * An enum class of all supported element types in a IUserBuffer - */ -//enum class Snpe_UserBufferEncoding_ElementType_t -typedef enum -{ - /// Unknown element type. - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNKNOWN = 0, - - /// Each element is presented by float. - SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT = 1, - - /// Each element is presented by an unsigned int. - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNSIGNED8BIT = 2, - - /// Each element is presented by float16. - SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT16 = 3, - - /// Each element is presented by an 8-bit quantized value. - SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8 = 10, - - /// Each element is presented by an 16-bit quantized value. 
- SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF16 = 11, - - /// Each element is presented by Int32 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT32 = 12, - - /// Each element is presented by UInt32 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT32 = 13, - - /// Each element is presented by Int8 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT8 = 14, - - /// Each element is presented by UInt8 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT8 = 15, - - /// Each element is presented by Int16 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT16 = 16, - - /// Each element is presented by UInt16 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT16 = 17, - - /// Each element is present by Bool8 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_BOOL8 = 18, - - /// Each element is present by Int64 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT64 = 19, - - /// Each element is present by UInt64 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT64 = 20 - -}Snpe_UserBufferEncoding_ElementType_t; - - -/** - * @brief Retrieves the element type - * - * @param[in] userBufferEncodingHandle : Handle to access userBufferEncoding - * - * @return Element type - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_UserBufferEncoding_GetElementType(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access userBufferEncoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncoding_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Destroys/frees a UserBufferEncoding - * - * @param[in] userBufferEncodingHandle : Handle to access UserBufferEncoding - * - * @return indication of success/failures - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncoding_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - - -/** - * @brief . - * - * A base class buffer source type - * - * @note User buffer from CPU support all kinds of runtimes; - * User buffer from GLBUFFER support only GPU runtime. - */ -typedef void* Snpe_UserBufferSource_Handle_t; - -typedef enum -{ - /// Unknown buffer source type. - SNPE_USERBUFFERSOURCE_SOURCETYPE_UNKNOWN = 0, - - /// The network inputs are from CPU buffer. - SNPE_USERBUFFERSOURCE_SOURCETYPE_CPU = 1, - - /// The network inputs are from OpenGL buffer. - SNPE_USERBUFFERSOURCE_SOURCETYPE_GLBUFFER = 2 -}Snpe_UserBufferSource_SourceType_t; - -/** - * @brief Retrieves the source type - * - * @param[in] userBufferSourceHandle : Handle to access userBufferSource - * - * @return Source type - */ -SNPE_API -Snpe_UserBufferSource_SourceType_t Snpe_UserBufferSource_GetSourceType(Snpe_UserBufferSource_Handle_t userBufferSourceHandle); - -/** - * @brief Destroys/frees a UserBufferSource - * - * @param[in] userBufferSourceHandle : Handle to access UserBufferSource - * - * @return indication of success/failures - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferSource_Delete(Snpe_UserBufferSource_Handle_t userBufferSourceHandle); - -/** - * @brief . - * - * An source type where input data is delivered from OpenGL buffer - */ -SNPE_API -Snpe_UserBufferSource_Handle_t Snpe_UserBufferSourceGLBuffer_Create(); - -/** - * @brief Destroys the userBuffer - * - * @param[in] userBufferSourceHandle : Handle to access the UserBuffer - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferSourceGLBuffer_Delete(Snpe_UserBufferSource_Handle_t userBufferSourceHandle); - -// Encoding 8 Bit -/** - * @brief . 
- * - * An encoding type where each element is represented by an unsigned int. - * - * Userbuffer size assumes uint8 encoding for each element. - * (i.e., a tensor with dimensions (2,3) will be represented by (2 * 3) * 1 = 6 bytes in memory). - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingUnsigned8Bit_Create(); - -/** - * @brief Copy Constructor for UserBufferEncodingUnsigned8Bit - * - * An encoding type where each element is represented by an unsigned int. - * - * Userbuffer size assumes uint8 encoding for each element. - * (i.e., a tensor with dimensions (2,3) will be represented by (2 * 3) * 1 = 6 bytes in memory). - * - * @param[in] otherHandle : a handle to another UserBufferEncodingUnsigned8Bit to copy - * - * @return a handle to the UserBufferEncodingUnsigned8Bit - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingUnsigned8Bit_CreateCopy(Snpe_UserBufferEncoding_Handle_t otherHandle); - -/** - * @brief Destroys the encodingUnsigned8Bit - * - * @param[in] userBufferEncodingHandle : Handle to access the encodingUnsigned8Bit - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingUnsigned8Bit_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingUnsigned8Bit_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - - -// Encoding Float -/** - * @brief . - * - * An encoding type where each element is represented by a float. - * - * Userbuffer size assumes float encoding for each element. - * (i.e., a tensor with dimensions (2,3) will be represented by (2 * 3) * 4 = 24 bytes in memory). - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingFloat_Create(); - -/** - * @brief Copy Constructor for UserBufferEncodingFloat - * - * An encoding type where each element is represented by a float. - * - * Userbuffer size assumes float encoding for each element. - * (i.e., a tensor with dimensions (2,3) will be represented by (2 * 3) * 4 = 24 bytes in memory). - * - * @param[in] otherHandle : a handle to another UserBufferEncodingFloat to copy - * - * @return a handle to the constructed UserBufferEncodingFloat - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingFloat_CreateCopy(Snpe_UserBufferEncoding_Handle_t otherHandle); - -/** - * @brief Destroys the encodingFloat - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingFloat_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingFloat_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -// Encoding FloatN -/** - * @brief . - * - * An encoding type where each element is represented by a float N - * - * Userbuffer size assumes float N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 16 will be represented by (2 * 3) * 2 = 12 bytes in memory). 
- */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingFloatN_Create(uint8_t bWidth); - -/** - * @brief Copy Constructor for UserBufferEncodingFloatN - * - * An encoding type where each element is represented by a float N - * - * Userbuffer size assumes float N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 16 will be represented by (2 * 3) * 2 = 12 bytes in memory). - * - * @param[in] otherHandle : a handle to another UserBufferEncodingFloatN to copy - * - * @return a handle to the constructed UserBufferEncodingFloatN - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingFloatN_CreateCopy(Snpe_UserBufferEncoding_Handle_t otherHandle); - - -/** - * @brief Destroys the encodingFloatN - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingFloatN_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingFloatN_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - - -/** - * @brief Get the Float type corresponding to a given bitwidth - * - * @param width bitwidth of Float type - * - * @return ElementType corresponding to a Float of width bits - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_UserBufferEncodingFloatN_GetTypeFromWidth(uint8_t width); - -/** - * @brief . - * - * An encoding type where each element is represented by tfN, which is an - * N-bit quantized value, which has an exact representation of 0.0 - * - * Userbuffer size assumes tf N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 16 will be represented by (2 * 3) * 2 = 12 bytes in memory). - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingTfN_Create(uint64_t stepFor0, float stepSize, uint8_t bWidth); - -/** - * @brief Copy Constructor for UserBufferEncodingTfN - * - * An encoding type where each element is represented by tfN, which is an - * N-bit quantized value, which has an exact representation of 0.0 - * - * Userbuffer size assumes tf N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 16 will be represented by (2 * 3) * 2 = 12 bytes in memory). - * @param otherHandle the UserBufferEncodingTfN to copy - * @return a handle to a newly constructed UserBufferEncodingTfN - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingTfN_CreateCopy(Snpe_UserBufferEncoding_Handle_t otherHandle); - -/** - * @brief Destroys the encodingTfN - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingTfN_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. 
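Following the "(2 * 3) * 2 = 12 bytes" arithmetic spelled out in the comments above, here is a sketch of sizing a user buffer from a 16-bit FloatN encoding; the dimensions are illustrative.

```cpp
// Hedged sketch: derive the backing-buffer size from an encoding's element size,
// mirroring the arithmetic in the header comments above. Dims are illustrative.
#include <cstdio>
#include "DlSystem/IUserBuffer.h"

void floatNBufferSizeExample()
{
    Snpe_UserBufferEncoding_Handle_t enc = Snpe_UserBufferEncodingFloatN_Create(16);
    const size_t elementBytes = Snpe_UserBufferEncodingFloatN_GetElementSize(enc);  // 2 for fp16

    const size_t dims[] = {2, 3};
    const size_t numElements = dims[0] * dims[1];
    std::printf("buffer needs %zu bytes\n", numElements * elementBytes);            // 6 * 2 = 12

    Snpe_UserBufferEncodingFloatN_Delete(enc);
}
```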
- */ -SNPE_API -size_t Snpe_UserBufferEncodingTfN_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Sets the step value that represents 0 - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @param[in] stepExactly0 : The step value that represents 0 - * - */ -SNPE_API -void Snpe_UserBufferEncodingTfN_SetStepExactly0(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle, uint64_t stepExactly0); - -/** - * @brief Sets the float value that each step represents - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @param[in] quantizedStepSize : The float value of each step size - * - */ -SNPE_API -void Snpe_UserBufferEncodingTfN_SetQuantizedStepSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle, float quantizedStepSize); - -/** - * @brief Retrieves the step that represents 0.0 - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Step value - */ -SNPE_API -uint64_t Snpe_UserBufferEncodingTfN_GetStepExactly0(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the step size - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Step size - */ -SNPE_API -float Snpe_UserBufferEncodingTfN_GetQuantizedStepSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * Calculates the minimum floating point value that - * can be represented with this encoding. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Minimum representable floating point value - */ -SNPE_API -float Snpe_UserBufferEncodingTfN_GetMin(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * Calculates the maximum floating point value that - * can be represented with this encoding. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Maximum representable floating point value - */ -SNPE_API -float Snpe_UserBufferEncodingTfN_GetMax(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Get the tfN type corresponding to a given bitwidth - * - * @param width bitwidth of tfN type - * - * @return ElementType corresponding to a tfN of width bits - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_UserBufferEncodingTfN_GetTypeFromWidth(uint8_t width); - -// Encoding Int N -/** - * @brief . - * - * An encoding type where each element is represented by a Int - * - * Userbuffer size assumes int N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 32 will be represented by (2 * 3) * 4 = 24 bytes in memory). - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingIntN_Create(uint8_t bWidth); - -/** - * @brief Copy Constructor for UserBufferEncodingIntN - * - * An encoding type where each element is represented by a Int - * - * Userbuffer size assumes int N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 32 will be represented by (2 * 3) * 4 = 24 bytes in memory). 
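A sketch of creating and inspecting an 8-bit TfN (quantized) encoding with the functions above; the stepExactly0 and step-size values are illustrative only, and the representable range is read back from the library rather than recomputed here.

```cpp
// Hedged sketch: build an 8-bit TfN encoding and query its properties.
// The quantization parameters below are illustrative, not taken from any model.
#include <cstdio>
#include "DlSystem/IUserBuffer.h"

void tf8EncodingExample()
{
    // 8-bit quantization where integer step 128 maps exactly to 0.0,
    // with a step size of 0.05 (illustrative numbers only).
    Snpe_UserBufferEncoding_Handle_t enc = Snpe_UserBufferEncodingTfN_Create(128, 0.05f, 8);

    std::printf("element size: %zu byte(s)\n", Snpe_UserBufferEncodingTfN_GetElementSize(enc));
    std::printf("representable range: [%f, %f]\n",
                Snpe_UserBufferEncodingTfN_GetMin(enc),
                Snpe_UserBufferEncodingTfN_GetMax(enc));

    // Quantization parameters can be updated in place when a buffer is reused:
    Snpe_UserBufferEncodingTfN_SetStepExactly0(enc, 100);
    Snpe_UserBufferEncodingTfN_SetQuantizedStepSize(enc, 0.1f);

    Snpe_UserBufferEncodingTfN_Delete(enc);
}
```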
- * @param otherHandle the UserBufferEncodingIntN to copy - * @return a handle to a newly constructed UserBufferEncodingIntN - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingIntN_CreateCopy(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Destroys the encodingIntN - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingIntN_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingIntN_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Get the int type corresponding to a given bitwidth - * - * @param width bitwidth of int type - * - * @return ElementType corresponding to a int of width bits - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_UserBufferEncodingIntN_GetTypeFromWidth(uint8_t bWidth); - -// Encoding Uint N -/** - * @brief . - * - * An encoding type where each element is represented by a Uint - * - * Userbuffer size assumes uint N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 32 will be represented by (2 * 3) * 4 = 24 bytes in memory). - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingUintN_Create(uint8_t bWidth); - -/** - * @brief Copy Constructor for UserBufferEncodingUintN - * - * An encoding type where each element is represented by a Uint - * - * Userbuffer size assumes uint N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 32 will be represented by (2 * 3) * 4 = 24 bytes in memory). - * @param otherHandle the UserBufferEncodingUintN to copy - * @return a handle to a newly constructed UserBufferEncodingUintN - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingUintN_CreateCopy(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Destroys the encodingUintN - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingUintN_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingUintN_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Get the uint type corresponding to a given bitwidth - * - * @param width bitwidth of uint type - * - * @return ElementType corresponding to a uint of width bits - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_UserBufferEncodingUintN_GetTypeFromWidth(uint8_t bWidth); - - -// Encoding Bool -/** - * @brief . 
- * - * An encoding type where each element is represented by a Bool - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingBool_Create(uint8_t bWidth); - -/** - * @brief Copy Constructor for UserBufferEncodingBool - * - * An encoding type where each element is represented by a bool - * - * @param otherHandle the UserBufferEncodingBool to copy - * @return a handle to a newly constructed UserBufferEncodingBool - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingBool_CreateCopy(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Destroys the encodingBool - * - * @param[in] userBufferHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingBool_Delete(Snpe_UserBufferEncoding_Handle_t userBufferHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingBool_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferHandle); - - - -/** - * A typedef to indicate a SNPE IUserBuffer handle - * UserBuffer contains a pointer and info on how to walk it and interpret its content. - */ -typedef void* Snpe_IUserBuffer_Handle_t; - -/** - * Destroys/frees an IUserBuffer - * - * @param[in] userBufferHandle : Handle to access the IUserBuffer - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IUserBuffer_Delete(Snpe_IUserBuffer_Handle_t userBufferHandle); - - -/** - * @brief Retrieves the total number of bytes between elements in each dimension if - * the buffer were to be interpreted as a multi-dimensional array. - * - * @param[in] userBufferHandle : Handle to access the user Buffer - * - * @warning Do not modify the TensorShape returned by reference. Treat it as a const reference. - * - * @return A const reference to the number of bytes between elements in each dimension. - * e.g. A tightly packed tensor of floats with dimensions [4, 3, 2] would - * return strides of [24, 8, 4]. - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_IUserBuffer_GetStrides_Ref(Snpe_IUserBuffer_Handle_t userBufferHandle); - -/** - * @brief Retrieves the size of the buffer, in bytes. - * - * @param[in] userBufferHandle : Handle to access the user Buffer - * - * @return Size of the underlying buffer, in bytes. - */ -SNPE_API -size_t Snpe_IUserBuffer_GetSize(Snpe_IUserBuffer_Handle_t userBufferHandle); - -/** - * @brief Retrieves the size of the inference data in the buffer, in bytes. - * - * The inference results from a dynamic-sized model may not be exactly the same size - * as the UserBuffer provided to SNPE. This function can be used to get the amount - * of output inference data, which may be less or greater than the size of the UserBuffer. - * - * If the inference results fit in the UserBuffer, getOutputSize() would be less than - * or equal to getSize(). But if the inference results were more than the capacity of - * the provided UserBuffer, the results would be truncated to fit the UserBuffer. But, - * getOutputSize() would be greater than getSize(), which indicates a bigger buffer - * needs to be provided to SNPE to hold all of the inference results. 
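The paragraph above explains that, for dynamic-sized models, getOutputSize() can report more bytes than getSize(), in which case the results were truncated. A small hedged helper, using the IUserBuffer C++ wrapper that appears later in this diff, shows the check an application might perform after execution:

```cpp
#include "DlSystem/IUserBuffer.hpp"

// Returns true when the inference results fit in the buffer. If this returns
// false, the output was truncated and a larger UserBuffer of at least
// getOutputSize() bytes would have to be created (for example via
// IUserBufferFactory) before running the network again.
bool outputFitsUserBuffer(const DlSystem::IUserBuffer& userBuffer) {
    return userBuffer.getOutputSize() <= userBuffer.getSize();
}
```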
- * - * @param[in] userBufferHandle : Handle to access the user Buffer - * - * @return Size required for the buffer to hold all inference results, which can be less - * or more than the size of the buffer, in bytes. - */ -SNPE_API -size_t Snpe_IUserBuffer_GetOutputSize(Snpe_IUserBuffer_Handle_t userBufferHandle); - -/** - * @brief Changes the underlying memory that backs the UserBuffer. - * - * This can be used to avoid creating multiple UserBuffer objects - * when the only thing that differs is the memory location. - * - * @param[in] userBufferHandle : Handle to access the user Buffer - * - * @param[in] buffer : Pointer to the memory location - * - * @return Whether the set succeeds. - */ -SNPE_API -int Snpe_IUserBuffer_SetBufferAddress(Snpe_IUserBuffer_Handle_t userBufferHandle, void* buffer); - -/** - * @brief Gets a reference to the data encoding object of - * the underlying buffer - * - * This is necessary when the UserBuffer is re-used, and the encoding - * parameters can change. For example, each input can be quantized with - * different step sizes. - * - * @param[in] userBufferHandle : Handle to access the user Buffer - * - * @return Data encoding meta-data - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_IUserBuffer_GetEncoding_Ref(Snpe_IUserBuffer_Handle_t userBufferHandle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _IUSER_BUFFER_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.hpp deleted file mode 100644 index 727c195b..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.hpp +++ /dev/null @@ -1,390 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include -#include "TensorShape.hpp" - -#include "DlSystem/IUserBuffer.h" - - -namespace DlSystem { - - -class UserBufferEncoding: public Wrapper { - friend BaseType; - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserBufferEncoding_Delete}; -protected: - UserBufferEncoding(HandleType handle) - : BaseType(handle) - { } -public: - - virtual ~UserBufferEncoding() = default; - - UserBufferEncoding(UserBufferEncoding&& other) noexcept - : BaseType(std::move(other)) - { } - - enum class ElementType_t - { - /// Unknown element type. - UNKNOWN = 0, - - /// Each element is presented by 32-bit float. - FLOAT = 1, - - /// Each element is presented by an unsigned int. - UNSIGNED8BIT = 2, - - /// Each element is presented by 16-bit float. - FLOAT16 = 3, - - /// Each element is presented by an 8-bit quantized value. - TF8 = 10, - - /// Each element is presented by an 16-bit quantized value. 
- TF16 = 11, - - /// Each element is presented by Int32 - INT32 = 12, - - /// Each element is presented by UInt32 - UINT32 = 13, - - /// Each element is presented by Int8 - INT8 = 14, - - /// Each element is presented by UInt8 - UINT8 = 15, - - /// Each element is presented by Int16 - INT16 = 16, - - /// Each element is presented by UInt16 - UINT16 = 17, - - // Each element is presented by Bool8 - BOOL8 = 18, - - // Each element is presented by Int64 - INT64 = 19, - - // Each element is presented by UInt64 - UINT64 = 20 - }; - - ElementType_t getElementType() const noexcept{ - return static_cast(Snpe_UserBufferEncoding_GetElementType(handle())); - } - - size_t getElementSize() const noexcept{ - return Snpe_UserBufferEncoding_GetElementSize(handle()); - } -}; - - -class UserBufferSource: public Wrapper { - friend BaseType; - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserBufferSource_Delete}; - -public: - enum class SourceType_t - { - /// Unknown buffer source type. - UNKNOWN = 0, - - /// The network inputs are from CPU buffer. - CPU = 1, - - /// The network inputs are from OpenGL buffer. - GLBUFFER = 2 - }; -protected: - UserBufferSource(HandleType handle) - : BaseType(handle) - { } -public: - SourceType_t getSourceType() const noexcept{ - return static_cast(Snpe_UserBufferSource_GetSourceType(handle())); - } - -}; - -class UserBufferSourceGLBuffer : public UserBufferSource{ -public: - UserBufferSourceGLBuffer() - : UserBufferSource(Snpe_UserBufferSourceGLBuffer_Create()) - { } -}; - -class UserBufferEncodingUnsigned8Bit : public UserBufferEncoding{ -public: - using UserBufferEncoding::UserBufferEncoding; - UserBufferEncodingUnsigned8Bit() - : UserBufferEncoding(Snpe_UserBufferEncodingUnsigned8Bit_Create()) - { } -}; - -class UserBufferEncodingFloatN : public UserBufferEncoding{ -public: - using UserBufferEncoding::UserBufferEncoding; - - UserBufferEncodingFloatN(uint8_t bWidth=32) - : UserBufferEncoding(Snpe_UserBufferEncodingFloatN_Create(bWidth)) - { } - - UserBufferEncodingFloatN(const UserBufferEncodingFloatN& other) - : UserBufferEncoding(Snpe_UserBufferEncodingFloatN_CreateCopy(other.handle())) - { } - - static ElementType_t getTypeFromWidth(uint8_t width){ - return static_cast(Snpe_UserBufferEncodingFloatN_GetTypeFromWidth(width)); - } -}; - -class UserBufferEncodingFloat : public UserBufferEncoding{ -public: - using UserBufferEncoding::UserBufferEncoding; - UserBufferEncodingFloat() - : UserBufferEncoding(Snpe_UserBufferEncodingFloat_Create()) - { } - UserBufferEncodingFloat(const UserBufferEncodingFloat& other) - : UserBufferEncoding(Snpe_UserBufferEncodingFloat_CreateCopy(other.handle())) - { } - - UserBufferEncodingFloat(UserBufferEncodingFloat&& other) noexcept - : UserBufferEncoding(std::move(other)) - { } -}; - - -class UserBufferEncodingTfN : public UserBufferEncoding{ -public: - - using UserBufferEncoding::UserBufferEncoding; - template::value && std::is_floating_point::value, int>::type = 0> - UserBufferEncodingTfN(T stepFor0, U stepSize, uint8_t bWidth=8) - : UserBufferEncoding(Snpe_UserBufferEncodingTfN_Create(stepFor0, stepSize, bWidth)) - { } - - UserBufferEncodingTfN(const UserBufferEncoding& ubEncoding) - : UserBufferEncoding(Snpe_UserBufferEncodingTfN_CreateCopy(getHandle(ubEncoding))) - { } - UserBufferEncodingTfN(const UserBufferEncodingTfN& ubEncoding) - : UserBufferEncoding(Snpe_UserBufferEncodingTfN_CreateCopy(getHandle(ubEncoding))) - { } - - void setStepExactly0(uint64_t stepExactly0){ - 
Snpe_UserBufferEncodingTfN_SetStepExactly0(handle(), stepExactly0); - } - - void setQuantizedStepSize(const float quantizedStepSize){ - Snpe_UserBufferEncodingTfN_SetQuantizedStepSize(handle(), quantizedStepSize); - } - - uint64_t getStepExactly0() const{ - return Snpe_UserBufferEncodingTfN_GetStepExactly0(handle()); - } - - float getMin() const{ - return Snpe_UserBufferEncodingTfN_GetMin(handle()); - } - float getMax() const{ - return Snpe_UserBufferEncodingTfN_GetMax(handle()); - } - - float getQuantizedStepSize() const{ - return Snpe_UserBufferEncodingTfN_GetQuantizedStepSize(handle()); - } - - static ElementType_t getTypeFromWidth(uint8_t width){ - return static_cast(Snpe_UserBufferEncodingTfN_GetTypeFromWidth(width)); - } -}; - -class UserBufferEncodingIntN : public UserBufferEncoding{ -public: - - UserBufferEncodingIntN(uint8_t bWidth=32) - : UserBufferEncoding(Snpe_UserBufferEncodingIntN_Create(bWidth)) - { } - - UserBufferEncodingIntN(const UserBufferEncoding& ubEncoding) - : UserBufferEncoding(Snpe_UserBufferEncodingIntN_CreateCopy(getHandle(ubEncoding))) - { } - - static ElementType_t getTypeFromWidth(uint8_t width){ - return static_cast(Snpe_UserBufferEncodingIntN_GetTypeFromWidth(width)); - } -}; - - - -class UserBufferEncodingUintN : public UserBufferEncoding{ -public: - - UserBufferEncodingUintN(uint8_t bWidth=32) - : UserBufferEncoding(Snpe_UserBufferEncodingUintN_Create(bWidth)) - { } - - UserBufferEncodingUintN(const UserBufferEncoding& ubEncoding) - : UserBufferEncoding(Snpe_UserBufferEncodingUintN_CreateCopy(getHandle(ubEncoding))) - { } - - static ElementType_t getTypeFromWidth(uint8_t width){ - return static_cast(Snpe_UserBufferEncodingUintN_GetTypeFromWidth(width)); - } -}; - - -class UserBufferEncodingTf8 : public UserBufferEncodingTfN{ -public: - using UserBufferEncodingTfN::UserBufferEncodingTfN; - UserBufferEncodingTf8() = delete; - - template::value && std::is_floating_point::value, int>::type = 0> - UserBufferEncodingTf8(T stepFor0, U stepSize) - : UserBufferEncodingTfN(stepFor0, stepSize, 8) - { } - - UserBufferEncodingTf8(const UserBufferEncoding& ubEncoding) - : UserBufferEncodingTfN(ubEncoding) - { } - -}; - -class UserBufferEncodingBool : public UserBufferEncoding{ -public: - UserBufferEncodingBool(uint8_t bWidth=8) - : UserBufferEncoding(Snpe_UserBufferEncodingBool_Create(bWidth)) - { } - - UserBufferEncodingBool(const UserBufferEncoding& ubEncoding) - : UserBufferEncoding(Snpe_UserBufferEncodingBool_CreateCopy(getHandle(ubEncoding))) - { } -}; - -class IUserBuffer: public Wrapper { - friend BaseType; - using BaseType::BaseType; - static constexpr DeleteFunctionType DeleteFunction{Snpe_IUserBuffer_Delete}; - -public: - const TensorShape& getStrides() const{ - return *makeReference(Snpe_IUserBuffer_GetStrides_Ref(handle())); - } - - size_t getSize() const{ - return Snpe_IUserBuffer_GetSize(handle()); - } - - size_t getOutputSize() const{ - return Snpe_IUserBuffer_GetOutputSize(handle()); - } - - bool setBufferAddress(void* buffer) noexcept{ - return Snpe_IUserBuffer_SetBufferAddress(handle(), buffer); - } - - const UserBufferEncoding& getEncoding() const noexcept{ - auto h = Snpe_IUserBuffer_GetEncoding_Ref(handle()); - switch(Snpe_UserBufferEncoding_GetElementType(h)){ - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNSIGNED8BIT: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT16: - case 
SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT32: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT16: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT32: - return *makeReference(h); - - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT16: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8: - return *makeReference(h); - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF16: - return *makeReference(h); - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_BOOL8: - return *makeReference(h); - - default: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNKNOWN: - return *makeReference(h); - } - } - UserBufferEncoding& getEncoding() noexcept{ - auto h = Snpe_IUserBuffer_GetEncoding_Ref(handle()); - switch(Snpe_UserBufferEncoding_GetElementType(h)){ - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNSIGNED8BIT: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT16: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT32: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT16: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT32: - return *makeReference(h); - - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT16: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8: - return *makeReference(h); - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF16: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_BOOL8: - return *makeReference(h); - - default: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNKNOWN: - return *makeReference(h); - } - } - -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncoding) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferSource) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferSourceGLBuffer) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingUnsigned8Bit) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingFloatN) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingFloat) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingTfN) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingIntN) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingUintN) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingTf8) - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, IUserBuffer) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IUserBufferFactory.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IUserBufferFactory.hpp deleted file mode 100644 index b3bbb087..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/IUserBufferFactory.hpp +++ /dev/null @@ -1,68 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include - -#include "Wrapper.hpp" -#include "IUserBuffer.hpp" -#include "TensorShape.hpp" - - -#include "SNPE/SNPEUtil.h" - -namespace DlSystem{ - - -// NOTE: These factories use a different handle type because they are singletons -// Never copy this pattern unless you're also implementing a singleton -class IUserBufferFactory : public Wrapper{ - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{NoOpDeleter}; - -public: - IUserBufferFactory() - : BaseType(nullptr) - { } - - std::unique_ptr createUserBuffer(void *buffer, - size_t bufSize, - const TensorShape &strides, - UserBufferEncoding* userBufferEncoding) noexcept{ - if(!userBufferEncoding) return {}; - auto handle = Snpe_Util_CreateUserBuffer(buffer, - bufSize, - getHandle(strides), - getHandle(userBufferEncoding)); - return makeUnique(handle); - } - - std::unique_ptr createUserBuffer(void *buffer, - size_t bufSize, - const TensorShape &strides, - UserBufferEncoding* userBufferEncoding, - UserBufferSource* userBufferSource) noexcept{ - if(!userBufferEncoding || !userBufferSource) return {}; - auto handle = Snpe_Util_CreateUserBufferFromSource(buffer, - bufSize, - getHandle(strides), - getHandle(*userBufferEncoding), - getHandle(*userBufferSource)); - return makeUnique(handle); - } - -}; - - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, IUserBufferFactory) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.h deleted file mode 100644 index 15b2a089..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.h +++ /dev/null @@ -1,329 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_PLATFORMCONFIG_H -#define DL_SYSTEM_PLATFORMCONFIG_H - -#include "DlSystem/DlError.h" -#include "DlSystem/DlEnums.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * @brief . - * - * A structure OpenGL configuration - * - * @note When certain OpenGL context and display are provided to UserGLConfig for using - * GPU buffer as input directly, the user MUST ensure the particular OpenGL - * context and display remain vaild throughout the execution of neural network models. - */ -typedef void* Snpe_UserGLConfig_Handle_t; - -/** - * @brief . 
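The IUserBufferFactory wrapper above builds a UserBuffer over caller-owned memory from a size, a strides object, and an encoding. A hedged sketch of the typical call follows; the float storage and the strides are assumed to be prepared by the caller, and the encoding used here is the plain 32-bit float encoding from IUserBuffer.hpp.

```cpp
#include <memory>
#include <vector>

#include "DlSystem/IUserBuffer.hpp"
#include "DlSystem/IUserBufferFactory.hpp"

// Wraps caller-owned float storage in a UserBuffer with a plain float encoding.
// 'strides' must describe the byte strides of the tensor, as documented for
// Snpe_IUserBuffer_GetStrides_Ref above, and is assumed to be built by the caller.
std::unique_ptr<DlSystem::IUserBuffer>
makeFloatUserBuffer(std::vector<float>& storage, const DlSystem::TensorShape& strides) {
    // Keep the encoding alive at least as long as the UserBuffer that refers to it.
    static DlSystem::UserBufferEncodingFloat floatEncoding;

    DlSystem::IUserBufferFactory factory;
    return factory.createUserBuffer(storage.data(),
                                    storage.size() * sizeof(float),
                                    strides,
                                    &floatEncoding);
}
```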
- * - * Creates a new userGLConfig - * - */ -SNPE_API -Snpe_UserGLConfig_Handle_t Snpe_UserGLConfig_Create(); - -/** - * @brief Destroys the userGLConfig - * - * @param[in] handle : Handle to access the userGLConfig - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserGLConfig_Delete(Snpe_UserGLConfig_Handle_t handle); - -/** - * @brief Sets the EGL context - * - * @param[in] handle : Handle to access userGLConfig - * - * @param[in] userGLContext : void pointer - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserGLConfig_SetUserGLContext(Snpe_UserGLConfig_Handle_t handle, void* userGLContext); - -/** - * @brief Sets the EGL Display - * - * @param[in] handle : Handle to access userGLConfig - * - * @param[in] userGLDisplay : void pointer - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserGLConfig_SetUserGLDisplay(Snpe_UserGLConfig_Handle_t handle, void* userGLDisplay); - - -/** - * @brief Get EGL context - * - * @param[in] handle : Handle to access userGLConfig - * - * @return userGLContext of type void pointer - * - */ -SNPE_API -void* Snpe_UserGLConfig_GetUserGLContext(Snpe_UserGLConfig_Handle_t handle); - -/** - * @brief Get EGL Display - * - * @param[in] handle : Handle to access userGLConfig - * - * @return userGLDisplay of type void pointer - * - */ -SNPE_API -void* Snpe_UserGLConfig_GetUserGLDisplay(Snpe_UserGLConfig_Handle_t handle); - - -/** - * @brief . - * - * A structure Gpu configuration - */ -typedef void* Snpe_UserGpuConfig_Handle_t; - -/** - * @brief . - * - * Creates a new userGpuConfig - * - */ -SNPE_API -Snpe_UserGpuConfig_Handle_t Snpe_UserGpuConfig_Create(); - -/** - * @brief Destroys the userGpuConfig - * - * @param[in] handle : Handle to access userGLConfig - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserGpuConfig_Delete(Snpe_UserGpuConfig_Handle_t handle); - -/** - * @brief Set the userGpuConfig - * - * @param[in] handle : Handle to access userGpuConfig - * - * @param[in] glHandle : Handle needed to access userGlConfig - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -void Snpe_UserGpuConfig_Set(Snpe_UserGpuConfig_Handle_t handle, Snpe_UserGLConfig_Handle_t glHandle); - -/** - * @brief Get the userGpuConfig - * - * @param[in] handle : Handle to access userGpuConfig - * - * @return Handle needed to access userGlConfig - */ -SNPE_API -Snpe_UserGLConfig_Handle_t Snpe_UserGpuConfig_Get_Ref(Snpe_UserGpuConfig_Handle_t handle); - - - -/** - * A typedef to indicate a SNPE PlatformConfig handle - */ -typedef void* Snpe_PlatformConfig_Handle_t; - - -/** - * @brief . - * - * Creates a new PlatformConfig - * - */ -SNPE_API -Snpe_PlatformConfig_Handle_t Snpe_PlatformConfig_Create(); - - -/** - * @brief Copy-Construct a PlatformConfig from another PlatformConfig - * - * @param[in] otherHandle Handle to the other PlatformConfig - * - * @return Handle to the Copy-Constructed PlatformConfig - */ -SNPE_API -Snpe_PlatformConfig_Handle_t Snpe_PlatformConfig_CreateCopy(Snpe_PlatformConfig_Handle_t otherHandle); - -/** - * @brief Destroys the PlatformConfig - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PlatformConfig_Delete(Snpe_PlatformConfig_Handle_t handle); - - -typedef enum -{ - /// Unknown platform type. - SNPE_PLATFORMCONFIG_PLATFORMTYPE_UNKNOWN = 0, - - /// Snapdragon CPU. 
- SNPE_PLATFORMCONFIG_PLATFORMTYPE_CPU = 1, - - /// Adreno GPU. - SNPE_PLATFORMCONFIG_PLATFORMTYPE_GPU = 2, - - /// Hexagon DSP. - SNPE_PLATFORMCONFIG_PLATFORMTYPE_DSP = 3 -} Snpe_PlatformConfig_PlatformType_t; - - -/** - * @brief Retrieves the platform type - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return Platform type - */ -SNPE_API -Snpe_PlatformConfig_PlatformType_t Snpe_PlatformConfig_GetPlatformType(Snpe_PlatformConfig_Handle_t handle); - -/** - * @brief Indicates whther the plaform configuration is valid. - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return 1 if the platform configuration is valid; 0 otherwise. - */ -SNPE_API -int Snpe_PlatformConfig_IsValid(Snpe_PlatformConfig_Handle_t handle); - -/** - * @brief Retrieves the Gpu configuration - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return userGpuConfig populated with the Gpu configuration. - * - */ -SNPE_API -Snpe_UserGpuConfig_Handle_t Snpe_PlatformConfig_GetUserGpuConfig(Snpe_PlatformConfig_Handle_t handle); - -/** - * @brief Sets the Gpu configuration - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @param[in] gpuHandle : Gpu Configuration handle - * - * @return 1 if Gpu configuration was successfully set; 0 otherwise. - */ -SNPE_API -int Snpe_PlatformConfig_SetUserGpuConfig(Snpe_PlatformConfig_Handle_t handle, Snpe_UserGpuConfig_Handle_t gpuHandle); - -/** - * @brief Sets the platform options - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @param[in] options : Options as a const char* in the form of "keyword:options" - * - * @return 1 if options are pass validation; otherwise 0. If false, the options are not updated. - */ -SNPE_API -int Snpe_PlatformConfig_SetPlatformOptions(Snpe_PlatformConfig_Handle_t handle, const char* options); - -/** - * @brief Indicates whther the plaform configuration is valid. - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return 1 if the platform configuration is valid; 0 otherwise. - */ -SNPE_API -int Snpe_PlatformConfig_IsOptionsValid(Snpe_PlatformConfig_Handle_t handle); - -/** - * @brief Gets the platform options - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return Options as a const char* - */ -SNPE_API -const char* Snpe_PlatformConfig_GetPlatformOptions(Snpe_PlatformConfig_Handle_t handle); - -/** - * @brief Sets the platform options - * - * @note the returned string will be invalidated by subsequent calls to this function - * - * @param[in] handle : Handle needed to access the platformConfig - * @param[in] optionName : Name of platform options" - * @param[in] value : Value of specified optionName - * - * @return If 1, add "optionName:value" to platform options if optionName don't exist, otherwise update the - * value of specified optionName. - * If 0, the platform options will not be changed. - */ -SNPE_API -int Snpe_PlatformConfig_SetPlatformOptionValue(Snpe_PlatformConfig_Handle_t handle, const char* optionName, const char* value); - -/** - * @brief Removes the platform options - * - * @param[in] handle : Handle needed to access the platformConfig - * @param[in] optionName : Name of platform options" - * @param[in] value : Value of specified optionName - * - * @return If 1, removed "optionName:value" to platform options if optionName don't exist, do nothing. - * If 0, the platform options will not be changed. 
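The functions above manage free-form platform options expressed as "keyword:options" strings. A hedged round trip through the C API is sketched below; the option string itself is a placeholder used only to show the format, not a real SNPE option name.

```cpp
#include <cstdio>

#include "DlSystem/PlatformConfig.h"

int main() {
    Snpe_PlatformConfig_Handle_t platformConfig = Snpe_PlatformConfig_Create();

    // "exampleKeyword:exampleValue" is a hypothetical option shown only to
    // illustrate the "keyword:options" format documented in the header above.
    if (Snpe_PlatformConfig_SetPlatformOptions(platformConfig, "exampleKeyword:exampleValue")) {
        std::printf("options now: %s\n",
                    Snpe_PlatformConfig_GetPlatformOptions(platformConfig));
    }

    // Validate whatever options are currently set.
    std::printf("options valid: %d\n", Snpe_PlatformConfig_IsOptionsValid(platformConfig));

    Snpe_PlatformConfig_Delete(platformConfig);
    return 0;
}
```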
- */ -SNPE_API -int Snpe_PlatformConfig_RemovePlatformOptionValue(Snpe_PlatformConfig_Handle_t handle, const char* optionName, const char* value); - -SNPE_API -void Snpe_PlatformConfig_SetIsUserGLBuffer(int isUserGLBuffer); - -SNPE_API -int Snpe_PlatformConfig_GetIsUserGLBuffer(); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_PLATFORMCONFIG_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.hpp deleted file mode 100644 index 5995c51b..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.hpp +++ /dev/null @@ -1,265 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include "DlSystem/PlatformConfig.h" - -namespace DlSystem { - -struct UserGLConfig -{ - /// Holds user EGL context. - /// - void* userGLContext = nullptr; - - /// Holds user EGL display. - void* userGLDisplay = nullptr; -}; - -struct UserGpuConfig{ - /// Holds user OpenGL configuration. - /// - UserGLConfig userGLConfig; -}; - -class PlatformConfig : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_PlatformConfig_Delete}; - - class UserGLConfigInternal : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserGLConfig_Delete}; - - public: - UserGLConfigInternal() - : BaseType(Snpe_UserGLConfig_Create()) - { } - UserGLConfigInternal(const UserGLConfig& uglc) - : UserGLConfigInternal() - { - setUserGLContext(uglc.userGLContext); - setUserGLDisplay(uglc.userGLDisplay); - } - void setUserGLContext(void* userGLContext){ - Snpe_UserGLConfig_SetUserGLContext(handle(), userGLContext); - } - void setUserGLDisplay(void* userGLDisplay){ - Snpe_UserGLConfig_SetUserGLDisplay(handle(), userGLDisplay); - } - - void* getUserGLContext(){ - return Snpe_UserGLConfig_GetUserGLContext(handle()); - } - void* getUserGLDisplay(){ - return Snpe_UserGLConfig_GetUserGLDisplay(handle()); - } - }; - - - - class UserGpuConfigInternal : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserGpuConfig_Delete}; - - public: - UserGpuConfigInternal() - : BaseType(Snpe_UserGpuConfig_Create()) - { } - - void set(const UserGLConfig& userGLConfig){ - UserGLConfigInternal uglc(userGLConfig); - Snpe_UserGpuConfig_Set(handle(), getHandle(uglc)); - } - - void get(UserGLConfig& uglc){ - UserGLConfigInternal uglci(moveHandle(Snpe_UserGpuConfig_Get_Ref(handle()), true)); - - uglc.userGLContext = uglci.getUserGLContext(); - uglc.userGLDisplay = uglci.getUserGLDisplay(); 
- } - - }; -public: - - /** - * @brief . - * - * An enum class of all supported platform types - */ - enum class PlatformType_t - { - /// Unknown platform type. - UNKNOWN = 0, - - /// Snapdragon CPU. - CPU = 1, - - /// Adreno GPU. - GPU = 2, - - /// Hexagon DSP. - DSP = 3 - }; - - /** - * @brief . - * - * A union class user platform configuration information - */ - struct PlatformConfigInfo - { - /// Holds user GPU Configuration. - /// - UserGpuConfig userGpuConfig; - - }; - - ~PlatformConfig() = default; - - PlatformConfig() - : BaseType(Snpe_PlatformConfig_Create()) - { } - - PlatformConfig(const PlatformConfig& other) - : BaseType(Snpe_PlatformConfig_CreateCopy(other.handle())) - { } - - /** - * @brief Retrieves the platform type - * - * @return Platform type - */ - PlatformType_t getPlatformType() const{ - return static_cast(Snpe_PlatformConfig_GetPlatformType(handle())); - }; - - /** - * @brief Indicates whther the plaform configuration is valid. - * - * @return True if the platform configuration is valid; false otherwise. - */ - bool isValid() const{ - return Snpe_PlatformConfig_IsValid(handle()); - }; - - /** - * @brief Retrieves the Gpu configuration - * - * @param[out] userGpuConfig The passed in userGpuConfig populated with the Gpu configuration on return. - * - * @return True if Gpu configuration was retrieved; false otherwise. - */ - bool getUserGpuConfig(UserGpuConfig& userGpuConfig) const{ - auto platformType = static_cast(Snpe_PlatformConfig_GetPlatformType(handle())); - if(platformType != PlatformType_t::GPU) return false; - - UserGpuConfigInternal gpuConf(moveHandle(Snpe_PlatformConfig_GetUserGpuConfig(handle()))); - - gpuConf.get(userGpuConfig.userGLConfig); - return true; - } - - /** - * @brief Sets the Gpu configuration - * - * @param[in] userGpuConfig Gpu Configuration - * - * @return True if Gpu configuration was successfully set; false otherwise. - */ - bool setUserGpuConfig(UserGpuConfig& userGpuConfig){ - UserGpuConfigInternal gpuConf; - gpuConf.set(userGpuConfig.userGLConfig); - return Snpe_PlatformConfig_SetUserGpuConfig(handle(), getHandle(gpuConf)); - } - - /** - * @brief Sets the platform options - * - * @param[in] options Options as a string in the form of "keyword:options" - * - * @return True if options are pass validation; otherwise false. If false, the options are not updated. - */ - bool setPlatformOptions(const std::string& options){ - return Snpe_PlatformConfig_SetPlatformOptions(handle(), options.c_str()); - } - - /** - * @brief Indicates whther the plaform configuration is valid. - * - * @return True if the platform configuration is valid; false otherwise. - */ - bool isOptionsValid() const{ - return Snpe_PlatformConfig_IsOptionsValid(handle()); - } - - /** - * @brief Gets the platform options - * - * @return Options as a string - */ - std::string getPlatformOptions() const { - return Snpe_PlatformConfig_GetPlatformOptions(handle()); - } - - /** - * @brief Sets the platform options - * - * @param[in] optionName Name of platform options" - * @param[in] value Value of specified optionName - * - * @return If true, add "optionName:value" to platform options if optionName don't exist, otherwise update the - * value of specified optionName. - * If false, the platform options will not be changed. 
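The wrapper code above attaches a user-supplied EGL context and display to a PlatformConfig so SNPE can work with OpenGL buffers. The sketch below is hedged: the EGL handles are assumed to come from the application's own GL setup and, as the PlatformConfig.h comment above warns, must stay valid for as long as the network executes.

```cpp
#include "DlSystem/PlatformConfig.hpp"

// Builds a PlatformConfig that carries the application's EGL context and display.
// 'eglContext' and 'eglDisplay' are opaque pointers obtained elsewhere and are
// assumed to remain valid throughout network execution.
DlSystem::PlatformConfig makeGpuPlatformConfig(void* eglContext, void* eglDisplay) {
    DlSystem::UserGLConfig glConfig;
    glConfig.userGLContext = eglContext;
    glConfig.userGLDisplay = eglDisplay;

    DlSystem::UserGpuConfig gpuConfig;
    gpuConfig.userGLConfig = glConfig;

    DlSystem::PlatformConfig platformConfig;
    platformConfig.setUserGpuConfig(gpuConfig);  // returns false if the set fails
    return platformConfig;
}
```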
- */ - bool setPlatformOptionValue(const std::string& optionName, const std::string& value){ - return Snpe_PlatformConfig_SetPlatformOptionValue(handle(), optionName.c_str(), value.c_str()); - } - - /** - * @brief Removes the platform options - * - * @param[in] optionName Name of platform options" - * @param[in] value Value of specified optionName - * - * @return If true, removed "optionName:value" to platform options if optionName don't exist, do nothing. - * If false, the platform options will not be changed. - */ - bool removePlatformOptionValue(const std::string& optionName, const std::string& value){ - return Snpe_PlatformConfig_RemovePlatformOptionValue(handle(), optionName.c_str(), value.c_str()); - } - - static void SetIsUserGLBuffer(bool isUserGLBuffer){ - Snpe_PlatformConfig_SetIsUserGLBuffer(isUserGLBuffer); - } - static bool GetIsUserGLBuffer(){ - return Snpe_PlatformConfig_GetIsUserGLBuffer(); - } - -}; - - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserGLConfig) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserGpuConfig) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, PlatformConfig) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/RuntimeList.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/RuntimeList.h deleted file mode 100644 index 2b699a7a..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/RuntimeList.h +++ /dev/null @@ -1,203 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_RUNTIME_LIST_H -#define DL_SYSTEM_RUNTIME_LIST_H - -#include - -#include "DlSystem/DlEnums.h" -#include "DlSystem/DlError.h" - -#include "StringList.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE RuntimeList handle - */ -typedef void* Snpe_RuntimeList_Handle_t; - -/** - * @brief . - * - * Creates a new runtime list - * - */ -SNPE_API -Snpe_RuntimeList_Handle_t Snpe_RuntimeList_Create(); - - -/** - * Copy-Constructs a RuntimeList and returns a handle to it - * - * @param runtimeListHandle the other RuntimeList to copy - * - * @return the handle to the created RuntimeList - */ -SNPE_API -Snpe_RuntimeList_Handle_t Snpe_RuntimeList_CreateCopy(Snpe_RuntimeList_Handle_t runtimeListHandle); - -/** - * @brief Destroys the RuntimeList - * - * @param[in] runtimeListHandle : Handle needed to access the runtimeList - * - * @return Error code. 
Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_Delete(Snpe_RuntimeList_Handle_t runtimeListHandle); - -/** - * Copy-assigns the contents of srcHandle into dstHandle - * - * @param src Source RuntimeList handle - * - * @param dst Destination RuntimeList handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_Assign(Snpe_RuntimeList_Handle_t src, Snpe_RuntimeList_Handle_t dst); - -/** - * @brief Returns the Runtime from list at position index - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @param[in] index : position in runtimeList - * - * @return The Runtime from list at position index - */ -SNPE_API -Snpe_Runtime_t Snpe_RuntimeList_GetRuntime(Snpe_RuntimeList_Handle_t runtimeListHandle, int index); - -/** - * @brief Set the Runtime of the list at position index - * - * @param[in] runtimeListHandle : Handle needed to access the runtimeList - * - * @param[in] index : position in runtimeList - * - * @param[in] runtime : The Runtime to assign to position index - * - * @return SNPE_SUCCESS on success - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_SetRuntime(Snpe_RuntimeList_Handle_t runtimeListHandle, size_t index, Snpe_Runtime_t runtime); - -/** - * @brief Adds runtime to the end of the runtime list - * order of precedence is former followed by latter entry - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @param[in] runtime to add - * - * @return Error code. Ruturns SNPE_SUCCESS If the runtime added successfully - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_Add(Snpe_RuntimeList_Handle_t runtimeListHandle, Snpe_Runtime_t runtime); - -/** - * @brief Removes the runtime from the list - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @param[in] runtime to be removed - * - * @return Error code. Ruturns SNPE_SUCCESS If the runtime removed successfully - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_Remove(Snpe_RuntimeList_Handle_t runtimeListHandle, Snpe_Runtime_t runtime) ; - -/** - * @brief Returns the number of runtimes in the list - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @return number of entries in the runtimeList. - */ -SNPE_API -size_t Snpe_RuntimeList_Size(Snpe_RuntimeList_Handle_t runtimeListHandle) ; - -/** - * @brief Returns 1 if the list is empty - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @return 1 if list empty, 0 otherwise. - */ -SNPE_API -int Snpe_RuntimeList_Empty(Snpe_RuntimeList_Handle_t runtimeListHandle) ; - -/** - * @brief . - * - * Removes all runtime from the list - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @return Error code. Returns SNPE_SUCCESS if runtime list is cleared successfully. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_Clear(Snpe_RuntimeList_Handle_t runtimeListHandle); - -/** - * @brief Get a StringList of names from the runtime list in order of precedence - * - * @param runtimeListHandle Handle to a RuntimeList - * - * @return Handle to a StringList - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_RuntimeList_GetRuntimeListNames(Snpe_RuntimeList_Handle_t runtimeListHandle); - -/** - * @brief . - * - * @param[in] runtime const char* - * Returns a Runtime enum corresponding to the in param string - * - */ -SNPE_API -Snpe_Runtime_t Snpe_RuntimeList_StringToRuntime(const char* str); - -/** - * @brief . 
- * - * @param[in] runtime - * Returns a const char* corresponding to the in param runtime enum - * - */ -SNPE_API -const char* Snpe_RuntimeList_RuntimeToString(Snpe_Runtime_t runtime); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_RUNTIME_LIST_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/RuntimeList.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/RuntimeList.hpp deleted file mode 100644 index a2abf2b7..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/RuntimeList.hpp +++ /dev/null @@ -1,115 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "StringList.hpp" -#include "DlEnums.hpp" -#include "DlSystem/RuntimeList.h" - - - - - - -namespace DlSystem { - -class RuntimeList : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_RuntimeList_Delete}; - - static Runtime_t GetRuntime(HandleType handle, size_t idx){ - return static_cast(Snpe_RuntimeList_GetRuntime(handle, int(idx))); - } - static Snpe_ErrorCode_t SetRuntime(HandleType handle, size_t idx, Runtime_t runtime){ - return Snpe_RuntimeList_SetRuntime(handle, idx, static_cast(runtime)); - } - -private: - using RuntimeReference = WrapperDetail::MemberIndexedReference; - friend RuntimeReference; - -public: - - RuntimeList() - : BaseType(Snpe_RuntimeList_Create()) - { } - RuntimeList(const RuntimeList& other) - : BaseType(Snpe_RuntimeList_CreateCopy(other.handle())) - { } - RuntimeList(RuntimeList&& other) noexcept - : BaseType(std::move(other)) - { } - - RuntimeList(const Runtime_t& runtime) - : BaseType(Snpe_RuntimeList_Create()) - { - Snpe_RuntimeList_Add(handle(), static_cast(runtime)); - } - - RuntimeList& operator=(const RuntimeList& other){ - if(this != &other){ - Snpe_RuntimeList_Assign(other.handle(), handle()); - } - return *this; - } - - RuntimeList& operator=(RuntimeList&& other) noexcept{ - return moveAssign(std::move(other)); - } - - Runtime_t operator[](size_t idx) const{ - return GetRuntime(handle(), idx); - } - - RuntimeReference operator[](size_t idx) noexcept{ - return {*this, idx}; - } - - bool add(const Runtime_t& runtime){ - return SNPE_SUCCESS == Snpe_RuntimeList_Add(handle(), static_cast(runtime)); - } - - void remove(Runtime_t runtime) noexcept{ - Snpe_RuntimeList_Remove(handle(), static_cast(runtime)); - } - - size_t size() const noexcept{ - return Snpe_RuntimeList_Size(handle()); - } - - bool empty() const noexcept{ - return Snpe_RuntimeList_Empty(handle()); - } - - void clear() noexcept{ - Snpe_RuntimeList_Clear(handle()); - } - - StringList getRuntimeListNames() const{ - return moveHandle(Snpe_RuntimeList_GetRuntimeListNames(handle())); - } - - static Runtime_t stringToRuntime(const char* runtimeStr){ - return static_cast(Snpe_RuntimeList_StringToRuntime(runtimeStr)); - } - static const char* runtimeToString(Runtime_t runtime){ - return Snpe_RuntimeList_RuntimeToString(static_cast(runtime)); - } - -}; - - -} // ns DlSystem 
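The RuntimeList wrapper above keeps runtimes in order of precedence (earlier entries are tried first, per the Snpe_RuntimeList_Add documentation). A small hedged sketch follows; the Runtime_t enumerator names are assumed from DlEnums.hpp, which is not part of this excerpt.

```cpp
#include <cstdio>

#include "DlSystem/RuntimeList.hpp"

int main() {
    DlSystem::RuntimeList runtimes;

    // Order of precedence: try DSP first, then GPU, then fall back to CPU.
    // These enumerator names are assumed to exist in DlEnums.hpp.
    runtimes.add(DlSystem::Runtime_t::DSP);
    runtimes.add(DlSystem::Runtime_t::GPU);
    runtimes.add(DlSystem::Runtime_t::CPU);

    // The runtime names, in the same order of precedence, as a StringList.
    DlSystem::StringList names = runtimes.getRuntimeListNames();
    for (const char** it = names.begin(); it != names.end(); ++it) {
        std::printf("%s\n", *it);
    }

    std::printf("%zu runtimes configured\n", runtimes.size());
    return 0;
}
```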
- - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, RuntimeList) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/SnpeApiExportDefine.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/SnpeApiExportDefine.h deleted file mode 100644 index 62c6718f..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/SnpeApiExportDefine.h +++ /dev/null @@ -1,34 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -// Macro controlling visibility of SNPE API - -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef SNPE_API -#define SNPE_API -#endif - -#ifdef __cplusplus -} // extern "C" -#endif diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/String.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/String.hpp deleted file mode 100644 index 85b2ef22..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/String.hpp +++ /dev/null @@ -1,70 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - - -#include - - -#include "Wrapper.hpp" - -namespace DlSystem{ - - -// Just a backwards compatible wrapper for std::string -class String{ -public: - String() = delete; - explicit String(const std::string& str) - : m_String(str) - { } - explicit String(std::string&& str) noexcept - : m_String(std::move(str)) - { } - - explicit String(const char* str) - : m_String(str) - { } - - String(String&& other) noexcept = default; - String(const String& other) = delete; - - - String& operator=(String&& other) noexcept = default; - String& operator=(const String& other) = delete; - - bool operator<(const String& rhs) const noexcept{ return m_String < rhs.m_String; } - bool operator>(const String& rhs) const noexcept{ return m_String > rhs.m_String; } - bool operator<=(const String& rhs) const noexcept{ return m_String <= rhs.m_String; } - bool operator>=(const String& rhs) const noexcept{ return m_String >= rhs.m_String; } - bool operator==(const String& rhs) const noexcept{ return m_String == rhs.m_String; } - bool operator!=(const String& rhs) const noexcept{ return m_String != rhs.m_String; } - - - bool operator<(const std::string& rhs) const noexcept{ return m_String < rhs; } - bool operator>(const std::string& rhs) const noexcept{ return m_String > rhs; } - bool operator<=(const std::string& rhs) const noexcept{ return m_String <= rhs; } - bool operator>=(const std::string& rhs) const noexcept{ return m_String >= rhs; } - bool operator==(const std::string& rhs) const noexcept{ return m_String == rhs; } - bool operator!=(const std::string& rhs) const noexcept{ return m_String != rhs; } - - - const char* c_str() const noexcept{ return m_String.c_str(); } - - explicit operator std::string&() noexcept{ return m_String; } - explicit operator const std::string&() const noexcept{ return m_String; } - -private: - std::string m_String; -}; - - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, String) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/StringList.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/StringList.h deleted file mode 100644 index faa793b3..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/StringList.h +++ /dev/null @@ -1,154 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_STRING_LIST_H -#define DL_SYSTEM_STRING_LIST_H - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * A typedef to indicate a SNPE StringList handle - */ -typedef void* Snpe_StringList_Handle_t; - -/** - * Constructs a StringList and returns a handle to it - * - * @return the handle to the created StringList - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_StringList_Create(); - -/** - * Constructs a StringList and returns a handle to it - * - * @param[in] size : size of list - * - * @return the handle to the created StringList - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_StringList_CreateSize(size_t size); - -/** - * Constructs a StringList and returns a handle to it - * - * @param[in] other : StringList handle to be copied from - * - * @return the handle to the created StringList - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_StringList_CreateCopy(Snpe_StringList_Handle_t other); - -/** - * Destroys/frees a StringList - * - * @param[in] stringListHandle : Handle to access the stringList - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_StringList_Delete(Snpe_StringList_Handle_t stringListHandle); - - -/** - * Append a string to the list. - * - * @param[in] stringListHandle : Handle to access the stringList - * @param[in] str Null-terminated ASCII string to append to the list. - * - * @return SNPE_SUCCESS if Append operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_StringList_Append(Snpe_StringList_Handle_t stringListHandle, const char* string); - -/** - * Returns the string at the indicated position, - * or an empty string if the positions is greater than the size - * of the list. - * - * @param[in] stringListHandle : Handle to access the stringList - * @param[in] idx Position in the list of the desired string - * - * @return the string at the indicated position - */ -SNPE_API -const char* Snpe_StringList_At(Snpe_StringList_Handle_t stringListHandle, size_t idx); - -/** - * Pointer to the first string in the list. - * Can be used to iterate through the list. - * - * @param[in] stringListHandle : Handle to access the stringList - * - * @return Pointer to the first string in the list. - */ -SNPE_API -const char** Snpe_StringList_Begin(Snpe_StringList_Handle_t stringListHandle); - -/** - * Pointer to one after the last string in the list. - * Can be used to iterate through the list. - * - * @param[in] stringListHandle : Handle to access the stringList - * - * @return Pointer to one after the last string in the list - */ -SNPE_API -const char** Snpe_StringList_End(Snpe_StringList_Handle_t stringListHandle); - -/** - * Return the number of valid string pointers held by this list. 
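Snpe_StringList_Begin and Snpe_StringList_End bound a contiguous array of const char*, so the list can be walked like a plain array. A hedged sketch using the C API directly from C++; the appended strings are placeholders.

```cpp
#include <cstdio>

#include "DlSystem/StringList.h"

int main() {
    Snpe_StringList_Handle_t list = Snpe_StringList_Create();

    // Placeholder entries; any null-terminated ASCII strings can be appended.
    Snpe_StringList_Append(list, "first");
    Snpe_StringList_Append(list, "second");

    // Begin()/End() bound an array of const char* that can be iterated in order.
    for (const char** it = Snpe_StringList_Begin(list);
         it != Snpe_StringList_End(list); ++it) {
        std::printf("%s\n", *it);
    }

    std::printf("size: %zu\n", Snpe_StringList_Size(list));
    Snpe_StringList_Delete(list);
    return 0;
}
```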
- * - * @param[in] stringListHandle : Handle to access the stringList - * - * @return The size of the StringList - */ -SNPE_API -size_t Snpe_StringList_Size(Snpe_StringList_Handle_t stringListHandle); - -/** - * Copy-assigns the contents of src into dst - * - * @param src Source StringList handle - * @param dst Destination StringList handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_StringList_Assign(Snpe_StringList_Handle_t src, Snpe_StringList_Handle_t dst); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_STRING_LIST_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/StringList.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/StringList.hpp deleted file mode 100644 index 2fd84bf1..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/StringList.hpp +++ /dev/null @@ -1,73 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "DlSystem/DlError.hpp" - -#include "DlSystem/StringList.h" - - -namespace DlSystem { - -class StringList : public Wrapper{ - friend BaseType; - using BaseType::BaseType; - static constexpr DeleteFunctionType DeleteFunction = Snpe_StringList_Delete; - -public: - StringList() - : BaseType(Snpe_StringList_Create()) - { } - explicit StringList(size_t length) - : BaseType(Snpe_StringList_CreateSize(length)) - { } - StringList(const StringList& other) - : BaseType(Snpe_StringList_CreateCopy(other.handle())) - { } - StringList(StringList&& other) noexcept - : BaseType(std::move(other)) - { } - - - StringList& operator=(const StringList& other){ - if(this != &other){ - Snpe_StringList_Assign(other.handle(), handle()); - } - return *this; - } - StringList& operator=(StringList&& other) noexcept{ - return moveAssign(std::move(other)); - } - - - DlSystem::ErrorCode append(const char* str){ - return static_cast(Snpe_StringList_Append(handle(), str)); - } - - const char* at(size_t idx) const noexcept{ - return Snpe_StringList_At(handle(), idx); - } - - const char** begin() const noexcept{ - return Snpe_StringList_Begin(handle()); - } - const char** end() const noexcept{ - return Snpe_StringList_End(handle()); - } - - size_t size() const noexcept{ - return Snpe_StringList_Size(handle()); - } - -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, StringList) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorMap.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorMap.h deleted file mode 100644 index aa367eda..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorMap.h +++ /dev/null @@ -1,154 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_TENSORMAP_H -#define DL_SYSTEM_TENSORMAP_H - -#include "DlSystem/ITensor.h" -#include "DlSystem/StringList.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * A typedef to indicate a SNPE Tensor Map handle - */ -typedef void* Snpe_TensorMap_Handle_t; - - -/** - * Constructs a TensorMap and returns a handle to it - * - * @return the handle to the created TensorMap - */ -SNPE_API -Snpe_TensorMap_Handle_t Snpe_TensorMap_Create(); - - -/** - * Copy-Constructs a TensorMap and returns a handle to it - * - * @param tensorMapHandle the other TensorMap to copy - * - * @return the handle to the created TensorMap - */ -SNPE_API -Snpe_TensorMap_Handle_t Snpe_TensorMap_CreateCopy(Snpe_TensorMap_Handle_t tensorMapHandle); - -/** - * Copy-assigns the contents of srcHandle into dstHandle - * - * @param src Source TensorMap handle - * - * @param dst Destination TensorMap handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorMap_Assign(Snpe_TensorMap_Handle_t srcHandle, Snpe_TensorMap_Handle_t dstHandle); - - -/** - * Destroys/frees Tensor Map - * - * @param[in] handle : handle to tensorMap - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorMap_Delete(Snpe_TensorMap_Handle_t handle); - -/** - * @brief Adds a name and the corresponding tensor pointer - * to the map - * - * @param[in] handle : Handle to tensorMap - * @param[in] name : The name of the tensor - * @param[in] tensorHandle : Handle to access ITensor - * - * @note If a tensor with the same name already exists, the - * tensor is replaced with the existing tensor. - */ -SNPE_API -void Snpe_TensorMap_Add(Snpe_TensorMap_Handle_t handle, const char *name, Snpe_ITensor_Handle_t tensorHandle); - -/** - * @brief Removes a mapping of tensor and its name by its name - * - * @param[in] handle : Handle to tensorMap - * @param[in] name : The name of tensor to be removed - * - * @note If no tensor with the specified name is found, nothing - * is done. - */ -SNPE_API -void Snpe_TensorMap_Remove(Snpe_TensorMap_Handle_t handle, const char *name); - -/** - * @brief Returns the number of tensors in the map - * - * @param[in] handle : Handle to tensorMap - * - * @return Number of tensors in the map - */ -SNPE_API -size_t Snpe_TensorMap_Size(Snpe_TensorMap_Handle_t handle); - -/** - * @brief . - * - * @param[in] handle : Handle to tensorMap - * Removes all tensors from the map - */ -SNPE_API -void Snpe_TensorMap_Clear(Snpe_TensorMap_Handle_t handle); - -/** - * @brief Returns the tensor given its name. - * - * @param[in] handle : Handle to tensorMap - * @param[in] name : The name of the tensor to get. - * - * @return nullptr if no tensor with the specified name is - * found; otherwise, a valid pointer to the tensor. - */ -SNPE_API -Snpe_ITensor_Handle_t Snpe_TensorMap_GetTensor_Ref(Snpe_TensorMap_Handle_t handle, const char *name); - -/** - * @brief . 
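The TensorMap above associates tensor names with ITensor handles when feeding inputs to, or collecting outputs from, a network. A hedged sketch using the TensorMap C++ wrapper declared later in this diff; the tensor itself is assumed to be created elsewhere (SNPE's tensor factory is outside this excerpt) and the name is supplied by the caller.

```cpp
#include "DlSystem/TensorMap.hpp"

// Registers a caller-owned input tensor under the given name and reads it back.
// 'inputTensor' is assumed to have been created elsewhere; the map keys tensors
// by their null-terminated name.
void registerInput(DlSystem::TensorMap& inputs,
                   const char* name,
                   DlSystem::ITensor* inputTensor) {
    // add() rejects null tensors with a bad-argument error code.
    if (inputs.add(name, inputTensor) != DlSystem::ErrorCode::NONE) {
        return;
    }

    // Look the tensor back up by name; getTensor() returns nullptr for unknown names.
    DlSystem::ITensor* found = inputs.getTensor(name);
    (void)found;
}
```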
- * - * @param[in] handle : Handle to tensorMap - * - * @return A StringList of the names of all tensors - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_TensorMap_GetTensorNames(Snpe_TensorMap_Handle_t handle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_TENSOR_MAP_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorMap.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorMap.hpp deleted file mode 100644 index 20a6c21f..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorMap.hpp +++ /dev/null @@ -1,81 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "DlSystem/ITensor.hpp" -#include "DlSystem/StringList.hpp" -#include "DlSystem/DlError.hpp" - -#include "DlSystem/TensorMap.h" - -namespace DlSystem { - -class TensorMap : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_TensorMap_Delete}; -public: - - TensorMap() - : BaseType(Snpe_TensorMap_Create()) - { } - - TensorMap(const TensorMap& other) - : BaseType(Snpe_TensorMap_CreateCopy(other.handle())) - { } - - TensorMap(TensorMap&& other) noexcept - : BaseType(std::move(other)) - { } - - TensorMap& operator=(const TensorMap& other){ - if(this != &other){ - Snpe_TensorMap_Assign(other.handle(), handle()); - } - return *this; - } - TensorMap& operator=(TensorMap&& other) noexcept{ - return moveAssign(std::move(other)); - } - - DlSystem::ErrorCode add(const char* name, ITensor* tensor){ - if(!tensor) return DlSystem::ErrorCode::SNPE_CAPI_BAD_ARGUMENT; - Snpe_TensorMap_Add(handle(), name, getHandle(*tensor)); - return DlSystem::ErrorCode::NONE; - } - - void remove(const char* name) noexcept{ - Snpe_TensorMap_Remove(handle(), name); - } - - size_t size() const noexcept{ - return Snpe_TensorMap_Size(handle()); - } - - void clear() noexcept{ - Snpe_TensorMap_Clear(handle()); - } - - - ITensor* getTensor(const char* name) const noexcept{ - return makeReference(Snpe_TensorMap_GetTensor_Ref(handle(), name)); - } - - StringList getTensorNames() const{ - return moveHandle(Snpe_TensorMap_GetTensorNames(handle())); - } - -}; - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, TensorMap) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorShape.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorShape.h deleted file mode 100644 index 1fde628c..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorShape.h +++ /dev/null @@ -1,174 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_TENSOR_SHAPE_H -#define DL_SYSTEM_TENSOR_SHAPE_H - -#include - -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE TensorShape handle - */ -typedef void* Snpe_TensorShape_Handle_t; - - -/** - * @brief . - * - * Creates a new shape with a list of dims specified in array - * - * @param[in] dims The dimensions are specified in which the last - * element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying, etc. - * - * @param[in] size Size of the array. - * - * @return the handle to the created TensorShape - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_TensorShape_CreateDimsSize(const size_t *dims, size_t size); - -/** - * Constructs a TensorShape and returns a handle to it - * - * @return the handle to the created TensorShape - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_TensorShape_Create(); - -/** - * @brief . - * - * copy constructor. - * @param[in] other object to copy. - * - * @return the handle to the created TensorShape. - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_TensorShape_CreateCopy(Snpe_TensorShape_Handle_t other); - -/** - * Destroys/frees Tensor Shape - * - * @param[in] handle : handle to tensorShape - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShape_Delete(Snpe_TensorShape_Handle_t tensorShapeHandle); - -/** - * Copy-assigns the contents of srcHandle into dstHandle - * - * @param srcHandle Source TensorShape handle - * @param dstHandle Destination TensorShape handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShape_Assign(Snpe_TensorShape_Handle_t srcHandle, Snpe_TensorShape_Handle_t dstHandle); - -/** - * @brief . - * - * Concatenates additional dimensions specified in - * the array to the existing dimensions. - * - * @param[in] handle : handle to tensorShape - * @param[in] dims The dimensions are specified in which the last - * element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying, etc. - * - * @param[in] size Size of the array. - * - */ -SNPE_API -void Snpe_TensorShape_Concatenate(Snpe_TensorShape_Handle_t tensorShape, const size_t *dims, size_t size); - -/** - * @brief . - * - * @param[in] handle : handle to tensorShape - * - * Retrieves the rank i.e. number of dimensions. - * - * @return The rank - */ -SNPE_API -size_t Snpe_TensorShape_Rank(Snpe_TensorShape_Handle_t tensorShape); - -/** - * @brief . - * - * @param[in] handle : handle to tensorShape - * - * @param[in] index : Position in the dimension array. 
- * - * @return The dimension value in tensor shape - */ -SNPE_API -size_t Snpe_TensorShape_At(Snpe_TensorShape_Handle_t tensorShapeHandle, size_t index); - -/** - * @brief Set a value in a TensorShape at the provided index - * - * @param[in] handle : handle to tensorShape - * - * @param[in] index : Position in the dimension array. - * - * @param[in] value : Dimension value to set - * - * @return SNPE_SUCCESS on success - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShape_Set(Snpe_TensorShape_Handle_t tensorShapeHandle, size_t index, size_t value); - -/** - * @brief . - * - * Retrieves a pointer to the first dimension of shape - * - * @param[in] handle : handle to tensorShape - * - * @return nullptr if no dimension exists; otherwise, points to - * the first dimension. - * - */ -SNPE_API -const size_t* Snpe_TensorShape_GetDimensions(Snpe_TensorShape_Handle_t tensorShape); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_TENSOR_SHAPE_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorShape.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorShape.hpp deleted file mode 100644 index 776637c7..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorShape.hpp +++ /dev/null @@ -1,104 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include -#include -#include - -#include "Wrapper.hpp" - -#include "DlSystem/TensorShape.h" - -namespace DlSystem { - - -using Dimension = size_t; - - - -class TensorShape : public Wrapper { - friend BaseType; - using BaseType::BaseType; - -protected: - static constexpr DeleteFunctionType DeleteFunction{Snpe_TensorShape_Delete}; - -private: - using DimensionReference = WrapperDetail::MemberIndexedReference; - friend DimensionReference; - -public: - - TensorShape() - : BaseType(Snpe_TensorShape_Create()) - { } - - TensorShape(const TensorShape& other) - : BaseType(Snpe_TensorShape_CreateCopy(other.handle())) - { } - - TensorShape(TensorShape&& other) noexcept - : BaseType(std::move(other)) - { } - - TensorShape(std::initializer_list dims) - : BaseType(Snpe_TensorShape_CreateDimsSize(dims.begin(), dims.size())) - { } - - TensorShape& operator=(const TensorShape& other) noexcept{ - if(this != &other){ - Snpe_TensorShape_Assign(other.handle(), handle()); - } - return *this; - } - - TensorShape& operator=(TensorShape&& other) noexcept{ - return moveAssign(std::move(other)); - } - - TensorShape(const size_t *dims, size_t size) - : BaseType(Snpe_TensorShape_CreateDimsSize(dims, size)) - { } - - TensorShape(const std::vector& dims) - : TensorShape(dims.data(), dims.size()) - { } - - - void concatenate(const size_t *dims, size_t size){ - Snpe_TensorShape_Concatenate(handle(), dims, size); - } - - void concatenate(const size_t &dim){ - return concatenate(&dim, 1); - } - - size_t operator[](size_t idx) const{ - return Snpe_TensorShape_At(handle(), idx); - } - - DimensionReference operator[](size_t idx){ - return {*this, idx}; - } - - size_t rank() const{ - return Snpe_TensorShape_Rank(handle()); - } - - const size_t* getDimensions() const{ - return Snpe_TensorShape_GetDimensions(handle()); - } - - -}; - -} // ns DlSystem - 
-ALIAS_IN_ZDL_NAMESPACE(DlSystem, Dimension) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, TensorShape) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.h deleted file mode 100644 index 520fa5ab..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.h +++ /dev/null @@ -1,163 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - - -/** - * @file - */ - -#ifndef _SNPE_TENSOR_SHAPE_MAP_H_ -#define _SNPE_TENSOR_SHAPE_MAP_H_ - - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" - -#include "DlSystem/TensorShape.h" -#include "DlSystem/StringList.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE TensorShapeMap handle - */ -typedef void* Snpe_TensorShapeMap_Handle_t; - -/** - * Constructs a TensorShapeMap and returns a handle to it - * - * @return the handle to the created TensorShapeMap - */ -SNPE_API -Snpe_TensorShapeMap_Handle_t Snpe_TensorShapeMap_Create(); - -/** - * @brief . - * - * copy constructor. - * - * @param[in] tsmHandle : Handle to the other object to copy. - * @return the handle to the created TensorShapeMap - */ -SNPE_API -Snpe_TensorShapeMap_Handle_t Snpe_TensorShapeMap_CreateCopy(Snpe_TensorShapeMap_Handle_t tsmHandle); - -/** - * Destroys/frees Tensor Shape Map - * - * @param[in] tsmhandle : handle to access Tensor Shape Map - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShapeMap_Delete(Snpe_TensorShapeMap_Handle_t tsmHandle); - -/** - * @brief . - * - * assignment operator. Copy-assigns from srcHandle to dstHandle - * @param[in] srcHandle : handle to source Tensor Shape Map object - * @param[out] dstHandle : handle to destination Tensor Shape Map object - * - * @return Returns SNPE_SUCCESS if Assignment successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShapeMap_Assign(Snpe_TensorShapeMap_Handle_t srcHandle, Snpe_TensorShapeMap_Handle_t dstHandle); - -/** - * @brief Adds a name and the corresponding tensor pointer - * to the map - * - * @param[in] tsmhandle : handle to access Tensor Shape Map - * @param[in] name The name of the tensor - * @param[in] tsHandle : Handle to access Tensor Shape - * - * @return Returns SNPE_SUCCESS if Add operation successful - * @note If a tensor with the same name already exists, no new - * tensor is added. 
- */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShapeMap_Add(Snpe_TensorShapeMap_Handle_t tsmHandle, const char* name, Snpe_TensorShape_Handle_t tsHandle); - -/** - * @brief Removes a mapping of tensor and its name by its name - * - * @param[in] tsmhandle : handle to access Tensor Shape Map - * @param[in] name The name of tensor to be removed - * @return Returns SNPE_SUCCESS if Remove operation successful - * - * @note If no tensor with the specified name is found, nothing - * is done. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShapeMap_Remove(Snpe_TensorShapeMap_Handle_t tsmHandle, const char* name); - -/** - * @brief Returns the number of tensors in the map - * @param[in] tsmhandle : handle to access Tensor Shape Map - * @return Returns number entries in TensorShapeMap - */ -SNPE_API -size_t Snpe_TensorShapeMap_Size(Snpe_TensorShapeMap_Handle_t tsmHandle); - -/** - * @brief . - * - * Removes all tensors from the map - * @param[in] tsmhandle : handle to access Tensor Shape Map - * @return Returns SNPE_SUCCESS if Clear operation successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShapeMap_Clear(Snpe_TensorShapeMap_Handle_t tsmHandle); - -/** - * @brief Returns the tensor given its name. - * - * @param[in] tsmhandle : handle to access Tensor Shape Map - * @param[in] name The name of the tensor to get. - * - * @return nullptr if no tensor with the specified name is - * found; otherwise, a valid Tensor Shape Handle. - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_TensorShapeMap_GetTensorShape(Snpe_TensorShapeMap_Handle_t tsmHandle, const char* name); - -/** - * @brief . - * - * @param[in] tsmHandle : handle to access Tensor Shape Map - * @return A stringList Handle to access names of all tensor shapes - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_TensorShapeMap_GetTensorShapeNames(Snpe_TensorShapeMap_Handle_t tsmHandle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_TENSOR_SHAPE_MAP_H_ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.hpp deleted file mode 100644 index 8b79a6e2..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.hpp +++ /dev/null @@ -1,77 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include "DlSystem/StringList.hpp" -#include "DlSystem/TensorShape.hpp" -#include "DlSystem/DlError.hpp" - -#include "DlSystem/TensorShapeMap.h" - -namespace DlSystem { - -class TensorShapeMap : public Wrapper { - friend BaseType; - using BaseType::BaseType; - static constexpr DeleteFunctionType DeleteFunction{Snpe_TensorShapeMap_Delete}; - -public: - TensorShapeMap() - : BaseType(Snpe_TensorShapeMap_Create()) - { } - TensorShapeMap(const TensorShapeMap& other) - : BaseType(Snpe_TensorShapeMap_CreateCopy(other.handle())) - { } - TensorShapeMap(TensorShapeMap&& other) noexcept - : BaseType(std::move(other)) - { } - - TensorShapeMap& operator=(const TensorShapeMap& other){ - if(this != &other){ - Snpe_TensorShapeMap_Assign(other.handle(), handle()); - } - return *this; - } - TensorShapeMap& operator=(TensorShapeMap&& other) noexcept{ - return moveAssign(std::move(other)); - } - - DlSystem::ErrorCode add(const char *name, const TensorShape& tensorShape){ - return static_cast( - Snpe_TensorShapeMap_Add(handle(), name, getHandle(tensorShape)) - ); - } - - DlSystem::ErrorCode remove(const char* name) noexcept{ - return static_cast(Snpe_TensorShapeMap_Remove(handle(), name)); - } - - size_t size() const noexcept{ - return Snpe_TensorShapeMap_Size(handle()); - } - - DlSystem::ErrorCode clear() noexcept{ - return static_cast(Snpe_TensorShapeMap_Clear(handle())); - } - - TensorShape getTensorShape(const char* name) const noexcept{ - return moveHandle(Snpe_TensorShapeMap_GetTensorShape(handle(), name)); - } - - StringList getTensorShapeNames() const{ - return moveHandle(Snpe_TensorShapeMap_GetTensorShapeNames(handle())); - } - -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, TensorShapeMap) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.h deleted file mode 100644 index 2da1c792..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.h +++ /dev/null @@ -1,151 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_USER_BUFFER_MAP_H -#define DL_SYSTEM_USER_BUFFER_MAP_H - -#include "DlSystem/StringList.h" -#include "DlSystem/IUserBuffer.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE UserBufferMap handle - */ -typedef void* Snpe_UserBufferMap_Handle_t; - -/** - * @brief . - * - * Creates a new empty UserBuffer map - */ -SNPE_API -Snpe_UserBufferMap_Handle_t Snpe_UserBufferMap_Create(); - -/** - * copy constructor. 
- * @param[in] other : Handle to the other userBufferMap to be copied from. - */ -SNPE_API -Snpe_UserBufferMap_Handle_t Snpe_UserBufferMap_CreateCopy(Snpe_UserBufferMap_Handle_t other); - - -/** - * @brief Adds a name and the corresponding UserBuffer pointer - * to the map - * - * @param[in] handle : Handle to access UserBufferMap - * @param[in] name : The name of the UserBuffer - * @param[in] bufferHandle : Handle to access UserBuffer - * - * @note If a UserBuffer with the same name already exists, the new - * UserBuffer pointer would be updated. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferMap_Add(Snpe_UserBufferMap_Handle_t handle, const char *name, Snpe_IUserBuffer_Handle_t bufferHandle); - -/** - * @brief Removes a mapping of one UserBuffer and its name by its name - * - * @param[in] handle : Handle to access UserBufferMap - * - * @param[in] name : The name of UserBuffer to be removed - * - * @note If no UserBuffer with the specified name is found, nothing - * is done. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferMap_Remove(Snpe_UserBufferMap_Handle_t handle, const char *name); - -/** - * @brief Returns the number of UserBuffers in the map - * @param[in] handle : Handle to access UserBufferMap - */ -SNPE_API -size_t Snpe_UserBufferMap_Size(Snpe_UserBufferMap_Handle_t handle); - -/** - * @brief . - * - * @param[in] handle : Handle to access UserBufferMap - * Removes all UserBuffers from the map - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferMap_Clear(Snpe_UserBufferMap_Handle_t handle); - -/** - * @brief Returns the UserBuffer given its name. - * - * @param[in] handle : Handle to access UserBufferMap - * - * @param[in] name : The name of the UserBuffer to get. - * - * @return nullptr if no UserBuffer with the specified name is - * found; otherwise, a valid pointer to the UserBuffer. - */ -SNPE_API -Snpe_IUserBuffer_Handle_t Snpe_UserBufferMap_GetUserBuffer_Ref(Snpe_UserBufferMap_Handle_t handle , const char *name); - -/** - * @brief . - * - * Returns the names of all UserBuffers - * - * @param[in] handle : Handle to access UserBufferMap - * - * @return A list of UserBuffer names. - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_UserBufferMap_GetUserBufferNames(Snpe_UserBufferMap_Handle_t handle); - -/** - * Copy-assigns the contents of srcHandle into dstHandle - * - * @param src Source UserBufferMap handle - * @param dst Destination UserBufferMap handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferMap_Assign(Snpe_UserBufferMap_Handle_t srcHandle, Snpe_UserBufferMap_Handle_t dstHandle); - -/** - * Destroys/frees UserBuffer Map - * - * @param[in] handle : Handle to access UserBuffer Map - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferMap_Delete(Snpe_UserBufferMap_Handle_t handle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_USER_BUFFER_MAP_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.hpp deleted file mode 100644 index acf3207c..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.hpp +++ /dev/null @@ -1,80 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. 
-// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include - -#include "Wrapper.hpp" -#include "DlSystem/DlError.hpp" -#include "DlSystem/StringList.hpp" -#include "DlSystem/IUserBuffer.hpp" - -#include "DlSystem/UserBufferMap.h" - -namespace DlSystem { - -class UserBufferMap : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserBufferMap_Delete}; - -public: - UserBufferMap() - : BaseType(Snpe_UserBufferMap_Create()) - { } - - UserBufferMap(const UserBufferMap& other) - : BaseType(Snpe_UserBufferMap_CreateCopy(other.handle())) - { } - UserBufferMap(UserBufferMap&& other) noexcept - : BaseType(std::move(other)) - { } - - UserBufferMap& operator=(const UserBufferMap& other){ - if(this != &other){ - Snpe_UserBufferMap_Assign(other.handle(), handle()); - } - return *this; - } - UserBufferMap& operator=(UserBufferMap&& other) noexcept{ - return moveAssign(std::move(other)); - } - - DlSystem::ErrorCode add(const char* name, IUserBuffer* buffer){ - if(!buffer) return ErrorCode::SNPE_CAPI_BAD_ARGUMENT; - return static_cast(Snpe_UserBufferMap_Add(handle(), name, getHandle(*buffer))); - } - - DlSystem::ErrorCode remove(const char* name) noexcept{ - return static_cast(Snpe_UserBufferMap_Remove(handle(), name)); - } - - size_t size() const noexcept{ - return Snpe_UserBufferMap_Size(handle()); - } - - DlSystem::ErrorCode clear() noexcept{ - return static_cast(Snpe_UserBufferMap_Clear(handle())); - } - - IUserBuffer* getUserBuffer(const char* name) const noexcept{ - return makeReference(Snpe_UserBufferMap_GetUserBuffer_Ref(handle(), name)); - } - - StringList getUserBufferNames() const{ - return moveHandle(Snpe_UserBufferMap_GetUserBufferNames(handle())); - } - -}; - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferMap) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.h deleted file mode 100644 index c927d33e..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.h +++ /dev/null @@ -1,156 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_USER_MEMORY_MAP_H -#define DL_SYSTEM_USER_MEMORY_MAP_H - -#include "DlSystem/StringList.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE User Memory handle - */ -typedef void* Snpe_UserMemoryMap_Handle_t; - -/** - * @brief . 
- * - * Creates a new empty UserMemory map - */ -SNPE_API -Snpe_UserMemoryMap_Handle_t Snpe_UserMemoryMap_Create(); - -/** - * copy constructor. - * @param[in] other : Handle to the other object to copy. - */ -SNPE_API -Snpe_UserMemoryMap_Handle_t Snpe_UserMemoryMap_Copy(Snpe_UserMemoryMap_Handle_t other); - -/** - * Copy-assigns the contents of srcHandle into dstHandle - * - * @param[in] srcHandle Source UserMemoryMap handle - * - * @param[out] dstHandle Destination UserMemoryMap handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserMemoryMap_Assign(Snpe_UserMemoryMap_Handle_t srcHandle, Snpe_UserMemoryMap_Handle_t dstHandle); - -/** - * Destroys/frees UserMemory Map - * - * @param[in] handle : Handle to access UserMemory Map - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserMemoryMap_Delete(Snpe_UserMemoryMap_Handle_t handle); - -/** - * @brief Adds a name and the corresponding buffer address - * to the map - * - * @param[in] handle : Handle to access UserMemory Map - * @param[in] name : The name of the UserMemory - * @param[in] address : The pointer to the Buffer Memory - * - * @note If a UserBuffer with the same name already exists, the new - * address would be updated. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserMemoryMap_Add(Snpe_UserMemoryMap_Handle_t handle, const char *name, void *address); - -/** - * @brief Removes a mapping of one Buffer address and its name by its name - * - * @param[in] handle : Handle to access UserMemory Map - * @param[in] name : The name of Memory address to be removed - * - * @note If no UserBuffer with the specified name is found, nothing - * is done. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserMemoryMap_Remove(Snpe_UserMemoryMap_Handle_t handle, const char *name); - -/** - * @brief Returns the number of User Memory addresses in the map - * @param[in] handle : Handle to access UserMemory Map - */ -SNPE_API -size_t Snpe_UserMemoryMap_Size(Snpe_UserMemoryMap_Handle_t handle); - -/** - * @brief . - * - * Removes all User Memory from the map - * @param[in] handle : Handle to access UserMemory Map - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserMemoryMap_Clear(Snpe_UserMemoryMap_Handle_t handle); - -/** - * @brief . - * Returns the names of all User Memory - * - * @param[in] handle : Handle to access UserMemory Map - * - * @return Returns a handle to the stringList. 
- */ -SNPE_API -Snpe_StringList_Handle_t Snpe_UserMemoryMap_GetUserBufferNames(Snpe_UserMemoryMap_Handle_t handle); - -/** - * @brief Returns the no of UserMemory addresses mapped to the buffer - * - * @param[in] handle : Handle to access UserMemory Map - * @param[in] name : The name of the UserMemory - * - */ -SNPE_API -size_t Snpe_UserMemoryMap_GetUserMemoryAddressCount(Snpe_UserMemoryMap_Handle_t handle, const char *name); - -/** - * @brief Returns address at a specified index corresponding to a UserMemory buffer name - * - * @param[in] handle : Handle to access UserMemory Map - * @param[in] name : The name of the buffer - * @param[in] index : The index in the list of addresses - * - */ -SNPE_API -void* Snpe_UserMemoryMap_GetUserMemoryAddressAtIndex(Snpe_UserMemoryMap_Handle_t handle, const char *name, uint32_t index); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_USER_MEMORY_MAP_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.hpp deleted file mode 100644 index 36e9cd37..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.hpp +++ /dev/null @@ -1,76 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "DlSystem/DlError.hpp" -#include "DlSystem/StringList.hpp" - -#include "DlSystem/UserMemoryMap.h" - -namespace DlSystem { - -class UserMemoryMap : public Wrapper { - friend BaseType; -// Use this to get free move Ctor and move assignment operator, provided this class does not specify -// as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserMemoryMap_Delete}; -public: - UserMemoryMap() - : BaseType(Snpe_UserMemoryMap_Create()) - { } - UserMemoryMap(const UserMemoryMap& other) - : BaseType(Snpe_UserMemoryMap_Copy(other.handle())) - { } - UserMemoryMap(UserMemoryMap&& other) noexcept - : BaseType(std::move(other)) - { } - - UserMemoryMap& operator=(const UserMemoryMap& other){ - if(this != &other){ - Snpe_UserMemoryMap_Assign(handle(), other.handle()); - } - return *this; - } - - DlSystem::ErrorCode add(const char* name, void* address) noexcept{ - return static_cast(Snpe_UserMemoryMap_Add(handle(), name, address)); - } - - DlSystem::ErrorCode remove(const char* name){ - return static_cast(Snpe_UserMemoryMap_Remove(handle(), name)); - } - - size_t size() const noexcept{ - return Snpe_UserMemoryMap_Size(handle()); - } - - DlSystem::ErrorCode clear() noexcept{ - return static_cast(Snpe_UserMemoryMap_Clear(handle())); - } - - StringList getUserBufferNames() const{ - return moveHandle(Snpe_UserMemoryMap_GetUserBufferNames(handle())); - } - - size_t getUserMemoryAddressCount(const char* name) const noexcept{ - return Snpe_UserMemoryMap_GetUserMemoryAddressCount(handle(), name); - } - - void* getUserMemoryAddressAtIndex(const char* name, uint32_t index) const noexcept{ - return Snpe_UserMemoryMap_GetUserMemoryAddressAtIndex(handle(), name, index); - } - -}; - - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserMemoryMap) diff --git 
a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.h deleted file mode 100644 index 282ee547..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.h +++ /dev/null @@ -1,107 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _PLATFORM_VALIDATOR_H_ -#define _PLATFORM_VALIDATOR_H_ - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" -#include "DlSystem/DlEnums.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE PlatformValidator handle - */ -typedef void* Snpe_PlatformValidator_Handle_t; - -/** - * @brief . - * - * Creates a new Platform Validator - * - */ -SNPE_API -Snpe_PlatformValidator_Handle_t Snpe_PlatformValidator_Create(); - - -/** - * Destroys/frees Platform Validator - * - * @param[in] handle : Handle to access Platform Validator - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PlatformValidator_Delete(Snpe_PlatformValidator_Handle_t handle); - -/** - * @brief Sets the runtime processor for compatibility check - * - * @return Void - */ -SNPE_API -void Snpe_PlatformValidator_SetRuntime(Snpe_PlatformValidator_Handle_t handle, - Snpe_Runtime_t runtime, - bool unsignedPD=true); - -/** - * @brief Checks if the Runtime prerequisites for SNPE are available. - * - * @return 1 if the Runtime prerequisites are available, else 0. - */ -SNPE_API -int Snpe_PlatformValidator_IsRuntimeAvailable(Snpe_PlatformValidator_Handle_t handle, - bool unsignedPD=true); - -/** - * @brief Returns the core version for the Runtime selected. - * - * @return char* which contains the actual core version value - */ -SNPE_API -const char* Snpe_PlatformValidator_GetCoreVersion(Snpe_PlatformValidator_Handle_t handle); - -/** - * @brief Returns the library version for the Runtime selected. - * - * @return char* which contains the actual lib version value - */ -SNPE_API -const char* Snpe_PlatformValidator_GetLibVersion(Snpe_PlatformValidator_Handle_t handle); - -/** - * @brief Runs a small program on the runtime and Checks if SNPE is supported for Runtime. - * - * @return If 1, the device is ready for SNPE execution, else return 0. 
- */ -SNPE_API -int Snpe_PlatformValidator_RuntimeCheck(Snpe_PlatformValidator_Handle_t handle, - bool unsignedPD=true); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _PLATFORM_VALIDATOR_H_ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.hpp deleted file mode 100644 index de52635c..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.hpp +++ /dev/null @@ -1,57 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include - -#include "Wrapper.hpp" - -#include "DlSystem/DlEnums.hpp" - - -#include "PlatformValidator/PlatformValidator.h" - - -namespace SNPE { - -class PlatformValidator : public Wrapper { - friend BaseType; - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_PlatformValidator_Delete}; - -public: - PlatformValidator() - : BaseType(Snpe_PlatformValidator_Create()) - { } - - void setRuntime(DlSystem::Runtime_t runtime, bool unsignedPD=true){ - Snpe_PlatformValidator_SetRuntime(handle(), static_cast(runtime), unsignedPD); - } - - bool isRuntimeAvailable(bool unsignedPD=true){ - return Snpe_PlatformValidator_IsRuntimeAvailable(handle(), unsignedPD); - } - - std::string getCoreVersion(){ - return Snpe_PlatformValidator_GetCoreVersion(handle()); - } - - std::string getLibVersion(){ - return Snpe_PlatformValidator_GetLibVersion(handle()); - } - - bool runtimeCheck(bool unsignedPD=true){ - return Snpe_PlatformValidator_RuntimeCheck(handle(), unsignedPD); - } - -}; - -} // ns SNPE - -ALIAS_IN_ZDL_NAMESPACE(SNPE, PlatformValidator) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.h deleted file mode 100644 index 8a2bb7d2..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.h +++ /dev/null @@ -1,85 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -#ifndef _SNPE_APPLICATION_BUFFER_MAP_H_ -#define _SNPE_APPLICATION_BUFFER_MAP_H_ - - -#ifdef __cplusplus -#include -#else -#include -#endif - - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" -#include "DlSystem/StringList.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -typedef void* Snpe_ApplicationBufferMap_Handle_t; - -SNPE_API -Snpe_ApplicationBufferMap_Handle_t Snpe_ApplicationBufferMap_Create(); - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_Delete(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle); - - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_Add(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle, - const char* name, - const uint8_t* buff, - size_t size); - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_AddFloat(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle, - const char* name, - const float* buff, - size_t size); - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_Remove(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle, - const char* name); - -SNPE_API -size_t Snpe_ApplicationBufferMap_Size(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_Clear(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle); - - -SNPE_API -Snpe_StringList_Handle_t Snpe_ApplicationBufferMap_GetUserBufferNames(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle); - - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_GetUserBuffer(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle, - const char* name, - size_t* size, - const uint8_t** data); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_APPLICATION_BUFFER_MAP_H_ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.hpp deleted file mode 100644 index 6ad745bb..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.hpp +++ /dev/null @@ -1,90 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include -#include -#include -#include - -#include "Wrapper.hpp" -#include "DlSystem/StringList.hpp" - -#include "SNPE/ApplicationBufferMap.h" - -namespace PSNPE { - -class ApplicationBufferMap : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_ApplicationBufferMap_Delete}; -public: - ApplicationBufferMap() - : BaseType(Snpe_ApplicationBufferMap_Create()){} - - explicit ApplicationBufferMap(const std::unordered_map> &buffer) - : ApplicationBufferMap(){ - for(const auto &kv: buffer){ - add(kv.first.c_str(), kv.second); - } - } - - void add(const char *name, const std::vector &buff){ - Snpe_ApplicationBufferMap_Add(handle(), name, buff.data(), buff.size()); - } - - void add(const char *name, const std::vector &buff){ - Snpe_ApplicationBufferMap_Add(handle(), name, reinterpret_cast(buff.data()), buff.size()*sizeof(float)); - } - - void remove(const char *name) noexcept{ - Snpe_ApplicationBufferMap_Remove(handle(), name); - } - - size_t size() const noexcept{ - return Snpe_ApplicationBufferMap_Size(handle()); - } - - void clear() noexcept{ - Snpe_ApplicationBufferMap_Clear(handle()); - } - - std::vector getUserBuffer(const char *name) const{ - size_t size{}; - const uint8_t *data{}; - Snpe_ApplicationBufferMap_GetUserBuffer(handle(), name, &size, &data); - - return std::vector(data, data + size); - } - - std::vector operator[](const char *name) const{ - return getUserBuffer(name); - } - - DlSystem::StringList getUserBufferNames() const{ - return moveHandle(Snpe_ApplicationBufferMap_GetUserBufferNames(handle())); - } - - std::unordered_map> getUserBuffer() const{ - std::unordered_map> toret; - for(auto name: getUserBufferNames()){ - toret.emplace(name, getUserBuffer(name)); - } - - return toret; - } - -}; - -} // ns PSNPE - - -ALIAS_IN_ZDL_NAMESPACE(PSNPE, ApplicationBufferMap) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/PSNPE.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/PSNPE.h deleted file mode 100644 index 2358d535..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/PSNPE.h +++ /dev/null @@ -1,898 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022,2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -/** - * @file - */ - -#ifndef _SNPE_PSNPE_H_ -#define _SNPE_PSNPE_H_ - - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "DlContainer/DlContainer.h" -#include "SNPE/ApplicationBufferMap.h" -#include "SNPE/RuntimeConfigList.h" -#include "SNPE/UserBufferList.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/IBufferAttributes.h" - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" - -#include "DlSystem/UserMemoryMap.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * A typedef to indicate the callback PSNPE handle of Async Output mode - */ -typedef void* Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t; - -//SNPE_API -//Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t Snpe_PSNPE_OutputAsyncCallbackParam_Create(size_t index, -// int status, -// const char* errorMsg); -// -//SNPE_API -//Snpe_ErrorCode_t Snpe_PSNPE_OutputAsyncCallbackParam_Delete(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t oacpHandle); - -// NOTE: we don't need _{Create,Delete} functions because the user does not create or delete these handles -// They're passed in to the callback functions they created - -/** - * @brief Get the data index of an output async PSNPE object - * - * @param[in] oacpHandle Handle to access the PSNPE object of output async mode - * - * @return The data idx for output async mode - */ -SNPE_API -size_t Snpe_PSNPE_OutputAsyncCallbackParam_GetDataIdx(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t oacpHandle); - -/** - * @brief Execute an output async PSNPE object - * - * @param[in] oacpHandle Handle to access the PSNPE object of output async mode - * - * @return True if executed successfully with outputAsync mode - */ -SNPE_API -int Snpe_PSNPE_OutputAsyncCallbackParam_GetExecuteStatus(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t oacpHandle); - -/** - * @brief Get the error message during the execution of PSNPE output async mode - * - * @param[in] oacpHandle Handle to access the PSNPE object of output async mode - * - * @return Error message - */ -SNPE_API -const char* Snpe_PSNPE_OutputAsyncCallbackParam_GetErrorMsg(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t oacpHandle); - -/** - * @brief Get the ID of an output async PSNPE object - * - * @param[in] oacpHandle Handle to access the PSNPE object of output async mode - * - * @return The id of an PSNPE object for output async mode - */ -SNPE_API -size_t Snpe_PSNPE_OutputAsyncCallbackParam_GetID(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t oacpHandle); - - - -/** - * A typedef to indicate the output callback of PSNPE handle of input-output async mode - */ -typedef void* Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t; - -/** - * @brief Get the data index of an input-output async PSNPE object - * - * @param[in] oacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return The data index for input-output async mode - */ -SNPE_API -size_t Snpe_PSNPE_InputOutputAsyncCallbackParam_GetDataIdx(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * @brief Execute an input-output async PSNPE object - * - * @param[in] oacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return True if executed successfully with input-output async mode - */ -SNPE_API -int Snpe_PSNPE_InputOutputAsyncCallbackParam_GetExecuteStatus(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * @brief Get the error message during the execution of PSNPE input-output async 
mode - * - * @param[in] oacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return error message - */ -SNPE_API -const char* Snpe_PSNPE_InputOutputAsyncCallbackParam_GetErrorMsg(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * @brief Get the names of output buffers to the network - * - * @param[in] ioacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return Handle of output buffer name list - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_PSNPE_InputOutputAsyncCallbackParam_GetUserBufferNames(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * @brief Get the output buffer map of PSNPE object for input-output async mode - * - * @param[in] ioacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return The reference handle of output ApplicationBufferMap - */ -SNPE_API -Snpe_ApplicationBufferMap_Handle_t Snpe_PSNPE_InputOutputAsyncCallbackParam_GetOutputMap_Ref(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * @brief Get the id of the output callback for input-output async mode - * - * @param[in] oacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return The id for output callback for input-output async mode - */ -SNPE_API -size_t Snpe_PSNPE_InputOutputAsyncCallbackParam_GetID(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * A typedef to indicate the input callback of PSNPE handle of input-output async mode - */ -typedef void* Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t; - -/** - * @brief Get the input list for input callback of input-output async mode - * - * @param[in] ioacpHandle Handle to access the object of input callback of input-output async mode - * - * @return List the inputs - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_PSNPE_InputOutputInputAsyncCallbackParam_GetInputs(Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t ioiacpHandle); - -/** - * @brief Get the input names for input callback of input-output async mode - * - * @param[in] ioacpHandle Handle to access the object of input callback of input-output async mode - * - * @return List the names of input - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_PSNPE_InputOutputInputAsyncCallbackParam_GetInputNames(Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t ioiacpHandle); - -/** - * @brief Get the id of the input callback for input-output async mode - * - * @param[in] oacpHandle Handle to access the object of input-output async mode - * - * @return The id of input callback for input-output async mode - */ -SNPE_API -size_t Snpe_PSNPE_InputOutputInputAsyncCallbackParam_GetID(Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t ioiacpHandle); - -/** - * @brief A struct to indicate userbuffer data type in output callback of input-output async mode - */ -typedef struct{ - /// data for the one output - const uint8_t* data; - /// the data size of this output - size_t size; -} Snpe_UserBufferData_t; - -/** - * @brief Get the output data of the output callback for input-output async mode - * - * @param[in] oacpHandle Handle to access the object of output callback of input-output async mode - * - * @param[in] name The output name of output callback of input-output async mode - * - * @return The output data of output callback for input-output async mode - */ -SNPE_API -Snpe_UserBufferData_t Snpe_PSNPE_InputOutputAsyncCallbackParam_GetUserBuffer(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle, - const 
char* name); -/** - * A typedef to indicate build configuration - */ -typedef void* Snpe_BuildConfig_Handle_t; - -/** - * A typedef to indicate a PSNPE object - */ -typedef void* Snpe_PSNPE_Handle_t; - -/** - * A typedef to indicate if PSNPE object is built in serial or parallel, default = 0 - */ -typedef enum SNPE_API { - SNPE_PSNPE_BUILDMODE_SERIAL = 0, - SNPE_PSNPE_BUILDMODE_PARALLEL = 1 -} Snpe_PSNPE_BuildMode_t; - -/** - * A typedef to indicate if PSNPE objects are executed in sync mode or output async mode or input-output async mode, default = 0 - */ -typedef enum SNPE_API { - SNPE_PSNPE_INPUTOUTPUTTRANSMISSIONMODE_SYNC = 0, - SNPE_PSNPE_INPUTOUTPUTTRANSMISSIONMODE_OUTPUTASYNC = 1, - SNPE_PSNPE_INPUTOUTPUTTRANSMISSIONMODE_INPUTOUTPUTASYNC = 2 -} Snpe_PSNPE_InputOutputTransmissionMode_t; - -// BuildConfig -/** - * @brief Create the object of snpe build config - * - * @return the SNPE build handle - */ -SNPE_API -Snpe_BuildConfig_Handle_t Snpe_BuildConfig_Create(); - -/** - * @brief Release the object of snpe build config - * - * @param[in] buildConfigHandle Handle to access the object of snpe buid config - * - * @return The error of build config result - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_Delete(Snpe_BuildConfig_Handle_t buildConfigHandle); - -/** - * @brief Get the mode of build snpe object, serial or parallel - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The value of Snpe_PSNPE_BuildMode_t - */ -SNPE_API -Snpe_PSNPE_BuildMode_t Snpe_BuildConfig_GetBuildMode(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set the mode of build snpe object, serial or parallel - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] buildMode A typedef of Snpe_PSNPE_BuildMode_t - * - * @return The result of setting mode - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetBuildMode(Snpe_BuildConfig_Handle_t bcHandle, Snpe_PSNPE_BuildMode_t buildMode); - -/** - * @brief Set the dlc model - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] dlcHandle A handle of snpe DLC container - * - * @return The result of setting dlc model - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetContainer(Snpe_BuildConfig_Handle_t bcHandle, Snpe_DlContainer_Handle_t dlcHandle); - -/** - * @brief Get dlc container in snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The reference handle of DLC container - */ -SNPE_API -Snpe_DlContainer_Handle_t Snpe_BuildConfig_GetContainer_Ref(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set output buffer names in snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] slHandle A handle of the output layer name list - * - * @return The result of setting output names - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetOutputBufferNames(Snpe_BuildConfig_Handle_t bcHandle, Snpe_StringList_Handle_t slHandle); - -/** - * @brief Get output buffer names in snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The reference handle of output buffer name list. - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_BuildConfig_GetOutputBufferNames_Ref(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set output buffer names in snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] slHandle List of tensor names to output. 
An empty list will result in producing output for the final output tensor of the model. The list will be copied - * - * @return The result of setting output tensors - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetOutputTensors(Snpe_BuildConfig_Handle_t bcHandle, Snpe_StringList_Handle_t slHandle); - -/** - * @brief Get output tensors in snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The reference handle of output tensor list - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_BuildConfig_GetOutputTensors_Ref(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set runtime config list for snpe buildConfig - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] rclHandle Handle to access the object of runtime config list - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetRuntimeConfigList(Snpe_BuildConfig_Handle_t bcHandle, Snpe_RuntimeConfigList_Handle_t rclHandle); - -/** - * @brief Get runtime config list for snpe buildConfig - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The reference handle of runtime config list - */ -SNPE_API -Snpe_RuntimeConfigList_Handle_t Snpe_BuildConfig_GetRuntimeConfigList_Ref(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Get input thread number of input data for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The number of input thread - */ -SNPE_API -size_t Snpe_BuildConfig_GetInputThreadNumbers(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set input thread number of input data for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] threadNumbers The number of input thread for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputThreadNumbers(Snpe_BuildConfig_Handle_t bcHandle, size_t threadNumbers); - -/** - * @brief Get output thread number of output data for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The number of output thread - */ -SNPE_API -size_t Snpe_BuildConfig_GetOutputThreadNumbers(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set output thread number of output data for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] threadNumbers The number of output thread for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetOutputThreadNumbers(Snpe_BuildConfig_Handle_t bcHandle, size_t threadNumbers); - -/** - * @brief Set output callback for output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] callbackFunc The ouutput callback function for output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetOutputCallback(Snpe_BuildConfig_Handle_t bcHandle, - void (*callbackFunc)(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t)); -/** - * @brief Set the id of output callback function for output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] id The id of output callback function - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t 
Snpe_BuildConfig_SetOutputCallbackID(Snpe_BuildConfig_Handle_t bcHandle, size_t id); - -/** - * @brief Set the inside output callback handle to NULL for output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_ClearOutputCallback(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set output callback for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] callbackFunc The output callback function for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputOutputCallback(Snpe_BuildConfig_Handle_t bcHandle, - void (*callbackFunc)(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t)); - -/** - * @brief Set the id of output callback function for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] id The id of output callback function for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputOutputCallbackID(Snpe_BuildConfig_Handle_t bcHandle, size_t id); - -/** - * @brief Set the inside output callback handle to NULL for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_ClearInputOutputCallback(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set input callback for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] callbackFunc The input callback function for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputOutputInputCallback(Snpe_BuildConfig_Handle_t bcHandle, - Snpe_ApplicationBufferMap_Handle_t (*callbackFunc)( - Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t - ) - ); - -/** - * @brief Set the id of input callback function for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] id The id of input callback function for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputOutputInputCallbackID(Snpe_BuildConfig_Handle_t bcHandle, size_t id); - -/** - * @brief Set the inside input callback handle to NULL for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_ClearInputOutputInputCallback(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set the input and output transmission mode including sync mode, output async mode and input-output async mode, defult is sync mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] iotMode The typedef of Snpe_PSNPE_InputOutputTransmissionMode_t - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputOutputTransmissionMode(Snpe_BuildConfig_Handle_t bcHandle, - Snpe_PSNPE_InputOutputTransmissionMode_t iotMode); - -/** - * @brief Get the input and output transmission mode including sync mode, output async mode and input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - 
* @return The typedef of Snpe_PSNPE_InputOutputTransmissionMode_t - */ -SNPE_API -Snpe_PSNPE_InputOutputTransmissionMode_t Snpe_BuildConfig_GetInputOutputTransmissionMode(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set the profiling level for PSNPE build config, default is SNPE_PROFILING_LEVEL_OFF - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] profilingLevel The typedef of Snpe_ProfilingLevel_t - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetProfilingLevel(Snpe_BuildConfig_Handle_t bcHandle, Snpe_ProfilingLevel_t profilingLevel); - -/** - * @brief Get the profiling level for PSNPE build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The typedef of Snpe_ProfilingLevel_t - */ -SNPE_API -Snpe_ProfilingLevel_t Snpe_BuildConfig_GetProfilingLevel(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief To be deprecated, set the encode value when you want to divide one image into 2 or 4 parts to run, default is 0 which means the input don't need dividing. - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] encode0 The uint64 value of encode0 - * - * @param[in] encode1 The uint64 value of encode1 - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetEncode(Snpe_BuildConfig_Handle_t bcHandle, uint64_t encode0, uint64_t encode1); - -/** - * @brief To be deprecated, set the encode0 value for snpe build config which is a special feature used in SM8250 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] encode0 The uint64 value of encode0 - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetEncode0(Snpe_BuildConfig_Handle_t bcHandle, uint64_t encode0); - -/** - * @brief To be deprecated, set the encode1 value for snpe build config which is a special feature used in SM8250 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] encode1 The uint64 value of encode1 - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetEncode1(Snpe_BuildConfig_Handle_t bcHandle, uint64_t encode1); - -/** - * @brief To be deprecated, get the encode0 and encode1 value for snpe build config which is a special feature used in SM8250 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The uint64 value of encode - */ -SNPE_API -uint64_t* Snpe_BuildConfig_GetEncode(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief To be deprecated, get the encode0 value for snpe build config which is a special feature used in SM8250 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The uint64 value of encode0 - */ -SNPE_API -uint64_t Snpe_BuildConfig_GetEncode0(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief To be deprecated, get the encode1 value for snpe build config which is a special feature used in SM8250 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The uint64 value of encode1 - */ -SNPE_API -uint64_t Snpe_BuildConfig_GetEncode1(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set true or false for enabling init cache for snpe build config, enabling init cache = 1 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] enableInitCache True for enabing init cache - * 
- * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetEnableInitCache(Snpe_BuildConfig_Handle_t bcHandle, int enableInitCache); - -/** - * @brief Get the status of enabling init cache for snpe build config, enabling init cache = 1. - * - * @param[in] bcHandle Handle to access the object of snpe build config - * - * @return 1 or 0 for enabling init cache - */ -SNPE_API -int Snpe_BuildConfig_GetEnableInitCache(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set the optional platform features for snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe build config - * - * @param[in] platformOptions Options as a const char* - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetPlatformOptions(Snpe_BuildConfig_Handle_t bcHandle, const char* platformOptions); - -/** - * @brief Get the optional platform features for snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe build config - * - * @return Options as a const char* - */ -SNPE_API -const char* Snpe_BuildConfig_GetPlatformOptions(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set the directory where the output diag log should be saved - * - * @param[in] bcHandle Handle to access the object of snpe build config - * - * @param[in] diaglogOutputDir The string directory - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetDiaglogOutputDir(Snpe_BuildConfig_Handle_t bcHandle, const char* diaglogOutputDir); - -/** - * @brief Get the directory of the output diag log - * - * @param[in] bcHandle Handle to access the object of snpe build config - * - * @return The string directory - */ -SNPE_API -const char* Snpe_BuildConfig_GetDiaglogOutputDir(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Create the handle of PSNPE object - * - * @return The handle of PSNPE object - */ -SNPE_API -Snpe_PSNPE_Handle_t Snpe_PSNPE_Create(); - -/** - * @brief Release the handle of PSNPE object - * - * @param[in] psnpeHandle Handle to access the PSNPE object - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_Delete(Snpe_PSNPE_Handle_t psnpeHandle); - -/** - * @brief Build the instance of the PSNPE object according to the snpe build config - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_Build(Snpe_PSNPE_Handle_t psnpeHandle, Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Execute PSNPE object for sync mode.
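For orientation, a minimal sketch of how the BuildConfig and PSNPE C functions documented here fit together for a synchronous run; the DLC container handle, runtime config list and user buffer lists are assumed to be created elsewhere with the corresponding SNPE C APIs, which are not part of this header.

#include "SNPE/PSNPE.h"

/* Sketch only: build a PSNPE instance from a build config and run one
 * synchronous execution. Returns 1 on success, 0 on failure. */
static int run_psnpe_sync(Snpe_DlContainer_Handle_t containerHandle,
                          Snpe_RuntimeConfigList_Handle_t runtimeConfigs,
                          Snpe_UserBufferList_Handle_t inputs,
                          Snpe_UserBufferList_Handle_t outputs)
{
    Snpe_BuildConfig_Handle_t bc = Snpe_BuildConfig_Create();
    Snpe_BuildConfig_SetBuildMode(bc, SNPE_PSNPE_BUILDMODE_PARALLEL);
    Snpe_BuildConfig_SetContainer(bc, containerHandle);
    Snpe_BuildConfig_SetRuntimeConfigList(bc, runtimeConfigs);
    Snpe_BuildConfig_SetInputOutputTransmissionMode(bc, SNPE_PSNPE_INPUTOUTPUTTRANSMISSIONMODE_SYNC);
    Snpe_BuildConfig_SetEnableInitCache(bc, 1);           /* 1 = enable init caching */

    Snpe_PSNPE_Handle_t psnpe = Snpe_PSNPE_Create();
    int ok = Snpe_PSNPE_Build(psnpe, bc) == SNPE_SUCCESS  /* on failure, see Snpe_PSNPE_GetLastErrorString */
          && Snpe_PSNPE_Execute(psnpe, inputs, outputs) == SNPE_SUCCESS;

    Snpe_PSNPE_Delete(psnpe);
    Snpe_BuildConfig_Delete(bc);
    return ok;
}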
- * - * @param[in] psnpeHandle Handle to access the PSNPE object - * - * @param[in] inputBufferListHandle Handle to access the input user buffer list - * - * @param[in] outputBufferListHandle Handle to access the output user buffer list - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_Execute(Snpe_PSNPE_Handle_t psnpeHandle, - Snpe_UserBufferList_Handle_t inputBufferListHandle, - Snpe_UserBufferList_Handle_t outputBufferListHandle); - -/** - * @brief Execute PSNPE object for input-output async mode - * - * @param[in] psnpeHandle Handle to access the PSNPE object - * - * @param[in] inputMapHandle Handle to access the input buffer map - * - * @param[in] dataIndex The index of input data - * - * @param[in] isTF8buff If the input buffer is TF8 - * - * @param[in] isTF8Outputbuff If the output buffer is TF8 - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_ExecuteInputOutputAsync(Snpe_PSNPE_Handle_t psnpeHandle, - Snpe_StringList_Handle_t inputMapHandle, - size_t dataIndex, - int isTF8buff, - int isTF8Outputbuff); - -/** - * @brief Get the input tensor names for PSNPE object. - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @return The string list of input tensor names - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_PSNPE_GetInputTensorNames(Snpe_PSNPE_Handle_t psnpeHandle); - -/** - * @brief Get the output tensor names for PSNPE object - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @return The string list of output tensor names - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_PSNPE_GetOutputTensorNames(Snpe_PSNPE_Handle_t psnpeHandle); - -/** - * @brief Get the input dimension shape for PSNPE object - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @return The tensor shape of input dimension - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_PSNPE_GetInputDimensions(Snpe_PSNPE_Handle_t psnpeHandle); - -/** - * @brief Get the input dimension shape for the specific input name for PSNPE object - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @param[in] name The name of input data - * - * @return The tensor shape of a specific input name - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_PSNPE_GetInputDimensions_Name(Snpe_PSNPE_Handle_t psnpeHandle, const char* name); - -/** - * @brief Get the number of elements in each dimension for input and output buffer - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @param[in] name The name of input and output buffer - * - * @return Dimension size - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_PSNPE_GetBufferAttributesDims(Snpe_PSNPE_Handle_t psnpeHandle, const char* name); - -/* To be deprecated, please use new api Snpe_PSNPE_RegisterUserMemoryMappedBuffers */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_RegisterIonBuffers(Snpe_PSNPE_Handle_t psnpeHandle, Snpe_UserMemoryMap_Handle_t ionBufferMapHandle); - -/* To be deprecated, please use new api Snpe_PSNPE_DeregisterUserMemoryMappedBuffers */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_DeregisterIonBuffers(Snpe_PSNPE_Handle_t psnpeHandle, Snpe_StringList_Handle_t ionBufferNames); - -/** - * @brief Register Client Memory-Mapped Buffers (Example ION buffers in Android) - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] bufferMapHandle A UserMemoryMap of virtual addresses - * - * @note UserBuffer type passed for registration must match the data type of the tensor in the dlc - * For regular UserBuffers SNPE performs an online data 
conversion (quantization or - * dequantization etc). This is not possible for memory mapped buffers hence can lead to - * issues during execution or accuracy degradation - * - * @return SNPE_SUCCESS upon successful memory mapped buffer registration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_RegisterUserMemoryMappedBuffers(Snpe_PSNPE_Handle_t psnpeHandle, Snpe_UserMemoryMap_Handle_t bufferMapHandle); - -/** - * @brief Deregister Client Memory-Mapped Buffers (Example ION buffers in Android) - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] bufferNamesHandle A StringList of memory mapped buffer names - * - * @return SNPE_SUCCESS upon successful memory mapped buffer deregistration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_DeregisterUserMemoryMappedBuffers(Snpe_PSNPE_Handle_t psnpeHandle, Snpe_StringList_Handle_t bufferNamesHandle); - -/** - * @brief Get the error message during the failed execution - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @return The error message - */ -SNPE_API -const char* Snpe_PSNPE_GetLastErrorString(Snpe_PSNPE_Handle_t psnpeHandle); - -/** - * @brief Get the handle of IBufferAttributes - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @param[in] name The name of attribute buffer - * - * @return Handle to access IBufferAttributes - */ -SNPE_API -Snpe_IBufferAttributes_Handle_t Snpe_PSNPE_GetInputOutputBufferAttributes(Snpe_PSNPE_Handle_t psnpeHandle, const char *name); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_PSNPE_H_ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/PSNPE.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/PSNPE.hpp deleted file mode 100644 index bd3af1ac..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/PSNPE.hpp +++ /dev/null @@ -1,537 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include -#include -#include -#include -#include - - -#include "Wrapper.hpp" - - -#include "DlSystem/DlEnums.hpp" -#include "DlSystem/DlVersion.hpp" -#include "DlSystem/StringList.hpp" -#include "DlSystem/DlOptional.hpp" -#include "DlSystem/IBufferAttributes.hpp" -#include "DlSystem/UserMemoryMap.hpp" - -#include "SNPE/UserBufferList.hpp" -#include "SNPE/ApplicationBufferMap.hpp" -#include "SNPE/RuntimeConfigList.hpp" -#include "DlContainer/IDlContainer.hpp" - -#include "SNPE/RuntimeConfigList.hpp" - - -#include "SNPE/PSNPE.h" - -namespace PSNPE{ - -enum BuildMode { - SERIAL = 0, - PARALLEL = 1 -}; -/** - * @brief Input and output transmission mode - */ -enum InputOutputTransmissionMode { - sync = 0, - outputAsync = 1, - inputOutputAsync = 2 -}; - - -struct OutputAsyncCallbackParam : public Wrapper { -private: - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{NoOpDeleter}; - - - template - using DataIndexReference = WrapperDetail::GenericConstMemberReference - ; - - - template - using ExecuteStatusReference = WrapperDetail::GenericConstMemberReference - >; - - - static std::string ErrMsgGetter(Snpe_DlVersion_Handle_t handle){ - return Snpe_PSNPE_OutputAsyncCallbackParam_GetErrorMsg(handle); - } - template - using ErrorMsgReference = WrapperDetail::GenericConstMemberReference - ; - - template - using CallbackIDReference = WrapperDetail::GenericConstMemberReference - ; - - - - -public: - OutputAsyncCallbackParam() = delete; - OutputAsyncCallbackParam(OutputAsyncCallbackParam&& other) noexcept - : BaseType(std::move(other)) - { } - - DataIndexReference dataIndex{*this}; - ExecuteStatusReference executeStatus{*this}; - ErrorMsgReference errorMsg{*this}; - - CallbackIDReference callbackID{*this}; -}; - - - -struct InputOutputInputAsyncCallbackParam : public Wrapper { -private: - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{NoOpDeleter}; - - - static std::vector GetInputs(HandleType handle){ - DlSystem::StringList inputs(moveHandle(Snpe_PSNPE_InputOutputInputAsyncCallbackParam_GetInputs(handle))); - - return std::vector(inputs.begin(), inputs.end()); - } - - template - using InputsReference = WrapperDetail::GenericConstMemberReference - ; - - - static DlSystem::StringList GetInputNames(HandleType handle){ - return moveHandle(Snpe_PSNPE_InputOutputInputAsyncCallbackParam_GetInputNames(handle)); - } - template - using InputNamesReference = WrapperDetail::GenericConstMemberReference - ; - - template - using CallbackIDReference = WrapperDetail::GenericConstMemberReference - ; - - -public: - InputOutputInputAsyncCallbackParam() = delete; - InputOutputInputAsyncCallbackParam(InputOutputInputAsyncCallbackParam&& other) noexcept - : BaseType(std::move(other)) - { } - - InputsReference> inputs{*this}; - InputNamesReference inputNames{*this}; - CallbackIDReference callbackID{*this}; - -}; - - - - - -struct InputOutputAsyncCallbackParam : public Wrapper { -private: - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment 
operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{NoOpDeleter}; - - template - using DataIndexReference = WrapperDetail::GenericConstMemberReference - ; - - static bool GetExecuteStatus(HandleType handle){ - return Snpe_PSNPE_InputOutputAsyncCallbackParam_GetExecuteStatus(handle); - } - template - using ExecuteStatusReference = WrapperDetail::GenericConstMemberReference - ; - - static std::string ErrMsgGetter(Snpe_DlVersion_Handle_t handle){ - return Snpe_PSNPE_OutputAsyncCallbackParam_GetErrorMsg(handle); - } - template - using ErrorMsgReference = WrapperDetail::GenericConstMemberReference - ; - - - - // This should work - static ApplicationBufferMap GetOutputMap(HandleType handle){ - return moveHandle(Snpe_PSNPE_InputOutputAsyncCallbackParam_GetOutputMap_Ref(handle), true); - } - - template - using OutputMapReference = WrapperDetail::GenericConstMemberReference - ; - - template - using CallbackIDReference = WrapperDetail::GenericConstMemberReference - ; - -public: - - InputOutputAsyncCallbackParam(InputOutputAsyncCallbackParam&& other) noexcept - : BaseType(std::move(other)) - { } - - DataIndexReference dataIndex{*this}; - OutputMapReference outputMap{*this}; /// OOOH, this will be super tricky to not have a copy every time - ExecuteStatusReference executeStatus{*this}; - ErrorMsgReference errorMsg{*this}; - CallbackIDReference callbackID{*this}; -}; - -/** - * @brief This callback is called when the output data is ready, only use for Output Async mode - */ -using OutputAsyncCallbackFunc = std::function; -/** - * @brief This callback is called when the output data is ready, only use for Output-Input Async mode - */ -using InputOutputAsyncCallbackFunc = std::function; -/** - * @brief This callback is called when the input data is ready,only use for Output-Input Async mode - */ -using InputOutputAsyncInputCallback = std::function(InputOutputInputAsyncCallbackParam)>; - - -struct BuildConfig final { - BuildMode buildMode = BuildMode::SERIAL; ///< Specify build in serial mode or parallel mode - zdl::DlContainer::IDlContainer* container;///< The opened container ptr - zdl::DlSystem::StringList outputBufferNames;///< Specify the output layer name - zdl::DlSystem::StringList outputTensors;///< Specify the output layer name - RuntimeConfigList runtimeConfigList;///< The runtime config list for PSNPE, @see RuntimeConfig - size_t inputThreadNumbers = 1;///< Specify the number of threads used in the execution phase to process input data, only used in inputOutputAsync mode - size_t outputThreadNumbers = 1;///< Specify the number of threads used in the execution phase to process output data, only used in inputOutputAsync and outputAsync mode - OutputAsyncCallbackFunc outputCallback;///< The callback to deal with output data ,only used in outputAsync mode - InputOutputAsyncCallbackFunc inputOutputCallback;///< The callback to deal with output data ,only used in inputOutputAsync mode - InputOutputAsyncInputCallback inputOutputInputCallback;///< The callback to deal with input data ,only used in inputOutputAsync mode - InputOutputTransmissionMode inputOutputTransmissionMode = InputOutputTransmissionMode::sync;///< Specify execution mode - zdl::DlSystem::ProfilingLevel_t profilingLevel = zdl::DlSystem::ProfilingLevel_t::OFF;///< Specify profiling level for Diaglog - uint64_t encode[2] = {0, 0}; - bool enableInitCache = false; - std::string platformOptions; - std::string diaglogOutputDir = "./diaglogs/"; ///< Specify a diaglog output directory to save 
the generated Diaglog files. - - size_t callbackID{}; -}; - - - - - -class PSNPE : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_PSNPE_Delete}; -// struct BuildConfigInternal : public Wrapper{ -// -// }; -public: - PSNPE() - : BaseType(Snpe_PSNPE_Create()) - { } - -private: - - template - static std::unordered_map& getCallbackMap(){ - static std::unordered_map toret; - return toret; - } - template - static std::mutex& getCallbackMapMutex(){ - static std::mutex mtx; - return mtx; - } - - static void outputCallbackTrampoline(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t paramHandle){ - OutputAsyncCallbackParam param(moveHandle(paramHandle)); - std::function callback; - { - std::lock_guard lk(getCallbackMapMutex()); - callback = getCallbackMap()[param.callbackID]; - } - callback(std::move(param)); - } - static void inputOutputCallbackTrampoline(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t paramHandle){ - InputOutputAsyncCallbackParam param(moveHandle(paramHandle)); - std::function callback; - { - std::lock_guard lk(getCallbackMapMutex()); - callback = getCallbackMap()[param.callbackID]; - } - callback(std::move(param)); - } - - static Snpe_ApplicationBufferMap_Handle_t inputOutputInputCallbackTrampoline( - Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t paramHandle - ){ - InputOutputInputAsyncCallbackParam param(moveHandle(paramHandle)); - - std::function(InputOutputInputAsyncCallbackParam)> callback; - { - std::lock_guard lk(getCallbackMapMutex()); - callback = getCallbackMap()[param.callbackID]; - } - auto abm = callback(std::move(param)); - return WrapperDetail::HandleReleaser::release(*abm); - } - - template - class CallbackIdManager{ - public: - ~CallbackIdManager(){ - clear(); - } - std::pair registerCallback(WrapperCallbackType func){ - size_t id = get(); - - std::lock_guard lk(getCallbackMapMutex()); - getCallbackMap()[id] = std::move(func); - return {id, CapiCallback}; - } - private: - size_t m_CallbackId{}; - - void clear(){ - if(m_CallbackId){ - std::lock_guard lk(getCallbackMapMutex()); - getCallbackMap().erase(m_CallbackId); - } - } - - size_t get(){ - static std::atomic id{0}; - clear(); - m_CallbackId = ++id; - return m_CallbackId; - } - - }; - CallbackIdManager outputCallbackIdManager; - - CallbackIdManager inputOutputCallbackIdManager; - - CallbackIdManager inputOutputInputCallbackIdManager; - - -public: - - - - bool build(BuildConfig& buildConfig) noexcept{ - // Copy the BuildConfig across the CAPI boundary - - Snpe_BuildConfig_Handle_t bcHandle = Snpe_BuildConfig_Create(); - - Snpe_BuildConfig_SetBuildMode(bcHandle, static_cast(buildConfig.buildMode)); - Snpe_BuildConfig_SetContainer(bcHandle, getHandle(buildConfig.container)); - Snpe_BuildConfig_SetOutputBufferNames(bcHandle, getHandle(buildConfig.outputBufferNames)); - Snpe_BuildConfig_SetOutputTensors(bcHandle, getHandle(buildConfig.outputTensors)); - Snpe_BuildConfig_SetRuntimeConfigList(bcHandle, getHandle(buildConfig.runtimeConfigList)); - - Snpe_BuildConfig_SetInputThreadNumbers(bcHandle, buildConfig.inputThreadNumbers); - Snpe_BuildConfig_SetOutputThreadNumbers(bcHandle, buildConfig.outputThreadNumbers); - - - if(buildConfig.outputCallback){ - auto id_callback = outputCallbackIdManager.registerCallback(buildConfig.outputCallback); - Snpe_BuildConfig_SetOutputCallbackID(bcHandle, 
id_callback.first); - Snpe_BuildConfig_SetOutputCallback(bcHandle, id_callback.second); - } - - if(buildConfig.inputOutputCallback){ - auto id_callback = inputOutputCallbackIdManager.registerCallback(buildConfig.inputOutputCallback); - Snpe_BuildConfig_SetInputOutputCallbackID(bcHandle, id_callback.first); - Snpe_BuildConfig_SetInputOutputCallback(bcHandle, id_callback.second); - } - - if(buildConfig.inputOutputInputCallback){ - auto id_callback = inputOutputInputCallbackIdManager.registerCallback(buildConfig.inputOutputInputCallback); - Snpe_BuildConfig_SetInputOutputInputCallbackID(bcHandle, id_callback.first); - Snpe_BuildConfig_SetInputOutputInputCallback(bcHandle, id_callback.second); - } - - - Snpe_BuildConfig_SetInputOutputTransmissionMode(bcHandle, - static_cast(buildConfig.inputOutputTransmissionMode)); - - Snpe_BuildConfig_SetProfilingLevel(bcHandle, static_cast(buildConfig.profilingLevel)); - Snpe_BuildConfig_SetEncode(bcHandle, buildConfig.encode[0], buildConfig.encode[1]); - Snpe_BuildConfig_SetEnableInitCache(bcHandle, buildConfig.enableInitCache); - Snpe_BuildConfig_SetPlatformOptions(bcHandle, buildConfig.platformOptions.c_str()); - Snpe_BuildConfig_SetDiaglogOutputDir(bcHandle, buildConfig.diaglogOutputDir.c_str()); - - - auto status = Snpe_PSNPE_Build(handle(), bcHandle); - Snpe_BuildConfig_Delete(bcHandle); - - - return status == SNPE_SUCCESS; - } - - /** - * @brief Execute snpe instances in Async Output mode and Sync mode - * - * @param[in] inputBufferList A list of user buffers that contains the input data - * - * @param[in,out] outputBufferList A list of user buffers that will hold the output data - * - */ - bool execute(UserBufferList& inputBufferList, UserBufferList& outputBufferList) noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_Execute(handle(), getHandle(inputBufferList), getHandle(outputBufferList)); - } - - /** - * @brief Execute snpe instances in Async Input/Output mode - * - * @param[in]inputMap A map of input buffers that contains input data. The names of buffers - * need to be matched with names retrived through getInputTensorNames() - * - * @param dataIndex Index of the input data - * - * @param isTF8buff Whether prefer to using 8 bit quantized element for inference - * - * @return True if executed successfully; flase, otherwise. - */ - bool executeInputOutputAsync(const DlSystem::StringList& inputMap, size_t dataIndex, bool isTF8buff, bool isTF8Outputbuff) noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_ExecuteInputOutputAsync(handle(), getHandle(inputMap), dataIndex, isTF8buff, isTF8Outputbuff); - } - bool executeInputOutputAsync(const std::vector& inputMap, size_t dataIndex, bool isTF8buff, bool isTF8Outputbuff) noexcept{ - DlSystem::StringList sl(inputMap.size()); - for(auto&& e : inputMap) sl.append(e.c_str()); - return executeInputOutputAsync(sl, dataIndex, isTF8buff, isTF8Outputbuff); - } - - bool executeInputOutputAsync(const DlSystem::StringList& inputMap, size_t dataIndex, bool isTF8buff) noexcept{ - return executeInputOutputAsync(inputMap, dataIndex, isTF8buff, isTF8buff); - } - bool executeInputOutputAsync(const std::vector& inputMap, size_t dataIndex, bool isTF8buff) noexcept{ - return executeInputOutputAsync(inputMap, dataIndex, isTF8buff, isTF8buff); - } - - - - /** - * @brief Returns the input layer names of the network. 
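The same flow through the C++ wrapper above, as a minimal sketch; the opened IDlContainer pointer, the populated RuntimeConfigList and the UserBufferList objects are assumed to be prepared elsewhere (their wrapper headers are included above but not reproduced in this diff), and the exact namespace qualification of UserBufferList is an assumption.

#include "SNPE/PSNPE.hpp"

// Sketch only: build a PSNPE instance in parallel mode and execute synchronously.
bool buildAndRunSync(zdl::DlContainer::IDlContainer* container,
                     const PSNPE::RuntimeConfigList& runtimeConfigs,
                     PSNPE::UserBufferList& inputs,      // namespace assumed for UserBufferList
                     PSNPE::UserBufferList& outputs)
{
    PSNPE::BuildConfig config;
    config.buildMode = PSNPE::BuildMode::PARALLEL;
    config.container = container;
    config.runtimeConfigList = runtimeConfigs;           // copied into the config
    config.inputOutputTransmissionMode = PSNPE::InputOutputTransmissionMode::sync;
    config.enableInitCache = true;

    PSNPE::PSNPE psnpe;
    if (!psnpe.build(config))
        return false;                                    // psnpe.getLastErrorString() has details

    return psnpe.execute(inputs, outputs);               // sync mode: blocks until outputs are filled
}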
- * - * @return StringList which contains the input layer names - */ - const DlSystem::StringList getInputTensorNames() const noexcept{ - return moveHandle(Snpe_PSNPE_GetInputTensorNames(handle())); - } - - /** - * @brief Returns the output layer names of the network. - * - * @return StringList which contains the output layer names - */ - const DlSystem::StringList getOutputTensorNames() const noexcept{ - return moveHandle(Snpe_PSNPE_GetOutputTensorNames(handle())); - } - - /** - * @brief Returns the input tensor dimensions of the network. - * - * @return TensorShape which contains the dimensions. - */ - const DlSystem::TensorShape getInputDimensions() const noexcept{ - return moveHandle(Snpe_PSNPE_GetInputDimensions(handle())); - } - - const zdl::DlSystem::TensorShape getInputDimensions(const char *name) const noexcept{ - return moveHandle(Snpe_PSNPE_GetInputDimensions_Name(handle(), name)); - } - - /** - * @brief Returns attributes of buffers. - * - * @see zdl::SNPE - * - * @return BufferAttributes of input/output tensor named. - */ - zdl::DlSystem::TensorShape getBufferAttributesDims(const char *name) const noexcept{ - return moveHandle(Snpe_PSNPE_GetBufferAttributesDims(handle(), name)); - } - - DlSystem::Optional getInputOutputBufferAttributes(const char *name) const noexcept{ - return { - new DlSystem::IBufferAttributes(moveHandle(Snpe_PSNPE_GetInputOutputBufferAttributes(handle(), name))), - DlSystem::Optional::LIFECYCLE::POINTER_OWNED - }; - } - /* To be deprecated, please use new api registerMemoryMappedBuffers */ - bool registerIonBuffers(const DlSystem::UserMemoryMap& ionBufferMap) const noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_RegisterIonBuffers(handle(), getHandle(ionBufferMap)); - } - /* To be deprecated, please use new api deregisterMemoryMappedBuffers */ - bool deregisterIonBuffers(const DlSystem::StringList& ionBufferNames) const noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_DeregisterIonBuffers(handle(), getHandle(ionBufferNames)); - } - - bool registerMemoryMappedBuffers(const DlSystem::UserMemoryMap& memoryMappedBufferMap) noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_RegisterUserMemoryMappedBuffers(handle(), getHandle(memoryMappedBufferMap)); - } - - bool deregisterMemoryMappedBuffers(const DlSystem::StringList& bufferNames) noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_DeregisterUserMemoryMappedBuffers(handle(), getHandle(bufferNames)); - } - - const char* getLastErrorString(){ - return Snpe_PSNPE_GetLastErrorString(handle()); - } - -private: - PSNPE(const PSNPE&) = delete; - PSNPE& operator=(const PSNPE&) = delete; - -}; - -} // ns PSNPE - - - -ALIAS_IN_ZDL_NAMESPACE(PSNPE, BuildMode) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, InputOutputTransmissionMode) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, OutputAsyncCallbackParam) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, InputOutputAsyncCallbackParam) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, InputOutputInputAsyncCallbackParam) - -ALIAS_IN_ZDL_NAMESPACE(PSNPE, OutputAsyncCallbackFunc) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, InputOutputAsyncCallbackFunc) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, InputOutputAsyncInputCallback) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, BuildConfig) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, PSNPE) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.h deleted file mode 100644 index 59295d59..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.h +++ /dev/null @@ 
-1,118 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef _SNPE_RUNTIME_CONFIG_LIST_H_ -#define _SNPE_RUNTIME_CONFIG_LIST_H_ - - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" - -#include "DlSystem/DlEnums.h" -#include "DlSystem/RuntimeList.h" -#include "DlSystem/TensorShapeMap.h" - - -#ifdef __cplusplus -extern "C" { -#endif - -typedef void* Snpe_RuntimeConfig_Handle_t; - -SNPE_API -Snpe_RuntimeConfig_Handle_t Snpe_RuntimeConfig_Create(); - -SNPE_API -Snpe_RuntimeConfig_Handle_t Snpe_RuntimeConfig_CreateCopy(Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_Delete(Snpe_RuntimeConfig_Handle_t rcHandle); - - -SNPE_API -Snpe_Runtime_t Snpe_RuntimeConfig_GetRuntime(Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_SetRuntime(Snpe_RuntimeConfig_Handle_t rcHandle, Snpe_Runtime_t runtime); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_SetRuntimeList(Snpe_RuntimeConfig_Handle_t rcHandle, Snpe_RuntimeList_Handle_t rlHandle); - -SNPE_API -Snpe_RuntimeList_Handle_t Snpe_RuntimeConfig_GetRuntimeList_Ref(Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_PerformanceProfile_t Snpe_RuntimeConfig_GetPerformanceProfile(Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_SetPerformanceProfile(Snpe_RuntimeConfig_Handle_t rcHandle, Snpe_PerformanceProfile_t perfProfile); - -SNPE_API -int Snpe_RuntimeConfig_GetEnableCPUFallback(Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_SetEnableCPUFallback(Snpe_RuntimeConfig_Handle_t rcHandle, int enableCpuFallback); - - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_SetInputDimensionsMap(Snpe_RuntimeConfig_Handle_t rcHandle, Snpe_TensorShapeMap_Handle_t tsmHandle); - -SNPE_API -Snpe_TensorShapeMap_Handle_t Snpe_RuntimeConfig_GetInputDimensionsMap_Ref(Snpe_RuntimeConfig_Handle_t rcHandle); - - - -typedef void* Snpe_RuntimeConfigList_Handle_t; - -SNPE_API -Snpe_RuntimeConfigList_Handle_t Snpe_RuntimeConfigList_Create(); - -SNPE_API -Snpe_RuntimeConfigList_Handle_t Snpe_RuntimeConfigList_CreateSize(size_t size); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfigList_Delete(Snpe_RuntimeConfigList_Handle_t rclHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfigList_PushBack(Snpe_RuntimeConfigList_Handle_t rclHandle, Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_RuntimeConfig_Handle_t Snpe_RuntimeConfigList_At_Ref(Snpe_RuntimeConfigList_Handle_t rclHandle, size_t idx); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfigList_Assign(Snpe_RuntimeConfigList_Handle_t rclSrcHandle, Snpe_RuntimeConfigList_Handle_t rclDstHandle); - -SNPE_API -size_t Snpe_RuntimeConfigList_Size(Snpe_RuntimeConfigList_Handle_t rclHandle); - -SNPE_API -size_t 
Snpe_RuntimeConfigList_Capacity(Snpe_RuntimeConfigList_Handle_t rclHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfigList_Clear(Snpe_RuntimeConfigList_Handle_t rclHandle); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_RUNTIME_CONFIG_LIST_H_ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.hpp deleted file mode 100644 index faf052c5..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.hpp +++ /dev/null @@ -1,153 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - - -#include "DlSystem/DlEnums.hpp" -#include "DlSystem/RuntimeList.hpp" -#include "DlSystem/TensorShapeMap.hpp" - - -#include "SNPE/RuntimeConfigList.h" - -namespace PSNPE { - - - -struct RuntimeConfig : public Wrapper { -private: - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_RuntimeConfig_Delete}; - - template - using RuntimeReference = WrapperDetail::GenericMemberReference - , - CastingSetter >; - - - template - using RuntimeListReference = WrapperMemberReference< - RuntimeListType, - Snpe_RuntimeList_Handle_t, - Snpe_RuntimeConfig_GetRuntimeList_Ref, - Snpe_RuntimeConfig_SetRuntimeList - >; - - template - using InputDimensionsMapReference = WrapperMemberReference< - InputDimensionsMapType, - Snpe_TensorShapeMap_Handle_t, - Snpe_RuntimeConfig_GetInputDimensionsMap_Ref, - Snpe_RuntimeConfig_SetInputDimensionsMap - >; - - template - using PerfProfileReference = WrapperDetail::GenericMemberReference - , - CastingSetter >; - - template - using EnableCPUFallbackReference = WrapperDetail::GenericMemberReference - , - CastingSetter >; - -public: - RuntimeConfig() - : BaseType(Snpe_RuntimeConfig_Create()) - { } - RuntimeConfig(const RuntimeConfig& other) - : BaseType(Snpe_RuntimeConfig_CreateCopy(other.handle())) - { } - - RuntimeConfig(RuntimeConfig&& other) noexcept - : BaseType(std::move(other)) - { } - - RuntimeConfig& operator=(RuntimeConfig&& other) noexcept{ - return moveAssign(std::move(other)); - } - - - RuntimeReference runtime{*this, DlSystem::Runtime_t::CPU_FLOAT32}; - RuntimeListReference runtimeList{*this}; - PerfProfileReference perfProfile{*this, DlSystem::PerformanceProfile_t::HIGH_PERFORMANCE}; - InputDimensionsMapReference inputDimensionsMap{*this}; - EnableCPUFallbackReference enableCPUFallback{*this, false}; - -}; - - -class RuntimeConfigList : public Wrapper { -private: - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_RuntimeConfigList_Delete}; - -public: - RuntimeConfigList() - : BaseType(Snpe_RuntimeConfigList_Create()) - { } - RuntimeConfigList(size_t size) - : BaseType(Snpe_RuntimeConfigList_CreateSize(size)) - { } - - RuntimeConfigList(RuntimeConfigList&& 
other) noexcept - : BaseType(std::move(other)) - { } - - RuntimeConfigList& operator=(RuntimeConfigList&& other) noexcept{ - return moveAssign(std::move(other)); - } - RuntimeConfigList& operator=(const RuntimeConfigList& other){ - Snpe_RuntimeConfigList_Assign(other.handle(), handle()); - return *this; - } - - - - void push_back(const RuntimeConfig& runtimeConfig){ - Snpe_RuntimeConfigList_PushBack(handle(), getHandle(runtimeConfig)); - } - - RuntimeConfig& operator[](size_t index){ - return *makeReference(Snpe_RuntimeConfigList_At_Ref(handle(), index)); - } - const RuntimeConfig& operator[](size_t index) const{ - return *makeReference(Snpe_RuntimeConfigList_At_Ref(handle(), index)); - } - - size_t size() const noexcept{ - return Snpe_RuntimeConfigList_Size(handle()); - } - size_t capacity() const noexcept{ - return Snpe_RuntimeConfigList_Capacity(handle()); - } - - void clear() noexcept{ - Snpe_RuntimeConfigList_Clear(handle()); - } - -}; - -} // ns PSNPE - - -ALIAS_IN_ZDL_NAMESPACE(PSNPE, RuntimeConfig) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, RuntimeConfigList) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPE.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPE.h deleted file mode 100644 index eb05473a..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPE.h +++ /dev/null @@ -1,336 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef _SNPE_SNPE_H_ -#define _SNPE_SNPE_H_ - - -#include "DlSystem/IBufferAttributes.h" -#include "DlSystem/ITensor.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/TensorMap.h" -#include "DlSystem/StringList.h" -#include "DlSystem/IUserBuffer.h" -#include "DlSystem/UserBufferMap.h" -#include "DlSystem/UserMemoryMap.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - -#include "DiagLog/IDiagLog.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * A typedef to indicate a SNPE handle - */ -typedef void* Snpe_SNPE_Handle_t; - -/** - * Destroys/frees a SNPE object - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_Delete(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Gets the names of input tensors to the network - * - * To support multiple input scenarios, where multiple tensors are - * passed through execute() in a TensorMap, each tensor needs to - * be uniquely named. The names of tensors can be retrieved - * through this function. - * - * In the case of a single input, one name will be returned. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @return A StringList of input tensor names. 
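As a usage note for the RuntimeConfig/RuntimeConfigList wrappers above, a minimal sketch of composing one runtime configuration per PSNPE instance; the DSP enumerator of DlSystem::Runtime_t is an assumption here, since only CPU_FLOAT32 appears in this header.

#include "SNPE/RuntimeConfigList.hpp"

// Sketch only: two runtime configs, e.g. for a parallel PSNPE build.
PSNPE::RuntimeConfigList makeRuntimeConfigs()
{
    PSNPE::RuntimeConfig cpuConfig;
    cpuConfig.runtime = DlSystem::Runtime_t::CPU_FLOAT32;                     // default shown above
    cpuConfig.perfProfile = DlSystem::PerformanceProfile_t::HIGH_PERFORMANCE;
    cpuConfig.enableCPUFallback = false;

    PSNPE::RuntimeConfig dspConfig;
    dspConfig.runtime = DlSystem::Runtime_t::DSP;                             // assumed enumerator from DlEnums.hpp
    dspConfig.enableCPUFallback = true;

    PSNPE::RuntimeConfigList configs;
    configs.push_back(cpuConfig);
    configs.push_back(dspConfig);
    return configs;                                                           // moved out via the move constructor
}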
- * - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_SNPE_GetInputTensorNames(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Gets the names of output tensors to the network - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @return List of output tensor names. - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_SNPE_GetOutputTensorNames(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Gets the names of output tensor from the input layer name - * - * @param[in] snpeHandle Handle to access the SNPE object - * @param[in] name Layer name - * - * @return Output tensor names. - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_SNPE_GetOutputTensorNamesByLayerName(Snpe_SNPE_Handle_t snpeHandle, const char* name); - - -/** - * @brief Processes the input data and returns the output - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] inputHandle A map of tensors that contains the input data for - * each input. The names of tensors needs to be - * matched with names retrieved through - * getInputTensorNames() - * - * @param[in,out] outputHandle An empty map of tensors that will contain the output - * data of potentially multiple layers (the key - * in the map is the layer name) upon return - * - * @note output TensorMap has to be empty. To forward propagate - * and get results in user-supplied tensors, use - * Snpe_SNPE_ExecuteUserBuffers(). - * - * @return SNPE_SUCCESS upon successful execution - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_ExecuteITensors(Snpe_SNPE_Handle_t snpeHandle, Snpe_TensorMap_Handle_t inputHandle, Snpe_TensorMap_Handle_t outputHandle); - -/** - * @brief Processes the input data and returns the output - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] inputHandle A single tensor contains the input data. - * - * @param[in,out] outputHandle An empty map of tensors that will contain the output - * data of potentially multiple layers (the key - * in the map is the layer name) upon return - * - * @note output TensorMap has to be empty. To forward propagate - * and get results in user-supplied tensors, use - * Snpe_SNPE_ExecuteUserBuffers. - * - * @return SNPE_SUCCESS upon successful execution - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_ExecuteITensor(Snpe_SNPE_Handle_t snpeHandle, Snpe_ITensor_Handle_t inputHandle, Snpe_TensorMap_Handle_t outputHandle); - -/** - * @brief Processes the input data and returns the output, using - * user-supplied buffers - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] inputHandle A map of UserBuffers that contains the input data for - * each input. The names of UserBuffers needs to be - * matched with names retrieved through - * getInputTensorNames() - * - * @param[in,out] outputHandle A map of UserBuffers that will hold the output - * data of potentially multiple layers (the key - * in the map is the UserBuffer name) - * - * @note input and output UserBuffer maps must be fully pre-populated. with - * dimensions matching what the network expects. - * For example, if there are 5 output UserBuffers they all have to be - * present in map. - * - * Caller must guarantee that for the duration of execute(), the buffer - * stored in UserBuffer would remain valid. For more detail on buffer - * ownership and lifetime requirements, please refer to zdl::DlSystem::UserBuffer - * documentation. 
- * - * @return SNPE_SUCCESS upon successful execution - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_ExecuteUserBuffers(Snpe_SNPE_Handle_t snpeHandle, Snpe_UserBufferMap_Handle_t inputHandle, Snpe_UserBufferMap_Handle_t outputHandle); - - -/** - * @brief Register Client ION Buffers - * - * @note To be deprecated, please use new api Snpe_SNPE_RegisterUserMemoryMappedBuffers - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] ionBufferMapHandle A UserMemoryMap of virtual addresses - * - * @return SNPE_SUCCESS upon successful ION Buffer registration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_RegisterIonBuffers(Snpe_SNPE_Handle_t snpeHandle, Snpe_UserMemoryMap_Handle_t ionBufferMapHandle); - -/** - * @brief Deregister Client ION Buffers - * - * @note To be deprecated, please use new api Snpe_SNPE_DeregisterUserMemoryMappedBuffers - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] ionBufferNamesHandle A StringList of ION Buffer names - * - * @return SNPE_SUCCESS upon successful ION Buffer deregistration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_DeregisterIonBuffers(Snpe_SNPE_Handle_t snpeHandle, Snpe_StringList_Handle_t ionBufferNamesHandle); - -/** - * @brief Register Client Memory-Mapped Buffers (Example ION buffers in Android) - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] bufferMapHandle A UserMemoryMap of virtual addresses - * - * @note UserBuffer type passed for registration must match the data type of the tensor in the dlc - * For regular UserBuffers SNPE performs an online data conversion (quantization or - * dequantization etc). This is not possible for memory mapped buffers hence can lead to - * issues during execution or accuracy degradation - * - * @return SNPE_SUCCESS upon successful memory mapped buffer registration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_RegisterUserMemoryMappedBuffers(Snpe_SNPE_Handle_t snpeHandle, Snpe_UserMemoryMap_Handle_t bufferMapHandle); - -/** - * @brief Deregister Client Memory-Mapped Buffers (Example ION buffers in Android) - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] bufferNamesHandle A StringList of memory mapped buffer names - * - * @return SNPE_SUCCESS upon successful memory mapped buffer deregistration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_DeregisterUserMemoryMappedBuffers(Snpe_SNPE_Handle_t snpeHandle, Snpe_StringList_Handle_t bufferNamesHandle); - -/** - * @brief Returns the version string embedded at model conversion - * time. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @return Model version string, which is a free-form string - * supplied at the time of the conversion - * - */ -SNPE_API -const char* Snpe_SNPE_GetModelVersion(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Returns the dimensions of the input data to the model in the - * form of TensorShape. The dimensions in TensorShape corresponds to - * what the tensor dimensions would need to be for an input tensor to - * the model. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] name input name. - * - * @note Note that this function only makes sense for networks - * that have a fixed input size. For networks in which the - * input size varies with each call of Execute(), this - * function should not be used. - * - * @return a TensorShape that maintains dimensions, - * matching the tensor dimensions for input to the model, - * where the last entry is the fastest varying dimension, etc. 
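A minimal sketch of driving these SNPE C functions with user buffers; the SNPE handle is assumed to come from Snpe_SNPEBuilder_Build (declared later in this diff) and both UserBufferMaps are assumed to be fully pre-populated, as the note above requires.

#include <cstdio>
#include "SNPE/SNPE.h"

/* Sketch only: print the model version and run one execution with
 * user-supplied buffers. Returns 1 on success, 0 on failure. */
static int run_with_user_buffers(Snpe_SNPE_Handle_t snpe,
                                 Snpe_UserBufferMap_Handle_t inputs,
                                 Snpe_UserBufferMap_Handle_t outputs)
{
    std::printf("model version: %s\n", Snpe_SNPE_GetModelVersion(snpe));

    /* Both maps must already contain one buffer per input/output tensor,
     * keyed by the names from Snpe_SNPE_GetInputTensorNames /
     * Snpe_SNPE_GetOutputTensorNames. */
    return Snpe_SNPE_ExecuteUserBuffers(snpe, inputs, outputs) == SNPE_SUCCESS;
}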
- * - * @see Snpe_ITensor_Handle_t - * @see Snpe_TensorShape_Handle_t - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_SNPE_GetInputDimensions(Snpe_SNPE_Handle_t snpeHandle, const char* name); - -/** - * @brief Returns the dimensions of the first input's data to the model in the - * form of TensorShape. The dimensions in TensorShape corresponds to - * what the tensor dimensions would need to be for an input tensor to - * the model. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @note Note that this function only makes sense for networks - * that have a fixed input size. For networks in which the - * input size varies with each call of Execute(), this - * function should not be used. - * - * @return a TensorShape that maintains dimensions, - * matching the tensor dimensions for first input to the model, - * where the last entry is the fastest varying dimension, etc. - * - * @see Snpe_ITensor_Handle_t - * @see Snpe_TensorShape_Handle_t - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_SNPE_GetInputDimensionsOfFirstTensor(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Gets the output layer(s) for the network. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @note The output layers returned by this function may be - * different than those specified when the network was created - * via the @ref CAPI_SNPEBuilder "SNPEBuilder". For example, if the - * network was created in debug mode with no explicit output - * layers specified, this will contain all layers. - * - * - * @return A StringList of output layer names. - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_SNPE_GetOutputLayerNames(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Returns attributes of buffers used to feed input tensors and receive result from output tensors. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] name Tensor name. - * - * @return BufferAttributes of input/output tensor named - */ -SNPE_API -Snpe_IBufferAttributes_Handle_t Snpe_SNPE_GetInputOutputBufferAttributes(Snpe_SNPE_Handle_t snpeHandle, const char *name); - -/** - * @brief . - * - * Get the diagnostic logging interface - * - * @param[in] snpeHandle Handle to access the SNPE object - * - */ -SNPE_API -Snpe_IDiagLog_Handle_t Snpe_SNPE_GetDiagLogInterface_Ref(Snpe_SNPE_Handle_t snpeHandle); - - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPE.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPE.hpp deleted file mode 100644 index d4ad18df..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPE.hpp +++ /dev/null @@ -1,125 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "DlSystem/StringList.hpp" -#include "DlSystem/TensorMap.hpp" -#include "DlSystem/UserBufferMap.hpp" -#include "DlSystem/UserMemoryMap.hpp" -#include "DlSystem/IBufferAttributes.hpp" -#include "DiagLog/IDiagLog.hpp" - -#include "DlSystem/DlOptional.hpp" - - -#include "SNPE/SNPE.h" - -namespace SNPE{ - -class SNPE : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_SNPE_Delete}; - - template - static DlSystem::Optional makeOptional(H handle){ - return DlSystem::Optional(T(moveHandle(handle))); - } -public: - - DlSystem::Optional getInputTensorNames() const noexcept{ - return makeOptional(Snpe_SNPE_GetInputTensorNames(handle())); - } - - DlSystem::Optional getOutputTensorNames() const noexcept{ - return makeOptional(Snpe_SNPE_GetOutputTensorNames(handle())); - } - - DlSystem::StringList getOutputTensorNamesByLayerName(const char *name) const noexcept{ - return moveHandle(Snpe_SNPE_GetOutputTensorNamesByLayerName(handle(), name)); - } - - bool execute(const DlSystem::TensorMap& input, DlSystem::TensorMap& output) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_ExecuteITensors(handle(), getHandle(input), getHandle(output)); - } - - - bool execute(const DlSystem::ITensor* input, DlSystem::TensorMap& output) noexcept{ - if(!input) return false; - return SNPE_SUCCESS == Snpe_SNPE_ExecuteITensor(handle(), getHandle(*input), getHandle(output)); - } - - bool execute(const DlSystem::UserBufferMap& input, const DlSystem::UserBufferMap& output) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_ExecuteUserBuffers(handle(), getHandle(input), getHandle(output)); - } - - - /* To be deprecated, please use new api registerMemoryMappedBuffers */ - bool registerIonBuffers(const DlSystem::UserMemoryMap& ionBufferMap) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_RegisterUserMemoryMappedBuffers(handle(), getHandle(ionBufferMap)); - } - - /* To be deprecated, please use new api deregisterMemoryMappedBuffers */ - bool deregisterIonBuffers(const DlSystem::StringList& ionBufferNames) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_DeregisterUserMemoryMappedBuffers(handle(), getHandle(ionBufferNames)); - } - - bool registerMemoryMappedBuffers(const DlSystem::UserMemoryMap& memoryMappedBufferMap) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_RegisterUserMemoryMappedBuffers(handle(), getHandle(memoryMappedBufferMap)); - } - - bool deregisterMemoryMappedBuffers(const DlSystem::StringList& bufferNames) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_DeregisterUserMemoryMappedBuffers(handle(), getHandle(bufferNames)); - } - - std::string getModelVersion() const{ - auto str = Snpe_SNPE_GetModelVersion(handle()); - return str ? 
str : ""; - } - - DlSystem::Optional getInputDimensions() const noexcept{ - return makeOptional(Snpe_SNPE_GetInputDimensionsOfFirstTensor(handle())); - } - - DlSystem::Optional getInputDimensions(const char* name) const noexcept{ - return makeOptional(Snpe_SNPE_GetInputDimensions(handle(), name)); - } - - DlSystem::Optional getOutputLayerNames() const noexcept{ - return makeOptional(Snpe_SNPE_GetOutputLayerNames(handle())); - } - - - DlSystem::Optional getInputOutputBufferAttributes(const char* name) const noexcept{ - return DlSystem::Optional( - new DlSystem::IBufferAttributes(moveHandle(Snpe_SNPE_GetInputOutputBufferAttributes(handle(), name))), - DlSystem::Optional::LIFECYCLE::POINTER_OWNED - ); - } - - DlSystem::Optional getDiagLogInterface() noexcept{ - auto diagLogHandle = Snpe_SNPE_GetDiagLogInterface_Ref(handle()); - if(!diagLogHandle) return {}; - // Bind lifespan of this reference to this object - auto toret = makeReference(diagLogHandle); - return {toret, DlSystem::Optional::LIFECYCLE::POINTER_NOT_OWNED}; - } - -private: - SNPE(const SNPE&) = delete; - SNPE& operator=(const SNPE&) = delete; - -}; - -} // ns SNPE - -ALIAS_IN_ZDL_NAMESPACE(SNPE, SNPE) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.h deleted file mode 100644 index 6adcebad..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.h +++ /dev/null @@ -1,334 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _SNPE_BUILDER_H_ -#define _SNPE_BUILDER_H_ - -#include "SNPE/SNPE.h" -#include "DlSystem/DlEnums.h" -#include "DlSystem/DlError.h" -#include "DlSystem/IOBufferDataTypeMap.h" -#include "DlSystem/TensorShapeMap.h" -#include "DlSystem/RuntimeList.h" -#include "DlSystem/PlatformConfig.h" -#include "DlContainer/DlContainer.h" - -#ifdef __cplusplus -extern "C" { -#endif - - - -/** - * A typedef to indicate a SNPEBuilder handle - */ -typedef void* Snpe_SNPEBuilder_Handle_t; - -/** - * The builder class for creating SNPE objects. - * Not meant to be extended. - */ - - -/** - * @brief Constructor of NeuralNetwork Builder ith a supplied model. - * - * @param[in] containerHandle A DlContainer holding the model. - * - * @return A new instance of a SNPEBuilder object - * that can be used to configure and build - * an instance of SNPE. - * - */ -SNPE_API -Snpe_SNPEBuilder_Handle_t Snpe_SNPEBuilder_Create(Snpe_DlContainer_Handle_t containerHandle); - -/** - * Destroys/frees a SNPEBuilder object - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @return SNPE_SUCCESS if Delete operation successful. 
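The equivalent through the SNPE C++ wrapper above, as a minimal sketch; the SNPE object is assumed to come from the SNPEBuilder wrapper (not shown in this hunk) and the UserBufferMaps are assumed to be pre-populated.

#include <cstdio>
#include "SNPE/SNPE.hpp"

// Sketch only: query tensor names, print the model version and execute once
// with user-supplied buffers.
bool runOnce(SNPE::SNPE& snpe,
             const DlSystem::UserBufferMap& inputs,
             const DlSystem::UserBufferMap& outputs)
{
    auto inputNames  = snpe.getInputTensorNames();   // DlSystem::Optional<DlSystem::StringList>
    auto outputNames = snpe.getOutputTensorNames();
    (void)inputNames; (void)outputNames;             // the buffer maps must be keyed by these names

    std::printf("model version: %s\n", snpe.getModelVersion().c_str());
    return snpe.execute(inputs, outputs);
}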
- */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_Delete(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle); - -/** - * @brief Requests a performance profile. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] performanceProfile The target performance profile. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetPerformanceProfile(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_PerformanceProfile_t performanceProfile); - -/** - * @brief Sets the profiling level. Default profiling level for - * SNPEBuilder is off. Off and basic only applies to DSP runtime. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] profilingLevel The target profiling level. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetProfilingLevel(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_ProfilingLevel_t profilingLevel); - -/** - * @brief Sets a preference for execution priority. - * - * This allows the caller to give coarse hint to SNPE runtime - * about the priority of the network. SNPE runtime is free to use - * this information to co-ordinate between different workloads - * that may or may not extend beyond SNPE. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] priority The target performance profile. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetExecutionPriorityHint(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_ExecutionPriorityHint_t priority); - -/** - * @brief Sets the layers that will generate output. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] outputLayerNames List of layer names to - * output. An empty list will - * result in only the final - * layer of the model being - * the output layer. The list - * will be copied. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetOutputLayers(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_StringList_Handle_t outputLayerNames); - -/** - * @brief Sets the output tensor names. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] outputTensorNames List of tensor names to - * output. An empty list will - * result in producing output for the final - * output tensor of the model. - * The list will be copied. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetOutputTensors(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_StringList_Handle_t outputTensorNames); - -/** - * @brief Sets whether this neural network will perform inference with - * input from user-supplied buffers, and write output to user-supplied - * buffers. Default behaviour is to use tensors created by - * ITensorFactory. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] bufferMode Boolean whether to use user-supplied buffer or not. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetUseUserSuppliedBuffers(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, int bufferMode); - -/** - * @brief Sets the debug mode of the runtime. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] debugMode This enables debug mode for the runtime. It - * does two things. For an empty - * outputLayerNames list, all layers will be - * output. It might also disable some internal - * runtime optimizations (e.g., some networks - * might be optimized by combining layers, - * etc.). 
- * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetDebugMode(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, int debugMode); - - - -/** - * @brief Sets network's input dimensions to enable resizing of - * the spatial dimensions of each layer for fully convolutional networks, - * and the batch dimension for all networks. - * - * @param[in] tensorShapeMapHandle : Handle to the map of input names and their new dimensions. - * The new dimensions overwrite the input dimensions - * embedded in the model and then resize each layer - * of the model. If the model contains - * layers whose dimensions cannot be resized e.g FullyConnected, - * exception will be thrown when SNPE instance is actually built. - * In general the batch dimension is always resizable. - * After resizing of layers' dimensions in model based - * on new input dimensions, the new model is revalidated - * against all runtime constraints, whose failures may - * result in cpu fallback situation. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetInputDimensions(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_TensorShapeMap_Handle_t inputDimensionsMapHandle); - -/** - * @brief Sets the mode of init caching functionality. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] mode Boolean. This flag enables/disables the functionality of init caching. - * When init caching functionality is enabled, a set of init caches - * will be created during network building/initialization process - * and will be added to DLC container. If such DLC container is saved - * by the user, in subsequent network building/initialization processes - * these init caches will be loaded from the DLC so as to reduce initialization time. - * In disable mode, no init caches will be added to DLC container. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetInitCacheMode(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, int cacheMode); - -/** - * @brief Returns an instance of SNPE based on the current parameters. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @return A new instance of a @ref CAPI_SNPE "SNPE" object that can be used - * to execute models or null if any errors occur. - */ -SNPE_API -Snpe_SNPE_Handle_t Snpe_SNPEBuilder_Build(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle); - -/** - * @brief Sets the platform configuration. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] platformConfig The platform configuration. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetPlatformConfig(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_PlatformConfig_Handle_t platformConfigHandle); - -/** - * @brief Sets network's runtime order of precedence. Example: - * CPU_FLOAT32, GPU_FLOAT16, AIP_FIXED8_TF - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] runtimeListHandle The list of runtime in order of precedence - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetRuntimeProcessorOrder(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_RuntimeList_Handle_t runtimeListHandle); - -/** - * @brief Sets the unconsumed tensors as output - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] setOutput Boolean. 
This enables unconsumed tensors (i.e) - * outputs which are not inputs to any - * layer (basically dead ends) to be marked - * for output - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetUnconsumedTensorsAsOutputs(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, int setOutput); - -/** - * @brief Execution terminated when exceeding time limit. - * Only valid for dsp runtime currently. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] timeout Time limit value in microseconds - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetTimeOut(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, uint64_t timeoutMicroSec); - - -/** - * @brief Sets the datatype of the buffer. - * Only valid for dsp runtime currently. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] dataTypeMapHandle Map of the buffer names and the datatype that needs to be set. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetBufferDataType(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_IOBufferDataTypeMap_Handle_t dataTypeMapHandle); - -/** - * @brief Sets up the entire initialization callflow to - * happen on the user's thread - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] singleThreadedInit Flag indicating user's intent to perform initialization callflow - * on caller's thread. - * When set to 1, initialization will happen on the user's thread - * When set to 0, initialization will happen on a new thread. This is the default - * behavior (analogous to not calling this API) -*/ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetSingleThreadedInit(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, int singleThreadedInit); - -/** - * @brief Sets the fixed point execution mode for CPU runtime. - * If a floating point DLC is executed with this option set, the program will be terminated with an exception. - * If a quantized DLC is executed without this option set, the execution will be in floating point mode in CPU. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] cpuFxpMode Boolean If set to true, enables the fixed point mode. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetCpuFixedPointMode( - Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, bool cpuFxpMode); - -/** - * @brief Sets model name for logging - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] modelName String Model name for logging. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetModelName( - Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, const char *modelName); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_BUILDER_H_ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.hpp deleted file mode 100644 index 37995f4e..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.hpp +++ /dev/null @@ -1,136 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
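// --- Editor's note: illustrative sketch, not part of the deleted headers -----
// A minimal usage sketch for the C builder API documented in the SNPEBuilder.h
// shown just above. It assumes the caller already owns a loaded DLC container
// handle; how that handle is obtained is outside the headers in this diff.
#include "SNPE/SNPEBuilder.h"   // also pulls in SNPE/SNPE.h and the handle typedefs

static Snpe_SNPE_Handle_t buildFromContainer(Snpe_DlContainer_Handle_t container)
{
    // Create a builder bound to the (already loaded) model container.
    Snpe_SNPEBuilder_Handle_t builder = Snpe_SNPEBuilder_Create(container);
    if (!builder) return nullptr;

    // Optional knobs documented above; the integer flags act as booleans.
    Snpe_SNPEBuilder_SetInitCacheMode(builder, 1);       // add init caches to the DLC
    Snpe_SNPEBuilder_SetSingleThreadedInit(builder, 1);  // init on the caller's thread

    // Build returns a SNPE handle, or null if any error occurs.
    Snpe_SNPE_Handle_t snpe = Snpe_SNPEBuilder_Build(builder);

    // The builder is no longer needed once the network has been built.
    Snpe_SNPEBuilder_Delete(builder);
    return snpe;  // caller releases it later with Snpe_SNPE_Delete()
}
// ------------------------------------------------------------------------------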
-// -//============================================================================= -#pragma once - -#include - - -#include "Wrapper.hpp" -#include "SNPE.hpp" -#include "DlSystem/RuntimeList.hpp" -#include "DlContainer/IDlContainer.hpp" -#include "DlSystem/PlatformConfig.hpp" -#include "DlSystem/TensorShapeMap.hpp" - -#include "DlSystem/DlEnums.hpp" - -#include "DlSystem/IOBufferDataTypeMap.hpp" - -#include "SNPE/SNPEBuilder.h" - - -namespace SNPE { - -class SNPEBuilder : public Wrapper { - friend BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_SNPEBuilder_Delete}; -public: - - explicit SNPEBuilder(DlContainer::IDlContainer *container) - : BaseType(Snpe_SNPEBuilder_Create(getHandle(container))) - { } - - - SNPEBuilder& setPerformanceProfile(DlSystem::PerformanceProfile_t performanceProfile){ - Snpe_SNPEBuilder_SetPerformanceProfile(handle(), static_cast(performanceProfile)); - return *this; - } - - SNPEBuilder& setProfilingLevel(DlSystem::ProfilingLevel_t profilingLevel){ - Snpe_SNPEBuilder_SetProfilingLevel(handle(), static_cast(profilingLevel)); - return *this; - } - - SNPEBuilder& setExecutionPriorityHint(DlSystem::ExecutionPriorityHint_t priority){ - Snpe_SNPEBuilder_SetExecutionPriorityHint(handle(), static_cast(priority)); - return *this; - } - - SNPEBuilder& setOutputLayers(const DlSystem::StringList& outputLayerNames){ - Snpe_SNPEBuilder_SetOutputLayers(handle(), getHandle(outputLayerNames)); - return *this; - } - - SNPEBuilder& setOutputTensors(const DlSystem::StringList& outputTensorNames){ - Snpe_SNPEBuilder_SetOutputTensors(handle(), getHandle(outputTensorNames)); - return *this; - } - - SNPEBuilder& setUseUserSuppliedBuffers(int bufferMode){ - Snpe_SNPEBuilder_SetUseUserSuppliedBuffers(handle(), bufferMode); - return *this; - } - - SNPEBuilder& setDebugMode(int debugMode){ - Snpe_SNPEBuilder_SetDebugMode(handle(), debugMode); - return *this; - } - - SNPEBuilder& setInputDimensions(const DlSystem::TensorShapeMap& inputDimensionsMap){ - Snpe_SNPEBuilder_SetInputDimensions(handle(), getHandle(inputDimensionsMap)); - return *this; - } - - SNPEBuilder& setInitCacheMode(int cacheMode){ - Snpe_SNPEBuilder_SetInitCacheMode(handle(), cacheMode); - return *this; - } - - SNPEBuilder& setPlatformConfig(const DlSystem::PlatformConfig& platformConfigHandle){ - Snpe_SNPEBuilder_SetPlatformConfig(handle(), getHandle(platformConfigHandle)); - return *this; - } - - SNPEBuilder& setRuntimeProcessorOrder(const DlSystem::RuntimeList& runtimeList){ - Snpe_SNPEBuilder_SetRuntimeProcessorOrder(handle(), getHandle(runtimeList)); - return *this; - } - - SNPEBuilder& setUnconsumedTensorsAsOutputs(int setOutput){ - Snpe_SNPEBuilder_SetUnconsumedTensorsAsOutputs(handle(), setOutput); - return *this; - } - - SNPEBuilder& setTimeOut(uint64_t timeoutMicroSec){ - Snpe_SNPEBuilder_SetTimeOut(handle(), timeoutMicroSec); - return *this; - } - - - SNPEBuilder& setBufferDataType(const DlSystem::IOBufferDataTypeMap& dataTypeMap){ - Snpe_SNPEBuilder_SetBufferDataType(handle(), getHandle(dataTypeMap)); - return *this; - } - - SNPEBuilder& setSingleThreadedInit(int singleThreadedInit){ - Snpe_SNPEBuilder_SetSingleThreadedInit(handle(), singleThreadedInit); - return *this; - } - - SNPEBuilder& setCpuFixedPointMode(bool cpuFxpMode){ - Snpe_SNPEBuilder_SetCpuFixedPointMode(handle(), cpuFxpMode); - return *this; - } - - SNPEBuilder& setModelName(DlSystem::String modelName){ - Snpe_SNPEBuilder_SetModelName(handle(), modelName.c_str()); - return *this; - } - - std::unique_ptr build() noexcept{ - auto 
h = Snpe_SNPEBuilder_Build(handle()); - return h ? makeUnique(h) : nullptr; - } - -}; - -} // ns SNPE - - -ALIAS_IN_ZDL_NAMESPACE(SNPE, SNPEBuilder) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPEFactory.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPEFactory.hpp deleted file mode 100644 index 6c2486ee..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPEFactory.hpp +++ /dev/null @@ -1,88 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include "DlSystem/DlEnums.hpp" -#include "DlSystem/DlVersion.hpp" -#include "DlSystem/ITensorFactory.hpp" -#include "DlSystem/IUserBufferFactory.hpp" - - -#include "SNPE/SNPEUtil.h" -#include "DlSystem/DlEnums.h" - -namespace SNPE { - - -class SNPEFactory { -public: - - - static bool isRuntimeAvailable(DlSystem::Runtime_t runtime){ - return Snpe_Util_IsRuntimeAvailable(static_cast(runtime)); - } - - static bool isRuntimeAvailable(DlSystem::Runtime_t runtime, DlSystem::RuntimeCheckOption_t option){ - return Snpe_Util_IsRuntimeAvailableCheckOption(static_cast(runtime), - static_cast(option)); - } - - static DlSystem::ITensorFactory& getTensorFactory(){ - static DlSystem::ITensorFactory iTensorFactory; - return iTensorFactory; - } - - static DlSystem::IUserBufferFactory& getUserBufferFactory(){ - static DlSystem::IUserBufferFactory iUserBufferFactory; - return iUserBufferFactory; - } - - static DlSystem::Version_t getLibraryVersion(){ - return WrapperDetail::moveHandle(Snpe_Util_GetLibraryVersion()); - } - - static bool setSNPEStorageLocation(const char* storagePath){ - return SNPE_SUCCESS == Snpe_Util_SetSNPEStorageLocation(storagePath); - } - - static bool addOpPackage(const std::string& regLibraryPath){ - return SNPE_SUCCESS == Snpe_Util_AddOpPackage(regLibraryPath.c_str()); - } - - static bool isGLCLInteropSupported(){ - return Snpe_Util_IsGLCLInteropSupported(); - } - - static const char* getLastError(){ - return Snpe_Util_GetLastError(); - } - - static bool initializeLogging(const DlSystem::LogLevel_t& level){ - return Snpe_Util_InitializeLogging(static_cast(level)); - } - - static bool initializeLogging(const DlSystem::LogLevel_t& level, const std::string& logPath){ - return Snpe_Util_InitializeLoggingPath(static_cast(level), logPath.c_str()); - } - - static bool setLogLevel(const DlSystem::LogLevel_t& level){ - return Snpe_Util_SetLogLevel(static_cast(level)); - } - - static bool terminateLogging(){ - return Snpe_Util_TerminateLogging(); - } -}; - - -} // ns SNPE - - -ALIAS_IN_ZDL_NAMESPACE(SNPE, SNPEFactory) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPEUtil.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPEUtil.h deleted file mode 100644 index a3e1d1e1..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/SNPEUtil.h +++ /dev/null @@ -1,354 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
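// --- Editor's note: illustrative sketch, not part of the deleted headers -----
// The same flow through the C++ wrappers deleted above: SNPEBuilder's setters
// return *this so calls can be chained, and build() hands back a unique_ptr to
// the built network object. Include paths are assumed to mirror the zdl/SNPE
// layout of the deleted files; the loaded container and the runtime value are
// supplied by the caller, since neither is produced by the headers shown here.
#include <memory>
#include "SNPE/SNPEBuilder.hpp"
#include "SNPE/SNPEFactory.hpp"

std::unique_ptr<SNPE::SNPE> buildNetwork(DlContainer::IDlContainer* container,
                                         DlSystem::Runtime_t preferredRuntime)
{
    // Bail out early if the preferred runtime is not present on this device.
    if (!SNPE::SNPEFactory::isRuntimeAvailable(preferredRuntime))
        return nullptr;

    SNPE::SNPEBuilder builder(container);
    return builder
        .setInitCacheMode(1)       // cache init work into the DLC on first build
        .setSingleThreadedInit(1)  // run initialization on this thread
        .build();                  // nullptr on failure; SNPEFactory::getLastError() has details
}
// ------------------------------------------------------------------------------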
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _SNPE_UTIL_H_ -#define _SNPE_UTIL_H_ - -#include "SNPE/SNPE.h" -#include "DlSystem/DlEnums.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/IUserBuffer.h" -#include "DlSystem/ITensor.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/DlVersion.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * @brief Creates a UserBuffer - * - * @param[in] buffer Pointer to the buffer that the caller supplies - * - * @param[in] bufSize Buffer size, in bytes - * - * @param[in] stridesHandle Total number of bytes between elements in each dimension. - * E.g. A tightly packed tensor of floats with dimensions [4, 3, 2] would have strides of [24, 8, 4]. - * - * @param[in] userBufferEncodingHandle Handle to a UserBufferEncoding object - * - * @note Caller has to ensure that memory pointed to by buffer stays accessible - * for the lifetime of the object created - * - * @return Handle to the created UserBuffer - */ -SNPE_API -Snpe_IUserBuffer_Handle_t Snpe_Util_CreateUserBuffer(void *buffer, - size_t bufSize, - Snpe_TensorShape_Handle_t stridesHandle, - Snpe_IUserBuffer_Handle_t userBufferEncodingHandle); - -/** - * @brief Creates a UserBuffer with a provided UserBufferSource - * - * @param[in] buffer Pointer to the buffer that the caller supplies - * - * @param[in] bufSize Buffer size, in bytes - * - * @param[in] stridesHandle Total number of bytes between elements in each dimension. - * E.g. A tightly packed tensor of floats with dimensions [4, 3, 2] would have strides of [24, 8, 4]. - * - * @param[in] userBufferEncodingHandle Handle to a UserBufferEncoding object - * - * @param[in] userBufferSourceHandle Handle to a UserBufferSource object - * - * @return Handle to the created UserBuffer - */ -SNPE_API -Snpe_IUserBuffer_Handle_t Snpe_Util_CreateUserBufferFromSource(void *buffer, - size_t bufSize, - Snpe_TensorShape_Handle_t stridesHandle, - Snpe_IUserBuffer_Handle_t userBufferEncodingHandle, - Snpe_UserBufferSource_Handle_t userBufferSourceHandle); - -/** - * @brief Creates a UserBuffer - * - * @param[in] buffer Pointer to the buffer that the caller supplies - * - * @param[in] bufSize Buffer size, in bytes - * - * @param[in] stridesHandle Total number of bytes between elements in each dimension. - * E.g. A tightly packed tensor of floats with dimensions [4, 3, 2] would have strides of [24, 8, 4]. - * - * @param[in] userBufferEncodingHandle Reference to an UserBufferEncoding object - * - * @param[in] userBufferSourceHandle Reference to an UserBufferSource object - * - * @note Caller has to ensure that memory pointed to by buffer stays accessible - * for the lifetime of the object created - * - * @return the created UserBuffer - * - */ -SNPE_API -Snpe_IUserBuffer_Handle_t Snpe_Util_CreateUserGlBuffer(void *buffer, - size_t bufSize, - Snpe_TensorShape_Handle_t stridesHandle, - Snpe_IUserBuffer_Handle_t userBufferEncodingHandle, - Snpe_IUserBuffer_Handle_t userBufferSourceHandle); - -/** - * Creates a new ITensor with uninitialized data. 
- * - * ITensor buffer size assumes float32 encoding for each element. - * (i.e., a tensor with dimensions (2,3) will be represented by (2 * 3) * 4 = 24 bytes in memory) - * - * The strides for the tensor will match the tensor dimensions - * (i.e., the tensor data is contiguous in memory). - * - * @param[in] shapeHandle The dimensions for the tensor in which the last - * element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying, etc. - * - * @return The created tensor - */ -SNPE_API -Snpe_ITensor_Handle_t Snpe_Util_CreateITensor(Snpe_TensorShape_Handle_t shapeHandle); - -/** - * Create a new ITensor with specific data. - * (i.e. the tensor data is contiguous in memory). This tensor is - * primarily used to create a tensor where tensor size can't be - * computed directly from dimension. One such example is - * NV21-formatted image, or any YUV formatted image - * - * @param[in] shapeHandle The dimensions for the tensor in which the last - * element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying, etc. - * - * @param[in] data The actual data with which the Tensor object is filled. - * - * @param[in] dataSize The size of data - * - * @return A handle to the created tensor - */ -SNPE_API -Snpe_ITensor_Handle_t Snpe_Util_CreateITensorDataSize(Snpe_TensorShape_Handle_t shapeHandle, const uint8_t* data, size_t dataSize); - -/** - * Create a new ITensor with specific data. - * (i.e. the tensor data is contiguous in memory). This tensor is - * primarily used to create a tensor where tensor size can't be - * computed directly from dimension. One such example is - * NV21-formatted image, or any YUV formatted image - * - * @param[in] shapeHandle The dimensions for the tensor in which the last - * element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying, etc. - * - * @param[in] data The actual data with which the Tensor object is filled. - * - * @param[in] dataSize The size of data - * - * @return the created tensor - */ -SNPE_API -Snpe_ITensor_Handle_t Snpe_Util_CreateITensor_NV21(Snpe_TensorShape_Handle_t shapeHandle, unsigned char *data, size_t dataSize); - -/** - * Indicates whether the supplied runtime is available on the - * current platform. - * - * @param[in] runtime The target runtime to check. - * - * @return Boolean: Non-zero if the supplied runtime is available; 0 otherwise - * - */ -SNPE_API -int Snpe_Util_IsRuntimeAvailable(Snpe_Runtime_t runtime); - -/** - * Indicates whether the supplied runtime is available on the - * current platform. - * - * @param[in] runtime The target runtime to check. - * - * @param[in] runtimeCheckOption Extent to perform runtime available check. - * - * @return Boolean: Non-zero if the supplied runtime is available; 0 otherwise - * - */ -SNPE_API -int Snpe_Util_IsRuntimeAvailableCheckOption(Snpe_Runtime_t runtime, Snpe_RuntimeCheckOption_t runtimeCheckOption); - - -/** - * Gets the version of the SNPE library. - * - * @return Version of the SNPE library. - * - */ -SNPE_API -Snpe_DlVersion_Handle_t Snpe_Util_GetLibraryVersion(); - -/** - * Set the SNPE storage location for all SNPE instances in this - * process. Note that this may only be called once, and if so - * must be called before creating any SNPE instances. - * - * @param[in] storagePath Absolute path to a directory which SNPE may - * use for caching and other storage purposes. 
- * - * @return Boolean: Non-zero if the supplied path was succesfully set as - * the SNPE storage location, 0 otherwise. - * - */ -SNPE_API -int Snpe_Util_SetSNPEStorageLocation(const char* storagePath); - -/** - * @brief Register a user-defined op package with SNPE. - * - * @param[in] regLibraryPath Path to the registration library - * that allows clients to register a set of operations that are - * part of the package, and share op info with SNPE - * - * @return Boolean: Non-zero if successful, 0 otherwise. - */ -SNPE_API -int Snpe_Util_AddOpPackage(const char* regLibraryPath ); - -/** - * Indicates whether the OpenGL and OpenCL interoperability is supported - * on GPU platform. - * - * @return Boolean: Non-zero if the OpenGL and OpenCl interop is supported; 0 otherwise - * - */ -SNPE_API -int Snpe_Util_IsGLCLInteropSupported(); - -/** - * @return A string description of the last error - */ -SNPE_API -const char* Snpe_Util_GetLastError(); - -/** - * Initializes logging with the specified log level. - * initializeLogging with level, is used on Android platforms - * and after successful initialization, SNPE - * logs are printed in android logcat logs. - * - * It is recommended to initializeLogging before creating any - * SNPE instances, in order to capture information related to - * core initialization. If this is called again after first - * time initialization, subsequent calls are ignored. - * Also, Logging can be re-initialized after a call to - * terminateLogging API by calling initializeLogging again. - * - * A typical usage of Logging life cycle can be - * initializeLogging() - * any other SNPE API like isRuntimeAvailable() - * * setLogLevel() - optional - can be called anytime - * between initializeLogging & terminateLogging - * SNPE instance creation, inference, destroy - * terminateLogging(). - * - * Please note, enabling logging can have performance impact. - * - * @param[in] level Log level (LOG_INFO, LOG_WARN, etc.). - * - * @return Boolean: non-zero if successful, 0 otherwise. - */ -SNPE_API -int Snpe_Util_InitializeLogging(Snpe_LogLevel_t level); - -/** - * Initializes logging with the specified log level and log path. - * initializeLogging with level & log path, is used on non Android - * platforms and after successful initialization, SNPE - * logs are printed in std output & into log files created in the - * log path. - * - * It is recommended to initializeLogging before creating any - * SNPE instances, in order to capture information related to - * core initialization. If this is called again after first - * time initialization, subsequent calls are ignored. - * Also, Logging can be re-initialized after a call to - * terminateLogging API by calling initializeLogging again. - * - * A typical usage of Logging life cycle can be - * initializeLogging() - * any other SNPE API like isRuntimeAvailable() - * * setLogLevel() - optional - can be called anytime - * between initializeLogging & terminateLogging - * SNPE instance creation, inference, destroy - * terminateLogging() - * - * Please note, enabling logging can have performance impact - * - * @param[in] level Log level (LOG_INFO, LOG_WARN, etc.). - * - * @param[in] logPath of directory to store logs. - * If path is empty, the default path is "./Log". - * For android, the log path is ignored. - * - * @return Boolean: non-zero if successful, 0 otherwise. - */ -SNPE_API -int Snpe_Util_InitializeLoggingPath(Snpe_LogLevel_t level, const char* logPath); - -/** - * Updates the current logging level with the specified level. 
- * setLogLevel is optional, called anytime after initializeLogging - * and before terminateLogging, to update the log level set. - * Log levels can be updated multiple times by calling setLogLevel - * A call to setLogLevel() is ignored if it is made before - * initializeLogging() or after terminateLogging() - * - * @param[in] level Log level (LOG_INFO, LOG_WARN, etc.). - * - * @return Boolean: non-zero if successful, 0 otherwise. - */ -SNPE_API -int Snpe_Util_SetLogLevel(Snpe_LogLevel_t level); - -/** - * Terminates logging. - * - * It is recommended to terminateLogging after initializeLogging - * in order to disable logging information. - * If this is called before initialization or after first time termination, - * calls are ignored. - * - * @warning Snpe_Util_TerminateLogging() must not be called while another thread is executing. - * In a multi-threaded use case, the individual threads must have a cooperative life cycle - * management strategy for the logger. - * - * @return Boolean: non-zero if successful, 0 otherwise. - */ -SNPE_API -int Snpe_Util_TerminateLogging(); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_UTIL_H_ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/UserBufferList.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/UserBufferList.h deleted file mode 100644 index e6a42ddb..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/UserBufferList.h +++ /dev/null @@ -1,77 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022,2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
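// --- Editor's note: illustrative sketch, not part of the deleted headers -----
// The logging life cycle described in SNPEUtil.h above, in order:
// initializeLogging -> other SNPE calls (setLogLevel optional) -> terminateLogging.
// The concrete Snpe_LogLevel_t and Snpe_Runtime_t values are passed in by the
// caller because their enum constants live in DlEnums.h, not in the headers
// shown in this diff.
#include "SNPE/SNPEUtil.h"

void runWithLogging(Snpe_LogLevel_t level, Snpe_Runtime_t runtime)
{
    if (!Snpe_Util_InitializeLogging(level))
        return;                                  // logging could not be set up

    // Any other SNPE API may be used between initialize and terminate.
    (void)Snpe_Util_IsRuntimeAvailable(runtime);

    Snpe_Util_SetLogLevel(level);                // optional; may be called repeatedly

    // ... create SNPE instances, run inference, destroy them ...

    Snpe_Util_TerminateLogging();                // must not race with other SNPE threads
}
// ------------------------------------------------------------------------------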
-// -//============================================================================== - -#ifndef _SNPE_USER_BUFFER_LIST_H_ -#define _SNPE_USER_BUFFER_LIST_H_ - - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" - -#include "DlSystem/UserBufferMap.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef void* Snpe_UserBufferList_Handle_t; - -SNPE_API -Snpe_UserBufferList_Handle_t Snpe_UserBufferList_Create(); - -SNPE_API -Snpe_UserBufferList_Handle_t Snpe_UserBufferList_CreateCopy(Snpe_UserBufferList_Handle_t userBufferListHandle); - -SNPE_API -Snpe_UserBufferList_Handle_t Snpe_UserBufferList_CreateSize(size_t size); - -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferList_Delete(Snpe_UserBufferList_Handle_t userBufferListHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferList_PushBack(Snpe_UserBufferList_Handle_t userBufferListHandle, - Snpe_UserBufferMap_Handle_t userBufferMapHandle); - -SNPE_API -Snpe_UserBufferMap_Handle_t Snpe_UserBufferList_At_Ref(Snpe_UserBufferList_Handle_t userBufferListHandle, - size_t idx); - -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferList_Assign(Snpe_UserBufferList_Handle_t srcUserBufferListHandle, - Snpe_UserBufferList_Handle_t dstUserBufferListHandle); - -SNPE_API -size_t Snpe_UserBufferList_Size(Snpe_UserBufferList_Handle_t userBufferListHandle); - -SNPE_API -size_t Snpe_UserBufferList_Capacity(Snpe_UserBufferList_Handle_t userBufferListHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferList_Clear(Snpe_UserBufferList_Handle_t userBufferListHandle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_USER_BUFFER_LIST_H_ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/UserBufferList.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/UserBufferList.hpp deleted file mode 100644 index fec82dbc..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SNPE/UserBufferList.hpp +++ /dev/null @@ -1,76 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
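// --- Editor's note: illustrative sketch, not part of the deleted headers -----
// Minimal use of the UserBufferList C API declared above. The per-inference
// Snpe_UserBufferMap_Handle_t values are assumed to be created elsewhere via
// UserBufferMap.h, which is not part of the headers shown in this diff.
#include <cstddef>
#include "SNPE/UserBufferList.h"

size_t collectInputMaps(Snpe_UserBufferMap_Handle_t* maps, size_t count)
{
    Snpe_UserBufferList_Handle_t list = Snpe_UserBufferList_Create();

    for (size_t i = 0; i < count; ++i)
        Snpe_UserBufferList_PushBack(list, maps[i]);  // each entry is one UserBufferMap

    size_t stored = Snpe_UserBufferList_Size(list);

    // Entries can be revisited by reference before the list is released, e.g.:
    // Snpe_UserBufferMap_Handle_t first = Snpe_UserBufferList_At_Ref(list, 0);

    Snpe_UserBufferList_Delete(list);
    return stored;
}
// ------------------------------------------------------------------------------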
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "DlSystem/UserBufferMap.hpp" - -#include "SNPE/UserBufferList.h" - - -namespace PSNPE { - -class UserBufferList : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserBufferList_Delete}; - -public: - UserBufferList() - : BaseType(Snpe_UserBufferList_Create()) - { } - explicit UserBufferList(size_t size) - : BaseType(Snpe_UserBufferList_CreateSize(size)) - { } - - UserBufferList(const UserBufferList& other) - : BaseType(Snpe_UserBufferList_CreateCopy(other.handle())) - { } - UserBufferList(UserBufferList&& other) noexcept - : BaseType(std::move(other)) - { } - - UserBufferList& operator=(const UserBufferList& other){ - if(this != &other){ - Snpe_UserBufferList_Assign(other.handle(), handle()); - } - return *this; - } - UserBufferList& operator=(UserBufferList&& other){ - return moveAssign(std::move(other)); - } - - - void push_back(const DlSystem::UserBufferMap& userBufferMap){ - Snpe_UserBufferList_PushBack(handle(), getHandle(userBufferMap)); - } - - DlSystem::UserBufferMap& operator[](size_t idx){ - return *makeReference(Snpe_UserBufferList_At_Ref(handle(), idx)); - } - - size_t size() const noexcept{ - return Snpe_UserBufferList_Size(handle()); - } - - size_t capacity() const noexcept{ - return Snpe_UserBufferList_Capacity(handle()); - } - - void clear() noexcept{ - Snpe_UserBufferList_Clear(handle()); - } -}; - - -} // ns PSNPE - -ALIAS_IN_ZDL_NAMESPACE(PSNPE, UserBufferList) diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SnpeUdo/UdoBase.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SnpeUdo/UdoBase.h deleted file mode 100644 index f7af604a..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SnpeUdo/UdoBase.h +++ /dev/null @@ -1,546 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2019-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef SNPE_UDO_BASE_H -#define SNPE_UDO_BASE_H - -#include - -// Provide values to use for API version. -#define API_VERSION_MAJOR 1 -#define API_VERSION_MINOR 6 -#define API_VERSION_TEENY 0 - -/** @addtogroup c_plus_plus_apis C++ -@{ */ - -// Defines a bitmask of enum values. -typedef uint32_t SnpeUdo_Bitmask_t; -typedef SnpeUdo_Bitmask_t Udo_Bitmask_t; - -// A string of characters, rather than an array of bytes. -// Assumed to be UTF-8. -typedef char* SnpeUdo_String_t; -typedef SnpeUdo_String_t Udo_String_t; - -// The maximum allowable length of a SnpeUdo_String_t in bytes, -// including null terminator. SNPE will truncate strings longer -// than this. 
-#define SNPE_UDO_MAX_STRING_SIZE 1024 - -/** - * An enum which holds the various error types. - * The error types are divided to classes : - * 0 - 99 : generic errors - * 100 - 200 : errors related to configuration - * - */ -typedef enum -{ - /// No Error - SNPE_UDO_NO_ERROR = 0, UDO_NO_ERROR = 0, - /// Unsupported value for core type - SNPE_UDO_WRONG_CORE = 1, UDO_WRONG_CORE = 1, - /// Invalid attribute/argument passed into UDO API - SNPE_UDO_INVALID_ARGUMENT = 2, UDO_INVALID_ARGUMENT = 2, - /// Unsupported feature error - SNPE_UDO_UNSUPPORTED_FEATURE = 3, UDO_UNSUPPORTED_FEATURE = 3, - /// Error relating to memory allocation - SNPE_UDO_MEM_ALLOC_ERROR = 4, UDO_MEM_ALLOC_ERROR = 4, - /* Configuration Specific errors */ - /// No op with given attributes available in library - SNPE_UDO_WRONG_OPERATION = 100, UDO_WRONG_OPERATION = 100, - /// Unsupported value for core type in UDO configuration - SNPE_UDO_WRONG_CORE_TYPE = 101, UDO_WRONG_CORE_TYPE = 101, - /// Wrong number of params in UDO definition - SNPE_UDO_WRONG_NUM_OF_PARAMS = 102, UDO_WRONG_NUM_OF_PARAMS = 102, - /// Wrong number of dimensions for tensor(s) in UDO definition - SNPE_UDO_WRONG_NUM_OF_DIMENSIONS = 103, UDO_WRONG_NUM_OF_DIMENSIONS = 103, - /// Wrong number of input tensors in UDO definition - SNPE_UDO_WRONG_NUM_OF_INPUTS = 104, UDO_WRONG_NUM_OF_INPUTS = 104, - /// Wrong number of output tensors in UDO definition - SNPE_UDO_WRONG_NUM_OF_OUTPUTS = 105, UDO_WRONG_NUM_OF_OUTPUTS = 105, - SNPE_UDO_PROGRAM_CACHE_NOT_FOUND = 106, UDO_PROGRAM_CACHE_NOT_FOUND = 106, - SNPE_UDO_UNKNOWN_ERROR = 0xFFFFFFFF, UDO_UNKNOWN_ERROR = 0xFFFFFFFF -} SnpeUdo_ErrorType_t; - -typedef SnpeUdo_ErrorType_t Udo_ErrorType_t; - -/** - * An enum which holds the various data types. - * Designed to be used as single values or combined into a bitfield parameter - * (0x1, 0x2, 0x4, etc) - * \n FIXED_XX types are targeted for data in tensors. - * \n UINT / INT types are targeted for scalar params - */ -typedef enum -{ - /// data type: 16-bit floating point - SNPE_UDO_DATATYPE_FLOAT_16 = 0x01, UDO_DATATYPE_FLOAT_16 = 0x01, - /// data type: 32-bit floating point - SNPE_UDO_DATATYPE_FLOAT_32 = 0x02, UDO_DATATYPE_FLOAT_32 = 0x02, - /// data type: 4-bit fixed point - SNPE_UDO_DATATYPE_FIXED_4 = 0x04, UDO_DATATYPE_FIXED_4 = 0x04, - /// data type: 8-bit fixed point - SNPE_UDO_DATATYPE_FIXED_8 = 0x08, UDO_DATATYPE_FIXED_8 = 0x08, - /// data type: 16-bit fixed point - SNPE_UDO_DATATYPE_FIXED_16 = 0x10, UDO_DATATYPE_FIXED_16 = 0x10, - /// data type: 32-bit fixed point - SNPE_UDO_DATATYPE_FIXED_32 = 0x20, UDO_DATATYPE_FIXED_32 = 0x20, - /// data type: 8-bit unsigned integer - SNPE_UDO_DATATYPE_UINT_8 = 0x100, UDO_DATATYPE_UINT_8 = 0x100, - /// data type: 16-bit unsigned integer - SNPE_UDO_DATATYPE_UINT_16 = 0x200, UDO_DATATYPE_UINT_16 = 0x200, - /// data type: 32-bit unsigned integer - SNPE_UDO_DATATYPE_UINT_32 = 0x400, UDO_DATATYPE_UINT_32 = 0x400, - /// data type: 8-bit signed integer - SNPE_UDO_DATATYPE_INT_8 = 0x1000, UDO_DATATYPE_INT_8 = 0x1000, - /// data type: 16-bit signed integer - SNPE_UDO_DATATYPE_INT_16 = 0x2000, UDO_DATATYPE_INT_16 = 0x2000, - /// data type: 32-bit signed integer - SNPE_UDO_DATATYPE_INT_32 = 0x4000, UDO_DATATYPE_INT_32 = 0x4000, - SNPE_UDO_DATATYPE_LAST = 0xFFFFFFFF, UDO_DATATYPE_LAST = 0xFFFFFFFF -} SnpeUdo_DataType_t; - -typedef SnpeUdo_DataType_t Udo_DataType_t; - -/** - * An enum which holds the various layouts. 
- * Designed to be used as single values or combined into a bitfield parameter - * (0x1, 0x2, 0x4, etc) - */ -typedef enum -{ - /// data layout (4D): NHWC (batch-height-width-channel) - SNPE_UDO_LAYOUT_NHWC = 0x01, UDO_LAYOUT_NHWC = 0x01, - /// data layout (4D): NCHW (batch-channel-height-width) - SNPE_UDO_LAYOUT_NCHW = 0x02, UDO_LAYOUT_NCHW = 0x02, - /// data layout (5D): NDHWC (batch-depth-height-width-channel) - SNPE_UDO_LAYOUT_NDHWC = 0x04, UDO_LAYOUT_NDHWC = 0x04, - SNPE_UDO_LAYOUT_GPU_OPTIMAL1 = 0x08, UDO_LAYOUT_GPU_OPTIMAL1 = 0x08, - SNPE_UDO_LAYOUT_GPU_OPTIMAL2 = 0x10, UDO_LAYOUT_GPU_OPTIMAL2 = 0x10, - SNPE_UDO_LAYOUT_DSP_OPTIMAL1 = 0x11, UDO_LAYOUT_DSP_OPTIMAL1 = 0x11, - SNPE_UDO_LAYOUT_DSP_OPTIMAL2 = 0x12, UDO_LAYOUT_DSP_OPTIMAL2 = 0x12, - // Indicates no data will be allocated for this tensor. - // Used to specify optional inputs/outputs positionally. - SNPE_UDO_LAYOUT_NULL = 0x13, UDO_LAYOUT_NULL = 0x13, - SNPE_UDO_LAYOUT_LAST = 0xFFFFFFFF, UDO_LAYOUT_LAST = 0xFFFFFFFF -} SnpeUdo_TensorLayout_t; - -typedef SnpeUdo_TensorLayout_t Udo_TensorLayout_t; - -/** - * An enum which holds the UDO library Core type . - * Designed to be used as single values or combined into a bitfield parameter - * (0x1, 0x2, 0x4, etc) - */ -typedef enum -{ - /// Library target IP Core is undefined - SNPE_UDO_CORETYPE_UNDEFINED = 0x00, UDO_CORETYPE_UNDEFINED = 0x00, - /// Library target IP Core is CPU - SNPE_UDO_CORETYPE_CPU = 0x01, UDO_CORETYPE_CPU = 0x01, - /// Library target IP Core is GPU - SNPE_UDO_CORETYPE_GPU = 0x02, UDO_CORETYPE_GPU = 0x02, - /// Library target IP Core is DSP - SNPE_UDO_CORETYPE_DSP = 0x04, UDO_CORETYPE_DSP = 0x04, - SNPE_UDO_CORETYPE_LAST = 0xFFFFFFFF, UDO_CORETYPE_LAST = 0xFFFFFFFF -} SnpeUdo_CoreType_t; - -typedef SnpeUdo_CoreType_t Udo_CoreType_t; - -/** - * An enum to specify the parameter type : Scalar or Tensor - */ -typedef enum -{ - /// UDO static param type: scalar - SNPE_UDO_PARAMTYPE_SCALAR = 0x00, UDO_PARAMTYPE_SCALAR = 0x00, - /// UDO static param type: string - SNPE_UDO_PARAMTYPE_STRING = 0x01, UDO_PARAMTYPE_STRING = 0x01, - /// UDO static param type: tensor - SNPE_UDO_PARAMTYPE_TENSOR = 0x02, UDO_PARAMTYPE_TENSOR = 0x02, - SNPE_UDO_PARAMTYPE_LAST = 0xFFFFFFFF, UDO_PARAMTYPE_LAST = 0xFFFFFFFF -} SnpeUdo_ParamType_t; - -typedef SnpeUdo_ParamType_t Udo_ParamType_t; - -/** - * An enum to specify quantization type - */ -typedef enum -{ - /// Tensor Quantization type: NONE. Signifies unquantized tensor data - SNPE_UDO_QUANTIZATION_NONE = 0x00, UDO_QUANTIZATION_NONE = 0x00, - /// Tensor Quantization type: Tensorflow-style - SNPE_UDO_QUANTIZATION_TF = 0x01, UDO_QUANTIZATION_TF = 0x01, - SNPE_UDO_QUANTIZATION_QMN = 0x02, UDO_QUANTIZATION_QMN = 0x02, - SNPE_UDO_QUANTIZATION_LAST = 0xFFFFFFFF, UDO_QUANTIZATION_LAST = 0xFFFFFFFF -} SnpeUdo_QuantizationType_t; - -typedef SnpeUdo_QuantizationType_t Udo_QuantizationType_t; - -/** - * @brief A struct which is used to provide a version number using 3 values : major, minor, teeny - * - */ -typedef struct -{ - /// version field: major - for backward-incompatible changes - uint32_t major; - /// version field: minor - for backward-compatible feature updates - uint32_t minor; - /// version field: teeny - for minor bug-fixes and clean-up - uint32_t teeny; -} SnpeUdo_Version_t; - -typedef SnpeUdo_Version_t Udo_Version_t; - -/** - * @brief A struct returned from version query, contains the Library version and API version - * - */ -typedef struct -{ - /// Version of UDO library. 
Controlled by users - SnpeUdo_Version_t libVersion; - /// Version of SNPE UDO API used in compiling library. Determined by SNPE - SnpeUdo_Version_t apiVersion; -} SnpeUdo_LibVersion_t; - -/** - * @brief A struct returned from version query, contains the package version - * - */ -typedef struct -{ - /// Version of UDO API used in package. - Udo_Version_t apiVersion; -} Udo_PkgVersion_t; - -/** - * @brief A union to hold the value of a generic type. Allows defining a parameter struct - * in a generic way, with a "value" location that holds the data regardless of the type. - * - */ -typedef union -{ - /// value type: float - float floatValue; - /// value type: unsigned 32-bit integer - uint32_t uint32Value; - /// value type: signed 32-bit integer - int32_t int32Value; - /// value type: unsigned 16-bit integer - uint16_t uint16Value; - /// value type: signed 16-bit integer - int16_t int16Value; - /// value type: unsigned 8-bit integer - uint8_t uint8Value; - /// value type: signed 8-bit integer - int8_t int8Value; -} SnpeUdo_Value_t; - -typedef SnpeUdo_Value_t Udo_Value_t; - -/** - * @brief A struct which defines a scalar parameter : name, data type, and union of values - * - */ -typedef struct -{ - /// The parameter data type : float, int, etc. - SnpeUdo_DataType_t dataType; - /// a union of specified type which holds the data - SnpeUdo_Value_t dataValue; -} SnpeUdo_ScalarParam_t; - -typedef SnpeUdo_ScalarParam_t Udo_ScalarParam_t; - -/** - * @brief A struct which defines the quantization parameters in case of Tensorflow style quantization - * - */ -typedef struct -{ - /// minimum value of the quantization range of data - float minValue; - /// maximum value of the quantization range of data - float maxValue; -} SnpeUdo_TFQuantize_t; - -typedef SnpeUdo_TFQuantize_t Udo_TFQuantize_t; - -/** - * @brief A struct which defines the quantization type, and union of supported quantization structs - * - */ -typedef struct -{ - /// quantization type (only TF-style currently supported) - SnpeUdo_QuantizationType_t quantizeType; - union - { - /// TF-style min-max quantization ranges - SnpeUdo_TFQuantize_t TFParams; - }; -} SnpeUdo_QuantizeParams_t; - -typedef SnpeUdo_QuantizeParams_t Udo_QuantizeParams_t; - -/** - * @brief A struct which defines the datatype associated with a specified core-type - * This should be used to denote the datatypes for a single tensor info, depending - * on the intended execution core. - * - */ -typedef struct -{ - /// The IP Core - SnpeUdo_CoreType_t coreType; - /// The associated datatype for this coreType - SnpeUdo_DataType_t dataType; -} SnpeUdo_PerCoreDatatype_t; - -typedef SnpeUdo_PerCoreDatatype_t Udo_PerCoreDatatype_t; - -/** - * @brief A struct which defines a tensor parameter : name, data type, layout, quantization, more. - * Also holds a pointer to the tensor data. - * - */ -typedef struct -{ - /// The maximum allowable dimensions of the tensor. The memory held in - /// _tensorData_ is guaranteed to be large enough for this. - uint32_t* maxDimensions; - /// The current dimensions of the tensor. An operation may modify the current - /// dimensions of its output, to indicate cases where the output has been - /// "resized". - /// Note that for static parameters, the current and max dimensions must - /// match. - uint32_t* currDimensions; - /// Quantization params applicable to the tensor. Currently only supports - /// Tensorflow quantization style. - SnpeUdo_QuantizeParams_t quantizeParams; - /// Number of dimensions to the tensor: 3D, 4D, etc. 
- uint32_t tensorRank; - /// The parameter data type: float, int, etc. - SnpeUdo_DataType_t dataType; - /// The tensor layout type: NCHW, NHWC, etc. - SnpeUdo_TensorLayout_t layout; - /// Opaque pointer to tensor data. User may be required to re-interpret the pointer - /// based on core-specific definitions. - void* tensorData; -} SnpeUdo_TensorParam_t; - -typedef SnpeUdo_TensorParam_t Udo_TensorParam_t; - -/** - * @brief A struct which defines tensor information for activation tensors only - * - * It describes an activation tensor object using its name, the intended layout and the datatype - * it will take depending on the intended runtime core. The repeated field indicates that - * that the tensor info describes several input/output activation tensors, which all share the - * aforementioned properties. - */ -typedef struct -{ - /// The tensor name - SnpeUdo_String_t tensorName; - /// The tensor layout type: NCHW, NHWC, etc. - SnpeUdo_TensorLayout_t layout; - /// The per core datatype: {SNPE_UDO_DATATYPE, SNPE_UDO_CORE_TYPE} - SnpeUdo_PerCoreDatatype_t* perCoreDatatype; - /// A boolean field indicating that this tensorinfo will be repeated e.x for ops such as Concat or Split - bool repeated; - /// A boolean field indicating whether input is static or not. - bool isStatic; -} SnpeUdo_TensorInfo_t; - -typedef SnpeUdo_TensorInfo_t Udo_TensorInfo_t; - -/** - * @brief struct which defines a UDO parameter - a union of scalar, tensor and string parameters - * - */ -typedef struct -{ - /// Type is scalar or tensor - SnpeUdo_ParamType_t paramType; - /// The param name, for example : "offset", "activation_type" - SnpeUdo_String_t paramName; - union - { - /// scalar param value - SnpeUdo_ScalarParam_t scalarParam; - /// tensor param value - SnpeUdo_TensorParam_t tensorParam; - /// string param value - SnpeUdo_String_t stringParam; - }; -} SnpeUdo_Param_t; - -typedef SnpeUdo_Param_t Udo_Param_t; - -/** - * @brief A struct which defines Operation information which is specific for IP core (CPU, GPU, DSP ...) - * - */ -typedef struct -{ - /// The IP Core - SnpeUdo_CoreType_t udoCoreType; - /// Bitmask, defines supported internal calculation types (like FLOAT_32, etc) - /// Based on SnpeUdo_DataType - SnpeUdo_Bitmask_t operationCalculationTypes; -} SnpeUdo_OpCoreInfo_t; - -typedef SnpeUdo_OpCoreInfo_t Udo_OpCoreInfo_t; - -/** - * @brief A struct which defines the common and core-specific Operation information - * - */ -typedef struct -{ - /// Operation type - SnpeUdo_String_t operationType; - /// A bitmask describing which IP Cores (CPU, GPU, DSP ...) support this operation - /// Translated based on SnpeUdo_CoreType - SnpeUdo_Bitmask_t supportedByCores; - /// Number of static parameters defined by the op - uint32_t numOfStaticParams; - /// Array of static parameters. 
Can be scalar or tensor params - SnpeUdo_Param_t* staticParams; - /// Number of input tensors this op receives - uint32_t numOfInputs; - /// Array of input tensor names to this operation - SnpeUdo_String_t* inputNames; - /// Number of output tensors this op receives - uint32_t numOfOutputs; - /// Array of output tensor names to this operation - SnpeUdo_String_t* outputNames; - /// Number of cores that the op can execute on - uint32_t numOfCoreInfo; - /// Array of per-core information entries - SnpeUdo_OpCoreInfo_t* opPerCoreInfo; - /// Array of input tensor infos for this operation - SnpeUdo_TensorInfo_t* inputInfos; - /// Array of output tensor infos for this operation - SnpeUdo_TensorInfo_t* outputInfos; -} SnpeUdo_OperationInfo_t; - -typedef SnpeUdo_OperationInfo_t Udo_OperationInfo_t; - -/** - * @brief A struct which provides the implementation library info : type, name - * - */ -typedef struct -{ - /// Defines the IP Core that this implementation library is targeting - SnpeUdo_CoreType_t udoCoreType; - /// library name. will be looked at in the standard library path - SnpeUdo_String_t libraryName; -} SnpeUdo_LibraryInfo_t; - -typedef SnpeUdo_LibraryInfo_t Udo_LibraryInfo_t; - -/** - * @brief A struct returned by the registration library and contains information on the UDO package : - * name, operations, libraries, etc. - * - */ -typedef struct -{ - /// A string containing the package name - SnpeUdo_String_t packageName; - /// A bitmask describing supported IP cores (CPU, GPU, DSP ...) - /// Translated based on SnpeUdo_CoreType - SnpeUdo_Bitmask_t supportedCoreTypes; - /// The number of implementation libraries in the package - uint32_t numOfImplementationLib; - /// Array of implementation libraries names/types - SnpeUdo_LibraryInfo_t* implementationLib; - /// A string containing all operation types separated by space - SnpeUdo_String_t operationsString; - /// Number of supported operations - uint32_t numOfOperations; - /// Array of Operation info structs. Each entry describes one - /// Operation (name, params, inputs, outputs) - SnpeUdo_OperationInfo_t* operationsInfo; -} SnpeUdo_RegInfo_t; - -typedef SnpeUdo_RegInfo_t Udo_RegInfo_t; - -/** -* @brief A struct returned by the implementation library and contains information on the -* specific library: name, IP Core, operations, etc. -* -*/ -typedef struct -{ - /// Defines the IP Core that this implementation library is targeting - SnpeUdo_CoreType_t udoCoreType; - /// A string containing the package name - SnpeUdo_String_t packageName; - /// A string containing all operation types separated by space - SnpeUdo_String_t operationsString; - /// Number of supported operations - uint32_t numOfOperations; -} SnpeUdo_ImpInfo_t; - -typedef SnpeUdo_ImpInfo_t Udo_ImpInfo_t; - -/** - * @brief This struct defines an operation. It is used for validation - * or creation of an operation. - * In case of using it for creation, the static params which are tensors - * contain pointers to the real data (weights, for example), and input/output - * tensors also include pointers to the buffers used. - */ -typedef struct -{ - /// The IP Core that the operation is defined for - CPU, GPU, DSP... - SnpeUdo_CoreType_t udoCoreType; - /// Operation type - SnpeUdo_String_t operationType; - /// The number of static parameters provided in the staticParams array. 
- /// this number has to match the number provided by the UDO Registration library information - uint32_t numOfStaticParams; - /// Array of static parameters - SnpeUdo_Param_t* staticParams; - /// The number of input parameters provided in inputs array. - /// this number has to match the number provided by the UDO Registration library information - uint32_t numOfInputs; - /// Array of input tensors, providing layout, data type, sizes, etc - /// When used to create an operation, also contains the initial location of the data - SnpeUdo_TensorParam_t* inputs; - /// The number of output parameters provided in inputs array. - /// this number has to match the number provided by the UDO Registration library information - uint32_t numOfOutputs; - /// Array of output tensors, providing layout, data type, sizes, etc - /// When used to create an operation, also contains the initial location of the data - SnpeUdo_TensorParam_t* outputs; -} SnpeUdo_OpDefinition_t; - -typedef SnpeUdo_OpDefinition_t Udo_OpDefinition_t; - -/** @} */ /* end_addtogroup c_plus_plus_apis C++ */ - -#endif //SNPE_UDO_BASE_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SnpeUdo/UdoReg.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SnpeUdo/UdoReg.h deleted file mode 100644 index 2166be59..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SnpeUdo/UdoReg.h +++ /dev/null @@ -1,117 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2019-2020 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef SNPE_UDO_REG_H -#define SNPE_UDO_REG_H - -#include "SnpeUdo/UdoShared.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** @addtogroup c_plus_plus_apis C++ -@{ */ - -/** - * @brief Initialize the shared library's data structures. Calling any other - * library function before this one will result in an error being returned. - * - * @return Error code - */ -SnpeUdo_ErrorType_t -SnpeUdo_initRegLibrary(void); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_InitRegLibraryFunction_t)(void); - -/** - * @brief A function to query the API version of the UDO registration library. - * The function populates a SnpeUdo_LibVersion_t struct, which contains a SnpeUdo_Version_t - * struct for API version and library version. - * - * @param[in, out] version A pointer to struct which contains major, minor, teeny information for - * library and api versions. - * - * @return Error code - */ -SnpeUdo_ErrorType_t -SnpeUdo_getRegLibraryVersion(SnpeUdo_LibVersion_t** version); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_getRegLibraryVersion_t)(SnpeUdo_LibVersion_t** version); - -/** - * @brief Release the shared library's data structures, and invalidate any - * handles returned by the library. The behavior of any outstanding - * asynchronous calls made to this library when this function is called - * are undefined. 
All library functions (except SnpeUdo_InitRegLibrary) will - * return an error after this function has been successfully called. - * - * It should be possible to call SnpeUdo_InitRegLibrary after calling this - * function, and re-initialize the library. - * - * @return Error code - */ -SnpeUdo_ErrorType_t -SnpeUdo_terminateRegLibrary(void); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_TerminateRegLibraryFunction_t)(void); - - -/** - * @brief A function to query the info on the UDO set. - * The function populates a structure which contains information about - * the package and operations contained in it. - * - * @param[in, out] registrationInfo A struct which contains information on the set of UDOs - * - * @return Error code - * - */ -SnpeUdo_ErrorType_t -SnpeUdo_getRegInfo(SnpeUdo_RegInfo_t** registrationInfo); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_GetRegInfoFunction_t)(SnpeUdo_RegInfo_t** registrationInfo); - -/** - * @brief A function to validate that a set of params is supported by an operation - * The function receives an operation definition struct, and returns if this configuration is - * supported (e.g. if an operation can be created using this configuration) - * - * @param[in] opDefinition A struct of SnpeUdo_OpDefinition type, containing the information needed to - * validate that an operation can be created with this configuration. - * - * @return Error code, indicating is the operation can be created on this set or not. - * - */ -SnpeUdo_ErrorType_t -SnpeUdo_validateOperation(SnpeUdo_OpDefinition_t* opDefinition); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_ValidateOperationFunction_t)(SnpeUdo_OpDefinition_t* opDefinition); - -/** @} */ /* end_addtogroup c_plus_plus_apis C++ */ - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif //SNPE_UDO_REG_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SnpeUdo/UdoShared.h b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SnpeUdo/UdoShared.h deleted file mode 100644 index 816a8a74..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/SnpeUdo/UdoShared.h +++ /dev/null @@ -1,57 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2019-2021 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef SNPE_UDO_SHARED_H -#define SNPE_UDO_SHARED_H - -#include "SnpeUdo/UdoBase.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** @addtogroup c_plus_plus_apis C++ -@{ */ - -/** - * @brief A function to return the various versions as they relate to the UDO - * The function returns a struct containing the the following: - * libVersion: the version of the implementation library compiled for the UDO. Set by user - * apiVersion: the version of the UDO API used in compiling the implementation library. 
- * Set by SNPE - * - * @param[in, out] version A pointer to Version struct of type SnpeUdo_LibVersion_t - * - * @return Error code - * - */ -SnpeUdo_ErrorType_t -SnpeUdo_getVersion (SnpeUdo_LibVersion_t** version); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_GetVersionFunction_t) (SnpeUdo_LibVersion_t** version); - -typedef SnpeUdo_GetVersionFunction_t Udo_GetVersionFunction_t; - -#ifdef __cplusplus -} // extern "C" -#endif - -/** @} */ /* end_addtogroup c_plus_plus_apis C++ */ - -#endif // SNPE_UDO_SHARED_H diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/Wrapper.hpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/Wrapper.hpp deleted file mode 100644 index 5f908f15..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inc/zdl/Wrapper.hpp +++ /dev/null @@ -1,449 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#define SNPE_WRAPPER_TYPES - -#include -#include -#include -#include - -#include - -#include - - -#include "DlSystem/DlError.h" - -// Put type aliases in zdl::namespace -#define ALIAS_IN_ZDL_NAMESPACE(ns, type) namespace zdl{ namespace ns { using type = ::ns::type; }} - - -// Uncomment to print info from the Wrapper base class -//#define WRAPPER_DEBUG_PRINTS - - -#ifdef WRAPPER_DEBUG_PRINTS - -#ifdef _MSC_VER -#define WRAPPER_FUNCTION_NAME __FUNCTION__ -#define WRAPPER_TRACE() std::cout << __LINE__ << ":\t" << WRAPPER_FUNCTION_NAME << std::endl -#define WRAPPER_ETRACE() std::cout << __LINE__ << ":\t" << WRAPPER_FUNCTION_NAME << std::endl -#else -#define WRAPPER_FUNCTION_NAME __PRETTY_FUNCTION__ -#define WRAPPER_TRACE() std::cout << "\e[33m" << __LINE__ << ":\t" << WRAPPER_FUNCTION_NAME << "\e[0m" << std::endl -#define WRAPPER_ETRACE() std::cout << "\e[31m" << __LINE__ << ":\t" << WRAPPER_FUNCTION_NAME << "\e[0m" << std::endl -#endif - -#include -#else -#define WRAPPER_TRACE() do{}while(0) -#define WRAPPER_ETRACE() do{}while(0) -#endif - - -namespace WrapperDetail { - - -template -using GetterFuncType = MemberType(*)(HandleType); - -template -using SetterFuncType = Snpe_ErrorCode_t(*)(HandleType, MemberType); - - - -// Allow Wrappers to have members that require CAPI calls for access -template GetterFunc, - SetterFuncType SetterFunc -> -class GenericMemberReference{ - OwnerType& owner; -public: - - - ~GenericMemberReference() = default; - GenericMemberReference() = delete; - - GenericMemberReference(const GenericMemberReference&) = delete; - GenericMemberReference(GenericMemberReference&&) noexcept = default; - - GenericMemberReference(OwnerType& owner) - : owner{owner} - { } - explicit GenericMemberReference(OwnerType& owner, MemberType member) - : owner{owner} - { - operator=(member); - } - GenericMemberReference& operator=(MemberType member){ - SetterFunc(owner.handle(), member); - return *this; - } - - operator MemberType() const{ - return GetterFunc(owner.handle()); - } - - GenericMemberReference& - operator=(const GenericMemberReference& other){ - return operator=(other.operator MemberType()); - } - - MemberType operator()() const{ - return operator MemberType(); - } - -}; - -// Allow Wrappers to have members that require CAPI calls for access -template GetterFunc -> -class GenericConstMemberReference{ - - 
OwnerType& owner; - -public: - ~GenericConstMemberReference() = default; - GenericConstMemberReference() = delete; - - GenericConstMemberReference(const GenericConstMemberReference&) = delete; - GenericConstMemberReference(GenericConstMemberReference&&) noexcept = default; - - GenericConstMemberReference(OwnerType& owner) - : owner{owner} - { } - - operator MemberType() const{ - return GetterFunc(owner.handle()); - } - - - template::value,int>::Type=0> - operator const char*() const{ - thread_local std::string tlss; - tlss = operator MemberType(); - return tlss.c_str(); - } - - MemberType operator()() const{ - return operator MemberType(); - } - -}; - - - -// Allows returning references to literals through the CAPI's _Get and _Set functions -template -using GetterIndexedFuncType = MemberType(*)(HandleType, IndexType); - -template -using SetterIndexedFuncType = Snpe_ErrorCode_t(*)(HandleType, IndexType, MemberType); - -template GetterFunc, - SetterIndexedFuncType SetterFunc -> -class MemberIndexedReference{ - OwnerType& owner; - IndexType idx; - -public: - MemberIndexedReference(OwnerType& owner, IndexType idx) - : owner{owner}, - idx{idx} - { } - MemberIndexedReference(const MemberIndexedReference&) noexcept = default; - MemberIndexedReference(MemberIndexedReference&&) noexcept = default; - - MemberIndexedReference& operator=(const MemberIndexedReference&) noexcept = default; - MemberIndexedReference& operator=(MemberIndexedReference&&) noexcept = default; - - MemberIndexedReference operator=(MemberType member){ - SetterFunc(owner.handle(), idx, member); - return *this; - } - - operator MemberType() const{ - return GetterFunc(owner.handle(), idx); - } - -}; - - - -// Allow moving ownership of handles -template -struct HandleMover { - Handle handle; - bool isReference; -}; - -template -HandleMover moveHandle(Handle handle, bool isReference = false){ - return {handle, isReference}; -} - -// Virtual base class to allow for WrapperStorage to hold pointers to any Wrapper type -class WrapperBase{ -public: - virtual ~WrapperBase() = default; -}; - -// Storage type for Wrappers. 
Will have a set if the CAPI type is capable of creating reference handles -template -struct WrapperStorage{ - Handle handle; - bool isReference; - constexpr WrapperStorage(Handle handle = {}, bool isReference = false) noexcept - : handle{handle}, - isReference{isReference} - { } -}; - -template -struct WrapperStorage{ - Handle handle; - bool isReference; - mutable std::set> referencedObjects; - WrapperStorage(Handle handle = {}, bool isReference = false) noexcept - : handle{handle}, - isReference{isReference} - { } -}; - -// Allow a handle to be unbound from a Wrapper -struct HandleReleaser{ - template - static typename WrapperType::HandleType release(WrapperType& wrapper){ - auto toret = wrapper.m_Storage.handle; - wrapper.m_Storage.handle = {}; - return toret; - } -}; - -} // ns WrapperDetail - - - -// The base class for all Wrappers around the CAPI -// NOTE: This Wrapper class leverages the Curiously Recurring Template Pattern (CRTP) -template -class Wrapper : public WrapperDetail::WrapperBase{ - friend struct WrapperDetail::HandleReleaser; - // Allow certain types to access getHandle() and handle() - template - friend class Wrapper; - - template, - WrapperDetail::SetterIndexedFuncType> - friend class WrapperDetail::MemberIndexedReference; - - template> - friend class WrapperDetail::GenericConstMemberReference; - - template, WrapperDetail::SetterFuncType> - friend class WrapperDetail::GenericMemberReference; - - - -protected: - using HandleType = Handle; - using BaseType = Wrapper; - using DeleteFunctionType = Snpe_ErrorCode_t(*)(Handle); - - using StorageType = WrapperDetail::WrapperStorage; - - - template Getter> - static WrapperValueType CastingGetter(HandleType handle){ - return static_cast(Getter(handle)); - } - template Setter> - static Snpe_ErrorCode_t CastingSetter(HandleType handle, WrapperValueType value){ - return Setter(handle,static_cast(value)); - } - - - template - struct WrapperMemberReference{ - Derived& owner; - - WrapperMemberReference(Derived& owner) - : owner{owner} - { } - WrapperMemberReference(Derived& owner, const RlType& other) - : owner{owner} - { - operator=(other); - } - - WrapperMemberReference& operator=(const RlType& rl){ - Setter(getHandle(owner), getHandle(rl)); - return *this; - } - - operator RlType&() { - return *owner.template makeReference( Getter(getHandle(owner)) ); - } - operator RlType&() const { - return *owner.template makeReference( Getter(getHandle(owner)) ); - } - - RlType& operator()(){ - return operator RlType&(); - } - const RlType& operator()() const{ - return operator RlType&(); - } - }; - - // For Factory/Singleton types, we need a way for the deleter to do nothing - static Snpe_ErrorCode_t NoOpDeleter(Handle){ - return SNPE_SUCCESS; - } - - // Simplify calls to WrapperDetail::moveHandle. 
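For orientation, the wrapper header being removed above builds C++ objects around raw SNPE C-API handles via CRTP: each derived wrapper names its C-API delete function, while the base owns the handle and enforces move-only semantics. The following is only a minimal standalone sketch of that pattern; every name in it (Snpe_Thing_Handle_t, Snpe_Thing_Create, Snpe_Thing_Delete, ThingWrapper) is a hypothetical stand-in, not the real SNPE C API.

```cpp
#include <utility>

// Hypothetical C-API surface (stand-ins only, not real SNPE symbols).
using Snpe_Thing_Handle_t = void*;
inline Snpe_Thing_Handle_t Snpe_Thing_Create() { return new int(0); }
inline void Snpe_Thing_Delete(Snpe_Thing_Handle_t h) { delete static_cast<int*>(h); }

// CRTP base: owns the handle, is move-only, and calls the derived class's
// DeleteFunction when the owning wrapper is destroyed.
template <typename Derived, typename Handle>
class Wrapper {
protected:
    explicit Wrapper(Handle h) : m_Handle{h} {}
public:
    Wrapper(const Wrapper&) = delete;
    Wrapper& operator=(const Wrapper&) = delete;
    Wrapper(Wrapper&& other) noexcept : m_Handle{other.m_Handle} { other.m_Handle = {}; }
    ~Wrapper() { if (m_Handle) Derived::DeleteFunction(m_Handle); }
    Handle handle() const noexcept { return m_Handle; }
private:
    Handle m_Handle;
};

// A derived wrapper only wires up creation and names the delete function.
class ThingWrapper : public Wrapper<ThingWrapper, Snpe_Thing_Handle_t> {
public:
    static constexpr auto DeleteFunction = Snpe_Thing_Delete;
    ThingWrapper() : Wrapper(Snpe_Thing_Create()) {}
};

int main() {
    ThingWrapper t;                 // handle created
    ThingWrapper u = std::move(t);  // ownership moves; t's handle is nulled
    return 0;                       // u's destructor calls Snpe_Thing_Delete
}
```

The real Wrapper additionally tracks whether a handle is merely a reference (and keeps referenced objects alive instead of deleting them), but the ownership and deletion mechanics follow the same idea.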
Can be removed, but will require updating all calls to moveHandle - template - static WrapperDetail::HandleMover moveHandle(H handle, bool isReference = false){ - return WrapperDetail::moveHandle(handle, isReference); - } - - - HandleType& handle() noexcept{ return m_Storage.handle; } - const HandleType& handle() const noexcept{ return m_Storage.handle; } - - bool isReference() const noexcept{ return m_Storage.isReference; } - - void Dtor(){ - if(!isReference() && !handle()){ - if(Derived::DeleteFunction != NoOpDeleter){ - WRAPPER_ETRACE(); - } - } - if(!isReference() && handle()){ - WRAPPER_TRACE(); -#ifdef WRAPPER_DEBUG_PRINTS - auto status = Derived::DeleteFunction(handle()); - if(status != SNPE_SUCCESS){ - WRAPPER_ETRACE(); - } -#else - Derived::DeleteFunction(handle()); -#endif - - handle() = nullptr; - } else { - WRAPPER_TRACE(); - } - } - -protected: - - // Only compile these if the class creates references. This will save memory and time - template::type=0> - void addReference(WrapperBase* wrapperBase) const{ // accesses mutable member - if(!wrapperBase){ - WRAPPER_ETRACE(); - } - m_Storage.referencedObjects.insert(std::unique_ptr(wrapperBase)); - } - - template::type=0> - T* makeReference(H referenceHandle) const{ - if(!referenceHandle){ - WRAPPER_ETRACE(); - return nullptr; - } - auto refObj = new T(moveHandle(referenceHandle, true)); - addReference(refObj); - return refObj; - } - - // This will be used to access another Wrapped type's handles once handle() is made protected - template - static OtherHandle getHandle(const Wrapper& otherObject){ - return otherObject.handle(); - } - - template - static OtherHandle getHandle(const Wrapper* otherObject){ - if(!otherObject) return {}; - return getHandle(*otherObject); - } - - template - static std::unique_ptr makeUnique(H handle){ - if(!handle) return {}; - return std::unique_ptr(new T(moveHandle(handle))); - } - - -public: - ~Wrapper(){ - Dtor(); - } -protected: - // Only derived types should have access to this - Wrapper(HandleType handle, bool isReference = false) - : m_Storage{handle, isReference} - { WRAPPER_TRACE(); } - -public: - // We should never have an empty wrapper - Wrapper() = delete; - - // Move semantics are essentially free for all wrapper types - Wrapper(Wrapper&& other) noexcept - : m_Storage{std::move(other.m_Storage)} - { - WRAPPER_TRACE(); - other.handle() = nullptr; - } - Wrapper(const Wrapper&) = delete; - - - Wrapper& operator=(Wrapper&& other) noexcept{ - WRAPPER_TRACE(); - if(this != &other){ - std::swap(m_Storage, other.m_Storage); - other.Dtor(); - } - return *this; - } - Wrapper& operator=(const Wrapper&) = delete; - - - // Allow a CAPI handle to be taken over by a Wrapper - Wrapper(WrapperDetail::HandleMover handleMover) noexcept - : Wrapper(handleMover.handle, handleMover.isReference) - { WRAPPER_TRACE(); } - -protected: - // Simplify Derived's move assignment operators - Derived& moveAssign(Derived&& other) noexcept{ WRAPPER_TRACE(); - return static_cast(operator=(std::move(other))); - } - - -private: - StorageType m_Storage; - -}; diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inference.cpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inference.cpp deleted file mode 100644 index 2f31d5c4..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inference.cpp +++ /dev/null @@ -1,193 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// 
-// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "android/log.h" - -#include "hpp/CheckRuntime.hpp" -#include "hpp/SetBuilderOptions.hpp" -#include "hpp/Util.hpp" -#include "LoadContainer.hpp" -#include "CreateUserBuffer.hpp" -#include "LoadInputTensor.hpp" - -#include -#include -#include - -std::unique_ptr snpe; - -std::mutex mtx; -static zdl::DlSystem::Runtime_t runtime = zdl::DlSystem::Runtime_t::CPU; -static zdl::DlSystem::RuntimeList runtimeList; -bool useUserSuppliedBuffers = true; -bool useIntBuffer = false; - -bool execStatus_thread = false; -zdl::DlSystem::UserBufferMap inputMap, outputMap; -std::vector > snpeUserBackedInputBuffers, snpeUserBackedOutputBuffers; -std::unordered_map > applicationOutputBuffers; -std::unordered_map > applicationInputBuffers; -int bitWidth = 32; - - -#include -#include -#include - -std::string build_network(const uint8_t * dlc_buffer, const size_t dlc_size, const char runtime_arg) -{ - std::string outputLogger; - bool usingInitCaching = false; //TODO check with true - - std::unique_ptr container_snpe = nullptr ; - - container_snpe = loadContainerFromBuffer(dlc_buffer, dlc_size); - - if (container_snpe == nullptr) { - LOGE("Error while opening the container file."); - return "Error while opening the container file.\n"; - } - - runtimeList.clear(); - LOGI("runtime arg %c",runtime_arg); - zdl::DlSystem::Runtime_t runtime = zdl::DlSystem::Runtime_t::CPU; - if (runtime_arg == 'D'){ - runtime = zdl::DlSystem::Runtime_t::DSP; - LOGI("Added DSP"); - } - else if (runtime_arg == 'G') - { - runtime = zdl::DlSystem::Runtime_t::GPU_FLOAT32_16_HYBRID; //can be written as GPU - LOGI("Added GPU"); - } - - if(runtime != zdl::DlSystem::Runtime_t::UNSET) - { - bool ret = runtimeList.add(checkRuntime(runtime)); - if(ret == false){ - LOGE("Cannot set runtime"); - return outputLogger + "\nCannot set runtime"; - } - } else { - return outputLogger + "\nCannot set runtime"; - } - - - mtx.lock(); - snpe = setBuilderOptions(container_snpe, runtime, runtimeList, useUserSuppliedBuffers, usingInitCaching); - mtx.unlock(); - - if (snpe == nullptr) { - LOGE("SNPE Prepare failed: Builder option failed"); - outputLogger += "Model Prepare failed"; - return outputLogger + "SNPE Prepare failed"; - } - - outputLogger += "\nModel Network Prepare success !!!\n"; - - //Creating Buffer - createInputBufferMap(inputMap, applicationInputBuffers, snpeUserBackedInputBuffers, snpe, useIntBuffer, bitWidth); - createOutputBufferMap(outputMap, applicationOutputBuffers, snpeUserBackedOutputBuffers, snpe, useIntBuffer, bitWidth); - return outputLogger; -} - -void executeonthread() -{ - if(snpe== nullptr) - LOGE("SNPE IS NULL"); - execStatus_thread = snpe->execute(inputMap, outputMap); -} - -bool executeDLC(cv::Mat &inputimg, cv::Mat &outputimg, float &milli_time, Model *modelobj) { - - LOGI("execute_DLC"); - ATrace_beginSection("preprocessing"); - - struct timeval start_time, end_time; - float seconds, useconds; - - mtx.lock(); - assert(snpe != nullptr); - - if(!loadInputUserBuffer(applicationInputBuffers, snpe, inputimg, inputMap, bitWidth, modelobj)) - { - LOGE("Failed to load Input UserBuffer"); - mtx.unlock(); - return false; - } - - ATrace_endSection(); - gettimeofday(&start_time, NULL); - 
ATrace_beginSection("inference time"); - - std::thread t1(executeonthread); - //Waiting for SNPE execute to finish - t1.join(); - - bool execStatus = execStatus_thread; -// bool execStatus = snpe->execute(inputMap, outputMap); - ATrace_endSection(); - ATrace_beginSection("postprocessing time"); - gettimeofday(&end_time, NULL); - seconds = end_time.tv_sec - start_time.tv_sec; //seconds - useconds = end_time.tv_usec - start_time.tv_usec; //milliseconds - milli_time = ((seconds) * 1000 + useconds/1000.0); - //LOGI("Inference time %f ms", milli_time); - - if(execStatus== true){ - LOGI("Exec status is true"); - } - else{ - LOGE("Exec status is false"); - mtx.unlock(); - return false; - } - - const auto& outputNamesOpt = snpe->getOutputTensorNames(); - const zdl::DlSystem::StringList& outputNames = *outputNamesOpt; - - const char* name = outputNames.at(0); - - LOGI("outbut buffers: %s", name); - std::vector databuffer = applicationOutputBuffers.at(name); - std::vector dims; - auto bufferAttributesOpt = snpe->getInputOutputBufferAttributes(name); - if (!bufferAttributesOpt) throw std::runtime_error(std::string("Error obtaining attributes for input tensor ") + name); - - const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); - int num_dims = bufferShape.rank(); - for(int i=0;ipostprocess(outputimg); - - ATrace_endSection(); - mtx.unlock(); - return true; -} - diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inference_helper.cpp b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inference_helper.cpp deleted file mode 100644 index 950e482e..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/cpp/inference_helper.cpp +++ /dev/null @@ -1,292 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -#include -#include -#include -#include -#include -#include "android/log.h" - -#include "zdl/SNPE/SNPE.hpp" -#include "zdl/SNPE/SNPEFactory.hpp" -#include "zdl/DlSystem/DlVersion.hpp" -#include "zdl/DlSystem/DlEnums.hpp" -#include "zdl/DlSystem/String.hpp" -#include "zdl/DlContainer/IDlContainer.hpp" -#include "zdl/SNPE/SNPEBuilder.hpp" -#include "zdl/DlSystem/ITensor.hpp" -#include "zdl/DlSystem/StringList.hpp" -#include "zdl/DlSystem/TensorMap.hpp" -#include "zdl/DlSystem/TensorShape.hpp" -#include "DlSystem/ITensorFactory.hpp" - -#include "hpp/LoadInputTensor.hpp" -#include "hpp/Util.hpp" -#include "inference.h" - -bool SetAdspLibraryPath(std::string nativeLibPath) { - nativeLibPath += ";/data/local/tmp/mv_dlc;/vendor/lib/rfsa/adsp;/vendor/dsp/cdsp;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp"; - - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "ADSP Lib Path = %s \n", nativeLibPath.c_str()); - std::cout << "ADSP Lib Path = " << nativeLibPath << std::endl; - - return setenv("ADSP_LIBRARY_PATH", nativeLibPath.c_str(), 1 /*override*/) == 0; -} - - -std::unique_ptr loadContainerFromBuffer(const uint8_t * buffer, const size_t size) -{ - std::unique_ptr container; - container = zdl::DlContainer::IDlContainer::open(buffer, size); - return container; -} - - -zdl::DlSystem::Runtime_t checkRuntime(zdl::DlSystem::Runtime_t runtime) -{ - static zdl::DlSystem::Version_t Version = zdl::SNPE::SNPEFactory::getLibraryVersion(); - - LOGI("SNPE Version = %s", Version.asString().c_str()); //Print Version number - - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(runtime)) { - LOGE("Selected runtime not present. Falling back to GPU."); - runtime = zdl::DlSystem::Runtime_t::GPU; - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(runtime)){ - LOGE("Selected runtime not present. Falling back to CPU."); - runtime = zdl::DlSystem::Runtime_t::CPU; - } - } - - return runtime; -} - -std::unique_ptr setBuilderOptions(std::unique_ptr & container, - zdl::DlSystem::Runtime_t runtime, - zdl::DlSystem::RuntimeList runtimeList, - bool useUserSuppliedBuffers, - bool useCaching) -{ - std::unique_ptr snpe; - zdl::SNPE::SNPEBuilder snpeBuilder(container.get()); - - if(runtimeList.empty()) - { - runtimeList.add(runtime); - } - - std::string platformOptionStr = "useAdaptivePD:ON"; -// if (isSignedStatus == UNSIGNED_PD) { - // use unsignedPD feature for untrusted app. 
- // platformOptionStr += "unsignedPD:ON"; -// } - zdl::DlSystem::PlatformConfig platformConfig; - bool setSuccess = platformConfig.setPlatformOptions(platformOptionStr); - if (!setSuccess) - LOGE("=========> failed to set platformconfig: %s", platformOptionStr.c_str()); - else - LOGI("=========> platformconfig set: %s", platformOptionStr.c_str()); - - bool isValid = platformConfig.isOptionsValid(); - if (!isValid) - LOGE("=========> platformconfig option is invalid"); - else - LOGI("=========> platformconfig option: valid"); - - - zdl::DlSystem::StringList stringruntime = runtimeList.getRuntimeListNames(); - for (const char *name : stringruntime) - LOGI("runtime sh %s", name); - - snpe = snpeBuilder.setOutputLayers({}) - .setPerformanceProfile(zdl::DlSystem::PerformanceProfile_t::BURST) - .setExecutionPriorityHint( - zdl::DlSystem::ExecutionPriorityHint_t::HIGH) - .setRuntimeProcessorOrder(runtimeList) - .setUseUserSuppliedBuffers(useUserSuppliedBuffers) - .setPlatformConfig(platformConfig) - .setInitCacheMode(useCaching) - .build(); - -// .setCPUFallbackMode(true) -// .setUnconsumedTensorsAsOutputs(true) - return snpe; -} - -// ==============================User Buffer func=================================== // -// ================================================================================= // - - -//CreateUserbuffer INPUT/OUTPUT -void createUserBuffer(zdl::DlSystem::UserBufferMap& userBufferMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - const char * name, - const bool isTfNBuffer, - int bitWidth) -{ - - auto bufferAttributesOpt = snpe->getInputOutputBufferAttributes(name); - if (!bufferAttributesOpt) throw std::runtime_error(std::string("Error obtaining attributes for input tensor ") + name); - - // calculate the size of buffer required by the input tensor - const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); - - size_t bufferElementSize = 0; - if (isTfNBuffer) { - bufferElementSize = bitWidth / 8; - } - else { - bufferElementSize = sizeof(float); - } - - // Calculate the stride based on buffer strides. - // Note: Strides = Number of bytes to advance to the next element in each dimension. - // For example, if a float tensor of dimension 2x4x3 is tightly packed in a buffer of 96 bytes, then the strides would be (48,12,4) - // Note: Buffer stride is usually known and does not need to be calculated. 
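To make the stride note above concrete, here is a small self-contained sketch (independent of SNPE; the helper name is ours) that reproduces the figures quoted in these comments: float32 with shape 2x4x3 packed into 96 bytes gives strides (48, 12, 4), and 1x128x128x3 gives (196608, 1536, 12, 4).

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Row-major byte strides: the last dimension advances by one element,
// each earlier dimension by the byte size of everything after it.
std::vector<std::size_t> byteStrides(const std::vector<std::size_t>& shape,
                                     std::size_t elementSize) {
    std::vector<std::size_t> strides(shape.size());
    std::size_t stride = elementSize;
    for (std::size_t i = shape.size(); i-- > 0;) {
        strides[i] = stride;     // bytes to step one index in dimension i
        stride *= shape[i];
    }
    return strides;
}

int main() {
    for (std::size_t s : byteStrides({2, 4, 3}, sizeof(float)))
        std::cout << s << ' ';               // 48 12 4
    std::cout << '\n';
    for (std::size_t s : byteStrides({1, 128, 128, 3}, sizeof(float)))
        std::cout << s << ' ';               // 196608 1536 12 4
    std::cout << '\n';
}
```

The deleted createUserBuffer below performs the same computation in place before passing the strides to the IUserBufferFactory when it creates each user buffer.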
- -// 1x128x128x3 -// [196608,1536,12,4] - int num_dims = bufferShape.rank(); //bufferShape rank is generally 1 more than expected, as it add 1 for batchSize, so 320x320x3 will look like 1x320x320x3 - LOGI("num_dims %d",num_dims); - std::vector strides(num_dims); - - //stride [196608 1536 12 4] - //buffershape [ 1 128 128 3] - //stride 4*3*128 - strides[strides.size() - 1] = bufferElementSize; - size_t stride = strides[strides.size() - 1]; - for (size_t i = num_dims - 1; i > 0; i--) { - stride *= bufferShape[i]; - strides[i - 1] = stride; - // LOGI("\nstrides[%d]: %d",i-1,stride); - // LOGI("\nbuffershape[%d]: %d",i,bufferShape[i]); - } - - size_t bufSize=bufferElementSize; - for(int i=0;i userBufferEncoding; - if (isTfNBuffer) - userBufferEncoding = std::unique_ptr( - new zdl::DlSystem::UserBufferEncodingTfN(0, 1.0, bitWidth)); - else - userBufferEncoding = std::unique_ptr( - new zdl::DlSystem::UserBufferEncodingFloat()); - - // create user-backed storage to load input data onto it - applicationBuffers.emplace(name, std::vector(bufSize)); - - // create SNPE user buffer from the user-backed buffer - zdl::DlSystem::IUserBufferFactory &ubFactory = zdl::SNPE::SNPEFactory::getUserBufferFactory(); - snpeUserBackedBuffers.push_back( - ubFactory.createUserBuffer(applicationBuffers.at(name).data(), - bufSize, - strides, - userBufferEncoding.get())); - if (snpeUserBackedBuffers.back() == nullptr) - throw std::runtime_error(std::string("Error while creating user buffer.")); - - // add the user-backed buffer to the inputMap, which is later on fed to the network for execution - userBufferMap.add(name, snpeUserBackedBuffers.back().get()); - -} - -/* - Cretae OutPut Buffer Map - */ -void createOutputBufferMap(zdl::DlSystem::UserBufferMap& outputMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - bool isTfNBuffer, - int bitWidth) -{ - //LOGI("Creating Output Buffer"); - const auto& outputNamesOpt = snpe->getOutputTensorNames(); - if (!outputNamesOpt) throw std::runtime_error("Error obtaining output tensor names"); - const zdl::DlSystem::StringList& outputNames = *outputNamesOpt; - - // create SNPE user buffers for each application storage buffer - for (const char *name : outputNames) { - LOGI("Creating output buffer %s", name); - createUserBuffer(outputMap, applicationBuffers, snpeUserBackedBuffers, snpe, name, isTfNBuffer, bitWidth); - } -} -/* - * Create Input Buffer Map - */ -void createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - bool isTfNBuffer, - int bitWidth) { - //LOGI("Creating Input Buffer"); - const auto &inputNamesOpt = snpe->getInputTensorNames(); - if (!inputNamesOpt) throw std::runtime_error("Error obtaining input tensor names"); - const zdl::DlSystem::StringList &inputNames = *inputNamesOpt; - assert(inputNames.size() > 0); - - // create SNPE user buffers for each application storage buffer - for (const char *name: inputNames) { - LOGI("Creating Input Buffer = %s", name); - createUserBuffer(inputMap, applicationBuffers, snpeUserBackedBuffers, snpe, name, - isTfNBuffer, bitWidth); - } -} - -//Preprocessing and loading in application Input Buffer -bool loadInputUserBuffer(std::unordered_map>& applicationBuffers, - std::unique_ptr& snpe, - cv::Mat &img, - zdl::DlSystem::UserBufferMap& inputMap, - int bitWidth, Model *modelobj) { - - // get input tensor names of the network that need to be populated - const auto 
&inputNamesOpt = snpe->getInputTensorNames(); - if (!inputNamesOpt) throw std::runtime_error("Error obtaining input tensor names"); - const zdl::DlSystem::StringList &inputNames = *inputNamesOpt; - assert(inputNames.size() > 0); - - if (inputNames.size()) LOGI("Preprocessing and loading in application Input Buffer"); - - - for (size_t j = 0; j < inputNames.size(); j++) { - const char *name = inputNames.at(j); - LOGI("Filling %s buffer ", name); - - if(bitWidth == 8 || bitWidth == 16) { - LOGE("bitwidth 8 and 16 are NOT DEFINED"); - return false; - } else { - - std::vector dims; - auto bufferAttributesOpt = snpe->getInputOutputBufferAttributes(name); - if (!bufferAttributesOpt) throw std::runtime_error(std::string("Error obtaining attributes for input tensor ") + name); - - const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); - int num_dims = bufferShape.rank(); - for(int i=0;ipreprocess(applicationBuffers.at(name), img, dims); //functions loads data in applicationBuffer - } - } - return true; -} diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/ic_launcher-playstore.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/ic_launcher-playstore.png deleted file mode 100644 index 63790b35..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/ic_launcher-playstore.png and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/java/com/qcom/aistack_lowlightenhance/SNPEActivity.java b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/java/com/qcom/aistack_lowlightenhance/SNPEActivity.java deleted file mode 100644 index 8d6c6642..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/java/com/qcom/aistack_lowlightenhance/SNPEActivity.java +++ /dev/null @@ -1,348 +0,0 @@ -// -*- mode: java -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -package com.qcom.aistack_lowlightenhance; - -import android.graphics.Bitmap; -import android.graphics.BitmapFactory; -import android.os.Bundle; -import android.view.MotionEvent; -import android.view.View; -import android.view.WindowManager; -import android.widget.AdapterView; -import android.widget.ArrayAdapter; -import android.widget.ImageView; -import android.widget.ProgressBar; -import android.widget.RadioButton; -import android.widget.RadioGroup; -import android.widget.Spinner; -import android.widget.TextView; -import android.widget.Toast; - -import androidx.appcompat.app.AppCompatActivity; - -import org.opencv.android.OpenCVLoader; - -import java.io.IOException; -import java.io.InputStream; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; - -public class SNPEActivity extends AppCompatActivity { - - static { - System.loadLibrary("ImageEnhancement"); - OpenCVLoader.initDebug(); - } - - SNPEHelper mSnpeHelper; - Boolean mNetworkLoaded; - - float infer_time=0.0f; - - public static InputStream originalFile = null; - - //creating objects for UI element used in layout files (activity_snpe.xml) - TextView txt_stat, tx_pr, tx_out, tx_sug; - ImageView imageView, imageView2; - RadioGroup radioGroup; - Bitmap bmps = null; - Bitmap outbmps = null; - Spinner inputImageSpin; - Spinner modelspin; - final String[] options = {"No Selection","Sample1.jpg","Sample2.jpg"}; //Image filenames on which model inference is made - final String[] modeldlcname = {"None", "ruas_Q.dlc", "sci_difficult_Q.dlc", "StableLLVE_Q.dlc", "quant_zeroDCE_640_480_212_8550_out80.dlc"}; - final String[] modeloptions = { "No Selection", "RUAS", "SCI", "StableLLVE", "ZeroDCE"}; - protected void executeRadioButton(int checkedId) { - - ProgressBar progressBar; - progressBar = findViewById(R.id.indeterminateBar); - ExecutorService service = Executors.newSingleThreadExecutor(); - progressBar.setVisibility(View.VISIBLE); - getWindow().setFlags(WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE, - WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE); - - service.execute(new Runnable() { - @Override - public void run() { - try { - boolean status = false; - String timestr = null; - switch (checkedId) { - case R.id.rb1: - // set text for your textview here - System.out.println("CPU instance running"); - - status = process(bmps, 'C', modeldlcname[modelspin.getSelectedItemPosition()]); - timestr = "CPU inference time : " + infer_time + " ms"; - break; - - case R.id.rb2: - // set text for your textview here - System.out.println("GPU instance running"); - - status = process(bmps, 'G', modeldlcname[modelspin.getSelectedItemPosition()]); - timestr = "GPU inference time : " + infer_time + " ms"; - break; - - case R.id.rb3: - System.out.println("DSP instance running"); - - status = process(bmps, 'D', modeldlcname[modelspin.getSelectedItemPosition()]); - timestr = "DSP Inference time : " + infer_time + "ms"; - break; - - default: - System.out.println("Do Nothing"); - break; - - } - - boolean final_status = status; - final String final_timestr = timestr; - runOnUiThread(new Runnable() { - @Override - public void run() { - txt_stat.setText(final_timestr); - progressBar.setVisibility(View.INVISIBLE); - - //making UI responsive - getWindow().clearFlags(WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE); - - if (final_status) { - imageView2.setImageBitmap(outbmps); - 
imageView2.setVisibility(View.VISIBLE); - txt_stat.setVisibility(View.VISIBLE); - tx_pr.setVisibility(View.INVISIBLE); - tx_out.setVisibility(View.VISIBLE); - tx_sug.setVisibility(View.VISIBLE); - } - } - }); - } - catch(Exception e) - { - getWindow().clearFlags(WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE); - e.printStackTrace(); - } - } - - }); - } - @Override - protected void onCreate(Bundle savedInstanceState) { - super.onCreate(savedInstanceState); - setContentView(R.layout.activity_snpe); - txt_stat = findViewById(R.id.textView4); - imageView = findViewById(R.id.im1); - imageView2 = findViewById(R.id.im2); - radioGroup = findViewById(R.id.rg1); - inputImageSpin = findViewById((R.id.spinner)); - modelspin = findViewById((R.id.spinner7)); - tx_pr = findViewById(R.id.textView); - tx_out = findViewById(R.id.textView2); - tx_sug = findViewById(R.id.textView_suggest); - imageView2.setVisibility(View.INVISIBLE); - tx_out.setVisibility(View.INVISIBLE); - tx_sug.setVisibility(View.INVISIBLE); - - - imageView2.setOnTouchListener((view, motionEvent) -> { - switch (motionEvent.getAction()) { - case MotionEvent.ACTION_DOWN: { - imageView2.setVisibility(View.INVISIBLE); - System.out.println("MotionEvent.ACTION_DOWN"); - tx_out.setVisibility(View.INVISIBLE); - tx_pr.setVisibility(View.VISIBLE); - break; - } - case MotionEvent.ACTION_UP: { - imageView2.setVisibility(View.VISIBLE); - System.out.println("MotionEvent.ACTION_UP"); - tx_out.setVisibility(View.VISIBLE); - tx_pr.setVisibility(View.INVISIBLE); - break; - } - } - return false; - }); - - ArrayAdapter ad = new ArrayAdapter(this, android.R.layout.simple_spinner_item, options); - ad.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item); - inputImageSpin.setAdapter(ad); - - ArrayAdapter ad7 = new ArrayAdapter(this, android.R.layout.simple_spinner_item, modeloptions); - ad7.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item); - modelspin.setAdapter(ad7); - - // Listener to check the change in HW accelerator input in APP UI - radioGroup.setOnCheckedChangeListener(new RadioGroup.OnCheckedChangeListener() { - @Override - public void onCheckedChanged(RadioGroup group, int checkedId) { - if (!inputImageSpin.getSelectedItem().toString().equals("No Selection") && !modelspin.getSelectedItem().toString().equals("No Selection")){ - executeRadioButton(checkedId); - } - else if (checkedId!=-1 && inputImageSpin.getSelectedItem().toString().equals("No Selection") && modelspin.getSelectedItem().toString().equals("No Selection")){ - Toast.makeText(getApplicationContext(), "Please select model and image", Toast.LENGTH_SHORT).show(); - } - else if (checkedId!=-1 && inputImageSpin.getSelectedItem().toString().equals("No Selection")) - { - Toast.makeText(getApplicationContext(), "Please select image to model ", Toast.LENGTH_SHORT).show(); - } - else if(checkedId!=-1 && modelspin.getSelectedItem().toString().equals("No Selection")) - { - Toast.makeText(getApplicationContext(), "Please select appropriate model ", Toast.LENGTH_SHORT).show(); - } - } - }); - - modelspin.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() { - @Override - public void onItemSelected(AdapterView parent, View view, int position, long id) { - - //Model and Runtime is mentioned - if (!parent.getItemAtPosition(position).equals("No Selection") && !inputImageSpin.getSelectedItem().toString().equals("No Selection")) {//if no selection of image - txt_stat.setText("Stats"); - - //Loading assets - try { - originalFile = getAssets().open((String) 
inputImageSpin.getSelectedItem().toString()); - } catch (IOException e) { - e.printStackTrace(); - } - - // Convert input image to Bitmap - bmps = BitmapFactory.decodeStream(originalFile); - try { - // Set the input image in UI view - imageView.setImageBitmap(bmps); - System.out.println("modelspin: INPUT wxh:"+bmps.getWidth()+"-----"+bmps.getHeight()); - } catch (Exception e) { - e.printStackTrace(); - } - - int checkedID_RB = radioGroup.getCheckedRadioButtonId(); - if (originalFile!=null && bmps!=null && checkedID_RB !=-1){ - executeRadioButton(checkedID_RB); - } - } - else if (!inputImageSpin.getSelectedItem().toString().equals("No Selection")) { - - try { - originalFile = getAssets().open((String) inputImageSpin.getSelectedItem().toString()); - // Set the input image in UI view - imageView.setImageBitmap(BitmapFactory.decodeStream(originalFile)); - imageView2.setImageResource(R.drawable.ic_launcher_background); - imageView2.setVisibility(View.INVISIBLE); - - } catch (Exception e) { - e.printStackTrace(); - } - - } - else{ - originalFile=null; - bmps=null; - imageView.setImageResource(R.drawable.ic_launcher_background); - imageView2.setImageResource(R.drawable.ic_launcher_background); - imageView2.setVisibility(View.INVISIBLE); - txt_stat.setText("Stats"); - radioGroup.clearCheck(); - } - } - @Override - public void onNothingSelected(AdapterView parent) { - System.out.println("Nothing"); - } - }); - - inputImageSpin.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() { - @Override - public void onItemSelected(AdapterView parent, View view, int position, long id) { - - //If model and image is selected - if (!parent.getItemAtPosition(position).equals("No Selection") && !modelspin.getSelectedItem().toString().equals("No Selection")) {//if no selection of image - txt_stat.setText("Stats"); - try { - // loading picture from assets... 
- originalFile = getAssets().open((String) parent.getItemAtPosition(position)); - } catch (IOException e) { - e.printStackTrace(); - } - - // Convert input image to Bitmap - bmps = BitmapFactory.decodeStream(originalFile); - try { - // Set the input image in UI view - imageView.setImageBitmap(bmps); - - System.out.println("INPUT wxh: "+bmps.getWidth()+"-----"+bmps.getHeight()); - } catch (Exception e) { - e.printStackTrace(); - } - int checkedID_RB = radioGroup.getCheckedRadioButtonId(); - if (originalFile!=null && bmps!=null && checkedID_RB !=-1){ - executeRadioButton(checkedID_RB); - } - } - //if only input image is selected - else if (!inputImageSpin.getSelectedItem().toString().equals("No Selection")) { - - try { - originalFile = getAssets().open((String) inputImageSpin.getSelectedItem().toString()); - - // Set the input image in UI view - imageView.setImageBitmap(BitmapFactory.decodeStream(originalFile)); - imageView2.setImageResource(R.drawable.ic_launcher_background); - imageView2.setVisibility(View.INVISIBLE); - - - } catch (Exception e) { - e.printStackTrace(); - } - } - else{ - originalFile=null; - bmps=null; - imageView.setImageResource(R.drawable.ic_launcher_background); - imageView2.setImageResource(R.drawable.ic_launcher_background); - imageView2.setVisibility(View.INVISIBLE); - txt_stat.setText("Stats"); - radioGroup.clearCheck(); - } - } - @Override - public void onNothingSelected(AdapterView parent) { - System.out.println("Nothing"); - } - }); - } - - //Function to load model and get inference from it - public boolean process(Bitmap bmps, char runtime_var, String dlc_name) { - - mSnpeHelper = new SNPEHelper(getApplication()); - - mNetworkLoaded = mSnpeHelper.loadingMODELS(runtime_var, dlc_name); - - if (mNetworkLoaded) - { - outbmps = mSnpeHelper.snpeInference(bmps); - infer_time = mSnpeHelper.getInfer_time(); - } - - if (outbmps == null) - { - System.out.println("outbmps is null"); - return false; - } - return true; - } -} - diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/java/com/qcom/aistack_lowlightenhance/SNPEHelper.java b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/java/com/qcom/aistack_lowlightenhance/SNPEHelper.java deleted file mode 100644 index 44079fc0..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/java/com/qcom/aistack_lowlightenhance/SNPEHelper.java +++ /dev/null @@ -1,92 +0,0 @@ -// -*- mode: java -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -package com.qcom.aistack_lowlightenhance; - -import static android.graphics.Color.rgb; - -import android.app.Application; -import android.content.res.AssetManager; -import android.graphics.Bitmap; - -import org.opencv.android.Utils; -import org.opencv.core.Mat; - -public class SNPEHelper { - private final Application mApplication; - private AssetManager assetManager; - - private float infer_time=0; - - // Constructor - public SNPEHelper(Application application) { - mApplication = application; - } - public float getInfer_time() - {return infer_time;} - - //Native functions - public native String queryRuntimes(String a); - public native String initSNPE(AssetManager assetManager, char a, String dlc_name); - public native float inferSNPE(long inputmataddress, long outputmataddress); - - /** - * This method loads ML models on selected runtime - */ - public boolean loadingMODELS(char runtime_var, String dlc_name) { - - assetManager = mApplication.getAssets(); - String nativeDirPath = mApplication.getApplicationInfo().nativeLibraryDir; - String res_query = queryRuntimes(nativeDirPath); - System.out.println(res_query); - String init_str = initSNPE(assetManager, runtime_var, dlc_name); - System.out.println("RESULT:"+init_str); - - int success_count = init_str.split("success", -1).length -1; - - if(success_count==1) - { - System.out.println("Model built successfully"); - return true; - } - - return false; - } - - /* - This method makes inference on bitmap. - */ - public Bitmap snpeInference(Bitmap modelInputBitmap) { - - try{ - - Mat inputMat = new Mat(); - Utils.bitmapToMat(modelInputBitmap, inputMat); - - Mat outputMat = new Mat(); - - infer_time = inferSNPE(inputMat.getNativeObjAddr(), outputMat.getNativeObjAddr()); - - - if(infer_time==0.0) - System.out.println("ERROR"); - else - { - Bitmap outputBitmap = Bitmap.createBitmap(outputMat.cols(), outputMat.rows(), Bitmap.Config.ARGB_8888); - Utils.matToBitmap(outputMat,outputBitmap); - return outputBitmap; - } - }catch (Exception e) { - e.printStackTrace(); - } - return null; - } - - -} \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/jniLibs/arm64-v8a/ReadMe.txt b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/jniLibs/arm64-v8a/ReadMe.txt deleted file mode 100644 index b1b7342e..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/jniLibs/arm64-v8a/ReadMe.txt +++ /dev/null @@ -1,2 +0,0 @@ -User needs to place Qualcomm Neural Processing SDK files here. 
-Please refer to resolveDependencies.sh \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/drawable-v24/ic_launcher_foreground.xml b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/drawable-v24/ic_launcher_foreground.xml deleted file mode 100644 index 971add5e..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/drawable-v24/ic_launcher_foreground.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - - diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/drawable/ic_launcher_background.xml b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/drawable/ic_launcher_background.xml deleted file mode 100644 index 50ae786e..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/drawable/ic_launcher_background.xml +++ /dev/null @@ -1,74 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/drawable/ic_launcher_foreground.xml b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/drawable/ic_launcher_foreground.xml deleted file mode 100644 index 971add5e..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/drawable/ic_launcher_foreground.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - - diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/layout/activity_snpe.xml b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/layout/activity_snpe.xml deleted file mode 100644 index 2e5ea83b..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/layout/activity_snpe.xml +++ /dev/null @@ -1,163 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-anydpi-v26/ic_launcher.xml b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-anydpi-v26/ic_launcher.xml deleted file mode 100644 index 67820c56..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-anydpi-v26/ic_launcher.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml deleted file mode 100644 index 67820c56..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-hdpi/ic_launcher.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-hdpi/ic_launcher.png deleted file mode 100644 index c0bc6f69..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-hdpi/ic_launcher.png and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-hdpi/ic_launcher_foreground.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-hdpi/ic_launcher_foreground.png deleted file mode 100644 index 7f4fa9f2..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-hdpi/ic_launcher_foreground.png and 
/dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-hdpi/ic_launcher_round.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-hdpi/ic_launcher_round.png deleted file mode 100644 index 8d8adb51..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-hdpi/ic_launcher_round.png and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-mdpi/ic_launcher.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-mdpi/ic_launcher.png deleted file mode 100644 index e85093a0..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-mdpi/ic_launcher.png and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-mdpi/ic_launcher_foreground.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-mdpi/ic_launcher_foreground.png deleted file mode 100644 index 494b6dae..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-mdpi/ic_launcher_foreground.png and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-mdpi/ic_launcher_round.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-mdpi/ic_launcher_round.png deleted file mode 100644 index 45464054..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-mdpi/ic_launcher_round.png and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xhdpi/ic_launcher.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xhdpi/ic_launcher.png deleted file mode 100644 index ba5e874d..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xhdpi/ic_launcher.png and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xhdpi/ic_launcher_foreground.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xhdpi/ic_launcher_foreground.png deleted file mode 100644 index 3ee228b9..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xhdpi/ic_launcher_foreground.png and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xhdpi/ic_launcher_round.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xhdpi/ic_launcher_round.png deleted file mode 100644 index e1abe119..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xhdpi/ic_launcher_round.png and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxhdpi/ic_launcher.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxhdpi/ic_launcher.png deleted file mode 100644 index f43f9233..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxhdpi/ic_launcher.png and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxhdpi/ic_launcher_foreground.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxhdpi/ic_launcher_foreground.png deleted file mode 100644 index 528d8b32..00000000 Binary files 
a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxhdpi/ic_launcher_foreground.png and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxhdpi/ic_launcher_round.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxhdpi/ic_launcher_round.png deleted file mode 100644 index bb35b048..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxhdpi/ic_launcher_round.png and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxxhdpi/ic_launcher.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxxhdpi/ic_launcher.png deleted file mode 100644 index e2749b79..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxxhdpi/ic_launcher.png and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxxhdpi/ic_launcher_foreground.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxxhdpi/ic_launcher_foreground.png deleted file mode 100644 index 6aab2d60..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxxhdpi/ic_launcher_foreground.png and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png deleted file mode 100644 index f4d153e3..00000000 Binary files a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png and /dev/null differ diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/values-night/themes.xml b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/values-night/themes.xml deleted file mode 100644 index cc4fe14e..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/values-night/themes.xml +++ /dev/null @@ -1,17 +0,0 @@ - - - - - \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/values/colors.xml b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/values/colors.xml deleted file mode 100644 index 742b3a6a..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/values/colors.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - #FFBB86FC - #FF6200EE - #FF3700B3 - #FF03DAC5 - #FF018786 - #FF000000 - #FFFFFFFF - \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/values/ic_launcher_background.xml b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/values/ic_launcher_background.xml deleted file mode 100644 index cfa9be08..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/values/ic_launcher_background.xml +++ /dev/null @@ -1,4 +0,0 @@ - - - #FFFFFF - \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/values/strings.xml b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/values/strings.xml deleted file mode 100644 index 3c0ab177..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/values/strings.xml +++ /dev/null @@ -1,4 +0,0 @@ - - - Image Enhancement - \ No newline at end of file diff --git 
a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/values/themes.xml b/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/values/themes.xml deleted file mode 100644 index cbd4ed94..00000000 --- a/ai-solutions/android/02-ImageEnhancement/enhancement/src/main/res/values/themes.xml +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/gradle.properties b/ai-solutions/android/02-ImageEnhancement/gradle.properties deleted file mode 100644 index be1bd26a..00000000 --- a/ai-solutions/android/02-ImageEnhancement/gradle.properties +++ /dev/null @@ -1,19 +0,0 @@ -# Project-wide Gradle settings. -# IDE (e.g. Android Studio) users: -# Gradle settings configured through the IDE *will override* -# any settings specified in this file. -# For more details on how to configure your build environment visit -# http://www.gradle.org/docs/current/userguide/build_environment.html -# Specifies the JVM arguments used for the daemon process. -# The setting is particularly useful for tweaking memory settings. -org.gradle.jvmargs=-Xmx2048m -# When configured, Gradle will run in incubating parallel mode. -# This option should only be used with decoupled projects. More details, visit -# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects -# org.gradle.parallel=true -# AndroidX package structure to make it clearer which packages are bundled with the -# Android operating system, and which are packaged with your app"s APK -# https://developer.android.com/topic/libraries/support-library/androidx-rn -android.useAndroidX=true -# Automatically convert third-party libraries to use AndroidX -android.enableJetifier=true diff --git a/ai-solutions/android/02-ImageEnhancement/gradle/wrapper/gradle-wrapper.properties b/ai-solutions/android/02-ImageEnhancement/gradle/wrapper/gradle-wrapper.properties deleted file mode 100644 index cb6bd4ea..00000000 --- a/ai-solutions/android/02-ImageEnhancement/gradle/wrapper/gradle-wrapper.properties +++ /dev/null @@ -1,6 +0,0 @@ -#Fri Sep 09 10:14:39 IST 2022 -distributionBase=GRADLE_USER_HOME -distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-bin.zip -distributionPath=wrapper/dists -zipStorePath=wrapper/dists -zipStoreBase=GRADLE_USER_HOME diff --git a/ai-solutions/android/02-ImageEnhancement/gradlew b/ai-solutions/android/02-ImageEnhancement/gradlew deleted file mode 100644 index 4e395898..00000000 --- a/ai-solutions/android/02-ImageEnhancement/gradlew +++ /dev/null @@ -1,185 +0,0 @@ -#!/usr/bin/env sh - -# -# Copyright 2015 the original author or authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -############################################################################## -## -## Gradle start up script for UN*X -## -############################################################################## - -# Attempt to set APP_HOME -# Resolve links: $0 may be a link -PRG="$0" -# Need this for relative symlinks. 
-while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG=`dirname "$PRG"`"/$link" - fi -done -SAVED="`pwd`" -cd "`dirname \"$PRG\"`/" >/dev/null -APP_HOME="`pwd -P`" -cd "$SAVED" >/dev/null - -APP_NAME="Gradle" -APP_BASE_NAME=`basename "$0"` - -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' - -# Use the maximum available, or set MAX_FD != -1 to use that value. -MAX_FD="maximum" - -warn () { - echo "$*" -} - -die () { - echo - echo "$*" - echo - exit 1 -} - -# OS specific support (must be 'true' or 'false'). -cygwin=false -msys=false -darwin=false -nonstop=false -case "`uname`" in - CYGWIN* ) - cygwin=true - ;; - Darwin* ) - darwin=true - ;; - MSYS* | MINGW* ) - msys=true - ;; - NONSTOP* ) - nonstop=true - ;; -esac - -CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar - - -# Determine the Java command to use to start the JVM. -if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" - else - JAVACMD="$JAVA_HOME/bin/java" - fi - if [ ! -x "$JAVACMD" ] ; then - die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." - fi -else - JAVACMD="java" - which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." -fi - -# Increase the maximum file descriptors if we can. -if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then - MAX_FD_LIMIT=`ulimit -H -n` - if [ $? -eq 0 ] ; then - if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then - MAX_FD="$MAX_FD_LIMIT" - fi - ulimit -n $MAX_FD - if [ $? 
-ne 0 ] ; then - warn "Could not set maximum file descriptor limit: $MAX_FD" - fi - else - warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" - fi -fi - -# For Darwin, add options to specify how the application appears in the dock -if $darwin; then - GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" -fi - -# For Cygwin or MSYS, switch paths to Windows format before running java -if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then - APP_HOME=`cygpath --path --mixed "$APP_HOME"` - CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` - - JAVACMD=`cygpath --unix "$JAVACMD"` - - # We build the pattern for arguments to be converted via cygpath - ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` - SEP="" - for dir in $ROOTDIRSRAW ; do - ROOTDIRS="$ROOTDIRS$SEP$dir" - SEP="|" - done - OURCYGPATTERN="(^($ROOTDIRS))" - # Add a user-defined pattern to the cygpath arguments - if [ "$GRADLE_CYGPATTERN" != "" ] ; then - OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" - fi - # Now convert the arguments - kludge to limit ourselves to /bin/sh - i=0 - for arg in "$@" ; do - CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` - CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option - - if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition - eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` - else - eval `echo args$i`="\"$arg\"" - fi - i=`expr $i + 1` - done - case $i in - 0) set -- ;; - 1) set -- "$args0" ;; - 2) set -- "$args0" "$args1" ;; - 3) set -- "$args0" "$args1" "$args2" ;; - 4) set -- "$args0" "$args1" "$args2" "$args3" ;; - 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; - 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; - 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; - 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; - 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; - esac -fi - -# Escape application args -save () { - for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done - echo " " -} -APP_ARGS=`save "$@"` - -# Collect all arguments for the java command, following the shell quoting and substitution rules -eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" - -exec "$JAVACMD" "$@" diff --git a/ai-solutions/android/02-ImageEnhancement/gradlew.bat b/ai-solutions/android/02-ImageEnhancement/gradlew.bat deleted file mode 100644 index ac1b06f9..00000000 --- a/ai-solutions/android/02-ImageEnhancement/gradlew.bat +++ /dev/null @@ -1,89 +0,0 @@ -@rem -@rem Copyright 2015 the original author or authors. -@rem -@rem Licensed under the Apache License, Version 2.0 (the "License"); -@rem you may not use this file except in compliance with the License. -@rem You may obtain a copy of the License at -@rem -@rem https://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -@rem See the License for the specific language governing permissions and -@rem limitations under the License. 
-@rem - -@if "%DEBUG%" == "" @echo off -@rem ########################################################################## -@rem -@rem Gradle startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME% - -@rem Resolve any "." and ".." in APP_HOME to make it shorter. -for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi - -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto execute - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto execute - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar - - -@rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* - -:end -@rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd - -:fail -rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega diff --git a/ai-solutions/android/02-ImageEnhancement/resolveDependencies.sh b/ai-solutions/android/02-ImageEnhancement/resolveDependencies.sh deleted file mode 100644 index ac881bd4..00000000 --- a/ai-solutions/android/02-ImageEnhancement/resolveDependencies.sh +++ /dev/null @@ -1,36 +0,0 @@ -# -*- mode: shell script -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= - -#RESOLVING DEPENDENCIES - -# steps to copy opencv -wget https://sourceforge.net/projects/opencvlibrary/files/4.5.5/opencv-4.5.5-android-sdk.zip/download -unzip download -rm download -mkdir sdk -mv OpenCV-android-sdk/sdk/* sdk -rm -r OpenCV-android-sdk - -#Steps to paste files in JNI -##copying snpe-release.aar file -## Change $SNPE_ROOT/lib/android/snpe-release.aar to $SNPE_ROOT/android/snpe-release.aar for SNPE<=2.10 -mkdir snpe-release -cp $SNPE_ROOT/lib/android/snpe-release.aar snpe-release -unzip -o snpe-release/snpe-release.aar -d snpe-release/snpe-release - -mkdir -p app/src/main/jniLibs/arm64-v8a - -##writing jniLibs -cp snpe-release/snpe-release/jni/arm64-v8a/libc++_shared.so app/src/main/jniLibs/arm64-v8a/ -cp snpe-release/snpe-release/jni/arm64-v8a/libSNPE.so app/src/main/jniLibs/arm64-v8a/ -cp snpe-release/snpe-release/jni/arm64-v8a/libsnpe-android.so app/src/main/jniLibs/arm64-v8a/ -cp snpe-release/snpe-release/jni/arm64-v8a/libSnpeHtpPrepare.so app/src/main/jniLibs/arm64-v8a/ -cp snpe-release/snpe-release/jni/arm64-v8a/libSnpeHtpV73Skel.so app/src/main/jniLibs/arm64-v8a/ -cp snpe-release/snpe-release/jni/arm64-v8a/libSnpeHtpV73Stub.so app/src/main/jniLibs/arm64-v8a/ \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/sdk/ReadMe.txt b/ai-solutions/android/02-ImageEnhancement/sdk/ReadMe.txt deleted file mode 100644 index 885d6d66..00000000 --- a/ai-solutions/android/02-ImageEnhancement/sdk/ReadMe.txt +++ /dev/null @@ -1,2 +0,0 @@ -OpenCV SDK needs to be placed here. -Please refer to : resolveDependencies.sh \ No newline at end of file diff --git a/ai-solutions/android/02-ImageEnhancement/settings.gradle b/ai-solutions/android/02-ImageEnhancement/settings.gradle deleted file mode 100644 index 0618c06e..00000000 --- a/ai-solutions/android/02-ImageEnhancement/settings.gradle +++ /dev/null @@ -1,3 +0,0 @@ -include ':enhancement' -rootProject.name = "ImageEnhancement" -include ':sdk' diff --git a/ai-solutions/android/03-ObjectDetection/GenerateDLC.ipynb b/ai-solutions/android/03-ObjectDetection/GenerateDLC.ipynb deleted file mode 100644 index cfc14630..00000000 --- a/ai-solutions/android/03-ObjectDetection/GenerateDLC.ipynb +++ /dev/null @@ -1,376 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "721491e1", - "metadata": {}, - "source": [ - "## Steps for generating YoloNAS dlc\n", - "#### Note->Use python3.8 or above for generating onnx and python3.6 for generating dlc" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "d1d3b4eb", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Requirement already satisfied: super-gradients==3.1.2 in /usr/local/lib/python3.8/site-packages (3.1.2)\n", - "Requirement already satisfied: torch>=1.9.0 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (1.13.1)\n", - "Requirement already satisfied: tqdm>=4.57.0 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (4.65.0)\n", - "Requirement already satisfied: boto3>=1.17.15 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (1.28.2)\n", - "Requirement already satisfied: jsonschema>=3.2.0 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (4.17.3)\n", - "Requirement already satisfied: Deprecated>=1.2.11 in /usr/local/lib/python3.8/site-packages 
(from super-gradients==3.1.2) (1.2.14)\n", - "  [... remaining pip 'Requirement already satisfied' dependency-resolution output trimmed ...]\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.8/site-packages (from requests>=2.5.0->sphinx~=4.0.2->super-gradients==3.1.2) 
(2023.5.7)\n", - "Requirement already satisfied: pyproject_hooks in /usr/local/lib/python3.8/site-packages (from build->pip-tools>=6.12.1->super-gradients==3.1.2) (1.0.0)\n", - "Requirement already satisfied: humanfriendly>=9.1 in /usr/local/lib/python3.8/site-packages (from coloredlogs->onnxruntime==1.13.1->super-gradients==3.1.2) (10.0)\n", - "Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.8/site-packages (from rich->onnx-simplifier<1.0,>=0.3.6->super-gradients==3.1.2) (3.0.0)\n", - "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.8/site-packages (from sympy->onnxruntime==1.13.1->super-gradients==3.1.2) (1.3.0)\n", - "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.8/site-packages (from markdown-it-py>=2.2.0->rich->onnx-simplifier<1.0,>=0.3.6->super-gradients==3.1.2) (0.1.2)\n", - "Requirement already satisfied: pyasn1<0.6.0,>=0.4.6 in /usr/local/lib/python3.8/site-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard>=2.4.1->super-gradients==3.1.2) (0.5.0)\n", - "Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.8/site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<1.1,>=0.5->tensorboard>=2.4.1->super-gradients==3.1.2) (3.2.2)\n", - "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n", - "\u001b[0m" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/usr/local/lib/python3.8/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n", - "[2023-07-13 17:15:54] INFO - crash_tips_setup.py - Crash tips is enabled. 
You can set your environment variable to CRASH_HANDLER=FALSE to disable it\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The console stream is logged into /root/sg_logs/console.log\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[2023-07-13 17:15:57] WARNING - __init__.py - Failed to import pytorch_quantization\n", - "[2023-07-13 17:15:57] WARNING - calibrator.py - Failed to import pytorch_quantization\n", - "[2023-07-13 17:15:57] WARNING - export.py - Failed to import pytorch_quantization\n", - "[2023-07-13 17:15:57] WARNING - selective_quantization_utils.py - Failed to import pytorch_quantization\n", - "[2023-07-13 17:15:58] INFO - checkpoint_utils.py - License Notification: YOLO-NAS pre-trained weights are subjected to the specific license terms and conditions detailed in \n", - "https://github.com/Deci-AI/super-gradients/blob/master/LICENSE.YOLONAS.md\n", - "By downloading the pre-trained weight files you agree to comply with these terms.\n" - ] - } - ], - "source": [ - "## Note- Use python3.8 or above for generating onnx\n", - "\n", - "!pip install super-gradients==3.1.2\n", - "\n", - "\n", - "## Downloading Model from git repo\n", - "import torch\n", - "# Load model with pretrained weights\n", - "from super_gradients.training import models\n", - "from super_gradients.common.object_names import Models\n", - "\n", - "model = models.get(Models.YOLO_NAS_S, pretrained_weights=\"coco\")\n", - "\n", - "# Prepare model for conversion\n", - "# Input size is in format of [Batch x Channels x Width x Height] where 640 is the standard COCO dataset dimensions\n", - "model.eval()\n", - "model.prep_model_for_conversion(input_size=[1, 3, 320, 320])\n", - "\n", - "# Create dummy_input\n", - "dummy_input = torch.randn([1, 3, 320, 320], device=\"cpu\")\n", - "\n", - "# Convert model to onnx\n", - "torch.onnx.export(model, dummy_input, \"yolo_nas_s.onnx\", opset_version=11)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "bb97b534", - "metadata": {}, - "source": [ - "#### Enable python3.6 environment, to use SNPE SDK and then convert onnx to dlc" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "c466b9aa", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-07-13 17:16:03,073 - 235 - INFO - Successfully simplified the onnx model in child process\n", - "2023-07-13 17:16:03,547 - 235 - INFO - Successfully receive the simplified onnx model in main process\n", - "2023-07-13 17:16:06,272 - 235 - INFO - INFO_INITIALIZATION_SUCCESS: \n" - ] - } - ], - "source": [ - "%%bash\n", - "snpe-onnx-to-dlc -i yolo_nas_s.onnx -o app/src/main/assets/yolo_nas_s.dlc" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "b2675610", - "metadata": {}, - "source": [ - "## Quantizing MobileNetSSD" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "id": "b01ac9cf", - "metadata": {}, - "outputs": [], - "source": [ - "##STEPS to preprocess images\n", - "\n", - "def preprocess(original_image):\n", - " resized_image = cv2.resize(original_image, (320, 320))\n", - " resized_image = resized_image/255\n", - " return resized_image\n", - "\n", - "import cv2\n", - "import numpy as np\n", - "import os\n", - "\n", - "##Please download Coco2014 dataset and give the path here\n", - "dataset_path = \"/workspace/val2014/\"\n", - "\n", - "!mkdir -p rawYoloNAS\n", - "\n", - "filenames=[]\n", - "for path in os.listdir(dataset_path)[:5]:\n", - " # check if current path 
is a file\n", - " if os.path.isfile(os.path.join(dataset_path, path)):\n", - " filenames.append(os.path.join(dataset_path, path))\n", - "\n", - "for filename in filenames:\n", - " original_image = cv2.imread(filename)\n", - " img = preprocess(original_image)\n", - " img = img.astype(np.float32)\n", - " img.tofile(\"rawYoloNAS/\"+filename.split(\"/\")[-1].split(\".\")[0]+\".raw\")" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "7370c51c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "find rawYoloNAS -name *.raw > YoloInputlist.txt" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "40b37c70", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "rawYoloNAS/COCO_val2014_000000000400.raw\n", - "rawYoloNAS/COCO_val2014_000000000042.raw\n", - "rawYoloNAS/COCO_val2014_000000000073.raw\n", - "rawYoloNAS/COCO_val2014_000000000074.raw\n", - "rawYoloNAS/COCO_val2014_000000000133.raw\n" - ] - } - ], - "source": [ - "%%bash\n", - "cat YoloInputlist.txt" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "39b97591", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[INFO] InitializeStderr: DebugLog initialized.\n", - "[INFO] Processed command-line arguments\n", - "[INFO] Quantized parameters\n", - "[INFO] Generated activations\n", - "[INFO] Saved quantized dlc to: app/src/main/assets/Quant_yoloNas_s_320.dlc\n", - "[INFO] DebugLog shutting down.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " 0.1ms [ INFO ] Initializing logging in the backend. Callback: [0xc42410], Log Level: [3]\n", - " 0.1ms [ INFO ] No BackendExtensions lib provided;initializing NetRunBackend Interface\n", - " 875.4ms [ INFO ] cleaning up resources for input tensors\n", - " 875.5ms [ INFO ] cleaning up resources for output tensors\n", - " 1524.6ms [ INFO ] cleaning up resources for input tensors\n", - " 1524.6ms [ INFO ] cleaning up resources for output tensors\n", - " 2136.3ms [ INFO ] cleaning up resources for input tensors\n", - " 2136.3ms [ INFO ] cleaning up resources for output tensors\n", - " 2852.1ms [ INFO ] cleaning up resources for input tensors\n", - " 2852.1ms [ INFO ] cleaning up resources for output tensors\n", - " 3461.0ms [ INFO ] cleaning up resources for input tensors\n", - " 3461.0ms [ INFO ] cleaning up resources for output tensors\n" - ] - } - ], - "source": [ - "%%bash\n", - "snpe-dlc-quantize --input_dlc app/src/main/assets/yolo_nas_s.dlc --input_list YoloInputlist.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc app/src/main/assets/Quant_yoloNas_s_320.dlc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a13d7629", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.17" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/ai-solutions/android/03-ObjectDetection/README.md b/ai-solutions/android/03-ObjectDetection/README.md deleted file mode 100644 index d9917ca0..00000000 --- a/ai-solutions/android/03-ObjectDetection/README.md +++ /dev/null @@ -1,215 +0,0 @@ -## 
Object Detection with YoloNAS / SSDMobilenetV2 / YoloX -This project uses the [Qualcomm® Neural Processing SDK for AI](https://developer.qualcomm.com/sites/default/files/docs/snpe/index.html), a deep learning SDK for Snapdragon platforms, to perform object detection on Android. The Android application can use any built-in or connected camera to capture frames and runs a machine learning model to obtain the prediction and location of each detected object. - -# Pre-requisites - -* Before starting the Android application, please follow the instructions for setting up the Qualcomm Neural Processing SDK: https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html -* An Android device (6.0 or above) with a display, based on one of the Snapdragon processors mentioned below, or a Snapdragon HDK, can be used to test the application. -* Download the COCO 2014 dataset and provide its path to GenerateDLC.ipynb by changing the "dataset_path" variable in the Quantization section of the notebook. - - -## List of Supported Devices - -- Snapdragon® SM8550 - -The above target supports the application with the CPU, GPU and DSP runtimes. For more information on supported devices, please follow this link: https://developer.qualcomm.com/docs/snpe/overview.html - -# Source Overview - -## Source Organization - -demo : Contains the demo GIF - -app : Contains source files in the standard Android app format - -app\src\main\assets : Contains the model binary (DLC) - -app\src\main\java\com\qc\objectdetectionYoloNas : Application Java source code - -app\src\main\cpp : Native source code - -sdk : Contains the OpenCV SDK - -## DLC Generation - -Run the Jupyter notebook GenerateDLC.ipynb. This notebook generates the quantized YoloNAS DLC. - -The YoloNAS model is trained on the COCO dataset for 80 classes of everyday objects. -The list of classes can be found in the dataset at: https://cocodataset.org/#explore - -## Code Implementation - -This application opens a camera preview, collects the frames and converts them to bitmaps. The network is built via the neural network builder by passing the model DLC name and runtime as inputs. The bitmap is then given to the model for inference, which returns the class prediction and localization of each detected object. - - -### Prerequisite for Camera Preview - -Permission to obtain camera preview frames is granted in the following file: -```xml -/app/src/main/AndroidManifest.xml - - ``` -In order to use the camera2 APIs, add the feature below: -```xml - -``` -### Loading Model -Code snippet for building the network and loading the model: -```cpp - snpe = snpeBuilder.setOutputLayers({}) - .setPerformanceProfile(zdl::DlSystem::PerformanceProfile_t::BURST) - .setExecutionPriorityHint( - zdl::DlSystem::ExecutionPriorityHint_t::HIGH) - .setRuntimeProcessorOrder(runtimeList) - .setUseUserSuppliedBuffers(useUserSuppliedBuffers) - .setPlatformConfig(platformConfig) - .setInitCacheMode(useCaching) - .setCPUFallbackMode(true) - .setUnconsumedTensorsAsOutputs(true) - .build(); -``` -### Preprocessing -The bitmap image is passed as an OpenCV Mat to the native layer and then converted to a BGR Mat. Each DLC expects a specific input size, so the input image must be resized to the size accepted by the selected model before it is passed to the DLC. -An illustrative Python sketch of these steps is shown below, followed by the native snippet used for YoloNAS preprocessing. For the other models the preprocessing may change according to their requirements.
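The sketch below is not part of the application source; it is a minimal illustration, assuming opencv-python and NumPy are installed, of the same preprocessing contract (resize to the 320x320 model input, keep OpenCV's BGR channel order, and scale pixel values by 1/255 as float32). The function name `preprocess_for_yolonas` and the file names are placeholders.
```python
# Minimal illustrative sketch (not part of the app): mirrors the preprocessing
# described above. Assumes opencv-python and numpy are installed; the function
# and file names are placeholders.
import cv2
import numpy as np

def preprocess_for_yolonas(image_path: str, side: int = 320) -> np.ndarray:
    img = cv2.imread(image_path)                      # OpenCV reads images in BGR order
    img = cv2.resize(img, (side, side), interpolation=cv2.INTER_LINEAR)
    return img.astype(np.float32) / 255.0             # same scale as 0.00392156862745f in the native code

# Example usage: write the buffer as raw float32, as the quantization notebook does.
# preprocess_for_yolonas("sample.jpg").tofile("sample.raw")
```
The same scaled float32 buffer layout is what the quantization notebook writes out as .raw input files.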
-```java - cv::Mat img320; - //Resize and get the size from model itself (320x320 for YOLONAS) - cv::resize(img,img320,cv::Size(dims[2],dims[1]),cv::INTER_LINEAR); - - float inputScale = 0.00392156862745f; - - float * accumulator = reinterpret_cast (&dest_buffer[0]); - - //opencv read in BGRA by default - cvtColor(img320, img320, CV_BGRA2BGR); - int lim = img320.rows*img320.cols*3; - for(int idx = 0; idx=0.5 ) - { - int x1 = BBout_boxcoords[i * 4 + 0]; - int y1 = BBout_boxcoords[i * 4 + 1]; - int x2 = BBout_boxcoords[i * 4 + 2]; - int y2 = BBout_boxcoords[i * 4 + 3]; - Boxlist.push_back(BoxCornerEncoding(x1, y1, x2, y2,*it,classname)); - } - } - - std::vector reslist = NonMaxSuppression(Boxlist,0.20); -``` -then we just scale the coords for original image - -```python - float top,bottom,left,right; - left = reslist[k].y1 * ratio_1; //y1 - right = reslist[k].y2 * ratio_1; //y2 - - bottom = reslist[k].x1 * ratio_2; //x1 - top = reslist[k].x2 * ratio_2; //x2 -``` - -## Drawing bounding boxes - -```python - RectangleBox rbox = boxlist.get(j); - float y = rbox.left; - float y1 = rbox.right; - float x = rbox.top; - float x1 = rbox.bottom; - - String fps_textLabel = "FPS: "+String.valueOf(rbox.fps); - canvas.drawText(fps_textLabel,10,70,mTextColor); - - String processingTimeTextLabel= rbox.processing_time+"ms"; - - canvas.drawRect(x1, y, x, y1, mBorderColor); - canvas.drawText(rbox.label,x1+10, y+40, mTextColor); - canvas.drawText(processingTimeTextLabel,x1+10, y+90, mTextColor); -``` - -# Build and run with Android Studio - -## Build APK file with Android Studio - -1. Clone QIDK repo. - -2. Run below script, from the directory where it is present, to resolve dependencies of this project. - -* This will copy snpe-release.aar file from $SNPE_ROOT to "snpe-release" directory in Android project. - - **NOTE - If you are using SNPE version 2.11 or greater, please change following line in resolveDependencies.sh.** - ``` - From: cp $SNPE_ROOT/android/snpe-release.aar snpe-release - To : cp $SNPE_ROOT/lib/android/snpe-release.aar snpe-release - ``` -* Download opencv and paste to sdk directory, to enable OpenCv for android Java. - -```java - bash resolveDependencies.sh -``` - - -3. Run jupyter notebook GenerateDLC.ipynb to generate DLC(s) for quantized YOLO_NAS DLC. Also, **change the dataset_path with Coco Dataset Path**. -* This script generates required dlc(s) and paste them to appropriate location. - - -4. Do gradle sync -5. Compile the project. -6. Output APK file should get generated : app-debug.apk -7. Prepare the Qualcomm Innovators development kit to install the application (Do not run APK on emulator) - -8. If Unsigned or Signed DSP runtime is not getting detected, then please check the logcat logs for the FastRPC error. DSP runtime may not get detected due to SE Linux security policy. Please try out following commands to set permissive SE Linux policy. - -It is recommended to run below commands. -```java -adb disable-verity -adb reboot -adb root -adb remount -adb shell setenforce 0 -``` - -9. Install and test application : app-debug.apk -```java -adb install -r -t app-debug.apk -``` - -10. launch the application - -Following is the basic "Pose Detection" Android App - -1. On launch of application, from home screen user can select the model and runtime and then press start camera button. -2. On first launch of camera, user needs to provide camera permissions. -3. After camera launched, the selected model with runtime starts loading in the background. 
User will see a dialogue box till model is being loaded. -4. Once the model is loaded, it will start detecting objects and box will be seen around the object if respective object is detected on the screen -5. User can go back to home screen by pressing back button and select appropriate model and run-time and observe performance difference. - -Same results for the application are : - -## Demo of the application -![Screenshot](.//demo/ObjectDetectYoloNAS.gif) - -# References -1. SSD - Single shot Multi box detector - https://arxiv.org/pdf/1512.02325.pdf -2. https://github.com/Deci-AI/super-gradients -3. https://zenodo.org/record/7789328 - - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. and/or its subsidiaries.* diff --git a/ai-solutions/android/03-ObjectDetection/app/build.gradle b/ai-solutions/android/03-ObjectDetection/app/build.gradle deleted file mode 100644 index 4f8293a0..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/build.gradle +++ /dev/null @@ -1,68 +0,0 @@ -apply plugin: 'com.android.application' - -android { - compileSdkVersion 30 - buildToolsVersion "30.0.3" - - defaultConfig { - applicationId "com.qcom.aistack_objdetect" - minSdkVersion 24 - targetSdkVersion 30 - versionCode 1 - versionName "1.0" - - testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner" - externalNativeBuild { - cmake { -// cppFlags '' - cppFlags "-std=c++11 -frtti -fexceptions" - arguments "-DOpenCV_DIR=" + project(':sdk').projectDir + "/native/jni", - "-DANDROID_TOOLCHAIN=clang" -// "-DANDROID_STL=c++_shared", -// "-DANDROID_ARM_NEON=TRUE" - targets "objectdetectionYoloNas" - } - ndk { - abiFilters 'arm64-v8a' - } - } - } - - packagingOptions { - pickFirst 'lib/x86/libc++_shared.so' - pickFirst 'lib/x86_64/libc++_shared.so' - pickFirst 'lib/arm64-v8a/libc++_shared.so' - pickFirst 'lib/armeabi-v7a/libc++_shared.so' - } - - buildTypes { - release { - minifyEnabled false - proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro' - } - } - - compileOptions { - sourceCompatibility JavaVersion.VERSION_1_8 - targetCompatibility JavaVersion.VERSION_1_8 - } - ndkVersion '21.4.7075529' - externalNativeBuild { - cmake { - path file('src/main/cpp/CMakeLists.txt') - } - } -} - -dependencies { - implementation fileTree(dir: 'libs', include: ['*.jar']) - implementation project(path: ':sdk') - testImplementation 'junit:junit:4.12' - androidTestImplementation 'com.android.support.test.espresso:espresso-core:3.0.1' - androidTestImplementation 'com.android.support.test.espresso:espresso-contrib:3.0.1' - implementation 'com.android.support:design:26.0.0' - implementation 'com.android.support:support-v4:26.0.0' - - - -} diff --git a/ai-solutions/android/03-ObjectDetection/app/local.properties b/ai-solutions/android/03-ObjectDetection/app/local.properties deleted file mode 100644 index 0a5b4775..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/local.properties +++ /dev/null @@ -1,8 +0,0 @@ -## This file must *NOT* be checked into Version Control Systems, -# as it contains information specific to your local configuration. -# -# Location of the SDK. This is only used by Gradle. -# For customization when using a Version Control System, please read the -# header note. 
-#Sat Jan 07 01:53:02 IST 2023 -sdk.dir=C\:\\Users\\shubgoya\\AppData\\Local\\Android\\Sdk diff --git a/ai-solutions/android/03-ObjectDetection/app/proguard-rules.pro b/ai-solutions/android/03-ObjectDetection/app/proguard-rules.pro deleted file mode 100644 index 6e7ffa99..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/proguard-rules.pro +++ /dev/null @@ -1,21 +0,0 @@ -# Add project specific ProGuard rules here. -# You can control the set of applied configuration files using the -# proguardFiles setting in build.gradle. -# -# For more details, see -# http://developer.android.com/guide/developing/tools/proguard.html - -# If your project uses WebView with JS, uncomment the following -# and specify the fully qualified class name to the JavaScript interface -# class: -#-keepclassmembers class fqcn.of.javascript.interface.for.webview { -# public *; -#} - -# Uncomment this to preserve the line number information for -# debugging stack traces. -#-keepattributes SourceFile,LineNumberTable - -# If you keep the line number information, uncomment this to -# hide the original source file name. -#-renamesourcefileattribute SourceFile diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/AndroidManifest.xml b/ai-solutions/android/03-ObjectDetection/app/src/main/AndroidManifest.xml deleted file mode 100644 index e387bd86..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/AndroidManifest.xml +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/assets/ReadMe.txt b/ai-solutions/android/03-ObjectDetection/app/src/main/assets/ReadMe.txt deleted file mode 100644 index aca5c44b..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/assets/ReadMe.txt +++ /dev/null @@ -1 +0,0 @@ -Generate model DLC and place here \ No newline at end of file diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/CMakeLists.txt b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/CMakeLists.txt deleted file mode 100644 index adeaf8e0..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/CMakeLists.txt +++ /dev/null @@ -1,66 +0,0 @@ - -# For more information about using CMake with Android Studio, read the -# documentation: https://d.android.com/studio/projects/add-native-code.html - -# Sets the minimum version of CMake required to build the native library. - -cmake_minimum_required(VERSION 3.18.1) - -# Declares and names the project. - -project("objectdetectionYoloNas") - -# Creates and names a library, sets it as either STATIC -# or SHARED, and provides the relative paths to its source code. -# You can define multiple libraries, and CMake builds them for you. -# Gradle automatically packages shared libraries with your APK. - -###OPENCV -#find_package(OpenCV REQUIRED) ##FAILED, cannot find libcpufeatures.so -#set(OpenCV_STATIC on) -#set(OpenCV_DIR C:/Users/shubgoya/Desktop/SNPEworkspace/github_workspace/HRNET_posenet/opencv45/native/jni) -find_package(OpenCV REQUIRED) -#INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS}) - - -###INCLUDE_DIRECTORIES -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/inc/) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/inc/zdl) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/inc/hpp) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}) - -add_library( # Sets the name of the library. - objectdetectionYoloNas - - # Sets the library as a shared library. 
- SHARED - - # Provides a relative path to your source file(s). - inference.cpp inference_helper.cpp objectdetectionYoloNas.cpp Model.h Model.cpp YOLONAS_Model.h YOLONAS_Model.cpp - SSDMobileNetV2_Model.h SSDMobileNetV2_Model.cpp YOLO_X_Model.h YOLO_X_Model.cpp) - -# Searches for a specified prebuilt library and stores the path as a -# variable. Because CMake includes system libraries in the search path by -# default, you only need to specify the name of the public NDK library -# you want to add. CMake verifies that the library exists before -# completing its build. - -find_library( # Sets the name of the path variable. - log-lib - - # Specifies the name of the NDK library that - # you want CMake to locate. - log ) - -# Specifies libraries CMake should link to your target library. You -# can link multiple libraries, such as libraries you define in this -# build script, prebuilt third-party libraries, or system libraries. - -target_link_libraries( # Specifies the target library. - objectdetectionYoloNas - - # Links the target library to the log library - # included in the NDK. - ${CMAKE_CURRENT_SOURCE_DIR}/../jniLibs/arm64-v8a/libSNPE.so - - ${log-lib} ${OpenCV_LIBS}) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/Model.cpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/Model.cpp deleted file mode 100644 index c15814fc..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/Model.cpp +++ /dev/null @@ -1,14 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by gsanjeev on 8/30/2023. -// - -// #include "Model.h" diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/Model.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/Model.h deleted file mode 100644 index 06e5d491..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/Model.h +++ /dev/null @@ -1,60 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by gsanjeev on 8/30/2023. -// - -#ifndef APP_MODEL_H -#define APP_MODEL_H - - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "android/log.h" - - -#include -#include -#include - -#define LOG_TAG "SNPE_INF" -#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__) -#define LOGE(...) 
__android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__) - -// List of All the supported models by the current application -enum ModelName -{ - YOLONAS, - SSDMobilenetV2, - YoloX -}; - -class Model { - -public: - virtual void preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) = 0; - virtual void postprocess(int orig_width, int orig_height, int &numberofobj, std::vector> &BB_coords, std::vector &BB_names, std::vector &BBout_boxcoords, std::vector &BBout_class, float milli_time) = 0; - virtual void msg() = 0; - - ModelName model_name=YOLONAS; //initialized - -}; - - -#endif //APP_MODEL_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/SSDMobileNetV2_Model.cpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/SSDMobileNetV2_Model.cpp deleted file mode 100644 index 72750b9e..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/SSDMobileNetV2_Model.cpp +++ /dev/null @@ -1,111 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by gsanjeev on 9/11/2023. -// - -#include "SSDMobileNetV2_Model.h" - - -void SSDMobileNetV2_Model::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) -{ - LOGI("SSDMobileNetV2_Model preprocess"); - cv::Mat img320; - //Resize and get the size from model itself (320x320 for SSDMobileNetV2) - cv::resize(img,img320,cv::Size(dims[2],dims[1]),0,0,cv::INTER_LINEAR); - - //float inputScale = 0.00392156862745f; //normalization value, this is 1/255 - float inputScale = 1.070312500000f; //normalization value, this is 1/255 - - float * accumulator = reinterpret_cast (&dest_buffer[0]); - - cvtColor(img320, img320, CV_BGRA2RGB); - //LOGI("num of channels: %d",img320.channels()); - int lim = img320.rows*img320.cols*3; - for(int idx = 0; idx> &BB_coords, std::vector &BB_names, std::vector &BBout_boxcoords, std::vector &BBout_class, float milli_time) { - LOGI("SSDMobileNetV2_Model postprocess"); - std::vector Boxlist; - std::vector Classlist; - - //sanjeev temp sanity check for sometimes stability issue in SSDMobileNetV2 Model - if (BBout_boxcoords.size() == 0) - { - numberofobj=-1; - LOGE("sanjeev BBout_boxcoords is zero. 
Returning Error.."); - return; - } - - //Post Processing - for(int i =1;i<(21);i++) // [21 classes supported by SSDMobileNetV2] - { - - int row_index; - float max_element; - - std::string classname = classnamemapping[i]; - - for (int j=i; j<(67914); j+=21) // [67914 = 21 (no of classes) x 3234 (total boxes output by model)] - { - if (BBout_class[j] > 0.4) - { - max_element = BBout_class[j]; - row_index = j/21; - - float x1 = BBout_boxcoords[row_index * 4 + 0]; - float y1 = BBout_boxcoords[row_index * 4 + 1]; - float x2 = BBout_boxcoords[row_index * 4 + 2]; - float y2 = BBout_boxcoords[row_index * 4 + 3]; - - Boxlist.push_back(SSDMobileNetV2BoxCornerEncoding(x1, y1, x2, y2,max_element,classname)); - } - } - - } - - //LOGI("Boxlist size:: %d",Boxlist.size()); - std::vector reslist = NonMaxSuppression(Boxlist,0.20); - //LOGI("reslist ssize %d", reslist.size()); - - numberofobj = reslist.size(); - - //LOGI("numberofobj detected = %d", numberofobj); - - float ratio_2 = orig_width; - float ratio_1 = orig_height; - - //LOGI("ratio1 %f :: ratio_2 %f",ratio_1,ratio_2); - - for(int k=0;k singleboxcoords{top, bottom, left, right, milli_time}; - BB_coords.push_back(singleboxcoords); - BB_names.push_back(reslist[k].objlabel); - } - -} - -void SSDMobileNetV2_Model::msg() -{ - LOGI("SSDMobileNetV2_Model Class msg model_name = %d", model_name); -} \ No newline at end of file diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/SSDMobileNetV2_Model.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/SSDMobileNetV2_Model.h deleted file mode 100644 index 379f2a70..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/SSDMobileNetV2_Model.h +++ /dev/null @@ -1,133 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by gsanjeev on 9/11/2023. 
-// - -#ifndef APP_SSDMOBILENETV2_MODEL_H -#define APP_SSDMOBILENETV2_MODEL_H - -#include "Model.h" - -#include - -class SSDMobileNetV2BoxCornerEncoding { - -public: - float x1; - float y1; - float x2; - float y2; - float score; - std::string objlabel; - - SSDMobileNetV2BoxCornerEncoding(float a, float b, float c, float d,float sc, std::string name="person") - { - x1 = a; - y1 = b; - x2 = c; - y2 = d; - score = sc; - objlabel = name; - } -}; - - - -class SSDMobileNetV2_Model: public Model{ - - std::map classnamemapping = - { - {0, "BACKGROUND"},{ 1, "aeroplane"},{ 2, "bicycle"},{ 3, "bird"},{ 4, "boat"},{ 5, "bottle"},{ - 6, "bus"},{ 7, "car"},{ 8, "cat"},{ 9, "chair"},{ 10, "cow"},{ 11, "diningtable"},{ 12, "dog"},{ - 13, "horse"},{ 14, "motorbike"},{ 15, "person"},{ 16, "pottedplant"},{ 17, "sheep"},{ 18, "sofa"},{ 19, "train"},{ - 20, "tvmonitor"} - }; - -public: - - SSDMobileNetV2_Model() - { - model_name=SSDMobilenetV2; - } - - void preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims); - void postprocess(int orig_width, int orig_height, int &numberofobj, std::vector> &BB_coords, std::vector &BB_names, std::vector &BBout_boxcoords, std::vector &BBout_class, float milli_time); - void msg(); - - inline float ComputeIntersectionOverUnion(const SSDMobileNetV2BoxCornerEncoding &box_i,const SSDMobileNetV2BoxCornerEncoding &box_j) - { - const float box_i_y_min = std::min(box_i.y1, box_i.y2); - const float box_i_y_max = std::max(box_i.y1, box_i.y2); - const float box_i_x_min = std::min(box_i.x1, box_i.x2); - const float box_i_x_max = std::max(box_i.x1, box_i.x2); - const float box_j_y_min = std::min(box_j.y1, box_j.y2); - const float box_j_y_max = std::max(box_j.y1, box_j.y2); - const float box_j_x_min = std::min(box_j.x1, box_j.x2); - const float box_j_x_max = std::max(box_j.x1, box_j.x2); - - const float area_i = - (box_i_y_max - box_i_y_min) * (box_i_x_max - box_i_x_min); - const float area_j = - (box_j_y_max - box_j_y_min) * (box_j_x_max - box_j_x_min); - if (area_i <= 0 || area_j <= 0) return 0.0; - const float intersection_ymax = std::min(box_i_y_max, box_j_y_max); - const float intersection_xmax = std::min(box_i_x_max, box_j_x_max); - const float intersection_ymin = std::max(box_i_y_min, box_j_y_min); - const float intersection_xmin = std::max(box_i_x_min, box_j_x_min); - const float intersection_area = - std::max(intersection_ymax - intersection_ymin, 0.0) * - std::max(intersection_xmax - intersection_xmin, 0.0); - return intersection_area / (area_i + area_j - intersection_area); - } - - std::vector NonMaxSuppression(std::vector boxes, - const float iou_threshold) - { - - if (boxes.size()==0) { - return boxes; - } - - std::sort(boxes.begin(), boxes.end(), [] (const SSDMobileNetV2BoxCornerEncoding& left, const SSDMobileNetV2BoxCornerEncoding& right) { - if (left.score > right.score) { - return true; - } else { - return false; - } - }); - - - std::vector flag(boxes.size(), false); - for (unsigned int i = 0; i < boxes.size(); i++) { - if (flag[i]) { - continue; - } - - for (unsigned int j = i + 1; j < boxes.size(); j++) { - if (ComputeIntersectionOverUnion(boxes[i],boxes[j]) > iou_threshold) { - flag[j] = true; - } - } - } - - std::vector ret; - for (unsigned int i = 0; i < boxes.size(); i++) { - if (!flag[i]) - ret.push_back(boxes[i]); - } - - return ret; - } - -}; - - -#endif //APP_SSDMOBILENETV2_MODEL_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/YOLONAS_Model.cpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/YOLONAS_Model.cpp 
deleted file mode 100644 index 7fa9b630..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/YOLONAS_Model.cpp +++ /dev/null @@ -1,90 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by gsanjeev on 8/30/2023. -// - -#include "YOLONAS_Model.h" - -void YOLONAS_Model::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) -{ - LOGI("YOLONAS_Model preprocess"); - cv::Mat img320; - //Resize and get the size from model itself (320x320 for YOLONAS) - cv::resize(img,img320,cv::Size(dims[2],dims[1]),cv::INTER_LINEAR); - - float inputScale = 0.00392156862745f; //normalization value, this is 1/255 - - float * accumulator = reinterpret_cast (&dest_buffer[0]); - - //opencv read in BGRA by default - cvtColor(img320, img320, CV_BGRA2BGR); - //LOGI("num of channels: %d",img320.channels()); - int lim = img320.rows*img320.cols*3; - for(int idx = 0; idx> &BB_coords, std::vector &BB_names, std::vector &BBout_boxcoords, std::vector &BBout_class, float milli_time) { - LOGI("YOLONAS_Model postprocess"); - std::vector Boxlist; - std::vector Classlist; - - //Post Processing - for(int i =0;i<(2100);i++) //TODO change value of 2100 to soft value - { - int start = i*80; - int end = (i+1)*80; - - auto it = max_element (BBout_class.begin()+start, BBout_class.begin()+end); - int index = distance(BBout_class.begin()+start, it); - - std::string classname = classnamemapping[index]; - if(*it>=0.5 ) - { - int x1 = BBout_boxcoords[i * 4 + 0]; - int y1 = BBout_boxcoords[i * 4 + 1]; - int x2 = BBout_boxcoords[i * 4 + 2]; - int y2 = BBout_boxcoords[i * 4 + 3]; - Boxlist.push_back(BoxCornerEncoding(x1, y1, x2, y2,*it,classname)); - } - } - - //LOGI("Boxlist size:: %d",Boxlist.size()); - std::vector reslist = NonMaxSuppression(Boxlist,0.20); - //LOGI("reslist ssize %d", reslist.size()); - - numberofobj = reslist.size(); - float ratio_2 = orig_width/320.0f; - float ratio_1 = orig_height/320.0f; - //LOGI("ratio1 %f :: ratio_2 %f",ratio_1,ratio_2); - - for(int k=0;k singleboxcoords{top, bottom, left, right, milli_time}; - BB_coords.push_back(singleboxcoords); - BB_names.push_back(reslist[k].objlabel); - } -} - -void YOLONAS_Model::msg() -{ - LOGI("YOLONAS_Model Class msg model_name = %d",model_name); -} \ No newline at end of file diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/YOLONAS_Model.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/YOLONAS_Model.h deleted file mode 100644 index 853af371..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/YOLONAS_Model.h +++ /dev/null @@ -1,142 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by gsanjeev on 8/30/2023. 
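// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original sources): the
// YOLONAS postprocess above scans 2100 anchors, each carrying 80 class scores,
// and keeps the arg-max class when its score is >= 0.5. The helper below
// isolates that per-anchor arg-max; the struct and function names are
// assumptions for illustration only.
#include <algorithm>
#include <iterator>
#include <vector>

struct BestClass { int class_index; float score; };

static BestClass yolonas_best_class(const std::vector<float>& class_scores, int anchor)
{
    auto first = class_scores.begin() + anchor * 80;       // 80 scores per anchor
    auto it    = std::max_element(first, first + 80);      // highest-scoring class
    return { static_cast<int>(std::distance(first, it)), *it };
}
// ---------------------------------------------------------------------------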
-// - -#ifndef APP_YOLONAS_MODEL_H -#define APP_YOLONAS_MODEL_H - -#include "Model.h" - -#include - -class BoxCornerEncoding { - -public: - int x1; - int y1; - int x2; - int y2; - float score; - std::string objlabel; - - BoxCornerEncoding(int a, int b, int c, int d,int sc, std::string name="person") - { - x1 = a; - y1 = b; - x2 = c; - y2 = d; - score = sc; - objlabel = name; - } -}; - - -class YOLONAS_Model: public Model{ - - - std::map classnamemapping = - { - {0, "person"},{ 1, "bicycle"},{ 2, "car"},{ 3, "motorcycle"},{ 4, "airplane"},{ 5, "bus"},{ - 6, "train"},{ 7, "truck"},{ 8, "boat"},{ 9, "traffic"},{ 10, "fire"},{ 11, "stop"},{ 12, "parking"},{ - 13, "bench"},{ 14, "bird"},{ 15, "cat"},{ 16, "dog"},{ 17, "horse"},{ 18, "sheep"},{ 19, "cow"},{ - 20, "elephant"},{ 21, "bear"},{ 22, "zebra"},{ 23, "giraffe"},{ 24, "backpack"},{ 25, "umbrella"},{ - 26, "handbag"},{ 27, "tie"},{ 28, "suitcase"},{ 29, "frisbee"},{ 30, "skis"},{ 31, "snowboard"},{ - 32, "sports"},{ 33, "kite"},{ 34, "baseball"},{ 35, "baseball"},{ 36, "skateboard"},{ 37, "surfboard"},{ - 38, "tennis"},{ 39, "bottle"},{ 40, "wine"},{ 41, "cup"},{ 42, "fork"},{ 43, "knife"},{ 44, "spoon"},{ - 45, "bowl"},{ 46, "banana"},{ 47, "apple"},{ 48, "sandwich"},{ 49, "orange"},{ 50, "broccoli"},{ - 51, "carrot"},{ 52, "hot"},{ 53, "pizza"},{ 54, "donut"},{ 55, "cake"},{ 56, "chair"},{ 57, "couch"},{ - 58, "potted"},{ 59, "bed"},{ 60, "dining"},{ 61, "toilet"},{ 62, "tv"},{ 63, "laptop"},{ 64, "mouse"},{ - 65, "remote"},{ 66, "keyboard"},{ 67, "cell"},{ 68, "microwave"},{ 69, "oven"},{ 70, "toaster"},{ - 71, "sink"},{ 72, "refrigerator"},{ 73, "book"},{ 74, "clock"},{ 75, "vase"},{ 76, "scissors"},{ - 77, "teddy"},{ 78, "hair"},{ 79, "toothbrush"} - }; - -public: - - YOLONAS_Model() - { - model_name=YOLONAS; - } - - void preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims); - void postprocess(int orig_width, int orig_height, int &numberofobj, std::vector> &BB_coords, std::vector &BB_names, std::vector &BBout_boxcoords, std::vector &BBout_class, float milli_time); - void msg(); - - inline float ComputeIntersectionOverUnion(const BoxCornerEncoding &box_i,const BoxCornerEncoding &box_j) - { - const float box_i_y_min = std::min(box_i.y1, box_i.y2); - const float box_i_y_max = std::max(box_i.y1, box_i.y2); - const float box_i_x_min = std::min(box_i.x1, box_i.x2); - const float box_i_x_max = std::max(box_i.x1, box_i.x2); - const float box_j_y_min = std::min(box_j.y1, box_j.y2); - const float box_j_y_max = std::max(box_j.y1, box_j.y2); - const float box_j_x_min = std::min(box_j.x1, box_j.x2); - const float box_j_x_max = std::max(box_j.x1, box_j.x2); - - const float area_i = - (box_i_y_max - box_i_y_min) * (box_i_x_max - box_i_x_min); - const float area_j = - (box_j_y_max - box_j_y_min) * (box_j_x_max - box_j_x_min); - if (area_i <= 0 || area_j <= 0) return 0.0; - const float intersection_ymax = std::min(box_i_y_max, box_j_y_max); - const float intersection_xmax = std::min(box_i_x_max, box_j_x_max); - const float intersection_ymin = std::max(box_i_y_min, box_j_y_min); - const float intersection_xmin = std::max(box_i_x_min, box_j_x_min); - const float intersection_area = - std::max(intersection_ymax - intersection_ymin, 0.0) * - std::max(intersection_xmax - intersection_xmin, 0.0); - return intersection_area / (area_i + area_j - intersection_area); - } - - std::vector NonMaxSuppression(std::vector boxes, - const float iou_threshold) - { - - if (boxes.size()==0) { - return boxes; - } - - std::sort(boxes.begin(), boxes.end(), 
[] (const BoxCornerEncoding& left, const BoxCornerEncoding& right) { - if (left.score > right.score) { - return true; - } else { - return false; - } - }); - - - std::vector flag(boxes.size(), false); - for (unsigned int i = 0; i < boxes.size(); i++) { - if (flag[i]) { - continue; - } - - for (unsigned int j = i + 1; j < boxes.size(); j++) { - if (ComputeIntersectionOverUnion(boxes[i],boxes[j]) > iou_threshold) { - flag[j] = true; - } - } - } - - std::vector ret; - for (unsigned int i = 0; i < boxes.size(); i++) { - if (!flag[i]) - ret.push_back(boxes[i]); - } - - return ret; - } - -}; - - -#endif //APP_YOLONAS_MODEL_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/YOLO_X_Model.cpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/YOLO_X_Model.cpp deleted file mode 100644 index 315a0919..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/YOLO_X_Model.cpp +++ /dev/null @@ -1,145 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by gsanjeev on 9/20/2023. -// - -#include "YOLO_X_Model.h" - - -void YOLO_X_Model::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) -{ - LOGI("YOLO_X_Model preprocess"); - cv::Mat img640; - //Resize and get the size from model itself (640x640 for YoloX) - cv::resize(img,img640,cv::Size(dims[2],dims[1]),cv::INTER_LINEAR); - - //float inputScale = 0.00392156862745f; //normalization value, this is 1/255 - float inputScale = 0.998046875000f; //normalization value, this is 1/255 - - float * accumulator = reinterpret_cast (&dest_buffer[0]); - - //opencv read in BGRA by default - cvtColor(img640, img640, CV_BGRA2BGR); - //LOGI("num of channels: %d",img640.channels()); - int lim = img640.rows*img640.cols*3; - for(int idx = 0; idx> &BB_coords, std::vector &BB_names, std::vector &BBout_boxcoords, std::vector &BBout_class, float milli_time) { - - //YoloX Model has only single tensor output (BBout_class) that includes classes, boxes as well as score - - LOGI("YOLO_X_Model postprocess"); - std::vector Boxlist; - std::vector Classlist; - - std::vector img_size = {640, 640}; - bool p6=false; - - struct Point { - float32_t x,y; - }; - - std::vector grids; - std::vector expanded_strides; - std::vector strides = !p6 ? 
std::vector{8, 16, 32} : std::vector{8, 16, 32, 64}; - - std::vector hsizes, wsizes; - for (const int stride : strides) { - hsizes.push_back(img_size[0] / stride); - wsizes.push_back(img_size[1] / stride); - } - - for (size_t i=0; i(x), static_cast(y)}; - grids.push_back(point); - expanded_strides.push_back(stride); - } - } - } - - for (size_t i=0; i<8400; i++) { - - BBout_class[i*85+0] = (BBout_class[i*85+0] + grids[i].x) * expanded_strides[i]; - BBout_class[i*85+1] = (BBout_class[i*85+1] + grids[i].y) * expanded_strides[i]; - BBout_class[i*85+2] = std::exp(BBout_class[i*85+2]) * expanded_strides[i]; - BBout_class[i*85+3] = std::exp(BBout_class[i*85+3]) * expanded_strides[i]; - - } - - for(int i =0;i<(8400);i++) // Total tensor output rows - { - int start = i*85+5; // each row contains classes from indexes 5 to 85 - int end = (i+1)*85; - - float score = BBout_class[start-1]; // each row contains score at index 4 - - if(score>=0.2 ) - { - - auto it = max_element (BBout_class.begin()+start, BBout_class.begin()+end); - int index = distance(BBout_class.begin()+start, it); - - std::string classname = classnamemapping[index]; - - // each row contains box co-ordinates from index 0 to 3 - float x1 = BBout_class[start-5]; - float y1 = BBout_class[start-4]; - float x2 = BBout_class[start-3]; - float y2 = BBout_class[start-2]; - Boxlist.push_back(YOLO_X_BoxCornerEncoding(x1, y1, x2, y2,score,classname)); - } - } - - //LOGI("Boxlist size:: %d",Boxlist.size()); - std::vector reslist_temp = Yolo_X_Rescale_boxes(Boxlist,orig_width,orig_height); - - std::vector reslist = NonMaxSuppression(reslist_temp,0.20); - //LOGI("reslist ssize %d", reslist.size()); - - - numberofobj = reslist.size(); - float ratio_2 = orig_width/640.0f; - float ratio_1 = orig_height/640.0f; - //LOGI("ratio1 %f :: ratio_2 %f",ratio_1,ratio_2); - - for(int k=0;k singleboxcoords{top, bottom, left, right, milli_time}; - BB_coords.push_back(singleboxcoords); - BB_names.push_back(reslist[k].objlabel); - } -} - -void YOLO_X_Model::msg() -{ - LOGI("YOLO_X_Model Class msg model_name = %d", model_name); -} \ No newline at end of file diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/YOLO_X_Model.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/YOLO_X_Model.h deleted file mode 100644 index 63acc3fc..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/YOLO_X_Model.h +++ /dev/null @@ -1,165 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by gsanjeev on 9/20/2023. 
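// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original sources): the
// YoloX postprocess above works on 8400 rows of 85 values each
// ([cx, cy, w, h, objectness, 80 class scores]) and first maps the raw outputs
// into 640x640 model-input coordinates using the per-cell grid offset and
// stride. The helper below restates that decode for a single row; the names
// are assumptions for illustration.
#include <cmath>

struct DecodedCentreBox { float cx, cy, w, h; };

static DecodedCentreBox yolox_decode_row(const float* row,
                                         float grid_x, float grid_y, float stride)
{
    return {
        (row[0] + grid_x) * stride,   // centre x in model-input pixels
        (row[1] + grid_y) * stride,   // centre y
        std::exp(row[2]) * stride,    // width
        std::exp(row[3]) * stride     // height
    };
}
// ---------------------------------------------------------------------------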
-// - -#ifndef APP_YOLO_X_MODEL_H -#define APP_YOLO_X_MODEL_H - -#include "Model.h" - -#include - -class YOLO_X_BoxCornerEncoding { - -public: - float x1; - float y1; - float x2; - float y2; - float score; - std::string objlabel; - - YOLO_X_BoxCornerEncoding(float a, float b, float c, float d,float sc, std::string name="person") - { - x1 = a; - y1 = b; - x2 = c; - y2 = d; - score = sc; - objlabel = name; - } -}; - - -class YOLO_X_Model: public Model{ - - - std::map classnamemapping = - { - {0, "person"},{ 1, "bicycle"},{ 2, "car"},{ 3, "motorcycle"},{ 4, "airplane"},{ 5, "bus"},{ - 6, "train"},{ 7, "truck"},{ 8, "boat"},{ 9, "traffic"},{ 10, "fire"},{ 11, "stop"},{ 12, "parking"},{ - 13, "bench"},{ 14, "bird"},{ 15, "cat"},{ 16, "dog"},{ 17, "horse"},{ 18, "sheep"},{ 19, "cow"},{ - 20, "elephant"},{ 21, "bear"},{ 22, "zebra"},{ 23, "giraffe"},{ 24, "backpack"},{ 25, "umbrella"},{ - 26, "handbag"},{ 27, "tie"},{ 28, "suitcase"},{ 29, "frisbee"},{ 30, "skis"},{ 31, "snowboard"},{ - 32, "sports"},{ 33, "kite"},{ 34, "baseball"},{ 35, "baseball"},{ 36, "skateboard"},{ 37, "surfboard"},{ - 38, "tennis"},{ 39, "bottle"},{ 40, "wine"},{ 41, "cup"},{ 42, "fork"},{ 43, "knife"},{ 44, "spoon"},{ - 45, "bowl"},{ 46, "banana"},{ 47, "apple"},{ 48, "sandwich"},{ 49, "orange"},{ 50, "broccoli"},{ - 51, "carrot"},{ 52, "hot"},{ 53, "pizza"},{ 54, "donut"},{ 55, "cake"},{ 56, "chair"},{ 57, "couch"},{ - 58, "potted"},{ 59, "bed"},{ 60, "dining"},{ 61, "toilet"},{ 62, "tv"},{ 63, "laptop"},{ 64, "mouse"},{ - 65, "remote"},{ 66, "keyboard"},{ 67, "cell"},{ 68, "microwave"},{ 69, "oven"},{ 70, "toaster"},{ - 71, "sink"},{ 72, "refrigerator"},{ 73, "book"},{ 74, "clock"},{ 75, "vase"},{ 76, "scissors"},{ - 77, "teddy"},{ 78, "hair"},{ 79, "toothbrush"} - }; - -public: - - YOLO_X_Model() - { - model_name=YoloX; - } - - void preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims); - void postprocess(int orig_width, int orig_height, int &numberofobj, std::vector> &BB_coords, std::vector &BB_names, std::vector &BBout_boxcoords, std::vector &BBout_class, float milli_time); - void msg(); - - inline float ComputeIntersectionOverUnion(const YOLO_X_BoxCornerEncoding &box_i,const YOLO_X_BoxCornerEncoding &box_j) - { - const float box_i_y_min = std::min(box_i.y1, box_i.y2); - const float box_i_y_max = std::max(box_i.y1, box_i.y2); - const float box_i_x_min = std::min(box_i.x1, box_i.x2); - const float box_i_x_max = std::max(box_i.x1, box_i.x2); - const float box_j_y_min = std::min(box_j.y1, box_j.y2); - const float box_j_y_max = std::max(box_j.y1, box_j.y2); - const float box_j_x_min = std::min(box_j.x1, box_j.x2); - const float box_j_x_max = std::max(box_j.x1, box_j.x2); - - const float area_i = - (box_i_y_max - box_i_y_min) * (box_i_x_max - box_i_x_min); - const float area_j = - (box_j_y_max - box_j_y_min) * (box_j_x_max - box_j_x_min); - if (area_i <= 0 || area_j <= 0) return 0.0; - const float intersection_ymax = std::min(box_i_y_max, box_j_y_max); - const float intersection_xmax = std::min(box_i_x_max, box_j_x_max); - const float intersection_ymin = std::max(box_i_y_min, box_j_y_min); - const float intersection_xmin = std::max(box_i_x_min, box_j_x_min); - const float intersection_area = - std::max(intersection_ymax - intersection_ymin, 0.0) * - std::max(intersection_xmax - intersection_xmin, 0.0); - return intersection_area / (area_i + area_j - intersection_area); - } - - std::vector NonMaxSuppression(std::vector boxes, - const float iou_threshold) - { - - if (boxes.size()==0) { - return boxes; - } - - 
std::sort(boxes.begin(), boxes.end(), [] (const YOLO_X_BoxCornerEncoding& left, const YOLO_X_BoxCornerEncoding& right) { - if (left.score > right.score) { - return true; - } else { - return false; - } - }); - - - std::vector flag(boxes.size(), false); - for (unsigned int i = 0; i < boxes.size(); i++) { - if (flag[i]) { - continue; - } - - for (unsigned int j = i + 1; j < boxes.size(); j++) { - if (ComputeIntersectionOverUnion(boxes[i],boxes[j]) > iou_threshold) { - flag[j] = true; - } - } - } - - std::vector ret; - for (unsigned int i = 0; i < boxes.size(); i++) { - if (!flag[i]) - ret.push_back(boxes[i]); - } - - return ret; - } - - std::vector Yolo_X_Rescale_boxes(std::vector boxes, - int img_width, int img_height){ - - std::vector ret; - for (unsigned int i = 0; i < boxes.size(); i++) { - - float x1=boxes[i].x1 - 0.5f * boxes[i].x2; - float y1=boxes[i].y1 - 0.5f * boxes[i].y2; - float x2=boxes[i].x1 + 0.5f * boxes[i].x2; - float y2=boxes[i].y1 + 0.5f * boxes[i].y2; - - boxes[i].x1 =x1; - boxes[i].y1 =y1; - boxes[i].x2 =x2; - boxes[i].y2 =y2; - - ret.push_back(boxes[i]); - } - - return ret; - - } - -}; - - -#endif //APP_YOLO_X_MODEL_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/CheckRuntime.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/CheckRuntime.hpp deleted file mode 100644 index 07538cd0..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/CheckRuntime.hpp +++ /dev/null @@ -1,17 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2017-2019 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef CHECKRUNTIME_H -#define CHECKRUNTIME_H - -#include "SNPE/SNPEFactory.hpp" - -zdl::DlSystem::Runtime_t checkRuntime(zdl::DlSystem::Runtime_t runtime); -bool checkGLCLInteropSupport(); - -#endif diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/CreateUserBuffer.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/CreateUserBuffer.hpp deleted file mode 100644 index 68f25407..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/CreateUserBuffer.hpp +++ /dev/null @@ -1,43 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2017-2020 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
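// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original sources):
// Yolo_X_Rescale_boxes above converts YoloX's centre-format boxes
// (cx, cy, w, h) into the corner format (x1, y1, x2, y2) that the IoU-based
// NonMaxSuppression expects. The standalone helper below shows the same
// conversion; its name and signature are assumptions.
static void centre_to_corners(float cx, float cy, float w, float h,
                              float& x1, float& y1, float& x2, float& y2)
{
    x1 = cx - 0.5f * w;   // left
    y1 = cy - 0.5f * h;   // top
    x2 = cx + 0.5f * w;   // right
    y2 = cy + 0.5f * h;   // bottom
}
// ---------------------------------------------------------------------------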
-// -//============================================================================== - -#include"inference.h" -#include -#include -#include -#include "SNPE/SNPE.hpp" -#include "DlSystem/IUserBuffer.hpp" -#include "DlSystem/UserBufferMap.hpp" - -typedef unsigned int GLuint; - -// Helper function to fill a single entry of the UserBufferMap with the given user-backed buffer -void createUserBuffer(zdl::DlSystem::UserBufferMap& userBufferMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - const char * name, - const bool isTfNBuffer, - int bitWidth); - - -// Create a UserBufferMap of the SNPE network outputs -void createOutputBufferMap(zdl::DlSystem::UserBufferMap& outputMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - const bool isTfNBuffer, - int bitWidth); - -// Create a UserBufferMap of the SNPE network inputs -void createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - const bool isTfNBuffer, - int bitWidth); diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/LoadContainer.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/LoadContainer.hpp deleted file mode 100644 index 85bf622a..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/LoadContainer.hpp +++ /dev/null @@ -1,19 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2019 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef LOADCONTAINER_H -#define LOADCONTAINER_H - -#include - -#include "DlContainer/IDlContainer.hpp" - -std::unique_ptr loadContainerFromFile(std::string containerPath); -std::unique_ptr loadContainerFromBuffer(const uint8_t * buffer, const size_t size); - -#endif diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/LoadInputTensor.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/LoadInputTensor.hpp deleted file mode 100644 index 7aec3b24..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/LoadInputTensor.hpp +++ /dev/null @@ -1,27 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2017-2019 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
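// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original sources): the
// two loaders declared in LoadContainer.hpp above open a DLC either from a
// file path or from an image already resident in memory; the buffer overload
// is presumably what the Android app uses after reading the model out of its
// assets (see build_network_BB in inference.h). The template argument of the
// returned unique_ptr was lost in this diff's rendering; IDlContainer is
// inferred here from IDlContainer.hpp's open(). Assumes LoadContainer.hpp and
// inference.h (for LOGE) are included; variable names are assumptions.
#include <memory>

static std::unique_ptr<zdl::DlContainer::IDlContainer>
openModel(const uint8_t* dlc_buffer, size_t dlc_size)
{
    auto container = loadContainerFromBuffer(dlc_buffer, dlc_size);
    if (container == nullptr) {
        LOGE("Failed to open DLC container from buffer");
    }
    return container;
}
// ---------------------------------------------------------------------------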
-// -//============================================================================== - -#ifndef LOADINPUTTENSOR_H -#define LOADINPUTTENSOR_H - -#include -#include -#include - -#include "SNPE/SNPE.hpp" -#include "DlSystem/ITensorFactory.hpp" -#include "DlSystem/TensorMap.hpp" -#include "../../Model.h" - - -bool loadInputUserBuffer(std::unordered_map>& applicationBuffers, - std::unique_ptr& snpe, - cv::Mat &model_input, - zdl::DlSystem::UserBufferMap& inputMap, - int bitWidth, Model *modelobj); -#endif diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/SetBuilderOptions.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/SetBuilderOptions.hpp deleted file mode 100644 index e3db3df0..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/SetBuilderOptions.hpp +++ /dev/null @@ -1,26 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2017-2019 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef SETBUILDEROPTIONS_H -#define SETBUILDEROPTIONS_H - -#include "DlSystem/RuntimeList.hpp" -#include "SNPE/SNPE.hpp" -#include "DlSystem/DlEnums.hpp" -//#include "DlSystem/UDLFunc.hpp" -#include "DlContainer/IDlContainer.hpp" -#include "DlSystem/PlatformConfig.hpp" - -std::unique_ptr setBuilderOptions(std::unique_ptr & container, - zdl::DlSystem::Runtime_t runtime, - zdl::DlSystem::RuntimeList runtimeList, - bool useUserSuppliedBuffers, - bool useCaching, - ModelName modelName); - -#endif //SETBUILDEROPTIONS_H \ No newline at end of file diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/Util.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/Util.hpp deleted file mode 100644 index 346e7ac0..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/Util.hpp +++ /dev/null @@ -1,41 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2017-2019 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
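// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original sources): the
// split() template in Util.hpp above tokenises a delimited string into any
// container of strings, skipping empty fields. A minimal usage example,
// assuming Util.hpp is included; the sample input string is made up:
#include <string>
#include <vector>

static std::vector<std::string> splitExample()
{
    std::vector<std::string> fields;
    split(fields, std::string("1,320,320,3"), ',');   // fields == {"1", "320", "320", "3"}
    return fields;
}
// ---------------------------------------------------------------------------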
-// -//============================================================================== - -#ifndef UTIL_H -#define UTIL_H - -#include -#include -#include -#include - -#include "DlSystem/ITensorFactory.hpp" -#include "DlSystem/TensorShape.hpp" - -template Container& split(Container& result, const typename Container::value_type & s, typename Container::value_type::value_type delimiter ) -{ - result.clear(); - std::istringstream ss( s ); - while (!ss.eof()) - { - typename Container::value_type field; - getline( ss, field, delimiter ); - if (field.empty()) continue; - result.push_back( field ); - } - return result; -} - - -cv::Mat get_affine_transform(int dst_w, int dst_h, int inv, double center[], double scale[]); -//void getcenterscale(int image_width, int image_height, double center[2], double scale[2]); -void getcenterscale(int image_width, int image_height, double center[2], double scale[2],float bottom, float left, float top, float right); -float** getCoords(std::vector buff, double center[], double scale[]); - -#endif - diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/inference.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/inference.h deleted file mode 100644 index a74eaea3..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/hpp/inference.h +++ /dev/null @@ -1,74 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// -// Created by shubpate on 12/11/2021. -// - -#ifndef NATIVEINFERENCE_INFERENCE_H -#define NATIVEINFERENCE_INFERENCE_H - -#include "zdl/DlSystem/TensorShape.hpp" -#include "zdl/DlSystem/TensorMap.hpp" -#include "zdl/DlSystem/TensorShapeMap.hpp" -#include "zdl/DlSystem/IUserBufferFactory.hpp" -#include "zdl/DlSystem/IUserBuffer.hpp" -#include "zdl/DlSystem/UserBufferMap.hpp" -#include "zdl/DlSystem/IBufferAttributes.hpp" - -#include "zdl/DlSystem/StringList.hpp" - -#include "zdl/SNPE/SNPE.hpp" -#include "zdl/SNPE/SNPEFactory.hpp" -#include "zdl/DlSystem/DlVersion.hpp" -#include "zdl/DlSystem/DlEnums.hpp" -#include "zdl/DlSystem/String.hpp" -#include "zdl/DlContainer/IDlContainer.hpp" -#include "zdl/SNPE/SNPEBuilder.hpp" - -#include "zdl/DlSystem/ITensor.hpp" -#include "zdl/DlSystem/ITensorFactory.hpp" - -#include -#include "android/log.h" -#include - -#include "../../Model.h" - -#define LOG_TAG "SNPE_INF" -#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__) -#define LOGE(...) 
__android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__) - -/*class BoxCornerEncoding { - -public: - int x1; - int y1; - int x2; - int y2; - float score; - std::string objlabel; - - BoxCornerEncoding(int a, int b, int c, int d,int sc, std::string name="person") - { - x1 = a; - y1 = b; - x2 = c; - y2 = d; - score = sc; - objlabel = name; - } -};*/ - -std::string build_network_BB(const uint8_t * dlc_buffer, const size_t dlc_size, const char runtime_arg, ModelName modelName); -bool SetAdspLibraryPath(std::string nativeLibPath); - -bool executeDLC(cv::Mat &img, int orig_width, int orig_height, int &numberofhuman, std::vector> &BB_coords, std::vector &BB_names, Model *modelobj); - -#endif //NATIVEINFERENCE_INFERENCE_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DiagLog/IDiagLog.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DiagLog/IDiagLog.h deleted file mode 100644 index 9a084071..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DiagLog/IDiagLog.h +++ /dev/null @@ -1,102 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef _DIAGLOG_IDIAGLOG_H_ -#define _DIAGLOG_IDIAGLOG_H_ - -#include "DiagLog/Options.h" -#include "DlSystem/SnpeApiExportDefine.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE IDiagLog handle - */ -typedef void* Snpe_IDiagLog_Handle_t; - -/** - * @brief . - * - * Sets the options after initialization occurs. - * - * @param[in] handle : Handle to access IDiagLog - * @param[in] loggingOptions : The options to set up diagnostic logging. - * - * @return Error code if the options could not be set. Ensure logging is not started/ - * SNPE_SUCCESS otherwise - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IDiagLog_SetOptions(Snpe_IDiagLog_Handle_t handle, Snpe_Options_Handle_t loggingOptionsHandle); - -/** - * @brief . - * - * Gets the curent options for the diag logger. - * - * @param[in] handle : Handle to access IDiagLog - * @return Handle to access DiagLog options. - */ -SNPE_API -Snpe_Options_Handle_t Snpe_IDiagLog_GetOptions(Snpe_IDiagLog_Handle_t handle); - -/** - * @brief . - * - * @param[in] handle : Handle to access IDiagLog - * @param[in] mask : Allows for setting the log mask once diag logging has started - * @return SNPE_SUCCESS if the level was set successfully. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IDiagLog_SetDiagLogMask(Snpe_IDiagLog_Handle_t handle, const char* mask) ; - -/** - * @brief . - * - * Enables logging. - * - * Logging should be started prior to the instantiation of other SNPE_APIs - * to ensure all events are captured. - * - * @param[in] handle : Handle to access IDiagLog - * @return SNPE_SUCCESS if diagnostic logging started successfully. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IDiagLog_Start(Snpe_IDiagLog_Handle_t handle); - -/** - * @brief Disables logging. 
- * - * @param[in] handle : Handle to access IDiagLog - * - * @return SNPE_SUCCESS if logging stopped successfully. Error code otherwise. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IDiagLog_Stop(Snpe_IDiagLog_Handle_t handle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _DIAGLOG_IDIAGLOG_H_ diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DiagLog/IDiagLog.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DiagLog/IDiagLog.hpp deleted file mode 100644 index 64b81eba..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DiagLog/IDiagLog.hpp +++ /dev/null @@ -1,133 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include -#include - -#include "Options.hpp" -#include "DlSystem/String.hpp" - -#include "DiagLog/IDiagLog.h" - - -namespace DiagLog{ -class IDiagLog : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static Snpe_ErrorCode_t InvalidDeleteCall(Snpe_IDiagLog_Handle_t ){ - return SNPE_ERRORCODE_CAPI_DELETE_FAILURE; - } - - static constexpr DeleteFunctionType DeleteFunction{InvalidDeleteCall}; - - class OptionsInternal : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_Options_Delete}; - public: - OptionsInternal() - : BaseType(Snpe_Options_Create()) - { } - - explicit OptionsInternal(const Options& options) - : BaseType(Snpe_Options_Create()) - { - setDiagLogMask(options.DiagLogMask.c_str()); - setLogFileDirectory(options.LogFileDirectory.c_str()); - setLogFileName(options.LogFileName.c_str()); - setLogFileRotateCount(options.LogFileRotateCount); - setLogFileReplace(options.LogFileReplace); - } - - const char* getDiagLogMask() const{ - return Snpe_Options_GetDiagLogMask(handle()); - } - void setDiagLogMask(const char* diagLogMask){ - Snpe_Options_SetDiagLogMask(handle(), diagLogMask); - } - - const char* getLogFileDirectory() const{ - return Snpe_Options_GetLogFileDirectory(handle()); - } - void setLogFileDirectory(const char* logFileDirectory){ - Snpe_Options_SetLogFileDirectory(handle(), logFileDirectory); - } - - const char* getLogFileName() const{ - return Snpe_Options_GetLogFileName(handle()); - } - void setLogFileName(const char* logFileName){ - Snpe_Options_SetLogFileName(handle(), logFileName); - } - - uint32_t getLogFileRotateCount() const{ - return Snpe_Options_GetLogFileRotateCount(handle()); - } - void setLogFileRotateCount(uint32_t logFileRotateCount){ - Snpe_Options_SetLogFileRotateCount(handle(), logFileRotateCount); - } - - bool getLogFileReplace() const{ - return Snpe_Options_GetLogFileReplace(handle()); - } - void setLogFileReplace(bool logFileReplace){ - Snpe_Options_SetLogFileReplace(handle(), logFileReplace); - } - - explicit operator Options() const{ - return { - getDiagLogMask(), - getLogFileDirectory(), - getLogFileName(), - getLogFileRotateCount(), - getLogFileReplace() - }; - } - - }; 
- - - -public: - bool setOptions(const Options& loggingOptions){ - OptionsInternal optionsInternal(loggingOptions); - return SNPE_SUCCESS == Snpe_IDiagLog_SetOptions(handle(), getHandle(optionsInternal)); - } - Options getOptions() const{ - OptionsInternal optionsInternal(moveHandle(Snpe_IDiagLog_GetOptions(handle()))); - return Options(optionsInternal); - } - - bool setDiagLogMask(const std::string& mask){ - return SNPE_SUCCESS == Snpe_IDiagLog_SetDiagLogMask(handle(), mask.c_str()); - } - bool setDiagLogMask(const DlSystem::String& mask){ - return setDiagLogMask(static_cast(mask)); - } - - bool start(void){ - return SNPE_SUCCESS == Snpe_IDiagLog_Start(handle()); - } - bool stop(void){ - return SNPE_SUCCESS == Snpe_IDiagLog_Stop(handle()); - } - -}; - -} // ns DiagLog - -ALIAS_IN_ZDL_NAMESPACE(DiagLog, IDiagLog) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DiagLog/Options.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DiagLog/Options.h deleted file mode 100644 index ad641cca..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DiagLog/Options.h +++ /dev/null @@ -1,164 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef _DIAGLOG_OPTIONS_H_ -#define _DIAGLOG_OPTIONS_H_ - -#include - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE Options handle - */ -typedef void* Snpe_Options_Handle_t; - - -SNPE_API -Snpe_Options_Handle_t Snpe_Options_Create(); - -/** - * Destroys/frees a Options - * - * @param[in] handle : Handle to access Options object - * @return indication of success/failures - */ -SNPE_API -Snpe_ErrorCode_t Snpe_Options_Delete(Snpe_Options_Handle_t handle); - -/** - * Gets DiagLogMask - * diagLogMask: Enables diag logging only on the specified area mask - * - * @param[in] handle : Handle to access Options object - * @return diagLogMask as a const char* - */ -SNPE_API -const char* Snpe_Options_GetDiagLogMask(Snpe_Options_Handle_t handle); - -/** - * Sets DiagLogMask - * diagLogMask: Enables diag logging only on the specified area mask - * - * @param[in] handle : Handle to access Options object - * @param[in] diagLogMask : specific area where logging needs to be enabed - */ -SNPE_API -void Snpe_Options_SetDiagLogMask(Snpe_Options_Handle_t handle, const char* diagLogMask); - -/** - * Gets logFileDirectory - * logFileDirectory: The path to the directory where log files will be written. - * The path may be relative or absolute. 
Relative paths are interpreted - * - * @param[in] handle : Handle to access Options object - * @return logFileDirectory as a const char* - */ -SNPE_API -const char* Snpe_Options_GetLogFileDirectory(Snpe_Options_Handle_t handle); - -/** - * Sets logFileDirectory - * logFileDirectory: The path to the directory where log files will be written. - * The path may be relative or absolute. Relative paths are interpreted - * - * @param[in] handle : Handle to access Options object - * @param[in] logFileDirectory : path for saving the log files - */ -SNPE_API -void Snpe_Options_SetLogFileDirectory(Snpe_Options_Handle_t handle, const char* logFileDirectory); - - -/** - * Gets logFileName - * logFileName: The name used for log files. If this value is empty then BaseName will be - * used as the default file name. - * - * @param[in] handle : Handle to access Options object - * @return logFileName as a const char* - */ -SNPE_API -const char* Snpe_Options_GetLogFileName(Snpe_Options_Handle_t handle); - -/** - * Sets logFileName - * logFileName: The name used for log files. If this value is empty then BaseName will be - * used as the default file name. - * - * @param[in] handle : Handle to access Options object - * @param[in] logFileName : name of log file - */ -SNPE_API -void Snpe_Options_SetLogFileName(Snpe_Options_Handle_t handle, const char* logFileName); - -/** - * Gets the maximum number of log files to create. If set to 0 no log rotation - * will be used and the log file name specified will be used each time, overwriting - * any existing log file that may exist. - * - * @param[in] handle : Handle to access options object. - * @return max log files to create - */ -SNPE_API -uint32_t Snpe_Options_GetLogFileRotateCount(Snpe_Options_Handle_t handle); - -/** - * Sets the maximum number of log files to create. If set to 0 no log rotation - * will be used and the log file name specified will be used each time, overwriting - * any existing log file that may exist. - * - * @param[in] handle : Handle to access options object. - * @param[in] logFileRotateCount : max log files to create - */ -SNPE_API -void Snpe_Options_SetLogFileRotateCount(Snpe_Options_Handle_t handle, uint32_t logFileRotateCount); - -/** - * If the log file already exists, control whether it will be replaced - * - * @param[in] handle : Handle to access options object - * @return 1 if log file will be replaced, 0 otherwise - */ -SNPE_API -int Snpe_Options_GetLogFileReplace(Snpe_Options_Handle_t handle); - -/** - * If the log file already exists, control whether it will be replaced - * - * @param[in] handle : Handle to access options object - * @param[in] logFileReplace : 1 if log file to be replaced, 0 otherwise - */ -SNPE_API -void Snpe_Options_SetLogFileReplace(Snpe_Options_Handle_t handle, int logFileReplace); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _DIAGLOG_OPTIONS_H_ diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DiagLog/Options.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DiagLog/Options.hpp deleted file mode 100644 index c9ad48b6..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DiagLog/Options.hpp +++ /dev/null @@ -1,50 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include -#include - -#include "DiagLog/IDiagLog.h" - - -namespace DiagLog { - -class Options -{ -public: - Options( - std::string diagLogMask = "", - std::string logFileDirectory = "diaglogs", - std::string logFileName = "DiagLog", - uint32_t logFileRotateCount = 20, - bool logFileReplace = true - ) - : DiagLogMask(std::move(diagLogMask)), - LogFileDirectory(std::move(logFileDirectory)), - LogFileName(std::move(logFileName)), - LogFileRotateCount(logFileRotateCount), - LogFileReplace(logFileReplace) - { - // Solves the empty string problem with multiple std libs - DiagLogMask.reserve(1); - } - - std::string DiagLogMask; - std::string LogFileDirectory; - std::string LogFileName; - uint32_t LogFileRotateCount; - - bool LogFileReplace; -}; - -} // ns DiagLog - -ALIAS_IN_ZDL_NAMESPACE(DiagLog, Options) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlContainer/DlContainer.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlContainer/DlContainer.h deleted file mode 100644 index 6ce7cd25..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlContainer/DlContainer.h +++ /dev/null @@ -1,185 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
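// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original sources): the
// DiagLog::Options struct above is passed to DiagLog::IDiagLog::setOptions()
// (declared earlier in IDiagLog.hpp) before logging is started. How the
// IDiagLog pointer is obtained from the SNPE instance is not shown in these
// headers, so it is left as a parameter here; the log file name is an assumed
// example value.
static bool startDiagLogging(DiagLog::IDiagLog* logger)
{
    DiagLog::Options options;                      // defaults: "diaglogs" directory, rotate count 20
    options.LogFileName = "ObjectDetectionDiag";   // assumed example file name
    if (!logger->setOptions(options)) {
        return false;
    }
    return logger->start();   // per IDiagLog.h, start logging before other SNPE API calls
}
// ---------------------------------------------------------------------------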
-// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_CONTAINER_DLCONTAINER_H -#define DL_CONTAINER_DLCONTAINER_H - -#ifdef __cplusplus -#include // uint8_t -#include // size_t -#else -#include -#include -#endif - -#include "DlSystem/DlError.h" -#include "DlSystem/StringList.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE DlcRecord handle - */ -typedef void* Snpe_DlcRecord_Handle_t; - -/** - * Constructs a DlcRecord and returns a handle to it - * - * @return the handle to the created DlcRecord - */ -SNPE_API -Snpe_DlcRecord_Handle_t Snpe_DlcRecord_Create(); - -/** - * Constructs a DlcRecord with a provided name and returns a handle to it - * - * @param[in] name : the name of the record - * - * @return the handle to the created DlcRecord - */ -SNPE_API -Snpe_DlcRecord_Handle_t Snpe_DlcRecord_CreateName(const char* name); - - -/** - * Destroys/frees a DlcRecord - * - * @param[in] dlcRecordHandle : Handle to access DlcRecord - * - * @return indication of success/failures - */ -SNPE_API -Snpe_ErrorCode_t Snpe_DlcRecord_Delete(Snpe_DlcRecord_Handle_t dlcRecordHandle); - -/** - * Gets the size of a DlcRecord in bytes - * - * @param[in] dlcRecordHandle : Handle to access DlcRecord - * - * @return the size of the DlcRecord in bytes - */ -SNPE_API -size_t Snpe_DlcRecord_Size(Snpe_DlcRecord_Handle_t dlcRecordHandle); - -/** - * Gets a pointer to the start of the DlcRecord's data - * - * @param[in] dlcRecordHandle : Handle to access DlcRecord - * - * @return uint8_t pointer to the DlcRecord's data - */ -SNPE_API -uint8_t* Snpe_DlcRecord_Data(Snpe_DlcRecord_Handle_t dlcRecordHandle); - -/** - * Gets the name of the DlcRecord - * - * @param[in] dlcRecordHandle : Handle to access DlcRecord - * - * @return the record's name - */ -SNPE_API -const char* Snpe_DlcRecord_Name(Snpe_DlcRecord_Handle_t dlcRecordHandle); - -/** - * A typedef to indicate a SNPE DlContainer handle - */ -typedef void* Snpe_DlContainer_Handle_t; - -/** - * Destroys/frees a DlContainer - * - * @param[in] dlContainerHandle : Handle to access DlContainer - * - * @return indication of success/failures - */ -SNPE_API -Snpe_ErrorCode_t Snpe_DlContainer_Delete(Snpe_DlContainer_Handle_t dlContainerHandle); - - -/** - * Initializes a container from a container archive file. - * - * @param[in] filename Container archive file path. - * - * @return Status of container open call - */ -SNPE_API -Snpe_DlContainer_Handle_t Snpe_DlContainer_Open(const char* filename); - -/** - * Initializes a container from a byte buffer. - * - * @param[in] buffer Byte buffer holding the contents of an archive - * file. - * - * @param[in] size Size of the byte buffer. - * - * @return A Snpe_DlContainer_Handle_t to access the dlContainer - */ -SNPE_API -Snpe_DlContainer_Handle_t Snpe_DlContainer_OpenBuffer(const uint8_t* buffer, const size_t size); - -/** - * Get the record catalog for a container. - * - * @param[in] dlContainerHandle : Handle to access DlContainer - * - * @return A Snpe_StringListHandle_t that holds the record names of the DlContainer - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_DlContainer_GetCatalog(Snpe_DlContainer_Handle_t dlContainerHandle); - -/** - * Get a record from a container by name. - * - * @param[in] dlContainerHandle : Handle to access DlContainer - * @param[in] recordName : Name of the record to fetch. 
- * - * @return A Snpe_DlcRecordHandle_t that owns the record read from the DlContainer - */ -SNPE_API -Snpe_DlcRecord_Handle_t Snpe_DlContainer_GetRecord(Snpe_DlContainer_Handle_t dlContainerHandle, const char* recordName); - -/** - * Save the container to an archive on disk. This function will save the - * container if the filename is different from the file that it was opened - * from, or if at least one record was modified since the container was - * opened. - * - * It will truncate any existing file at the target path. - * - * @param[in] dlContainerHandle : Handle to access DlContainer - * @param[in] filename : Container archive file path. - * - * @return indication of success/failure - */ -SNPE_API -Snpe_ErrorCode_t Snpe_DlContainer_Save(Snpe_DlContainer_Handle_t dlContainerHandle, const char* filename); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_CONTAINER_DLCONTAINER_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlContainer/IDlContainer.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlContainer/IDlContainer.hpp deleted file mode 100644 index 482dbd02..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlContainer/IDlContainer.hpp +++ /dev/null @@ -1,146 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include -#include -#include -#include -#include - -#include "Wrapper.hpp" -#include "DlSystem/String.hpp" - -#include "DlContainer/DlContainer.h" -#include "DlSystem/StringList.hpp" - - - -namespace DlContainer { - -struct DlcRecord -{ - std::string name; - std::vector data; - - DlcRecord() - : name{}, - data{} - { } - - DlcRecord( DlcRecord&& other ) noexcept - : name(std::move(other.name)), - data(std::move(other.data)) - { } - DlcRecord(const std::string& new_name) - : name(new_name), - data() - { - if(name.empty()) { - name.reserve(1); - } - } - DlcRecord(const DlcRecord&) = delete; -}; - - -class IDlContainer : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_DlContainer_Delete}; - - template - void getCatalog_(std::set& catalog) const{ - DlSystem::StringList sl(moveHandle(Snpe_DlContainer_GetCatalog(handle()))); - for(auto s : sl){ - catalog.emplace(s); - } - } - - - class DlcRecordInternal : public Wrapper { - friend BaseType; - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_DlcRecord_Delete}; - public: - DlcRecordInternal() - : BaseType(Snpe_DlcRecord_Create()) - { } - explicit DlcRecordInternal(const std::string& name) - : BaseType(Snpe_DlcRecord_CreateName(name.c_str())) - { } - - uint8_t* getData(){ - return Snpe_DlcRecord_Data(handle()); - } - size_t size() const{ - return Snpe_DlcRecord_Size(handle()); - } - const char* getName(){ - return Snpe_DlcRecord_Name(handle()); - } - }; - - -public: - static std::unique_ptr open(const std::string& filename) noexcept{ - return makeUnique(Snpe_DlContainer_Open(filename.c_str())); - } - - static std::unique_ptr open(const uint8_t* buffer, const size_t size) noexcept{ - return 
makeUnique(Snpe_DlContainer_OpenBuffer(buffer, size)); - - } - static std::unique_ptr open(const std::vector& buffer) noexcept{ - return open(buffer.data(), buffer.size()); - } - static std::unique_ptr open(const DlSystem::String &filename) noexcept{ - return open(static_cast(filename)); - } - - - void getCatalog(std::set& catalog) const{ - return getCatalog_(catalog); - } - void getCatalog(std::set& catalog) const{ - return getCatalog_(catalog); - } - - bool getRecord(const std::string& name, DlcRecord& record) const{ - auto h = Snpe_DlContainer_GetRecord(handle(), name.c_str()); - if(!h) return false; - DlcRecordInternal internal(moveHandle(h)); - auto data = internal.getData(); - - record.name.assign(internal.getName()); - record.data.assign(data, data+internal.size()); - return true; - } - - bool getRecord(const DlSystem::String& name, DlcRecord& record) const{ - return getRecord(static_cast(name), record); - } - - bool save(const std::string& filename){ - return Snpe_DlContainer_Save(handle(), filename.c_str()); - } - - bool save(const DlSystem::String& filename){ - return save(static_cast(filename)); - } -}; - - -} // ns DlContainer - -ALIAS_IN_ZDL_NAMESPACE(DlContainer, DlcRecord) -ALIAS_IN_ZDL_NAMESPACE(DlContainer, IDlContainer) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlEnums.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlEnums.h deleted file mode 100644 index 85a0f4d3..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlEnums.h +++ /dev/null @@ -1,267 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _DL_ENUMS_H_ -#define _DL_ENUMS_H_ - -#include "DlSystem/SnpeApiExportDefine.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * Enumeration of supported target runtimes. - */ -typedef enum -{ - /// Special value indicating the property is unset. - SNPE_RUNTIME_UNSET = -1, - /// Run the processing on Snapdragon CPU. - /// Data: float 32bit - /// Math: float 32bit - SNPE_RUNTIME_CPU_FLOAT32 = 0, - /// Default legacy enum to retain backward compatibility. - /// CPU = CPU_FLOAT32 - SNPE_RUNTIME_CPU = SNPE_RUNTIME_CPU_FLOAT32, - - /// Run the processing on the Adreno GPU. - /// Data: float 16bit - /// Math: float 32bit - SNPE_RUNTIME_GPU_FLOAT32_16_HYBRID = 1, - /// Default legacy enum to retain backward compatibility. - /// GPU = GPU_FLOAT32_16_HYBRID - SNPE_RUNTIME_GPU = SNPE_RUNTIME_GPU_FLOAT32_16_HYBRID, - - /// Run the processing on the Hexagon DSP. - /// Data: 8bit fixed point Tensorflow style format - /// Math: 8bit fixed point Tensorflow style format - SNPE_RUNTIME_DSP_FIXED8_TF = 2, - /// Default legacy enum to retain backward compatibility. - /// DSP = DSP_FIXED8_TF - SNPE_RUNTIME_DSP = SNPE_RUNTIME_DSP_FIXED8_TF, - - /// Run the processing on the Adreno GPU. 
- /// Data: float 16bit - /// Math: float 16bit - SNPE_RUNTIME_GPU_FLOAT16 = 3, - - /// Run the processing on Snapdragon AIX+HVX. - /// Data: 8bit fixed point Tensorflow style format - /// Math: 8bit fixed point Tensorflow style format - SNPE_RUNTIME_AIP_FIXED8_TF = 5, - SNPE_RUNTIME_AIP_FIXED_TF = SNPE_RUNTIME_AIP_FIXED8_TF -} Snpe_Runtime_t; - -/** - * Enumeration of runtime available check options. - */ -typedef enum -{ - /// Perform standard runtime available check - SNPE_RUNTIME_CHECK_OPTION_DEFAULT = 2, - /// Perform standard runtime available check - SNPE_RUNTIME_CHECK_OPTION_NORMAL_CHECK = 0, - /// Perform basic runtime available check, may be runtime specific - SNPE_RUNTIME_CHECK_OPTION_BASIC_CHECK = 1, - /// Perform unsignedPD runtime available check - SNPE_RUNTIME_CHECK_OPTION_UNSIGNEDPD_CHECK = 2, -} Snpe_RuntimeCheckOption_t; - -/** - * Enumeration of various performance profiles that can be requested. - */ -typedef enum -{ - /// Run in a standard mode. - /// This mode will be deprecated in the future and replaced with BALANCED. - SNPE_PERFORMANCE_PROFILE_DEFAULT = 0, - /// Run in a balanced mode. - SNPE_PERFORMANCE_PROFILE_BALANCED = 0, - - /// Run in high performance mode - SNPE_PERFORMANCE_PROFILE_HIGH_PERFORMANCE = 1, - - /// Run in a power sensitive mode, at the expense of performance. - SNPE_PERFORMANCE_PROFILE_POWER_SAVER = 2, - - /// Use system settings. SNPE makes no calls to any performance related APIs. - SNPE_PERFORMANCE_PROFILE_SYSTEM_SETTINGS = 3, - - /// Run in sustained high performance mode - SNPE_PERFORMANCE_PROFILE_SUSTAINED_HIGH_PERFORMANCE = 4, - - /// Run in burst mode - SNPE_PERFORMANCE_PROFILE_BURST = 5, - - /// Run in lower clock than POWER_SAVER, at the expense of performance. - SNPE_PERFORMANCE_PROFILE_LOW_POWER_SAVER = 6, - - /// Run in higher clock and provides better performance than POWER_SAVER. - SNPE_PERFORMANCE_PROFILE_HIGH_POWER_SAVER = 7, - - /// Run in lower balanced mode - SNPE_PERFORMANCE_PROFILE_LOW_BALANCED = 8, - - /// Run in lowest clock at the expense of performance - SNPE_PERFORMANCE_PROFILE_EXTREME_POWER_SAVER = 9, - -} Snpe_PerformanceProfile_t; - -/** - * Enumeration of various profilngLevels that can be requested. - */ -typedef enum -{ - /// No profiling. - /// Collects no runtime stats in the DiagLog - SNPE_PROFILING_LEVEL_OFF = 0, - - /// Basic profiling - /// Collects some runtime stats in the DiagLog - SNPE_PROFILING_LEVEL_BASIC = 1, - - /// Detailed profiling - /// Collects more runtime stats in the DiagLog, including per-layer statistics - /// Performance may be impacted - SNPE_PROFILING_LEVEL_DETAILED = 2, - - /// Moderate profiling - /// Collects more runtime stats in the DiagLog, no per-layer statistics - SNPE_PROFILING_LEVEL_MODERATE = 3, - - /// Linting profiling - /// HTP exclusive profiling level that collects in-depth performance metrics - /// for each op in the graph including main thread execution time and time spent - /// on parallel background ops - SNPE_PROFILING_LEVEL_LINTING = 4 - -} Snpe_ProfilingLevel_t; - -/** - * Enumeration of various execution priority hints. - */ -typedef enum -{ - /// Normal priority - SNPE_EXECUTION_PRIORITY_NORMAL = 0, - - /// Higher than normal priority - SNPE_EXECUTION_PRIORITY_HIGH = 1, - - /// Lower priority - SNPE_EXECUTION_PRIORITY_LOW = 2, - - /// Between Normal and High priority - SNPE_EXECUTION_PRIORITY_NORMAL_HIGH = 3 - -} Snpe_ExecutionPriorityHint_t; - -/** - * Enumeration that lists the supported image encoding formats. 
- */ -typedef enum -{ - /// For unknown image type. Also used as a default value for ImageEncoding_t. - SNPE_IMAGE_ENCODING_UNKNOWN = 0, - - /// The RGB format consists of 3 bytes per pixel: one byte for - /// Red, one for Green, and one for Blue. The byte ordering is - /// endian independent and is always in RGB byte order. - SNPE_IMAGE_ENCODING_RGB = 1, - - /// The ARGB32 format consists of 4 bytes per pixel: one byte for - /// Red, one for Green, one for Blue, and one for the alpha channel. - /// The alpha channel is ignored. The byte ordering depends on the - /// underlying CPU. For little endian CPUs, the byte order is BGRA. - /// For big endian CPUs, the byte order is ARGB. - SNPE_IMAGE_ENCODING_ARGB32 = 2, - - /// The RGBA format consists of 4 bytes per pixel: one byte for - /// Red, one for Green, one for Blue, and one for the alpha channel. - /// The alpha channel is ignored. The byte ordering is endian independent - /// and is always in RGBA byte order. - SNPE_IMAGE_ENCODING_RGBA = 3, - - /// The GRAYSCALE format is for 8-bit grayscale. - SNPE_IMAGE_ENCODING_GRAYSCALE = 4, - - /// NV21 is the Android version of YUV. The Chrominance is down - /// sampled and has a subsampling ratio of 4:2:0. Note that this - /// image format has 3 channels, but the U and V channels - /// are subsampled. For every four Y pixels there is one U and one V pixel. @newpage - SNPE_IMAGE_ENCODING_NV21 = 5, - - /// The BGR format consists of 3 bytes per pixel: one byte for - /// Red, one for Green and one for Blue. The byte ordering is - /// endian independent and is always BGR byte order. - SNPE_IMAGE_ENCODING_BGR = 6 -} Snpe_ImageEncoding_t; - -/** - * Enumeration that lists the supported LogLevels that can be set by users. - */ -typedef enum -{ - /// Enumeration variable to be used by user to set logging level to FATAL. - SNPE_LOG_LEVEL_FATAL = 0, - - /// Enumeration variable to be used by user to set logging level to ERROR. - SNPE_LOG_LEVEL_ERROR = 1, - - /// Enumeration variable to be used by user to set logging level to WARN. - SNPE_LOG_LEVEL_WARN = 2, - - /// Enumeration variable to be used by user to set logging level to INFO. - SNPE_LOG_LEVEL_INFO = 3, - - /// Enumeration variable to be used by user to set logging level to VERBOSE. - SNPE_LOG_LEVEL_VERBOSE = 4 -} Snpe_LogLevel_t; - -/** - * Enumeration that list the supported data types for buffers - */ -typedef enum -{ - /// Unspecified - SNPE_IO_BUFFER_DATATYPE_UNSPECIFIED = 0, - - /// 32-bit floating point - SNPE_IO_BUFFER_DATATYPE_FLOATING_POINT_32 = 1, - - /// 16-bit floating point - SNPE_IO_BUFFER_DATATYPE_FLOATING_POINT_16 = 2, - - /// 8-bit fixed point - SNPE_IO_BUFFER_DATATYPE_FIXED_POINT_8 = 3, - - /// 16-bit fixed point - SNPE_IO_BUFFER_DATATYPE_FIXED_POINT_16 = 4 -} Snpe_IOBufferDataType_t; - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _DL_ENUMS_H_ diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlEnums.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlEnums.hpp deleted file mode 100644 index 9158f594..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlEnums.hpp +++ /dev/null @@ -1,266 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
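// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original sources): the
// application passes a single character (runtime_arg in inference.h) to pick
// one of the Runtime_t targets defined in this header before calling
// checkRuntime(). The mapping below is an assumed convention ('D' = DSP,
// 'G' = GPU, anything else = CPU) shown only to illustrate how the enum is
// used; it is not taken from the deleted sources.
static zdl::DlSystem::Runtime_t selectRuntime(char runtime_arg)
{
    switch (runtime_arg) {
        case 'D': return zdl::DlSystem::Runtime_t::DSP;   // Hexagon DSP, 8-bit fixed point
        case 'G': return zdl::DlSystem::Runtime_t::GPU;   // Adreno GPU, FP16 data / FP32 math
        default:  return zdl::DlSystem::Runtime_t::CPU;   // Snapdragon CPU, FP32
    }
}
// ---------------------------------------------------------------------------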
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -namespace DlSystem { -/** @addtogroup c_plus_plus_apis C++ -@{ */ - -/** - * Enumeration of supported target runtimes. - */ -enum class Runtime_t -{ - /// Special value indicating the property is unset. - UNSET = -1, - /// Run the processing on Snapdragon CPU. - /// Data: float 32bit - /// Math: float 32bit - CPU_FLOAT32 = 0, - /// Default legacy enum to retain backward compatibility. - /// CPU = CPU_FLOAT32 - CPU = CPU_FLOAT32, - - /// Run the processing on the Adreno GPU. - /// Data: float 16bit - /// Math: float 32bit - GPU_FLOAT32_16_HYBRID = 1, - /// Default legacy enum to retain backward compatibility. - /// GPU = GPU_FLOAT32_16_HYBRID - GPU = GPU_FLOAT32_16_HYBRID, - - /// Run the processing on the Hexagon DSP. - /// Data: 8bit fixed point Tensorflow style format - /// Math: 8bit fixed point Tensorflow style format - DSP_FIXED8_TF = 2, - /// Default legacy enum to retain backward compatibility. - /// DSP = DSP_FIXED8_TF - DSP = DSP_FIXED8_TF, - - /// Run the processing on the Adreno GPU. - /// Data: float 16bit - /// Math: float 16bit - GPU_FLOAT16 = 3, - - /// Run the processing on Snapdragon AIX+HVX. - /// Data: 8bit fixed point Tensorflow style format - /// Math: 8bit fixed point Tensorflow style format - AIP_FIXED8_TF = 5, - AIP_FIXED_TF = AIP_FIXED8_TF, - - /// Any new enums should be added above this line - NUM_RUNTIME_TARGETS -}; - -/** - * Enumeration of runtime available check options. - */ -enum class RuntimeCheckOption_t -{ - /// Perform standard runtime available check - NORMAL_CHECK = 0, - /// Perform basic runtime available check, may be runtime specific - BASIC_CHECK = 1, - /// Perform unsignedPD runtime available check - UNSIGNEDPD_CHECK = 2, - /// Perform standard runtime available check - DEFAULT = 2, - /// Any new enums should be added above this line - NUM_RUNTIMECHECK_OPTIONS -}; - -/** - * Enumeration of various performance profiles that can be requested. - */ -enum class PerformanceProfile_t -{ - /// Run in a standard mode. - /// This mode will be deprecated in the future and replaced with BALANCED. - DEFAULT = 0, - /// Run in a balanced mode. - BALANCED = 0, - - /// Run in high performance mode - HIGH_PERFORMANCE = 1, - - /// Run in a power sensitive mode, at the expense of performance. - POWER_SAVER = 2, - - /// Use system settings. SNPE makes no calls to any performance related APIs. - SYSTEM_SETTINGS = 3, - - /// Run in sustained high performance mode - SUSTAINED_HIGH_PERFORMANCE = 4, - - /// Run in burst mode - BURST = 5, - - /// Run in lower clock than POWER_SAVER, at the expense of performance. - LOW_POWER_SAVER = 6, - - /// Run in higher clock and provides better performance than POWER_SAVER. - HIGH_POWER_SAVER = 7, - - /// Run in lower balanced mode - LOW_BALANCED = 8, - - /// Run in lowest clock at the expense of performance - EXTREME_POWER_SAVER = 9, - - /// Any new enums should be added above this line - NUM_PERF_PROFILES -}; - -/** - * Enumeration of various profilngLevels that can be requested. - */ -enum class ProfilingLevel_t -{ - /// No profiling. 
- /// Collects no runtime stats in the DiagLog - OFF = 0, - - /// Basic profiling - /// Collects some runtime stats in the DiagLog - BASIC = 1, - - /// Detailed profiling - /// Collects more runtime stats in the DiagLog, including per-layer statistics - /// Performance may be impacted - DETAILED = 2, - - /// Moderate profiling - /// Collects more runtime stats in the DiagLog, no per-layer statistics - MODERATE = 3, - - /// Linting profiling - /// HTP exclusive profiling level that collects in-depth performance metrics - /// for each op in the graph including main thread execution time and time spent - /// on parallel background ops - LINTING = 4 -}; - -/** - * Enumeration of various execution priority hints. - */ -enum class ExecutionPriorityHint_t -{ - /// Normal priority - NORMAL = 0, - - /// Higher than normal priority - HIGH = 1, - - /// Lower priority - LOW = 2, - - /// Between Normal and High priority - NORMAL_HIGH = 3, - - /// Any new enums should be added above this line - NUM_EXECUTION_PRIORITY_HINTS -}; - -/** @} */ /* end_addtogroup c_plus_plus_apis C++*/ - -/** - * Enumeration that lists the supported image encoding formats. - */ -enum class ImageEncoding_t -{ - /// For unknown image type. Also used as a default value for ImageEncoding_t. - UNKNOWN = 0, - - /// The RGB format consists of 3 bytes per pixel: one byte for - /// Red, one for Green, and one for Blue. The byte ordering is - /// endian independent and is always in RGB byte order. - RGB = 1, - - /// The ARGB32 format consists of 4 bytes per pixel: one byte for - /// Red, one for Green, one for Blue, and one for the alpha channel. - /// The alpha channel is ignored. The byte ordering depends on the - /// underlying CPU. For little endian CPUs, the byte order is BGRA. - /// For big endian CPUs, the byte order is ARGB. - ARGB32 = 2, - - /// The RGBA format consists of 4 bytes per pixel: one byte for - /// Red, one for Green, one for Blue, and one for the alpha channel. - /// The alpha channel is ignored. The byte ordering is endian independent - /// and is always in RGBA byte order. - RGBA = 3, - - /// The GRAYSCALE format is for 8-bit grayscale. - GRAYSCALE = 4, - - /// NV21 is the Android version of YUV. The Chrominance is down - /// sampled and has a subsampling ratio of 4:2:0. Note that this - /// image format has 3 channels, but the U and V channels - /// are subsampled. For every four Y pixels there is one U and one V pixel. @newpage - NV21 = 5, - - /// The BGR format consists of 3 bytes per pixel: one byte for - /// Red, one for Green and one for Blue. The byte ordering is - /// endian independent and is always BGR byte order. - BGR = 6 -}; - -/** - * Enumeration that lists the supported LogLevels that can be set by users. - */ -enum class LogLevel_t -{ - /// Enumeration variable to be used by user to set logging level to FATAL. - LOG_FATAL = 0, - - /// Enumeration variable to be used by user to set logging level to ERROR. - LOG_ERROR = 1, - - /// Enumeration variable to be used by user to set logging level to WARN. - LOG_WARN = 2, - - /// Enumeration variable to be used by user to set logging level to INFO. - LOG_INFO = 3, - - /// Enumeration variable to be used by user to set logging level to VERBOSE. 
- LOG_VERBOSE = 4, - - /// Any new enums should be added above this line - NUM_LOG_LEVELS -}; - -enum class IOBufferDataType_t : int -{ - UNSPECIFIED = 0, - FLOATING_POINT_32 = 1, - FLOATING_POINT_16 = 2, - FIXED_POINT_8 = 3, - FIXED_POINT_16 = 4, - INT_32 = 5, - UINT_32 = 6, - INT_8 = 7, - UINT_8 = 8, - INT_16 = 9, - UINT_16 = 10, - BOOL_8 = 11, - INT_64 = 12, - UINT_64 = 13 -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, Runtime_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, RuntimeCheckOption_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, PerformanceProfile_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ProfilingLevel_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ExecutionPriorityHint_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ImageEncoding_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, LogLevel_t) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, IOBufferDataType_t) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlError.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlError.h deleted file mode 100644 index f8c216ea..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlError.h +++ /dev/null @@ -1,299 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _DL_ERROR_H_ -#define _DL_ERROR_H_ - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "SnpeApiExportDefine.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * Enumeration of error codes - */ -typedef enum -{ - /// Indicate success: SNPE_SUCCESS = 0 - SNPE_SUCCESS = 0, - - // C API Error Codes - // This is a temporary place for them. 
We still have to figure out how to manage - // passing error codes from the C API to C++ if we want to use things like SetLastError - SNPE_ERRORCODE_CAPI_CREATE_FAILURE = 10, - SNPE_ERRORCODE_CAPI_HANDLEGEN_FAILURE = 11, - SNPE_ERRORCODE_CAPI_DELETE_FAILURE = 12, - SNPE_ERRORCODE_CAPI_BAD_HANDLE = 13, - SNPE_ERRORCODE_CAPI_BAD_ARGUMENT = 14, - SNPE_ERRORCODE_CAPI_BAD_ALLOC = 15, - - // System config errors - SNPE_ERRORCODE_CONFIG_MISSING_PARAM = 100, - SNPE_ERRORCODE_CONFIG_INVALID_PARAM = 101, - SNPE_ERRORCODE_CONFIG_MISSING_FILE = 102, - SNPE_ERRORCODE_CONFIG_NNCONFIG_NOT_SET = 103, - SNPE_ERRORCODE_CONFIG_NNCONFIG_INVALID = 104, - SNPE_ERRORCODE_CONFIG_WRONG_INPUT_NAME = 105, - SNPE_ERRORCODE_CONFIG_INCORRECT_INPUT_DIMENSIONS = 106, - SNPE_ERRORCODE_CONFIG_DIMENSIONS_MODIFICATION_NOT_SUPPORTED = 107, - SNPE_ERRORCODE_CONFIG_BOTH_OUTPUT_LAYER_TENSOR_NAMES_SET = 108, - - SNPE_ERRORCODE_CONFIG_NNCONFIG_ONLY_TENSOR_SUPPORTED = 120, - SNPE_ERRORCODE_CONFIG_NNCONFIG_ONLY_USER_BUFFER_SUPPORTED = 121, - - // DlSystem errors - SNPE_ERRORCODE_DLSYSTEM_MISSING_BUFFER = 200, - SNPE_ERRORCODE_DLSYSTEM_TENSOR_CAST_FAILED = 201, - SNPE_ERRORCODE_DLSYSTEM_FIXED_POINT_PARAM_INVALID = 202, - SNPE_ERRORCODE_DLSYSTEM_SIZE_MISMATCH = 203, - SNPE_ERRORCODE_DLSYSTEM_NAME_NOT_FOUND = 204, - SNPE_ERRORCODE_DLSYSTEM_VALUE_MISMATCH = 205, - SNPE_ERRORCODE_DLSYSTEM_INSERT_FAILED = 206, - SNPE_ERRORCODE_DLSYSTEM_TENSOR_FILE_READ_FAILED = 207, - SNPE_ERRORCODE_DLSYSTEM_DIAGLOG_FAILURE = 208, - SNPE_ERRORCODE_DLSYSTEM_LAYER_NOT_SET = 209, - SNPE_ERRORCODE_DLSYSTEM_WRONG_NUMBER_INPUT_BUFFERS = 210, - SNPE_ERRORCODE_DLSYSTEM_RUNTIME_TENSOR_SHAPE_MISMATCH = 211, - SNPE_ERRORCODE_DLSYSTEM_TENSOR_MISSING = 212, - SNPE_ERRORCODE_DLSYSTEM_TENSOR_ITERATION_UNSUPPORTED = 213, - SNPE_ERRORCODE_DLSYSTEM_BUFFER_MANAGER_MISSING = 214, - SNPE_ERRORCODE_DLSYSTEM_RUNTIME_BUFFER_SOURCE_UNSUPPORTED = 215, - SNPE_ERRORCODE_DLSYSTEM_BUFFER_CAST_FAILED = 216, - SNPE_ERRORCODE_DLSYSTEM_WRONG_TRANSITION_TYPE = 217, - SNPE_ERRORCODE_DLSYSTEM_LAYER_ALREADY_REGISTERED = 218, - SNPE_ERRORCODE_DLSYSTEM_TENSOR_DIM_INVALID = 219, - - SNPE_ERRORCODE_DLSYSTEM_BUFFERENCODING_UNKNOWN = 240, - SNPE_ERRORCODE_DLSYSTEM_BUFFER_INVALID_PARAM = 241, - - // DlContainer errors - SNPE_ERRORCODE_DLCONTAINER_MODEL_PARSING_FAILED = 300, - SNPE_ERRORCODE_DLCONTAINER_UNKNOWN_LAYER_CODE = 301, - SNPE_ERRORCODE_DLCONTAINER_MISSING_LAYER_PARAM = 302, - SNPE_ERRORCODE_DLCONTAINER_LAYER_PARAM_NOT_SUPPORTED = 303, - SNPE_ERRORCODE_DLCONTAINER_LAYER_PARAM_INVALID = 304, - SNPE_ERRORCODE_DLCONTAINER_TENSOR_DATA_MISSING = 305, - SNPE_ERRORCODE_DLCONTAINER_MODEL_LOAD_FAILED = 306, - SNPE_ERRORCODE_DLCONTAINER_MISSING_RECORDS = 307, - SNPE_ERRORCODE_DLCONTAINER_INVALID_RECORD = 308, - SNPE_ERRORCODE_DLCONTAINER_WRITE_FAILURE = 309, - SNPE_ERRORCODE_DLCONTAINER_READ_FAILURE = 310, - SNPE_ERRORCODE_DLCONTAINER_BAD_CONTAINER = 311, - SNPE_ERRORCODE_DLCONTAINER_BAD_DNN_FORMAT_VERSION = 312, - SNPE_ERRORCODE_DLCONTAINER_UNKNOWN_AXIS_ANNOTATION = 313, - SNPE_ERRORCODE_DLCONTAINER_UNKNOWN_SHUFFLE_TYPE = 314, - SNPE_ERRORCODE_DLCONTAINER_TEMP_FILE_FAILURE = 315, - - // Network errors - SNPE_ERRORCODE_NETWORK_EMPTY_NETWORK = 400, - SNPE_ERRORCODE_NETWORK_CREATION_FAILED = 401, - SNPE_ERRORCODE_NETWORK_PARTITION_FAILED = 402, - SNPE_ERRORCODE_NETWORK_NO_OUTPUT_DEFINED = 403, - SNPE_ERRORCODE_NETWORK_MISMATCH_BETWEEN_NAMES_AND_DIMS = 404, - SNPE_ERRORCODE_NETWORK_MISSING_INPUT_NAMES = 405, - SNPE_ERRORCODE_NETWORK_MISSING_OUTPUT_NAMES = 406, - SNPE_ERRORCODE_NETWORK_EXECUTION_FAILED = 
407, - - // Host runtime errors - SNPE_ERRORCODE_HOST_RUNTIME_TARGET_UNAVAILABLE = 500, - - // CPU runtime errors - SNPE_ERRORCODE_CPU_LAYER_NOT_SUPPORTED = 600, - SNPE_ERRORCODE_CPU_LAYER_PARAM_NOT_SUPPORTED = 601, - SNPE_ERRORCODE_CPU_LAYER_PARAM_INVALID = 602, - SNPE_ERRORCODE_CPU_LAYER_PARAM_COMBINATION_INVALID = 603, - SNPE_ERRORCODE_CPU_BUFFER_NOT_FOUND = 604, - SNPE_ERRORCODE_CPU_NETWORK_NOT_SUPPORTED = 605, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_ERRORCODE_CPU_UDO_OPERATION_FAILED = 606, -#endif //DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // CPU fixed-point runtime errors - SNPE_ERRORCODE_CPU_FXP_LAYER_NOT_SUPPORTED = 700, - SNPE_ERRORCODE_CPU_FXP_LAYER_PARAM_NOT_SUPPORTED = 701, - SNPE_ERRORCODE_CPU_FXP_LAYER_PARAM_INVALID = 702, - SNPE_ERRORCODE_CPU_FXP_OPTION_INVALID = 703, - - // GPU runtime errors - SNPE_ERRORCODE_GPU_LAYER_NOT_SUPPORTED = 800, - SNPE_ERRORCODE_GPU_LAYER_PARAM_NOT_SUPPORTED = 801, - SNPE_ERRORCODE_GPU_LAYER_PARAM_INVALID = 802, - SNPE_ERRORCODE_GPU_LAYER_PARAM_COMBINATION_INVALID = 803, - SNPE_ERRORCODE_GPU_KERNEL_COMPILATION_FAILED = 804, - SNPE_ERRORCODE_GPU_CONTEXT_NOT_SET = 805, - SNPE_ERRORCODE_GPU_KERNEL_NOT_SET = 806, - SNPE_ERRORCODE_GPU_KERNEL_PARAM_INVALID = 807, - SNPE_ERRORCODE_GPU_OPENCL_CHECK_FAILED = 808, - SNPE_ERRORCODE_GPU_OPENCL_FUNCTION_ERROR = 809, - SNPE_ERRORCODE_GPU_BUFFER_NOT_FOUND = 810, - SNPE_ERRORCODE_GPU_TENSOR_DIM_INVALID = 811, - SNPE_ERRORCODE_GPU_MEMORY_FLAGS_INVALID = 812, - SNPE_ERRORCODE_GPU_UNEXPECTED_NUMBER_OF_IO = 813, - SNPE_ERRORCODE_GPU_LAYER_PROXY_ERROR = 814, - SNPE_ERRORCODE_GPU_BUFFER_IN_USE = 815, - SNPE_ERRORCODE_GPU_BUFFER_MODIFICATION_ERROR = 816, - SNPE_ERRORCODE_GPU_DATA_ARRANGEMENT_INVALID = 817, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_ERRORCODE_GPU_UDO_OPERATION_FAILED = 818, -#endif //DNN_RUNTIME_HAVE_UDO_CAPABILITY - // DSP runtime errors - SNPE_ERRORCODE_DSP_LAYER_NOT_SUPPORTED = 900, - SNPE_ERRORCODE_DSP_LAYER_PARAM_NOT_SUPPORTED = 901, - SNPE_ERRORCODE_DSP_LAYER_PARAM_INVALID = 902, - SNPE_ERRORCODE_DSP_LAYER_PARAM_COMBINATION_INVALID = 903, - SNPE_ERRORCODE_DSP_STUB_NOT_PRESENT = 904, - SNPE_ERRORCODE_DSP_LAYER_NAME_TRUNCATED = 905, - SNPE_ERRORCODE_DSP_LAYER_INPUT_BUFFER_NAME_TRUNCATED = 906, - SNPE_ERRORCODE_DSP_LAYER_OUTPUT_BUFFER_NAME_TRUNCATED = 907, - SNPE_ERRORCODE_DSP_RUNTIME_COMMUNICATION_ERROR = 908, - SNPE_ERRORCODE_DSP_RUNTIME_INVALID_PARAM_ERROR = 909, - SNPE_ERRORCODE_DSP_RUNTIME_SYSTEM_ERROR = 910, - SNPE_ERRORCODE_DSP_RUNTIME_CRASHED_ERROR = 911, - SNPE_ERRORCODE_DSP_BUFFER_SIZE_ERROR = 912, - SNPE_ERRORCODE_DSP_UDO_EXECUTE_ERROR = 913, - SNPE_ERRORCODE_DSP_UDO_LIB_NOT_REGISTERED_ERROR = 914, - SNPE_ERRORCODE_DSP_UDO_INVALID_QUANTIZATION_TYPE_ERROR = 915, - - // Model validataion errors - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_NOT_SUPPORTED = 1000, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_PARAM_NOT_SUPPORTED = 1001, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_PARAM_INVALID = 1002, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_PARAM_MISSING = 1003, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_PARAM_COMBINATION_INVALID = 1004, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_ORDERING_INVALID = 1005, - SNPE_ERRORCODE_MODEL_VALIDATION_INVALID_CONSTRAINT = 1006, - SNPE_ERRORCODE_MODEL_VALIDATION_MISSING_BUFFER = 1007, - SNPE_ERRORCODE_MODEL_VALIDATION_BUFFER_REUSE_NOT_SUPPORTED = 1008, - SNPE_ERRORCODE_MODEL_VALIDATION_LAYER_COULD_NOT_BE_ASSIGNED = 1009, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_ERRORCODE_MODEL_VALIDATION_UDO_LAYER_FAILED = 1010, -#endif // DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // UDL 
errors - SNPE_ERRORCODE_UDL_LAYER_EMPTY_UDL_NETWORK = 1100, - SNPE_ERRORCODE_UDL_LAYER_PARAM_INVALID = 1101, - SNPE_ERRORCODE_UDL_LAYER_INSTANCE_MISSING = 1102, - SNPE_ERRORCODE_UDL_LAYER_SETUP_FAILED = 1103, - SNPE_ERRORCODE_UDL_EXECUTE_FAILED = 1104, - SNPE_ERRORCODE_UDL_BUNDLE_INVALID = 1105, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_ERRORCODE_UDO_REGISTRATION_FAILED = 1106, - SNPE_ERRORCODE_UDO_GET_PACKAGE_FAILED = 1107, - SNPE_ERRORCODE_UDO_GET_IMPLEMENTATION_FAILED = 1108, -#endif // DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // Dependent library errors - SNPE_ERRORCODE_STD_LIBRARY_ERROR = 1200, - - // Unknown exception (catch (...)), Has no component attached to this - SNPE_ERRORCODE_UNKNOWN_EXCEPTION = 1210, - - // Storage Errors - SNPE_ERRORCODE_STORAGE_INVALID_KERNEL_REPO = 1300, - -#ifdef DNN_RUNTIME_HAVE_AIP_RUNTIME - // AIP runtime errors - SNPE_ERRORCODE_AIP_LAYER_NOT_SUPPORTED = 1400, - SNPE_ERRORCODE_AIP_LAYER_PARAM_NOT_SUPPORTED = 1401, - SNPE_ERRORCODE_AIP_LAYER_PARAM_INVALID = 1402, - SNPE_ERRORCODE_AIP_LAYER_PARAM_COMBINATION_INVALID = 1403, - SNPE_ERRORCODE_AIP_STUB_NOT_PRESENT = 1404, - SNPE_ERRORCODE_AIP_LAYER_NAME_TRUNCATED = 1405, - SNPE_ERRORCODE_AIP_LAYER_INPUT_BUFFER_NAME_TRUNCATED = 1406, - SNPE_ERRORCODE_AIP_LAYER_OUTPUT_BUFFER_NAME_TRUNCATED = 1407, - SNPE_ERRORCODE_AIP_RUNTIME_COMMUNICATION_ERROR = 1408, - SNPE_ERRORCODE_AIP_RUNTIME_INVALID_PARAM_ERROR = 1409, - SNPE_ERRORCODE_AIP_RUNTIME_SYSTEM_ERROR = 1410, - SNPE_ERRORCODE_AIP_RUNTIME_TENSOR_MISSING = 1411, - SNPE_ERRORCODE_AIP_RUNTIME_TENSOR_SHAPE_MISMATCH = 1412, - SNPE_ERRORCODE_AIP_RUNTIME_BAD_AIX_RECORD = 1413, -#endif // DNN_RUNTIME_HAVE_AIP_RUNTIME - - // DlCaching errors - SNPE_ERRORCODE_DLCACHING_INVALID_METADATA = 1500, - SNPE_ERRORCODE_DLCACHING_INVALID_INITBLOB = 1501, - - // Infrastructure Errors - SNPE_ERRORCODE_INFRA_CLUSTERMGR_INSTANCE_INVALID = 1600, - SNPE_ERRORCODE_INFRA_CLUSTERMGR_EXECUTE_SYNC_FAILED = 1601, - - // Memory Errors - SNPE_ERRORCODE_MEMORY_CORRUPTION_ERROR = 1700 - -} Snpe_ErrorCode_t; - - - -/** - * Clear the last error code - */ -SNPE_API void Snpe_ErrorCode_clearLastErrorCode(); - -/** -* Returns the error code of the last error encountered. -* -* @return The error code. -* -* @note The returned error code is significant only when the return -* value of the call indicated an error. -*/ -SNPE_API Snpe_ErrorCode_t Snpe_ErrorCode_getLastErrorCode(); - -/** -* Returns the error string of the last error encountered. -* -* @return The error string. -* -* @note The returned error string is significant only when the return -* value of the call indicated an error. -*/ -SNPE_API const char* Snpe_ErrorCode_GetLastErrorString(); - -/** - * Returns the info string of the last error encountered. - */ -SNPE_API const char* Snpe_ErrorCode_getLastInfoString(); - -/** - * Returns the uint32_t representation of the error code enum. - * - * @param[in] code The error code to be converted. - * - * @return uint32_t representation of the error code. 
- */ -SNPE_API uint32_t Snpe_ErrorCode_enumToUInt32(Snpe_ErrorCode_t code); - - -#ifdef __cplusplus -} // extern "C" -#endif - - -#endif // _DL_ERROR_H_ - diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlError.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlError.hpp deleted file mode 100644 index 55dc2140..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlError.hpp +++ /dev/null @@ -1,261 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include - -#include "DlSystem/DlError.h" - - -namespace DlSystem { - -enum class ErrorCode : uint32_t { - NONE = 0, - - // C API Error Codes - // This is a temporary place for them. We still have to figure out how to manage - // passing error codes from the C API to C++ if we want to use things like SetLastError - SNPE_CAPI_CREATE_FAILURE = 10, - SNPE_CAPI_HANDLEGEN_FAILURE = 11, - SNPE_CAPI_DELETE_FAILURE = 12, - SNPE_CAPI_BAD_HANDLE = 13, - SNPE_CAPI_BAD_ARGUMENT = 14, - SNPE_CAPI_BAD_ALLOC = 15, - - - // System config errors - SNPE_CONFIG_MISSING_PARAM = 100, - SNPE_CONFIG_INVALID_PARAM = 101, - SNPE_CONFIG_MISSING_FILE = 102, - SNPE_CONFIG_NNCONFIG_NOT_SET = 103, - SNPE_CONFIG_NNCONFIG_INVALID = 104, - SNPE_CONFIG_WRONG_INPUT_NAME = 105, - SNPE_CONFIG_INCORRECT_INPUT_DIMENSIONS = 106, - SNPE_CONFIG_DIMENSIONS_MODIFICATION_NOT_SUPPORTED = 107, - SNPE_CONFIG_BOTH_OUTPUT_LAYER_TENSOR_NAMES_SET = 108, - - SNPE_CONFIG_NNCONFIG_ONLY_TENSOR_SUPPORTED = 120, - SNPE_CONFIG_NNCONFIG_ONLY_USER_BUFFER_SUPPORTED = 121, - - // DlSystem errors - SNPE_DLSYSTEM_MISSING_BUFFER = 200, - SNPE_DLSYSTEM_TENSOR_CAST_FAILED = 201, - SNPE_DLSYSTEM_FIXED_POINT_PARAM_INVALID = 202, - SNPE_DLSYSTEM_SIZE_MISMATCH = 203, - SNPE_DLSYSTEM_NAME_NOT_FOUND = 204, - SNPE_DLSYSTEM_VALUE_MISMATCH = 205, - SNPE_DLSYSTEM_INSERT_FAILED = 206, - SNPE_DLSYSTEM_TENSOR_FILE_READ_FAILED = 207, - SNPE_DLSYSTEM_DIAGLOG_FAILURE = 208, - SNPE_DLSYSTEM_LAYER_NOT_SET = 209, - SNPE_DLSYSTEM_WRONG_NUMBER_INPUT_BUFFERS = 210, - SNPE_DLSYSTEM_RUNTIME_TENSOR_SHAPE_MISMATCH = 211, - SNPE_DLSYSTEM_TENSOR_MISSING = 212, - SNPE_DLSYSTEM_TENSOR_ITERATION_UNSUPPORTED = 213, - SNPE_DLSYSTEM_BUFFER_MANAGER_MISSING = 214, - SNPE_DLSYSTEM_RUNTIME_BUFFER_SOURCE_UNSUPPORTED = 215, - SNPE_DLSYSTEM_BUFFER_CAST_FAILED = 216, - SNPE_DLSYSTEM_WRONG_TRANSITION_TYPE = 217, - SNPE_DLSYSTEM_LAYER_ALREADY_REGISTERED = 218, - SNPE_DLSYSTEM_TENSOR_DIM_INVALID = 219, - - SNPE_DLSYSTEM_BUFFERENCODING_UNKNOWN = 240, - SNPE_DLSYSTEM_BUFFER_INVALID_PARAM = 241, - - // DlContainer errors - SNPE_DLCONTAINER_MODEL_PARSING_FAILED = 300, - SNPE_DLCONTAINER_UNKNOWN_LAYER_CODE = 301, - SNPE_DLCONTAINER_MISSING_LAYER_PARAM = 302, - SNPE_DLCONTAINER_LAYER_PARAM_NOT_SUPPORTED = 303, - SNPE_DLCONTAINER_LAYER_PARAM_INVALID = 304, - SNPE_DLCONTAINER_TENSOR_DATA_MISSING = 305, - SNPE_DLCONTAINER_MODEL_LOAD_FAILED = 306, - SNPE_DLCONTAINER_MISSING_RECORDS = 307, - SNPE_DLCONTAINER_INVALID_RECORD = 308, - SNPE_DLCONTAINER_WRITE_FAILURE = 309, - SNPE_DLCONTAINER_READ_FAILURE = 310, - SNPE_DLCONTAINER_BAD_CONTAINER = 311, - SNPE_DLCONTAINER_BAD_DNN_FORMAT_VERSION = 312, - SNPE_DLCONTAINER_UNKNOWN_AXIS_ANNOTATION = 313, - 
SNPE_DLCONTAINER_UNKNOWN_SHUFFLE_TYPE = 314, - SNPE_DLCONTAINER_TEMP_FILE_FAILURE = 315, - - // Network errors - SNPE_NETWORK_EMPTY_NETWORK = 400, - SNPE_NETWORK_CREATION_FAILED = 401, - SNPE_NETWORK_PARTITION_FAILED = 402, - SNPE_NETWORK_NO_OUTPUT_DEFINED = 403, - SNPE_NETWORK_MISMATCH_BETWEEN_NAMES_AND_DIMS = 404, - SNPE_NETWORK_MISSING_INPUT_NAMES = 405, - SNPE_NETWORK_MISSING_OUTPUT_NAMES = 406, - SNPE_NETWORK_EXECUTION_FAILED = 407, - - // Host runtime errors - SNPE_HOST_RUNTIME_TARGET_UNAVAILABLE = 500, - - // CPU runtime errors - SNPE_CPU_LAYER_NOT_SUPPORTED = 600, - SNPE_CPU_LAYER_PARAM_NOT_SUPPORTED = 601, - SNPE_CPU_LAYER_PARAM_INVALID = 602, - SNPE_CPU_LAYER_PARAM_COMBINATION_INVALID = 603, - SNPE_CPU_BUFFER_NOT_FOUND = 604, - SNPE_CPU_NETWORK_NOT_SUPPORTED = 605, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_CPU_UDO_OPERATION_FAILED = 606, -#endif //DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // CPU fixed-point runtime errors - SNPE_CPU_FXP_LAYER_NOT_SUPPORTED = 700, - SNPE_CPU_FXP_LAYER_PARAM_NOT_SUPPORTED = 701, - SNPE_CPU_FXP_LAYER_PARAM_INVALID = 702, - SNPE_CPU_FXP_OPTION_INVALID = 703, - - // GPU runtime errors - SNPE_GPU_LAYER_NOT_SUPPORTED = 800, - SNPE_GPU_LAYER_PARAM_NOT_SUPPORTED = 801, - SNPE_GPU_LAYER_PARAM_INVALID = 802, - SNPE_GPU_LAYER_PARAM_COMBINATION_INVALID = 803, - SNPE_GPU_KERNEL_COMPILATION_FAILED = 804, - SNPE_GPU_CONTEXT_NOT_SET = 805, - SNPE_GPU_KERNEL_NOT_SET = 806, - SNPE_GPU_KERNEL_PARAM_INVALID = 807, - SNPE_GPU_OPENCL_CHECK_FAILED = 808, - SNPE_GPU_OPENCL_FUNCTION_ERROR = 809, - SNPE_GPU_BUFFER_NOT_FOUND = 810, - SNPE_GPU_TENSOR_DIM_INVALID = 811, - SNPE_GPU_MEMORY_FLAGS_INVALID = 812, - SNPE_GPU_UNEXPECTED_NUMBER_OF_IO = 813, - SNPE_GPU_LAYER_PROXY_ERROR = 814, - SNPE_GPU_BUFFER_IN_USE = 815, - SNPE_GPU_BUFFER_MODIFICATION_ERROR = 816, - SNPE_GPU_DATA_ARRANGEMENT_INVALID = 817, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_GPU_UDO_OPERATION_FAILED = 818, -#endif //DNN_RUNTIME_HAVE_UDO_CAPABILITY - // DSP runtime errors - SNPE_DSP_LAYER_NOT_SUPPORTED = 900, - SNPE_DSP_LAYER_PARAM_NOT_SUPPORTED = 901, - SNPE_DSP_LAYER_PARAM_INVALID = 902, - SNPE_DSP_LAYER_PARAM_COMBINATION_INVALID = 903, - SNPE_DSP_STUB_NOT_PRESENT = 904, - SNPE_DSP_LAYER_NAME_TRUNCATED = 905, - SNPE_DSP_LAYER_INPUT_BUFFER_NAME_TRUNCATED = 906, - SNPE_DSP_LAYER_OUTPUT_BUFFER_NAME_TRUNCATED = 907, - SNPE_DSP_RUNTIME_COMMUNICATION_ERROR = 908, - SNPE_DSP_RUNTIME_INVALID_PARAM_ERROR = 909, - SNPE_DSP_RUNTIME_SYSTEM_ERROR = 910, - SNPE_DSP_RUNTIME_CRASHED_ERROR = 911, - SNPE_DSP_BUFFER_SIZE_ERROR = 912, - SNPE_DSP_UDO_EXECUTE_ERROR = 913, - SNPE_DSP_UDO_LIB_NOT_REGISTERED_ERROR = 914, - SNPE_DSP_UDO_INVALID_QUANTIZATION_TYPE_ERROR = 915, - SNPE_DSP_RUNTIME_INVALID_RPC_DRIVER = 916, - SNPE_DSP_RUNTIME_RPC_PERMISSION_ERROR = 917, - SNPE_DSP_RUNTIME_DSP_FILE_OPEN_ERROR = 918, - - // Model validataion errors - SNPE_MODEL_VALIDATION_LAYER_NOT_SUPPORTED = 1000, - SNPE_MODEL_VALIDATION_LAYER_PARAM_NOT_SUPPORTED = 1001, - SNPE_MODEL_VALIDATION_LAYER_PARAM_INVALID = 1002, - SNPE_MODEL_VALIDATION_LAYER_PARAM_MISSING = 1003, - SNPE_MODEL_VALIDATION_LAYER_PARAM_COMBINATION_INVALID = 1004, - SNPE_MODEL_VALIDATION_LAYER_ORDERING_INVALID = 1005, - SNPE_MODEL_VALIDATION_INVALID_CONSTRAINT = 1006, - SNPE_MODEL_VALIDATION_MISSING_BUFFER = 1007, - SNPE_MODEL_VALIDATION_BUFFER_REUSE_NOT_SUPPORTED = 1008, - SNPE_MODEL_VALIDATION_LAYER_COULD_NOT_BE_ASSIGNED = 1009, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_MODEL_VALIDATION_UDO_LAYER_FAILED = 1010, -#endif // DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // UDL errors 
- SNPE_UDL_LAYER_EMPTY_UDL_NETWORK = 1100, - SNPE_UDL_LAYER_PARAM_INVALID = 1101, - SNPE_UDL_LAYER_INSTANCE_MISSING = 1102, - SNPE_UDL_LAYER_SETUP_FAILED = 1103, - SNPE_UDL_EXECUTE_FAILED = 1104, - SNPE_UDL_BUNDLE_INVALID = 1105, -#ifdef DNN_RUNTIME_HAVE_UDO_CAPABILITY - SNPE_UDO_REGISTRATION_FAILED = 1106, - SNPE_UDO_GET_PACKAGE_FAILED = 1107, - SNPE_UDO_GET_IMPLEMENTATION_FAILED = 1108, -#endif // DNN_RUNTIME_HAVE_UDO_CAPABILITY - - // Dependent library errors - SNPE_STD_LIBRARY_ERROR = 1200, - - // Unknown exception (catch (...)), Has no component attached to this - SNPE_UNKNOWN_EXCEPTION = 1210, - - // Storage Errors - SNPE_STORAGE_INVALID_KERNEL_REPO = 1300, - -#ifdef DNN_RUNTIME_HAVE_AIP_RUNTIME - // AIP runtime errors - SNPE_AIP_LAYER_NOT_SUPPORTED = 1400, - SNPE_AIP_LAYER_PARAM_NOT_SUPPORTED = 1401, - SNPE_AIP_LAYER_PARAM_INVALID = 1402, - SNPE_AIP_LAYER_PARAM_COMBINATION_INVALID = 1403, - SNPE_AIP_STUB_NOT_PRESENT = 1404, - SNPE_AIP_LAYER_NAME_TRUNCATED = 1405, - SNPE_AIP_LAYER_INPUT_BUFFER_NAME_TRUNCATED = 1406, - SNPE_AIP_LAYER_OUTPUT_BUFFER_NAME_TRUNCATED = 1407, - SNPE_AIP_RUNTIME_COMMUNICATION_ERROR = 1408, - SNPE_AIP_RUNTIME_INVALID_PARAM_ERROR = 1409, - SNPE_AIP_RUNTIME_SYSTEM_ERROR = 1410, - SNPE_AIP_RUNTIME_TENSOR_MISSING = 1411, - SNPE_AIP_RUNTIME_TENSOR_SHAPE_MISMATCH = 1412, - SNPE_AIP_RUNTIME_BAD_AIX_RECORD = 1413, - SNPE_AIP_AXIS_QUANT_UNSUPPORTED = 1414, - -#endif // DNN_RUNTIME_HAVE_AIP_RUNTIME - - // DlCaching errors - SNPE_DLCACHING_INVALID_METADATA = 1500, - SNPE_DLCACHING_INVALID_INITBLOB = 1501, - - // Infrastructure Errors - SNPE_INFRA_CLUSTERMGR_INSTANCE_INVALID = 1600, - SNPE_INFRA_CLUSTERMGR_EXECUTE_SYNC_FAILED = 1601, - - // Memory Errors - SNPE_MEMORY_CORRUPTION_ERROR = 1700 - -}; - - -inline ErrorCode getLastErrorCode(){ - return static_cast(Snpe_ErrorCode_getLastErrorCode()); -} - -inline const char* getLastErrorString(){ - return Snpe_ErrorCode_GetLastErrorString(); -} - -inline const char* getLastInfoString(){ - return Snpe_ErrorCode_getLastInfoString(); -} - - -inline uint32_t enumToUInt32(ErrorCode code){ - return Snpe_ErrorCode_enumToUInt32(static_cast(code)); -} - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ErrorCode); - - -namespace zdl{ namespace DlSystem { - inline ErrorCode getLastErrorCode() { return ::DlSystem::getLastErrorCode() ; } - inline const char* getLastErrorString() { return ::DlSystem::getLastErrorString() ; } - inline const char* getLastInfoString() { return ::DlSystem::getLastInfoString() ; } - inline uint32_t enumToUInt32(ErrorCode code){ return ::DlSystem::enumToUInt32(code); } -}} // ns zdl::DlSystem diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlOptional.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlOptional.hpp deleted file mode 100644 index e7bbf666..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlOptional.hpp +++ /dev/null @@ -1,244 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include -#include - - -//============================================================================== -// -// Copyright (c) 2016, 2020 Qualcomm Technologies, Inc. -// All Rights Reserved. 
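The inline helpers at the end of DlError.hpp (getLastErrorCode, getLastErrorString, getLastInfoString, enumToUInt32) are the usual way application code turns a failed SNPE call into a diagnostic message. A minimal sketch, assuming it is invoked right after some SNPE API has signalled failure (for example a builder returning a null object):

    #include <cstdio>
    #include "DlSystem/DlError.hpp"   // C++ wrapper shown above

    // Format whatever error state the runtime recorded last. This helper makes
    // no SNPE calls of its own; the caller decides when a failure occurred.
    static void reportLastSnpeError(const char* where)
    {
        const auto code = DlSystem::getLastErrorCode();
        if (code == DlSystem::ErrorCode::NONE) {
            std::printf("%s: no SNPE error recorded\n", where);
            return;
        }
        std::fprintf(stderr, "%s: SNPE error %u: %s\n",
                     where,
                     static_cast<unsigned>(DlSystem::enumToUInt32(code)),
                     DlSystem::getLastErrorString());
    }

    int main()
    {
        // With no prior SNPE activity this just reports that no error is set.
        reportLastSnpeError("startup");
        return 0;
    }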
-// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -//#include -#include -//#include - - -namespace DlSystem { - - -/** @addtogroup c_plus_plus_apis C++ -@{ */ - -/** - * @brief . - * - * Class to manage a value that may or may not exist. The boolean value - * of the Optional class is true if the object contains a value and false - * if it does not contain a value. - * - * The class must be evaluated and confirmed as true (containing a value) - * before being dereferenced. - */ -template -class Optional { -public: - enum class LIFECYCLE { - NONE = 0, - REFERENCE_OWNED = 1, - POINTER_OWNED = 2, - POINTER_NOT_OWNED = 3 - }; - - struct ReferenceCount { - size_t count = 0; - - void increment() { count++; } - - size_t decrement() { - if (count > 0) { - count--; - } - return count; - } - }; - - using U = typename std::remove_pointer::type; - - /** - * The default constructor is set to not have any value, and is - * therefore evaluated as false. - */ - // Do not explicit it so we can return {} - Optional() { - m_Type = LIFECYCLE::NONE; - } - - /** - * Construct an Optional class using an object. - * @param[in] Reference to an object v - * @param[out] Optional instance of object v - */ - template - Optional (const T& v, typename std::enable_if::value>::type* = 0) - : m_Type(LIFECYCLE::REFERENCE_OWNED) { - try { - m_StoragePtr = new T(v); - } catch (...) { - m_StoragePtr = nullptr; - m_Type = LIFECYCLE::NONE; - } - } - - template - Optional(U* v, LIFECYCLE type, typename std::enable_if::value>::type* = 0) - : m_Type(type) { - switch (m_Type) { - case LIFECYCLE::POINTER_OWNED: - m_StoragePtr = v; - m_Count = new ReferenceCount(); - m_Count->increment(); - break; - case LIFECYCLE::POINTER_NOT_OWNED: - m_StoragePtr = v; - break; - case LIFECYCLE::REFERENCE_OWNED: - throw std::bad_exception(); - case LIFECYCLE::NONE: - break; - } - } - - Optional(const Optional &other) : m_Type(other.m_Type), m_Count(other.m_Count) { - if (isReference()) { - m_StoragePtr = new U(*other.m_StoragePtr); - } else if (isPointer()) { - m_StoragePtr = other.m_StoragePtr; - if (isOwned()) { - m_Count->increment(); - } - } - } - - Optional& operator=(const Optional& other) noexcept { - Optional tmp(other); - swap(std::move(tmp)); - return *this; - } - - Optional(Optional&& other) noexcept { - swap(std::move(other)); - } - - Optional& operator=(Optional&& other) noexcept { - swap(std::move(other)); - return *this; - } - - ~Optional() { - if (isOwned()) { - if (isReference() || (isPointer() && m_Count->decrement() == 0)) { - delete m_StoragePtr; - delete m_Count; - } - } - } - - /** - * Boolean value of Optional class is only true when there exists a value. - */ - operator bool() const noexcept { return isValid(); } - - bool operator!() const noexcept { return !isValid(); } - - /** - * Get reference of Optional object - * @warning User must validate Optional has value before. - */ - const T& operator*() { return this->GetReference(); } - - /** - * Get reference of Optional object - * @warning User must validate Optional has value before. 
- */ - const T& operator*() const { return this->GetReference(); } - - operator T&() { return this->GetReference(); } - - T operator->() { - T self = this->GetReference(); - return self; - } - - void release(){ - if(isOwned() && isPointer()){ - m_Type = LIFECYCLE::POINTER_NOT_OWNED; - if(m_Count && m_Count->decrement() == 0){ - delete m_Count; - m_Count = nullptr; - } - } - } -private: - void swap(Optional&& other) { - m_Type = other.m_Type; - m_StoragePtr = other.m_StoragePtr; - m_Count = other.m_Count; - - other.m_Type = LIFECYCLE::NONE; - other.m_StoragePtr = nullptr; - other.m_Count = nullptr; - } - - template - typename std::enable_if::value, const Q&>::type GetReference() const noexcept { - if (!isReference()) std::terminate(); - return *static_cast(m_StoragePtr); - } - - template - typename std::enable_if::value, const Q&>::type GetReference() const noexcept { - if (!isPointer()) std::terminate(); - return static_cast(m_StoragePtr); - } - - template - typename std::enable_if::value, Q&>::type GetReference() noexcept { - if (!isReference()) std::terminate(); - return *m_StoragePtr; - } - - template - typename std::enable_if::value, Q&>::type GetReference() noexcept { - if (!isPointer()) std::terminate(); - return m_StoragePtr; - } - - bool isPointer() const { - return m_Type == LIFECYCLE::POINTER_OWNED || m_Type == LIFECYCLE::POINTER_NOT_OWNED; - } - - bool isOwned() const { - return m_Type == LIFECYCLE::REFERENCE_OWNED || m_Type == LIFECYCLE::POINTER_OWNED; - } - - bool isReference() const { - return m_Type == LIFECYCLE::REFERENCE_OWNED; - } - - bool isValid() const { - return m_Type != LIFECYCLE::NONE; - } - - U* m_StoragePtr = nullptr; - LIFECYCLE m_Type; - ReferenceCount *m_Count = nullptr; -}; - -} // ns DlSystem - - - -namespace zdl { namespace DlSystem { template using Optional = ::DlSystem::Optional; }} diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlVersion.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlVersion.h deleted file mode 100644 index fac01d1c..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlVersion.h +++ /dev/null @@ -1,122 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - - -/** - * @file - */ - -#ifndef _DL_VERSION_H_ -#define _DL_VERSION_H_ - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" -#include - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A class that contains the different portions of a version number. 
- * A typedef to indicate a SNPE DlVersion handle - */ -typedef void* Snpe_DlVersion_Handle_t; - -/** - * Construct a DlVersion - * - * @return a handle to the created DlVersion - */ -SNPE_API -Snpe_DlVersion_Handle_t Snpe_DlVersion_Create(); - - -/** - * Destroys/frees DlVersion - * - * @param[in] handle : Handle to access DlVersion - * - * @return SNPE_SUCCESS if Delete operation successful. -*/ -SNPE_API -Snpe_ErrorCode_t Snpe_DlVersion_Delete(Snpe_DlVersion_Handle_t handle); - -/** - * Get the major version number. - * @param[in] handle : Handle to access DlVersion - * @return Major version - */ -SNPE_API -int32_t Snpe_DlVersion_GetMajor(Snpe_DlVersion_Handle_t handle); - -/** - * Get the minor version number. - * @param[in] handle : Handle to access DlVersion - * @return Minor version - */ -SNPE_API -int32_t Snpe_DlVersion_GetMinor(Snpe_DlVersion_Handle_t handle); - -/** - * Get the teeny version number. - * @param[in] handle : Handle to access DlVersion - * @return Teeny version - */ -SNPE_API -int32_t Snpe_DlVersion_GetTeeny(Snpe_DlVersion_Handle_t handle); - -/** - * Get the string holding information about the build version. - * - * @param[in] handle : Handle to access DlVersion - * @return Build information - */ -SNPE_API -const char* Snpe_DlVersion_GetBuild(Snpe_DlVersion_Handle_t handle); - -/** - * @brief Returns a string in the form Major.Minor.Teeny.Build - * - * @param[in] handle : Handle to access DlVersion - * @return A formatted char* holding the version information. - * - * @note the returned string will be invalidated by subsequent calls to this function - */ -SNPE_API -const char* Snpe_DlVersion_ToString(Snpe_DlVersion_Handle_t handle); - -/** - * @brief Create a DlVersion from a string - * - * @param stringValue The formatted DlVersion string - * - * @return A handle to the created DlVersion - */ -SNPE_API -Snpe_DlVersion_Handle_t Snpe_DlVersion_FromString(const char* stringValue); - - - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _DL_VERSION_H_ diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlVersion.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlVersion.hpp deleted file mode 100644 index 7badab1f..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/DlVersion.hpp +++ /dev/null @@ -1,118 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
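The C interface above follows the handle-based pattern used throughout this API: create or parse a DlVersion, query it through accessors, and release it with the matching Delete call. A small sketch, assuming Snpe_DlVersion_FromString yields a null handle when the string cannot be parsed; the version literal is an arbitrary example, not a real SNPE release number.

    #include <cstdio>
    #include "DlSystem/DlVersion.h"   // C header shown above

    int main()
    {
        Snpe_DlVersion_Handle_t version = Snpe_DlVersion_FromString("2.10.0.example");
        if (version == nullptr) {                 // assumed failure convention
            std::fprintf(stderr, "could not parse version string\n");
            return 1;
        }

        std::printf("major=%d minor=%d teeny=%d build=%s\n",
                    static_cast<int>(Snpe_DlVersion_GetMajor(version)),
                    static_cast<int>(Snpe_DlVersion_GetMinor(version)),
                    static_cast<int>(Snpe_DlVersion_GetTeeny(version)),
                    Snpe_DlVersion_GetBuild(version));
        std::printf("formatted: %s\n", Snpe_DlVersion_ToString(version));

        Snpe_DlVersion_Delete(version);           // every Create/FromString needs a Delete
        return 0;
    }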
-// -//============================================================================= -#pragma once - -#include -#include - -#include "Wrapper.hpp" -#include "String.hpp" - -#include "DlSystem/DlVersion.h" -#include "SNPE/SNPEUtil.h" - - -namespace DlSystem { - -class Version_t : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_DlVersion_Delete}; - - template - using MajorReference = WrapperDetail::GenericConstMemberReference; - - template - using MinorReference = WrapperDetail::GenericConstMemberReference; - - template - using TeenyReference = WrapperDetail::GenericConstMemberReference; - - - static std::string BuildGetter(Snpe_DlVersion_Handle_t handle){ - return Snpe_DlVersion_GetBuild(handle); - } - - template - using BuildReference = WrapperDetail::GenericConstMemberReference; - - - static const std::string& toString(int32_t Major, int32_t Minor, int32_t Teeny, const std::string& Build){ - thread_local std::string toret; - - toret = std::to_string(Major); - toret += '.'; - toret += std::to_string(Minor); - toret += '.'; - toret += std::to_string(Teeny); - if(!Build.empty()){ - toret += '.'; - toret += Build; - } - - return toret; - } - -public: - Version_t() - : BaseType(Snpe_DlVersion_Create()) - { } - - Version_t(int32_t Major, int32_t Minor, int32_t Teeny, const std::string& Build) - : BaseType(Snpe_DlVersion_FromString(toString(Major, Minor, Teeny, Build).c_str())) - { } - - - /// Holds the major version number. Changes in this value indicate - /// major changes that break backward compatibility. - MajorReference Major{*this}; - - /// Holds the minor version number. Changes in this value indicate - /// minor changes made to library that are backwards compatible - /// (such as additions to the interface). - MinorReference Minor{*this}; - - /// Holds the teeny version number. Changes in this value indicate - /// changes such as bug fixes and patches made to the library that - /// do not affect the interface. - TeenyReference Teeny{*this}; - - /// This string holds information about the build version. - BuildReference Build{*this}; - - - static Version_t fromString(const std::string& stringValue){ - return moveHandle(Snpe_DlVersion_FromString(stringValue.c_str())); - } - - /** - * @brief Returns a string in the form Major.Minor.Teeny.Build - * - * @return A formatted string holding the version information. - */ - std::string toString() const{ - return Snpe_DlVersion_ToString(handle()); - } - - /** - * @brief Returns a string in the form Major.Minor.Teeny.Build - * - * @return A formatted string holding the version information. - */ - String asString() const{ - return String(toString()); - } -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, Version_t) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.h deleted file mode 100644 index 96453ef9..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.h +++ /dev/null @@ -1,117 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
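On the C++ side, Version_t wraps the same handle and registers Snpe_DlVersion_Delete as its delete function, so no explicit Delete call is needed. A minimal round-trip sketch using only the constructor, toString() and fromString() shown above (the component values are arbitrary examples):

    #include <cstdio>
    #include "DlSystem/DlVersion.hpp"   // C++ wrapper shown above

    int main()
    {
        // Build a version from its components; the handle is released automatically.
        DlSystem::Version_t v(2, 10, 0, "example");
        std::printf("full: %s\n", v.toString().c_str());

        // The formatted string can be parsed back into an equivalent Version_t.
        DlSystem::Version_t parsed = DlSystem::Version_t::fromString(v.toString());
        std::printf("round trip: %s\n", parsed.toString().c_str());
        return 0;
    }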
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _IBUFFER_ATTRIBUTES_H -#define _IBUFFER_ATTRIBUTES_H - -#include "DlSystem/IUserBuffer.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE IBufferAttributes handle - */ -typedef void* Snpe_IBufferAttributes_Handle_t; - - -/** - * @brief Gets the buffer's element size, in bytes - * - * This can be used to compute the memory size required - * to back this buffer. - * - * @param[in] handle : Handle to access IBufferAttributes - * - * @return Element size, in bytes - */ -SNPE_API -size_t Snpe_IBufferAttributes_GetElementSize(Snpe_IBufferAttributes_Handle_t handle); - -/** - * @brief Gets the element's encoding type - * - * @param[in] handle : Handle to access IBufferAttributes - * - * @return encoding type - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_IBufferAttributes_GetEncodingType(Snpe_IBufferAttributes_Handle_t handle); - -/** - * @brief Gets the number of elements in each dimension - * - * @param[in] handle : Handle to access IBufferAttributes - * - * @return Dimension size, in terms of number of elements - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_IBufferAttributes_GetDims(Snpe_IBufferAttributes_Handle_t handle); - -/** - * @brief Gets the alignment requirement of each dimension - * - * Alignment per each dimension is expressed as an multiple, for - * example, if one particular dimension can accept multiples of 8, - * the alignment will be 8. - * - * @param[in] handle : Handle to access IBufferAttributes - * - * @return Alignment in each dimension, in terms of multiple of - * number of elements - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_IBufferAttributes_GetAlignments(Snpe_IBufferAttributes_Handle_t handle); - -/** - * @brief Gets the buffer encoding returned from the network responsible - * for generating this buffer. Depending on the encoding type, this will - * be an instance of an encoding type specific derived class. - * - * @param[in] handle : Handle to access IBufferAttributes - * - * @return Derived user buffer encoding object. - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_IBufferAttributes_GetEncoding_Ref(Snpe_IBufferAttributes_Handle_t handle); - -/** - * @brief Destroys the IBufferAttributes object - * - * @param[handle] handle : Handle to access IBufferAttributes - * - * @return Error code. 
Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IBufferAttributes_Delete(Snpe_IBufferAttributes_Handle_t handle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _IBUFFER_ATTRIBUTES_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.hpp deleted file mode 100644 index 2a86fcec..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IBufferAttributes.hpp +++ /dev/null @@ -1,85 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include -#include "TensorShape.hpp" - -#include "DlSystem/IBufferAttributes.h" -#include "IUserBuffer.hpp" - -namespace DlSystem { - - -class IBufferAttributes : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_IBufferAttributes_Delete}; -public: - - size_t getElementSize() const noexcept{ - return Snpe_IBufferAttributes_GetElementSize(handle()); - } - - UserBufferEncoding::ElementType_t getEncodingType() const noexcept{ - return static_cast(Snpe_IBufferAttributes_GetEncodingType(handle())); - } - - TensorShape getDims() const{ - return moveHandle(Snpe_IBufferAttributes_GetDims(handle())); - } - - TensorShape getAlignments() const{ - return moveHandle(Snpe_IBufferAttributes_GetAlignments(handle())); - } - - UserBufferEncoding* getEncoding() const{ - auto h = Snpe_IBufferAttributes_GetEncoding_Ref(handle()); - switch(Snpe_UserBufferEncoding_GetElementType(h)){ - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNSIGNED8BIT: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT32: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT32: - return makeReference(h); - - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT16: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF16: - return makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_BOOL8: - return makeReference(h); - - default: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNKNOWN: - return makeReference(h); - } - } - -}; - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, IBufferAttributes) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.h deleted file mode 100644 index a3c3c623..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.h +++ /dev/null @@ -1,156 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm 
Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_IOBUFFER_DATATYPE_MAP_H -#define DL_SYSTEM_IOBUFFER_DATATYPE_MAP_H - -#include - -#include "DlSystem/DlError.h" -#include "DlSystem/DlEnums.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE IOBufferDataTypeMap handle - */ -typedef void* Snpe_IOBufferDataTypeMap_Handle_t; - -/** - * @brief . - * - * Creates a new Buffer Data type map - * - */ -SNPE_API -Snpe_IOBufferDataTypeMap_Handle_t Snpe_IOBufferDataTypeMap_Create(); - -/** - * @brief Destroys the map - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IOBufferDataTypeMap_Delete(Snpe_IOBufferDataTypeMap_Handle_t handle); -/** - * @brief Adds a name and the corresponding buffer data type - * to the map - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @param[in] name : The name of the buffer - * - * @param[in] bufferDataType : data type of the buffer - * - * @note If a buffer with the same name already exists, no new - * buffer is added. - */ -SNPE_API -Snpe_ErrorCode_t -Snpe_IOBufferDataTypeMap_Add(Snpe_IOBufferDataTypeMap_Handle_t handle, const char* name, Snpe_IOBufferDataType_t bufferDataType); - -/** - * @brief Removes a buffer name from the map - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @param[in] name : The name of the buffer - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IOBufferDataTypeMap_Remove(Snpe_IOBufferDataTypeMap_Handle_t handle, const char* name); - -/** - * @brief Returns the type of the named buffer - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @param[in] name : The name of the buffer - * - * @return The type of the buffer, or UNSPECIFIED if the buffer does not exist - * - */ -SNPE_API -Snpe_IOBufferDataType_t Snpe_IOBufferDataTypeMap_GetBufferDataType(Snpe_IOBufferDataTypeMap_Handle_t handle, const char* name); - -/** - * @brief Returns the type of the first buffer - * - * @param handle : Handle to access the IOBufferDataType map - * - * @return The type of the first buffer, or SNPE_IO_BUFFER_DATATYPE_UNSPECIFIED if the map is empty. - */ -SNPE_API -Snpe_IOBufferDataType_t Snpe_IOBufferDataTypeMap_GetBufferDataTypeOfFirst(Snpe_IOBufferDataTypeMap_Handle_t handle); - -/** - * @brief Returns the size of the buffer type map. - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @return The size of the map - * - */ -SNPE_API -size_t Snpe_IOBufferDataTypeMap_Size(Snpe_IOBufferDataTypeMap_Handle_t handle); - -/** - * @brief Checks the existence of the named buffer in the map - * - * @param[in] handle : Handle to access the IOBufferDataType map - * - * @param[in] name : The name of the buffer - * - * @return 1 if the named buffer exists, 0 otherwise. 
- * - */ -SNPE_API -int Snpe_IOBufferDataTypeMap_Find(Snpe_IOBufferDataTypeMap_Handle_t handle, const char* name); - -/** - * @brief Resets the map - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IOBufferDataTypeMap_Clear(Snpe_IOBufferDataTypeMap_Handle_t handle); - -/** - * @brief Checks whether the map is empty - * - * @return 1 if the map is empty, 0 otherwise. - * - */ -SNPE_API -int Snpe_IOBufferDataTypeMap_Empty(Snpe_IOBufferDataTypeMap_Handle_t handle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_IOBUFFER_DATATYPE_MAP_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.hpp deleted file mode 100644 index c39d3320..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IOBufferDataTypeMap.hpp +++ /dev/null @@ -1,69 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include - -#include "DlEnums.hpp" - - -#include "DlSystem/IOBufferDataTypeMap.h" - -namespace DlSystem { - -class IOBufferDataTypeMap : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_IOBufferDataTypeMap_Delete}; - -public: - - IOBufferDataTypeMap() - : BaseType(Snpe_IOBufferDataTypeMap_Create()) - { } - - void add(const char* name, IOBufferDataType_t bufferDataType){ - Snpe_IOBufferDataTypeMap_Add(handle(), name, static_cast(bufferDataType)); - } - - void remove(const char* name){ - Snpe_IOBufferDataTypeMap_Remove(handle(), name); - } - - IOBufferDataType_t getBufferDataType(const char* name){ - return static_cast(Snpe_IOBufferDataTypeMap_GetBufferDataType(handle(), name)); - } - - IOBufferDataType_t getBufferDataType(){ - return static_cast(Snpe_IOBufferDataTypeMap_GetBufferDataTypeOfFirst(handle())); - } - - size_t size() const{ - return Snpe_IOBufferDataTypeMap_Size(handle()); - } - - bool find(const char* name) const{ - return Snpe_IOBufferDataTypeMap_Find(handle(), name); - } - - void clear(){ - Snpe_IOBufferDataTypeMap_Clear(handle()); - } - - bool empty() const{ - return Snpe_IOBufferDataTypeMap_Empty(handle()); - } -}; - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, IOBufferDataTypeMap) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/ITensor.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/ITensor.h deleted file mode 100644 index 913f3bdc..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/ITensor.h +++ /dev/null @@ -1,118 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
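IOBufferDataTypeMap simply associates buffer names with the IOBufferDataType_t values from DlEnums.hpp. A short sketch in which the buffer names "input_1" and "output_1" are placeholders; real names come from the loaded model.

    #include <cstdio>
    #include "DlSystem/IOBufferDataTypeMap.hpp"   // C++ wrapper shown above

    int main()
    {
        DlSystem::IOBufferDataTypeMap bufferTypes;
        bufferTypes.add("input_1",  DlSystem::IOBufferDataType_t::FLOATING_POINT_32);
        bufferTypes.add("output_1", DlSystem::IOBufferDataType_t::FIXED_POINT_8);

        if (bufferTypes.find("input_1")) {
            const auto t = bufferTypes.getBufferDataType("input_1");
            std::printf("input_1 type id: %d\n", static_cast<int>(t));
        }
        std::printf("declared buffers: %zu\n", bufferTypes.size());
        return 0;
    }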
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef _DL_SYSTEM_ITENSOR_H_ -#define _DL_SYSTEM_ITENSOR_H_ - -#include - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/DlError.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * Represents a tensor which holds n-dimensional data. It is important to - * understand how the tensor data is represented in memory - * relative to the tensor dimensions. Tensors store data in - * memory in row-major order (i.e. the last tensor dimension is - * the fastest varying one). For example, if you have a two - * dimensional tensor with 3 rows and 2 columns (i.e. the tensor - * dimensions are 3,2 as returned in tensor dimension vectors) - * with the following data in terms rows and columns: - * - * | 1 2 |
- * | 3 4 |
- * | 5 6 |
- * - * This data would be stored in memory as 1,2,3,4,5,6. - */ -typedef void* Snpe_ITensor_Handle_t; - - -/** - * Destroys/frees an ITensor - * - * @param[in] userBufferHandle : Handle to access the IUserBuffer - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_ITensor_Delete(Snpe_ITensor_Handle_t iTensorHandle); - -/** - * Returns a tensor iterator pointing to the beginning - * of the data in the tensor. - * - * @param[in] tensorHandle : Handle to access ITensor - * - * @return The tensor data as a void pointer. - */ -SNPE_API -void* Snpe_ITensor_GetData(Snpe_ITensor_Handle_t tensorHandle); - -/** - * @brief Gets the shape of this tensor. - * - * The last element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying dimension, etc. - * - * @param[in] tensorHandle : Handle to access ITensor - * - * @return A TensorShape handle holding the tensor dimensions. - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_ITensor_GetShape(Snpe_ITensor_Handle_t tensorHandle); - -/** - * Returns the element size of the data in the tensor - * (discounting strides). This is how big a buffer would - * need to be to hold the tensor data contiguously in - * memory. - * - * @param[in] tensorHandle : Handle to access ITensor - * - * @return The size of the tensor (in elements). - */ -SNPE_API -size_t Snpe_ITensor_GetSize(Snpe_ITensor_Handle_t tensorHandle); - -SNPE_API -int Snpe_ITensor_IsQuantized(Snpe_ITensor_Handle_t tensorHandle); - -SNPE_API -float Snpe_ITensor_GetDelta(Snpe_ITensor_Handle_t tensorHandle); - -SNPE_API -float Snpe_ITensor_GetOffset(Snpe_ITensor_Handle_t tensorHandle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _DL_SYSTEM_ITENSOR_H_ diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/ITensor.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/ITensor.hpp deleted file mode 100644 index 4785a39d..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/ITensor.hpp +++ /dev/null @@ -1,95 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
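Since Snpe_ITensor_GetData returns the contiguous row-major storage described above and Snpe_ITensor_GetSize returns the element count, simple element-wise work can be done directly on the raw buffer. A sketch of a helper that zero-fills a tensor, assuming the tensor was created elsewhere (for example through the ITensor factory) and holds 32-bit float elements:

    #include <cstddef>
    #include "DlSystem/ITensor.h"   // C header shown above

    // Zero-fill an existing tensor. Assumes float32 elements stored
    // contiguously in row-major order, as described in the header above.
    static void zeroFill(Snpe_ITensor_Handle_t tensor)
    {
        float* data = static_cast<float*>(Snpe_ITensor_GetData(tensor));
        const size_t numElements = Snpe_ITensor_GetSize(tensor);
        for (size_t i = 0; i < numElements; ++i) {
            data[i] = 0.0f;
        }
    }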
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "TensorShape.hpp" -#include "ITensorItr.hpp" - -#include "DlSystem/ITensor.h" - - -namespace DlSystem { - - -class ITensor : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_ITensor_Delete}; - - template - T* getData(){ - return static_cast(Snpe_ITensor_GetData(handle())); - } - - template - const T* getData() const{ - return static_cast(Snpe_ITensor_GetData(handle())); - } - -public: - using iterator = DlSystem::ITensorItr; - using const_iterator = DlSystem::ITensorItr; - - - iterator begin(){ - return iterator(getData()); - } - - const_iterator begin() const{ - return const_iterator(getData()); - } - - const_iterator cbegin() const{ - return begin(); - } - - iterator end(){ - return begin() + getSize(); - } - - const_iterator end() const{ - return cbegin() + getSize(); - } - - const_iterator cend() const{ - return end(); - } - - TensorShape getShape() const{ - return moveHandle(Snpe_ITensor_GetShape(handle())); - } - - size_t getSize() const{ - return Snpe_ITensor_GetSize(handle()); - } - - // Serialize to std::ostream is no longer supported - void serialize(std::ostream &output) const = delete; - - bool isQuantized() const{ - return Snpe_ITensor_IsQuantized(handle()); - } - - float GetDelta() const{ - return Snpe_ITensor_GetDelta(handle()); - } - - float GetOffset() const{ - return Snpe_ITensor_GetOffset(handle()); - } -}; - - -} //ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ITensor) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/ITensorFactory.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/ITensorFactory.hpp deleted file mode 100644 index 5ef1e9d3..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/ITensorFactory.hpp +++ /dev/null @@ -1,52 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
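//-----------------------------------------------------------------------------
// Minimal sketch of the same tensor handled through the C++ wrapper above,
// paired with the ITensorFactory declared in the next file of this diff.
// The TensorShape initializer-list constructor is an assumption here; its
// declaration lives in TensorShape.hpp, which is not part of this excerpt.

#include <memory>

void fillWithOnes()
{
    DlSystem::ITensorFactory factory;           // default-constructible factory, as declared below
    DlSystem::TensorShape shape({3, 2});        // 3 rows x 2 columns (assumed constructor)

    std::unique_ptr<DlSystem::ITensor> tensor = factory.createTensor(shape);
    if (!tensor) return;

    // The iterators walk the data in row-major order, last dimension fastest.
    for (float& value : *tensor) {
        value = 1.0f;
    }

    // getSize() reports the element count: 3 * 2 == 6 for this shape.
    (void)tensor->getSize();
}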
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "ITensor.hpp" - -#include - - -#include "SNPE/SNPEUtil.h" - -namespace DlSystem{ -// NOTE: These factories use a different handle type because they are singletons -// Never copy this pattern unless you're also implementing a singleton -class ITensorFactory : public Wrapper{ - friend BaseType; - - using BaseType::BaseType; - static constexpr DeleteFunctionType DeleteFunction{NoOpDeleter}; - -public: - ITensorFactory() - : BaseType(nullptr) - { } - - - std::unique_ptr createTensor(const TensorShape &shape) noexcept{ - return makeUnique(Snpe_Util_CreateITensor(getHandle(shape))); - } - - // Create from std::istream is no longer supported - std::unique_ptr createTensor(std::istream &input) noexcept = delete; - - std::unique_ptr createTensor(const TensorShape &shape, - const unsigned char *data, - size_t dataSize) noexcept{ - auto handle = Snpe_Util_CreateITensorDataSize(getHandle(shape), data, dataSize); - return makeUnique(handle); - } - -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, ITensorFactory) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/ITensorItr.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/ITensorItr.hpp deleted file mode 100644 index 801aa217..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/ITensorItr.hpp +++ /dev/null @@ -1,199 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include -#include -#include - -#include "Wrapper.hpp" -#include "ITensorItrImpl.hpp" - -namespace DlSystem{ - -template -class ITensorItr{ -public: - using iterator_category = std::bidirectional_iterator_tag; - using pointer = typename std::conditional::type; - using value_type = float; - using difference_type = std::ptrdiff_t; - using reference = typename std::conditional::type; - - - ITensorItr() = delete; - virtual ~ITensorItr() = default; - - explicit ITensorItr(pointer data) noexcept - : m_Impl{nullptr}, - m_IsTrivial{true}, - m_Data{data}, - m_DataStart{data} - { } - - ITensorItr(std::unique_ptr impl, - bool isTrivial = false, - float* data = nullptr) - : m_Impl(impl->clone()), - m_IsTrivial(isTrivial), - m_Data(data), - m_DataStart(data) - { } - - ITensorItr(const ITensorItr& itr) - : m_Impl(itr.m_Impl ? itr.m_Impl->clone() : nullptr), - m_IsTrivial(itr.m_IsTrivial), - m_Data(itr.m_Data), - m_DataStart(itr.m_DataStart) - { } - - ITensorItr(ITensorItr&& itr) noexcept - : m_Impl(std::move(itr.m_Impl)), - m_IsTrivial(itr.m_IsTrivial), - m_Data(itr.m_Data), - m_DataStart(itr.m_DataStart) - { } - - ITensorItr& operator=(const ITensorItr& other){ - if (this == &other) return *this; - - m_Impl = other.m_Impl ? 
other.m_Impl->clone() : nullptr; - m_IsTrivial = other.m_IsTrivial; - m_Data = other.m_Data; - m_DataStart = other.m_DataStart; - return *this; - } - ITensorItr& operator=(ITensorItr&& other) noexcept{ - if(this != &other){ - m_Impl = std::move(other.m_Impl); - m_IsTrivial = other.m_IsTrivial; - m_Data = other.m_Data; - m_DataStart = other.m_DataStart; - } - return *this; - } - - inline ITensorItr& operator++(){ - if (m_IsTrivial){ - m_Data++; - } else { - m_Impl->increment(); - } - return *this; - } - inline ITensorItr operator++(int){ - ITensorItr tmp(*this); - operator++(); - return tmp; - } - inline ITensorItr& operator--(){ - if (m_IsTrivial){ - m_Data--; - } else { - m_Impl->decrement(); - } - return *this; - } - inline ITensorItr operator--(int){ - ITensorItr tmp(*this); - operator--(); - return tmp; - } - inline ITensorItr& operator+=(int rhs){ - if (m_IsTrivial){ - m_Data += rhs; - } else { - m_Impl->increment(rhs); - } - return *this; - } - inline friend ITensorItr operator+(ITensorItr lhs, int rhs){ - lhs += rhs; - return lhs; - } - inline ITensorItr& operator-=(int rhs){ - if (m_IsTrivial){ - m_Data -= rhs; - } else { - m_Impl->decrement(rhs); - } - return *this; - } - inline friend ITensorItr operator-(ITensorItr lhs, int rhs){ - lhs -= rhs; - return lhs; - } - - inline size_t operator-(const ITensorItr& rhs){ - if (m_IsTrivial) return (m_Data - m_DataStart) - (rhs.m_Data - rhs.m_DataStart); - return m_Impl->getPosition() - rhs.m_Impl->getPosition(); - } - - inline friend bool operator<(const ITensorItr& lhs, const ITensorItr& rhs){ - if (lhs.m_IsTrivial) return lhs.m_Data < rhs.m_Data; - return lhs.m_Impl->dataPointer() < rhs.m_Impl->dataPointer(); - } - inline friend bool operator>(const ITensorItr& lhs, const ITensorItr& rhs){ - return rhs < lhs; - } - inline friend bool operator<=(const ITensorItr& lhs, const ITensorItr& rhs){ - return !(lhs > rhs); - } - inline friend bool operator>=(const ITensorItr& lhs, const ITensorItr& rhs){ - return !(lhs < rhs); - } - - inline bool operator==(const ITensorItr& rhs) const{ - if (m_IsTrivial) return m_Data == rhs.m_Data; - return m_Impl->dataPointer() == rhs.m_Impl->dataPointer(); - } - inline bool operator!=(const ITensorItr& rhs) const{ - return !operator==(rhs); - } - - inline reference operator[](size_t idx){ - if (m_IsTrivial) return *(m_DataStart + idx); - return m_Impl->getReferenceAt(idx); - } - inline reference operator*(){ - if (m_IsTrivial) return *m_Data; - return m_Impl->getReference(); - } - inline reference operator->(){ - return *(*this); - } - inline float* dataPointer() const{ - if (m_IsTrivial) return m_Data; - return m_Impl->dataPointer(); - } - - -protected: - std::unique_ptr<::DlSystem::ITensorItrImpl> m_Impl; - bool m_IsTrivial = false; - pointer m_Data = nullptr; - pointer m_DataStart = nullptr; -}; - - -inline void fill(ITensorItr first, ITensorItr end, float val){ - std::fill(first, end, val); -} -template -OutItr copy(InItr first, InItr last, OutItr result){ - return std::copy(first, last, result); -} - -} // ns DlSystem - - -// ALIAS_IN_ZDL_NAMESPACE -namespace zdl{ namespace DlSystem{ - template - using ITensorItr = ::DlSystem::ITensorItr; -}} diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/ITensorItrImpl.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/ITensorItrImpl.hpp deleted file mode 100644 index 6b9a497b..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/ITensorItrImpl.hpp +++ /dev/null @@ 
-1,32 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once -#include "Wrapper.hpp" - -namespace DlSystem { - -class ITensorItrImpl { -public: - ITensorItrImpl() = default; - virtual ~ITensorItrImpl() = default; - - virtual float getValue() const = 0; - virtual float& getReference() = 0; - virtual float& getReferenceAt(size_t idx) = 0; - virtual float* dataPointer() const = 0; - virtual void increment(int incVal = 1) = 0; - virtual void decrement(int decVal = 1) = 0; - virtual size_t getPosition() = 0; - virtual std::unique_ptr clone() = 0; - -private: - ITensorItrImpl& operator=(const ITensorItrImpl& other) = delete; - ITensorItrImpl(const ITensorItrImpl& other) = delete; -}; - -} // ns DlSystem diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.h deleted file mode 100644 index fc4cc316..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.h +++ /dev/null @@ -1,714 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _IUSER_BUFFER_H -#define _IUSER_BUFFER_H - -#include -#include - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/DlError.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE UserByfferEncoding handle - */ -typedef void* Snpe_UserBufferEncoding_Handle_t; - -/** - * @brief . - * - * An enum class of all supported element types in a IUserBuffer - */ -//enum class Snpe_UserBufferEncoding_ElementType_t -typedef enum -{ - /// Unknown element type. - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNKNOWN = 0, - - /// Each element is presented by float. - SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT = 1, - - /// Each element is presented by an unsigned int. - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNSIGNED8BIT = 2, - - /// Each element is presented by float16. - SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT16 = 3, - - /// Each element is presented by an 8-bit quantized value. - SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8 = 10, - - /// Each element is presented by an 16-bit quantized value. 
- SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF16 = 11, - - /// Each element is presented by Int32 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT32 = 12, - - /// Each element is presented by UInt32 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT32 = 13, - - /// Each element is presented by Int8 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT8 = 14, - - /// Each element is presented by UInt8 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT8 = 15, - - /// Each element is presented by Int16 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT16 = 16, - - /// Each element is presented by UInt16 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT16 = 17, - - /// Each element is present by Bool8 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_BOOL8 = 18, - - /// Each element is present by Int64 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT64 = 19, - - /// Each element is present by UInt64 - SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT64 = 20 - -}Snpe_UserBufferEncoding_ElementType_t; - - -/** - * @brief Retrieves the element type - * - * @param[in] userBufferEncodingHandle : Handle to access userBufferEncoding - * - * @return Element type - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_UserBufferEncoding_GetElementType(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access userBufferEncoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncoding_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Destroys/frees a UserBufferEncoding - * - * @param[in] userBufferEncodingHandle : Handle to access UserBufferEncoding - * - * @return indication of success/failures - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncoding_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - - -/** - * @brief . - * - * A base class buffer source type - * - * @note User buffer from CPU support all kinds of runtimes; - * User buffer from GLBUFFER support only GPU runtime. - */ -typedef void* Snpe_UserBufferSource_Handle_t; - -typedef enum -{ - /// Unknown buffer source type. - SNPE_USERBUFFERSOURCE_SOURCETYPE_UNKNOWN = 0, - - /// The network inputs are from CPU buffer. - SNPE_USERBUFFERSOURCE_SOURCETYPE_CPU = 1, - - /// The network inputs are from OpenGL buffer. - SNPE_USERBUFFERSOURCE_SOURCETYPE_GLBUFFER = 2 -}Snpe_UserBufferSource_SourceType_t; - -/** - * @brief Retrieves the source type - * - * @param[in] userBufferSourceHandle : Handle to access userBufferSource - * - * @return Source type - */ -SNPE_API -Snpe_UserBufferSource_SourceType_t Snpe_UserBufferSource_GetSourceType(Snpe_UserBufferSource_Handle_t userBufferSourceHandle); - -/** - * @brief Destroys/frees a UserBufferSource - * - * @param[in] userBufferSourceHandle : Handle to access UserBufferSource - * - * @return indication of success/failures - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferSource_Delete(Snpe_UserBufferSource_Handle_t userBufferSourceHandle); - -/** - * @brief . - * - * An source type where input data is delivered from OpenGL buffer - */ -SNPE_API -Snpe_UserBufferSource_Handle_t Snpe_UserBufferSourceGLBuffer_Create(); - -/** - * @brief Destroys the userBuffer - * - * @param[in] userBufferSourceHandle : Handle to access the UserBuffer - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferSourceGLBuffer_Delete(Snpe_UserBufferSource_Handle_t userBufferSourceHandle); - -// Encoding 8 Bit -/** - * @brief . 
- * - * An encoding type where each element is represented by an unsigned int. - * - * Userbuffer size assumes uint8 encoding for each element. - * (i.e., a tensor with dimensions (2,3) will be represented by (2 * 3) * 1 = 6 bytes in memory). - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingUnsigned8Bit_Create(); - -/** - * @brief Copy Constructor for UserBufferEncodingUnsigned8Bit - * - * An encoding type where each element is represented by an unsigned int. - * - * Userbuffer size assumes uint8 encoding for each element. - * (i.e., a tensor with dimensions (2,3) will be represented by (2 * 3) * 1 = 6 bytes in memory). - * - * @param[in] otherHandle : a handle to another UserBufferEncodingUnsigned8Bit to copy - * - * @return a handle to the UserBufferEncodingUnsigned8Bit - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingUnsigned8Bit_CreateCopy(Snpe_UserBufferEncoding_Handle_t otherHandle); - -/** - * @brief Destroys the encodingUnsigned8Bit - * - * @param[in] userBufferEncodingHandle : Handle to access the encodingUnsigned8Bit - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingUnsigned8Bit_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingUnsigned8Bit_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - - -// Encoding Float -/** - * @brief . - * - * An encoding type where each element is represented by a float. - * - * Userbuffer size assumes float encoding for each element. - * (i.e., a tensor with dimensions (2,3) will be represented by (2 * 3) * 4 = 24 bytes in memory). - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingFloat_Create(); - -/** - * @brief Copy Constructor for UserBufferEncodingFloat - * - * An encoding type where each element is represented by a float. - * - * Userbuffer size assumes float encoding for each element. - * (i.e., a tensor with dimensions (2,3) will be represented by (2 * 3) * 4 = 24 bytes in memory). - * - * @param[in] otherHandle : a handle to another UserBufferEncodingFloat to copy - * - * @return a handle to the constructed UserBufferEncodingFloat - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingFloat_CreateCopy(Snpe_UserBufferEncoding_Handle_t otherHandle); - -/** - * @brief Destroys the encodingFloat - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingFloat_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingFloat_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -// Encoding FloatN -/** - * @brief . - * - * An encoding type where each element is represented by a float N - * - * Userbuffer size assumes float N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 16 will be represented by (2 * 3) * 2 = 12 bytes in memory). 
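//-----------------------------------------------------------------------------
// Small worked example (a sketch, not part of the deleted header): the element
// size reported by an encoding is what turns a tensor shape into a byte count
// for the backing user buffer, exactly as the (2,3) cases in the comments
// above describe.

#include <cstddef>

static size_t floatBufferBytes(size_t rows, size_t cols)
{
    Snpe_UserBufferEncoding_Handle_t enc = Snpe_UserBufferEncodingFloat_Create();
    const size_t elementSize = Snpe_UserBufferEncodingFloat_GetElementSize(enc); // 4 bytes per float
    Snpe_UserBufferEncodingFloat_Delete(enc);

    // For rows=2, cols=3 this yields (2 * 3) * 4 = 24 bytes, matching the doc above.
    return rows * cols * elementSize;
}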
- */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingFloatN_Create(uint8_t bWidth); - -/** - * @brief Copy Constructor for UserBufferEncodingFloatN - * - * An encoding type where each element is represented by a float N - * - * Userbuffer size assumes float N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 16 will be represented by (2 * 3) * 2 = 12 bytes in memory). - * - * @param[in] otherHandle : a handle to another UserBufferEncodingFloatN to copy - * - * @return a handle to the constructed UserBufferEncodingFloatN - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingFloatN_CreateCopy(Snpe_UserBufferEncoding_Handle_t otherHandle); - - -/** - * @brief Destroys the encodingFloatN - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingFloatN_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingFloatN_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - - -/** - * @brief Get the Float type corresponding to a given bitwidth - * - * @param width bitwidth of Float type - * - * @return ElementType corresponding to a Float of width bits - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_UserBufferEncodingFloatN_GetTypeFromWidth(uint8_t width); - -/** - * @brief . - * - * An encoding type where each element is represented by tfN, which is an - * N-bit quantized value, which has an exact representation of 0.0 - * - * Userbuffer size assumes tf N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 16 will be represented by (2 * 3) * 2 = 12 bytes in memory). - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingTfN_Create(uint64_t stepFor0, float stepSize, uint8_t bWidth); - -/** - * @brief Copy Constructor for UserBufferEncodingTfN - * - * An encoding type where each element is represented by tfN, which is an - * N-bit quantized value, which has an exact representation of 0.0 - * - * Userbuffer size assumes tf N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 16 will be represented by (2 * 3) * 2 = 12 bytes in memory). - * @param otherHandle the UserBufferEncodingTfN to copy - * @return a handle to a newly constructed UserBufferEncodingTfN - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingTfN_CreateCopy(Snpe_UserBufferEncoding_Handle_t otherHandle); - -/** - * @brief Destroys the encodingTfN - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingTfN_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. 
- */ -SNPE_API -size_t Snpe_UserBufferEncodingTfN_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Sets the step value that represents 0 - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @param[in] stepExactly0 : The step value that represents 0 - * - */ -SNPE_API -void Snpe_UserBufferEncodingTfN_SetStepExactly0(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle, uint64_t stepExactly0); - -/** - * @brief Sets the float value that each step represents - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @param[in] quantizedStepSize : The float value of each step size - * - */ -SNPE_API -void Snpe_UserBufferEncodingTfN_SetQuantizedStepSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle, float quantizedStepSize); - -/** - * @brief Retrieves the step that represents 0.0 - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Step value - */ -SNPE_API -uint64_t Snpe_UserBufferEncodingTfN_GetStepExactly0(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the step size - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Step size - */ -SNPE_API -float Snpe_UserBufferEncodingTfN_GetQuantizedStepSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * Calculates the minimum floating point value that - * can be represented with this encoding. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Minimum representable floating point value - */ -SNPE_API -float Snpe_UserBufferEncodingTfN_GetMin(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * Calculates the maximum floating point value that - * can be represented with this encoding. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Maximum representable floating point value - */ -SNPE_API -float Snpe_UserBufferEncodingTfN_GetMax(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Get the tfN type corresponding to a given bitwidth - * - * @param width bitwidth of tfN type - * - * @return ElementType corresponding to a tfN of width bits - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_UserBufferEncodingTfN_GetTypeFromWidth(uint8_t width); - -// Encoding Int N -/** - * @brief . - * - * An encoding type where each element is represented by a Int - * - * Userbuffer size assumes int N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 32 will be represented by (2 * 3) * 4 = 24 bytes in memory). - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingIntN_Create(uint8_t bWidth); - -/** - * @brief Copy Constructor for UserBufferEncodingIntN - * - * An encoding type where each element is represented by a Int - * - * Userbuffer size assumes int N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 32 will be represented by (2 * 3) * 4 = 24 bytes in memory). 
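//-----------------------------------------------------------------------------
// Sketch of a tfN (quantized) encoding using the functions above. The step
// that represents 0.0 and the per-step size fully describe the encoding; the
// range reported by GetMin/GetMax is assumed to follow the usual affine
// mapping real = (step - stepExactly0) * stepSize, which the header implies
// but does not spell out.

static void describeTf8Encoding()
{
    // 8-bit encoding where step 128 maps to 0.0 and each step is 0.5.
    Snpe_UserBufferEncoding_Handle_t tf8 =
        Snpe_UserBufferEncodingTfN_Create(/*stepFor0=*/128, /*stepSize=*/0.5f, /*bWidth=*/8);

    // Under the assumed mapping: min = (0 - 128) * 0.5 = -64.0,
    //                            max = (255 - 128) * 0.5 = 63.5.
    float minVal = Snpe_UserBufferEncodingTfN_GetMin(tf8);
    float maxVal = Snpe_UserBufferEncodingTfN_GetMax(tf8);
    (void)minVal; (void)maxVal;

    // The same encoding object can be re-pointed at a differently quantized input.
    Snpe_UserBufferEncodingTfN_SetStepExactly0(tf8, 100);
    Snpe_UserBufferEncodingTfN_SetQuantizedStepSize(tf8, 0.25f);

    Snpe_UserBufferEncodingTfN_Delete(tf8);
}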
- * @param otherHandle the UserBufferEncodingIntN to copy - * @return a handle to a newly constructed UserBufferEncodingIntN - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingIntN_CreateCopy(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Destroys the encodingIntN - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingIntN_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingIntN_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Get the int type corresponding to a given bitwidth - * - * @param width bitwidth of int type - * - * @return ElementType corresponding to a int of width bits - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_UserBufferEncodingIntN_GetTypeFromWidth(uint8_t bWidth); - -// Encoding Uint N -/** - * @brief . - * - * An encoding type where each element is represented by a Uint - * - * Userbuffer size assumes uint N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 32 will be represented by (2 * 3) * 4 = 24 bytes in memory). - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingUintN_Create(uint8_t bWidth); - -/** - * @brief Copy Constructor for UserBufferEncodingUintN - * - * An encoding type where each element is represented by a Uint - * - * Userbuffer size assumes uint N encoding for each element. - * (i.e., a tensor with dimensions (2,3) with a provided bitwidth of 32 will be represented by (2 * 3) * 4 = 24 bytes in memory). - * @param otherHandle the UserBufferEncodingUintN to copy - * @return a handle to a newly constructed UserBufferEncodingUintN - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingUintN_CreateCopy(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Destroys the encodingUintN - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingUintN_Delete(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferEncodingHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingUintN_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Get the uint type corresponding to a given bitwidth - * - * @param width bitwidth of uint type - * - * @return ElementType corresponding to a uint of width bits - */ -SNPE_API -Snpe_UserBufferEncoding_ElementType_t Snpe_UserBufferEncodingUintN_GetTypeFromWidth(uint8_t bWidth); - - -// Encoding Bool -/** - * @brief . 
- * - * An encoding type where each element is represented by a Bool - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingBool_Create(uint8_t bWidth); - -/** - * @brief Copy Constructor for UserBufferEncodingBool - * - * An encoding type where each element is represented by a bool - * - * @param otherHandle the UserBufferEncodingBool to copy - * @return a handle to a newly constructed UserBufferEncodingBool - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_UserBufferEncodingBool_CreateCopy(Snpe_UserBufferEncoding_Handle_t userBufferEncodingHandle); - -/** - * @brief Destroys the encodingBool - * - * @param[in] userBufferHandle : Handle to access the encoding - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferEncodingBool_Delete(Snpe_UserBufferEncoding_Handle_t userBufferHandle); - -/** - * @brief Retrieves the size of the element, in bytes. - * - * @param[in] userBufferHandle : Handle to access the encoding - * - * @return Size of the element, in bytes. - */ -SNPE_API -size_t Snpe_UserBufferEncodingBool_GetElementSize(Snpe_UserBufferEncoding_Handle_t userBufferHandle); - - - -/** - * A typedef to indicate a SNPE IUserBuffer handle - * UserBuffer contains a pointer and info on how to walk it and interpret its content. - */ -typedef void* Snpe_IUserBuffer_Handle_t; - -/** - * Destroys/frees an IUserBuffer - * - * @param[in] userBufferHandle : Handle to access the IUserBuffer - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_IUserBuffer_Delete(Snpe_IUserBuffer_Handle_t userBufferHandle); - - -/** - * @brief Retrieves the total number of bytes between elements in each dimension if - * the buffer were to be interpreted as a multi-dimensional array. - * - * @param[in] userBufferHandle : Handle to access the user Buffer - * - * @warning Do not modify the TensorShape returned by reference. Treat it as a const reference. - * - * @return A const reference to the number of bytes between elements in each dimension. - * e.g. A tightly packed tensor of floats with dimensions [4, 3, 2] would - * return strides of [24, 8, 4]. - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_IUserBuffer_GetStrides_Ref(Snpe_IUserBuffer_Handle_t userBufferHandle); - -/** - * @brief Retrieves the size of the buffer, in bytes. - * - * @param[in] userBufferHandle : Handle to access the user Buffer - * - * @return Size of the underlying buffer, in bytes. - */ -SNPE_API -size_t Snpe_IUserBuffer_GetSize(Snpe_IUserBuffer_Handle_t userBufferHandle); - -/** - * @brief Retrieves the size of the inference data in the buffer, in bytes. - * - * The inference results from a dynamic-sized model may not be exactly the same size - * as the UserBuffer provided to SNPE. This function can be used to get the amount - * of output inference data, which may be less or greater than the size of the UserBuffer. - * - * If the inference results fit in the UserBuffer, getOutputSize() would be less than - * or equal to getSize(). But if the inference results were more than the capacity of - * the provided UserBuffer, the results would be truncated to fit the UserBuffer. But, - * getOutputSize() would be greater than getSize(), which indicates a bigger buffer - * needs to be provided to SNPE to hold all of the inference results. 
- * - * @param[in] userBufferHandle : Handle to access the user Buffer - * - * @return Size required for the buffer to hold all inference results, which can be less - * or more than the size of the buffer, in bytes. - */ -SNPE_API -size_t Snpe_IUserBuffer_GetOutputSize(Snpe_IUserBuffer_Handle_t userBufferHandle); - -/** - * @brief Changes the underlying memory that backs the UserBuffer. - * - * This can be used to avoid creating multiple UserBuffer objects - * when the only thing that differs is the memory location. - * - * @param[in] userBufferHandle : Handle to access the user Buffer - * - * @param[in] buffer : Pointer to the memory location - * - * @return Whether the set succeeds. - */ -SNPE_API -int Snpe_IUserBuffer_SetBufferAddress(Snpe_IUserBuffer_Handle_t userBufferHandle, void* buffer); - -/** - * @brief Gets a reference to the data encoding object of - * the underlying buffer - * - * This is necessary when the UserBuffer is re-used, and the encoding - * parameters can change. For example, each input can be quantized with - * different step sizes. - * - * @param[in] userBufferHandle : Handle to access the user Buffer - * - * @return Data encoding meta-data - */ -SNPE_API -Snpe_UserBufferEncoding_Handle_t Snpe_IUserBuffer_GetEncoding_Ref(Snpe_IUserBuffer_Handle_t userBufferHandle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _IUSER_BUFFER_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.hpp deleted file mode 100644 index 727c195b..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IUserBuffer.hpp +++ /dev/null @@ -1,390 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include -#include "TensorShape.hpp" - -#include "DlSystem/IUserBuffer.h" - - -namespace DlSystem { - - -class UserBufferEncoding: public Wrapper { - friend BaseType; - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserBufferEncoding_Delete}; -protected: - UserBufferEncoding(HandleType handle) - : BaseType(handle) - { } -public: - - virtual ~UserBufferEncoding() = default; - - UserBufferEncoding(UserBufferEncoding&& other) noexcept - : BaseType(std::move(other)) - { } - - enum class ElementType_t - { - /// Unknown element type. - UNKNOWN = 0, - - /// Each element is presented by 32-bit float. - FLOAT = 1, - - /// Each element is presented by an unsigned int. - UNSIGNED8BIT = 2, - - /// Each element is presented by 16-bit float. - FLOAT16 = 3, - - /// Each element is presented by an 8-bit quantized value. - TF8 = 10, - - /// Each element is presented by an 16-bit quantized value. 
- TF16 = 11, - - /// Each element is presented by Int32 - INT32 = 12, - - /// Each element is presented by UInt32 - UINT32 = 13, - - /// Each element is presented by Int8 - INT8 = 14, - - /// Each element is presented by UInt8 - UINT8 = 15, - - /// Each element is presented by Int16 - INT16 = 16, - - /// Each element is presented by UInt16 - UINT16 = 17, - - // Each element is presented by Bool8 - BOOL8 = 18, - - // Each element is presented by Int64 - INT64 = 19, - - // Each element is presented by UInt64 - UINT64 = 20 - }; - - ElementType_t getElementType() const noexcept{ - return static_cast(Snpe_UserBufferEncoding_GetElementType(handle())); - } - - size_t getElementSize() const noexcept{ - return Snpe_UserBufferEncoding_GetElementSize(handle()); - } -}; - - -class UserBufferSource: public Wrapper { - friend BaseType; - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserBufferSource_Delete}; - -public: - enum class SourceType_t - { - /// Unknown buffer source type. - UNKNOWN = 0, - - /// The network inputs are from CPU buffer. - CPU = 1, - - /// The network inputs are from OpenGL buffer. - GLBUFFER = 2 - }; -protected: - UserBufferSource(HandleType handle) - : BaseType(handle) - { } -public: - SourceType_t getSourceType() const noexcept{ - return static_cast(Snpe_UserBufferSource_GetSourceType(handle())); - } - -}; - -class UserBufferSourceGLBuffer : public UserBufferSource{ -public: - UserBufferSourceGLBuffer() - : UserBufferSource(Snpe_UserBufferSourceGLBuffer_Create()) - { } -}; - -class UserBufferEncodingUnsigned8Bit : public UserBufferEncoding{ -public: - using UserBufferEncoding::UserBufferEncoding; - UserBufferEncodingUnsigned8Bit() - : UserBufferEncoding(Snpe_UserBufferEncodingUnsigned8Bit_Create()) - { } -}; - -class UserBufferEncodingFloatN : public UserBufferEncoding{ -public: - using UserBufferEncoding::UserBufferEncoding; - - UserBufferEncodingFloatN(uint8_t bWidth=32) - : UserBufferEncoding(Snpe_UserBufferEncodingFloatN_Create(bWidth)) - { } - - UserBufferEncodingFloatN(const UserBufferEncodingFloatN& other) - : UserBufferEncoding(Snpe_UserBufferEncodingFloatN_CreateCopy(other.handle())) - { } - - static ElementType_t getTypeFromWidth(uint8_t width){ - return static_cast(Snpe_UserBufferEncodingFloatN_GetTypeFromWidth(width)); - } -}; - -class UserBufferEncodingFloat : public UserBufferEncoding{ -public: - using UserBufferEncoding::UserBufferEncoding; - UserBufferEncodingFloat() - : UserBufferEncoding(Snpe_UserBufferEncodingFloat_Create()) - { } - UserBufferEncodingFloat(const UserBufferEncodingFloat& other) - : UserBufferEncoding(Snpe_UserBufferEncodingFloat_CreateCopy(other.handle())) - { } - - UserBufferEncodingFloat(UserBufferEncodingFloat&& other) noexcept - : UserBufferEncoding(std::move(other)) - { } -}; - - -class UserBufferEncodingTfN : public UserBufferEncoding{ -public: - - using UserBufferEncoding::UserBufferEncoding; - template::value && std::is_floating_point::value, int>::type = 0> - UserBufferEncodingTfN(T stepFor0, U stepSize, uint8_t bWidth=8) - : UserBufferEncoding(Snpe_UserBufferEncodingTfN_Create(stepFor0, stepSize, bWidth)) - { } - - UserBufferEncodingTfN(const UserBufferEncoding& ubEncoding) - : UserBufferEncoding(Snpe_UserBufferEncodingTfN_CreateCopy(getHandle(ubEncoding))) - { } - UserBufferEncodingTfN(const UserBufferEncodingTfN& ubEncoding) - : UserBufferEncoding(Snpe_UserBufferEncodingTfN_CreateCopy(getHandle(ubEncoding))) - { } - - void setStepExactly0(uint64_t stepExactly0){ - 
Snpe_UserBufferEncodingTfN_SetStepExactly0(handle(), stepExactly0); - } - - void setQuantizedStepSize(const float quantizedStepSize){ - Snpe_UserBufferEncodingTfN_SetQuantizedStepSize(handle(), quantizedStepSize); - } - - uint64_t getStepExactly0() const{ - return Snpe_UserBufferEncodingTfN_GetStepExactly0(handle()); - } - - float getMin() const{ - return Snpe_UserBufferEncodingTfN_GetMin(handle()); - } - float getMax() const{ - return Snpe_UserBufferEncodingTfN_GetMax(handle()); - } - - float getQuantizedStepSize() const{ - return Snpe_UserBufferEncodingTfN_GetQuantizedStepSize(handle()); - } - - static ElementType_t getTypeFromWidth(uint8_t width){ - return static_cast(Snpe_UserBufferEncodingTfN_GetTypeFromWidth(width)); - } -}; - -class UserBufferEncodingIntN : public UserBufferEncoding{ -public: - - UserBufferEncodingIntN(uint8_t bWidth=32) - : UserBufferEncoding(Snpe_UserBufferEncodingIntN_Create(bWidth)) - { } - - UserBufferEncodingIntN(const UserBufferEncoding& ubEncoding) - : UserBufferEncoding(Snpe_UserBufferEncodingIntN_CreateCopy(getHandle(ubEncoding))) - { } - - static ElementType_t getTypeFromWidth(uint8_t width){ - return static_cast(Snpe_UserBufferEncodingIntN_GetTypeFromWidth(width)); - } -}; - - - -class UserBufferEncodingUintN : public UserBufferEncoding{ -public: - - UserBufferEncodingUintN(uint8_t bWidth=32) - : UserBufferEncoding(Snpe_UserBufferEncodingUintN_Create(bWidth)) - { } - - UserBufferEncodingUintN(const UserBufferEncoding& ubEncoding) - : UserBufferEncoding(Snpe_UserBufferEncodingUintN_CreateCopy(getHandle(ubEncoding))) - { } - - static ElementType_t getTypeFromWidth(uint8_t width){ - return static_cast(Snpe_UserBufferEncodingUintN_GetTypeFromWidth(width)); - } -}; - - -class UserBufferEncodingTf8 : public UserBufferEncodingTfN{ -public: - using UserBufferEncodingTfN::UserBufferEncodingTfN; - UserBufferEncodingTf8() = delete; - - template::value && std::is_floating_point::value, int>::type = 0> - UserBufferEncodingTf8(T stepFor0, U stepSize) - : UserBufferEncodingTfN(stepFor0, stepSize, 8) - { } - - UserBufferEncodingTf8(const UserBufferEncoding& ubEncoding) - : UserBufferEncodingTfN(ubEncoding) - { } - -}; - -class UserBufferEncodingBool : public UserBufferEncoding{ -public: - UserBufferEncodingBool(uint8_t bWidth=8) - : UserBufferEncoding(Snpe_UserBufferEncodingBool_Create(bWidth)) - { } - - UserBufferEncodingBool(const UserBufferEncoding& ubEncoding) - : UserBufferEncoding(Snpe_UserBufferEncodingBool_CreateCopy(getHandle(ubEncoding))) - { } -}; - -class IUserBuffer: public Wrapper { - friend BaseType; - using BaseType::BaseType; - static constexpr DeleteFunctionType DeleteFunction{Snpe_IUserBuffer_Delete}; - -public: - const TensorShape& getStrides() const{ - return *makeReference(Snpe_IUserBuffer_GetStrides_Ref(handle())); - } - - size_t getSize() const{ - return Snpe_IUserBuffer_GetSize(handle()); - } - - size_t getOutputSize() const{ - return Snpe_IUserBuffer_GetOutputSize(handle()); - } - - bool setBufferAddress(void* buffer) noexcept{ - return Snpe_IUserBuffer_SetBufferAddress(handle(), buffer); - } - - const UserBufferEncoding& getEncoding() const noexcept{ - auto h = Snpe_IUserBuffer_GetEncoding_Ref(handle()); - switch(Snpe_UserBufferEncoding_GetElementType(h)){ - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNSIGNED8BIT: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT16: - case 
SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT32: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT16: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT32: - return *makeReference(h); - - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT16: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8: - return *makeReference(h); - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF16: - return *makeReference(h); - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_BOOL8: - return *makeReference(h); - - default: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNKNOWN: - return *makeReference(h); - } - } - UserBufferEncoding& getEncoding() noexcept{ - auto h = Snpe_IUserBuffer_GetEncoding_Ref(handle()); - switch(Snpe_UserBufferEncoding_GetElementType(h)){ - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNSIGNED8BIT: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT16: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UINT32: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT8: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT16: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_INT32: - return *makeReference(h); - - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT16: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8: - return *makeReference(h); - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF16: - return *makeReference(h); - - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_BOOL8: - return *makeReference(h); - - default: - case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNKNOWN: - return *makeReference(h); - } - } - -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncoding) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferSource) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferSourceGLBuffer) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingUnsigned8Bit) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingFloatN) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingFloat) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingTfN) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingIntN) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingUintN) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferEncodingTf8) - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, IUserBuffer) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IUserBufferFactory.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IUserBufferFactory.hpp deleted file mode 100644 index b3bbb087..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/IUserBufferFactory.hpp +++ /dev/null @@ -1,68 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
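//-----------------------------------------------------------------------------
// Sketch of wiring caller-owned memory to SNPE with the classes above and the
// IUserBufferFactory declared in the next file of this diff. The strides
// follow the convention documented for getStrides(): a tightly packed float
// tensor of shape [4, 3, 2] uses byte strides [24, 8, 4]. The TensorShape
// initializer-list constructor is assumed (TensorShape.hpp is not part of
// this excerpt).

#include <memory>
#include <vector>

void bindUserMemory()
{
    std::vector<float> storage(4 * 3 * 2);                    // caller-owned backing memory

    DlSystem::UserBufferEncodingFloat floatEncoding;          // 4 bytes per element
    DlSystem::TensorShape strides({3 * 2 * sizeof(float),     // 24: bytes per step along dim 0
                                   2 * sizeof(float),         //  8: bytes per step along dim 1
                                   sizeof(float)});           //  4: bytes per step along dim 2

    DlSystem::IUserBufferFactory factory;
    std::unique_ptr<DlSystem::IUserBuffer> userBuffer =
        factory.createUserBuffer(storage.data(),
                                 storage.size() * sizeof(float),
                                 strides,
                                 &floatEncoding);
    if (!userBuffer) return;

    // Point the same IUserBuffer at a different allocation without recreating it.
    std::vector<float> other(storage.size());
    userBuffer->setBufferAddress(other.data());

    // getSize() reports the byte capacity; getOutputSize() may differ for
    // dynamically sized outputs, as documented above.
    (void)userBuffer->getSize();
}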
-// -//============================================================================= -#pragma once - -#include - -#include "Wrapper.hpp" -#include "IUserBuffer.hpp" -#include "TensorShape.hpp" - - -#include "SNPE/SNPEUtil.h" - -namespace DlSystem{ - - -// NOTE: These factories use a different handle type because they are singletons -// Never copy this pattern unless you're also implementing a singleton -class IUserBufferFactory : public Wrapper{ - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{NoOpDeleter}; - -public: - IUserBufferFactory() - : BaseType(nullptr) - { } - - std::unique_ptr createUserBuffer(void *buffer, - size_t bufSize, - const TensorShape &strides, - UserBufferEncoding* userBufferEncoding) noexcept{ - if(!userBufferEncoding) return {}; - auto handle = Snpe_Util_CreateUserBuffer(buffer, - bufSize, - getHandle(strides), - getHandle(userBufferEncoding)); - return makeUnique(handle); - } - - std::unique_ptr createUserBuffer(void *buffer, - size_t bufSize, - const TensorShape &strides, - UserBufferEncoding* userBufferEncoding, - UserBufferSource* userBufferSource) noexcept{ - if(!userBufferEncoding || !userBufferSource) return {}; - auto handle = Snpe_Util_CreateUserBufferFromSource(buffer, - bufSize, - getHandle(strides), - getHandle(*userBufferEncoding), - getHandle(*userBufferSource)); - return makeUnique(handle); - } - -}; - - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, IUserBufferFactory) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.h deleted file mode 100644 index 15b2a089..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.h +++ /dev/null @@ -1,329 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_PLATFORMCONFIG_H -#define DL_SYSTEM_PLATFORMCONFIG_H - -#include "DlSystem/DlError.h" -#include "DlSystem/DlEnums.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * @brief . - * - * A structure OpenGL configuration - * - * @note When certain OpenGL context and display are provided to UserGLConfig for using - * GPU buffer as input directly, the user MUST ensure the particular OpenGL - * context and display remain vaild throughout the execution of neural network models. - */ -typedef void* Snpe_UserGLConfig_Handle_t; - -/** - * @brief . 
- * - * Creates a new userGLConfig - * - */ -SNPE_API -Snpe_UserGLConfig_Handle_t Snpe_UserGLConfig_Create(); - -/** - * @brief Destroys the userGLConfig - * - * @param[in] handle : Handle to access the userGLConfig - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserGLConfig_Delete(Snpe_UserGLConfig_Handle_t handle); - -/** - * @brief Sets the EGL context - * - * @param[in] handle : Handle to access userGLConfig - * - * @param[in] userGLContext : void pointer - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserGLConfig_SetUserGLContext(Snpe_UserGLConfig_Handle_t handle, void* userGLContext); - -/** - * @brief Sets the EGL Display - * - * @param[in] handle : Handle to access userGLConfig - * - * @param[in] userGLDisplay : void pointer - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserGLConfig_SetUserGLDisplay(Snpe_UserGLConfig_Handle_t handle, void* userGLDisplay); - - -/** - * @brief Get EGL context - * - * @param[in] handle : Handle to access userGLConfig - * - * @return userGLContext of type void pointer - * - */ -SNPE_API -void* Snpe_UserGLConfig_GetUserGLContext(Snpe_UserGLConfig_Handle_t handle); - -/** - * @brief Get EGL Display - * - * @param[in] handle : Handle to access userGLConfig - * - * @return userGLDisplay of type void pointer - * - */ -SNPE_API -void* Snpe_UserGLConfig_GetUserGLDisplay(Snpe_UserGLConfig_Handle_t handle); - - -/** - * @brief . - * - * A structure Gpu configuration - */ -typedef void* Snpe_UserGpuConfig_Handle_t; - -/** - * @brief . - * - * Creates a new userGpuConfig - * - */ -SNPE_API -Snpe_UserGpuConfig_Handle_t Snpe_UserGpuConfig_Create(); - -/** - * @brief Destroys the userGpuConfig - * - * @param[in] handle : Handle to access userGLConfig - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserGpuConfig_Delete(Snpe_UserGpuConfig_Handle_t handle); - -/** - * @brief Set the userGpuConfig - * - * @param[in] handle : Handle to access userGpuConfig - * - * @param[in] glHandle : Handle needed to access userGlConfig - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -void Snpe_UserGpuConfig_Set(Snpe_UserGpuConfig_Handle_t handle, Snpe_UserGLConfig_Handle_t glHandle); - -/** - * @brief Get the userGpuConfig - * - * @param[in] handle : Handle to access userGpuConfig - * - * @return Handle needed to access userGlConfig - */ -SNPE_API -Snpe_UserGLConfig_Handle_t Snpe_UserGpuConfig_Get_Ref(Snpe_UserGpuConfig_Handle_t handle); - - - -/** - * A typedef to indicate a SNPE PlatformConfig handle - */ -typedef void* Snpe_PlatformConfig_Handle_t; - - -/** - * @brief . - * - * Creates a new PlatformConfig - * - */ -SNPE_API -Snpe_PlatformConfig_Handle_t Snpe_PlatformConfig_Create(); - - -/** - * @brief Copy-Construct a PlatformConfig from another PlatformConfig - * - * @param[in] otherHandle Handle to the other PlatformConfig - * - * @return Handle to the Copy-Constructed PlatformConfig - */ -SNPE_API -Snpe_PlatformConfig_Handle_t Snpe_PlatformConfig_CreateCopy(Snpe_PlatformConfig_Handle_t otherHandle); - -/** - * @brief Destroys the PlatformConfig - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return Error code. Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PlatformConfig_Delete(Snpe_PlatformConfig_Handle_t handle); - - -typedef enum -{ - /// Unknown platform type. - SNPE_PLATFORMCONFIG_PLATFORMTYPE_UNKNOWN = 0, - - /// Snapdragon CPU. 
- SNPE_PLATFORMCONFIG_PLATFORMTYPE_CPU = 1, - - /// Adreno GPU. - SNPE_PLATFORMCONFIG_PLATFORMTYPE_GPU = 2, - - /// Hexagon DSP. - SNPE_PLATFORMCONFIG_PLATFORMTYPE_DSP = 3 -} Snpe_PlatformConfig_PlatformType_t; - - -/** - * @brief Retrieves the platform type - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return Platform type - */ -SNPE_API -Snpe_PlatformConfig_PlatformType_t Snpe_PlatformConfig_GetPlatformType(Snpe_PlatformConfig_Handle_t handle); - -/** - * @brief Indicates whther the plaform configuration is valid. - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return 1 if the platform configuration is valid; 0 otherwise. - */ -SNPE_API -int Snpe_PlatformConfig_IsValid(Snpe_PlatformConfig_Handle_t handle); - -/** - * @brief Retrieves the Gpu configuration - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return userGpuConfig populated with the Gpu configuration. - * - */ -SNPE_API -Snpe_UserGpuConfig_Handle_t Snpe_PlatformConfig_GetUserGpuConfig(Snpe_PlatformConfig_Handle_t handle); - -/** - * @brief Sets the Gpu configuration - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @param[in] gpuHandle : Gpu Configuration handle - * - * @return 1 if Gpu configuration was successfully set; 0 otherwise. - */ -SNPE_API -int Snpe_PlatformConfig_SetUserGpuConfig(Snpe_PlatformConfig_Handle_t handle, Snpe_UserGpuConfig_Handle_t gpuHandle); - -/** - * @brief Sets the platform options - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @param[in] options : Options as a const char* in the form of "keyword:options" - * - * @return 1 if options are pass validation; otherwise 0. If false, the options are not updated. - */ -SNPE_API -int Snpe_PlatformConfig_SetPlatformOptions(Snpe_PlatformConfig_Handle_t handle, const char* options); - -/** - * @brief Indicates whther the plaform configuration is valid. - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return 1 if the platform configuration is valid; 0 otherwise. - */ -SNPE_API -int Snpe_PlatformConfig_IsOptionsValid(Snpe_PlatformConfig_Handle_t handle); - -/** - * @brief Gets the platform options - * - * @param[in] handle : Handle needed to access the platformConfig - * - * @return Options as a const char* - */ -SNPE_API -const char* Snpe_PlatformConfig_GetPlatformOptions(Snpe_PlatformConfig_Handle_t handle); - -/** - * @brief Sets the platform options - * - * @note the returned string will be invalidated by subsequent calls to this function - * - * @param[in] handle : Handle needed to access the platformConfig - * @param[in] optionName : Name of platform options" - * @param[in] value : Value of specified optionName - * - * @return If 1, add "optionName:value" to platform options if optionName don't exist, otherwise update the - * value of specified optionName. - * If 0, the platform options will not be changed. - */ -SNPE_API -int Snpe_PlatformConfig_SetPlatformOptionValue(Snpe_PlatformConfig_Handle_t handle, const char* optionName, const char* value); - -/** - * @brief Removes the platform options - * - * @param[in] handle : Handle needed to access the platformConfig - * @param[in] optionName : Name of platform options" - * @param[in] value : Value of specified optionName - * - * @return If 1, removed "optionName:value" to platform options if optionName don't exist, do nothing. - * If 0, the platform options will not be changed. 
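//-----------------------------------------------------------------------------
// Sketch of the C-API flow described above: wrap an existing EGL context and
// display in a UserGLConfig, attach it to a PlatformConfig through a
// UserGpuConfig, and pass platform options as "keyword:options" strings.
// eglContext/eglDisplay and the "exampleOption:value" string are placeholders,
// not real option names; the context and display must remain valid for the
// whole network execution, as the header warns.

static void configureGpuPlatform(void* eglContext, void* eglDisplay)
{
    Snpe_UserGLConfig_Handle_t glConfig = Snpe_UserGLConfig_Create();
    Snpe_UserGLConfig_SetUserGLContext(glConfig, eglContext);
    Snpe_UserGLConfig_SetUserGLDisplay(glConfig, eglDisplay);

    Snpe_UserGpuConfig_Handle_t gpuConfig = Snpe_UserGpuConfig_Create();
    Snpe_UserGpuConfig_Set(gpuConfig, glConfig);

    Snpe_PlatformConfig_Handle_t platformConfig = Snpe_PlatformConfig_Create();
    Snpe_PlatformConfig_SetUserGpuConfig(platformConfig, gpuConfig);

    // Options are validated before being applied; rejected strings leave the
    // configuration unchanged.
    if (Snpe_PlatformConfig_SetPlatformOptions(platformConfig, "exampleOption:value")) {
        (void)Snpe_PlatformConfig_IsOptionsValid(platformConfig);
    }

    // Deletes mirror the Create calls above; the wrapper classes later in this
    // diff release their local GL config the same way after Set.
    Snpe_PlatformConfig_Delete(platformConfig);
    Snpe_UserGpuConfig_Delete(gpuConfig);
    Snpe_UserGLConfig_Delete(glConfig);
}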
- */ -SNPE_API -int Snpe_PlatformConfig_RemovePlatformOptionValue(Snpe_PlatformConfig_Handle_t handle, const char* optionName, const char* value); - -SNPE_API -void Snpe_PlatformConfig_SetIsUserGLBuffer(int isUserGLBuffer); - -SNPE_API -int Snpe_PlatformConfig_GetIsUserGLBuffer(); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_PLATFORMCONFIG_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.hpp deleted file mode 100644 index 5995c51b..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/PlatformConfig.hpp +++ /dev/null @@ -1,265 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include "DlSystem/PlatformConfig.h" - -namespace DlSystem { - -struct UserGLConfig -{ - /// Holds user EGL context. - /// - void* userGLContext = nullptr; - - /// Holds user EGL display. - void* userGLDisplay = nullptr; -}; - -struct UserGpuConfig{ - /// Holds user OpenGL configuration. - /// - UserGLConfig userGLConfig; -}; - -class PlatformConfig : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_PlatformConfig_Delete}; - - class UserGLConfigInternal : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserGLConfig_Delete}; - - public: - UserGLConfigInternal() - : BaseType(Snpe_UserGLConfig_Create()) - { } - UserGLConfigInternal(const UserGLConfig& uglc) - : UserGLConfigInternal() - { - setUserGLContext(uglc.userGLContext); - setUserGLDisplay(uglc.userGLDisplay); - } - void setUserGLContext(void* userGLContext){ - Snpe_UserGLConfig_SetUserGLContext(handle(), userGLContext); - } - void setUserGLDisplay(void* userGLDisplay){ - Snpe_UserGLConfig_SetUserGLDisplay(handle(), userGLDisplay); - } - - void* getUserGLContext(){ - return Snpe_UserGLConfig_GetUserGLContext(handle()); - } - void* getUserGLDisplay(){ - return Snpe_UserGLConfig_GetUserGLDisplay(handle()); - } - }; - - - - class UserGpuConfigInternal : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserGpuConfig_Delete}; - - public: - UserGpuConfigInternal() - : BaseType(Snpe_UserGpuConfig_Create()) - { } - - void set(const UserGLConfig& userGLConfig){ - UserGLConfigInternal uglc(userGLConfig); - Snpe_UserGpuConfig_Set(handle(), getHandle(uglc)); - } - - void get(UserGLConfig& uglc){ - UserGLConfigInternal uglci(moveHandle(Snpe_UserGpuConfig_Get_Ref(handle()), true)); - - uglc.userGLContext = uglci.getUserGLContext(); - uglc.userGLDisplay = uglci.getUserGLDisplay(); - } - - }; -public: - - /** 
- * @brief . - * - * An enum class of all supported platform types - */ - enum class PlatformType_t - { - /// Unknown platform type. - UNKNOWN = 0, - - /// Snapdragon CPU. - CPU = 1, - - /// Adreno GPU. - GPU = 2, - - /// Hexagon DSP. - DSP = 3 - }; - - /** - * @brief . - * - * A union class user platform configuration information - */ - struct PlatformConfigInfo - { - /// Holds user GPU Configuration. - /// - UserGpuConfig userGpuConfig; - - }; - - ~PlatformConfig() = default; - - PlatformConfig() - : BaseType(Snpe_PlatformConfig_Create()) - { } - - PlatformConfig(const PlatformConfig& other) - : BaseType(Snpe_PlatformConfig_CreateCopy(other.handle())) - { } - - /** - * @brief Retrieves the platform type - * - * @return Platform type - */ - PlatformType_t getPlatformType() const{ - return static_cast(Snpe_PlatformConfig_GetPlatformType(handle())); - }; - - /** - * @brief Indicates whther the plaform configuration is valid. - * - * @return True if the platform configuration is valid; false otherwise. - */ - bool isValid() const{ - return Snpe_PlatformConfig_IsValid(handle()); - }; - - /** - * @brief Retrieves the Gpu configuration - * - * @param[out] userGpuConfig The passed in userGpuConfig populated with the Gpu configuration on return. - * - * @return True if Gpu configuration was retrieved; false otherwise. - */ - bool getUserGpuConfig(UserGpuConfig& userGpuConfig) const{ - auto platformType = static_cast(Snpe_PlatformConfig_GetPlatformType(handle())); - if(platformType != PlatformType_t::GPU) return false; - - UserGpuConfigInternal gpuConf(moveHandle(Snpe_PlatformConfig_GetUserGpuConfig(handle()))); - - gpuConf.get(userGpuConfig.userGLConfig); - return true; - } - - /** - * @brief Sets the Gpu configuration - * - * @param[in] userGpuConfig Gpu Configuration - * - * @return True if Gpu configuration was successfully set; false otherwise. - */ - bool setUserGpuConfig(UserGpuConfig& userGpuConfig){ - UserGpuConfigInternal gpuConf; - gpuConf.set(userGpuConfig.userGLConfig); - return Snpe_PlatformConfig_SetUserGpuConfig(handle(), getHandle(gpuConf)); - } - - /** - * @brief Sets the platform options - * - * @param[in] options Options as a string in the form of "keyword:options" - * - * @return True if options are pass validation; otherwise false. If false, the options are not updated. - */ - bool setPlatformOptions(const std::string& options){ - return Snpe_PlatformConfig_SetPlatformOptions(handle(), options.c_str()); - } - - /** - * @brief Indicates whther the plaform configuration is valid. - * - * @return True if the platform configuration is valid; false otherwise. - */ - bool isOptionsValid() const{ - return Snpe_PlatformConfig_IsOptionsValid(handle()); - } - - /** - * @brief Gets the platform options - * - * @return Options as a string - */ - std::string getPlatformOptions() const { - return Snpe_PlatformConfig_GetPlatformOptions(handle()); - } - - /** - * @brief Sets the platform options - * - * @param[in] optionName Name of platform options" - * @param[in] value Value of specified optionName - * - * @return If true, add "optionName:value" to platform options if optionName don't exist, otherwise update the - * value of specified optionName. - * If false, the platform options will not be changed. 
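// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original headers or patch):
// minimal use of the DlSystem::PlatformConfig wrapper declared above. Assumes
// the SNPE SDK headers and library are on the include/link path; the option
// string "unsignedPD:ON" is only an example of the "keyword:options" format
// described in the comments above.
// ---------------------------------------------------------------------------
#include <iostream>
#include "DlSystem/PlatformConfig.hpp"

static void configurePlatform()
{
    DlSystem::PlatformConfig config;   // default-constructed configuration

    // Options are passed as "keyword:options"; validity can be checked afterwards.
    if (config.setPlatformOptions("unsignedPD:ON") && config.isOptionsValid()) {
        std::cout << "platform options: " << config.getPlatformOptions() << std::endl;
    }
    std::cout << "config valid: " << std::boolalpha << config.isValid() << std::endl;
}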
- */ - bool setPlatformOptionValue(const std::string& optionName, const std::string& value){ - return Snpe_PlatformConfig_SetPlatformOptionValue(handle(), optionName.c_str(), value.c_str()); - } - - /** - * @brief Removes the platform options - * - * @param[in] optionName Name of platform options" - * @param[in] value Value of specified optionName - * - * @return If true, removed "optionName:value" to platform options if optionName don't exist, do nothing. - * If false, the platform options will not be changed. - */ - bool removePlatformOptionValue(const std::string& optionName, const std::string& value){ - return Snpe_PlatformConfig_RemovePlatformOptionValue(handle(), optionName.c_str(), value.c_str()); - } - - static void SetIsUserGLBuffer(bool isUserGLBuffer){ - Snpe_PlatformConfig_SetIsUserGLBuffer(isUserGLBuffer); - } - static bool GetIsUserGLBuffer(){ - return Snpe_PlatformConfig_GetIsUserGLBuffer(); - } - -}; - - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserGLConfig) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserGpuConfig) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, PlatformConfig) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/RuntimeList.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/RuntimeList.h deleted file mode 100644 index 2b699a7a..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/RuntimeList.h +++ /dev/null @@ -1,203 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_RUNTIME_LIST_H -#define DL_SYSTEM_RUNTIME_LIST_H - -#include - -#include "DlSystem/DlEnums.h" -#include "DlSystem/DlError.h" - -#include "StringList.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE RuntimeList handle - */ -typedef void* Snpe_RuntimeList_Handle_t; - -/** - * @brief . - * - * Creates a new runtime list - * - */ -SNPE_API -Snpe_RuntimeList_Handle_t Snpe_RuntimeList_Create(); - - -/** - * Copy-Constructs a RuntimeList and returns a handle to it - * - * @param runtimeListHandle the other RuntimeList to copy - * - * @return the handle to the created RuntimeList - */ -SNPE_API -Snpe_RuntimeList_Handle_t Snpe_RuntimeList_CreateCopy(Snpe_RuntimeList_Handle_t runtimeListHandle); - -/** - * @brief Destroys the RuntimeList - * - * @param[in] runtimeListHandle : Handle needed to access the runtimeList - * - * @return Error code. 
Returns SNPE_SUCCESS if destruction successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_Delete(Snpe_RuntimeList_Handle_t runtimeListHandle); - -/** - * Copy-assigns the contents of srcHandle into dstHandle - * - * @param src Source RuntimeList handle - * - * @param dst Destination RuntimeList handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_Assign(Snpe_RuntimeList_Handle_t src, Snpe_RuntimeList_Handle_t dst); - -/** - * @brief Returns the Runtime from list at position index - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @param[in] index : position in runtimeList - * - * @return The Runtime from list at position index - */ -SNPE_API -Snpe_Runtime_t Snpe_RuntimeList_GetRuntime(Snpe_RuntimeList_Handle_t runtimeListHandle, int index); - -/** - * @brief Set the Runtime of the list at position index - * - * @param[in] runtimeListHandle : Handle needed to access the runtimeList - * - * @param[in] index : position in runtimeList - * - * @param[in] runtime : The Runtime to assign to position index - * - * @return SNPE_SUCCESS on success - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_SetRuntime(Snpe_RuntimeList_Handle_t runtimeListHandle, size_t index, Snpe_Runtime_t runtime); - -/** - * @brief Adds runtime to the end of the runtime list - * order of precedence is former followed by latter entry - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @param[in] runtime to add - * - * @return Error code. Ruturns SNPE_SUCCESS If the runtime added successfully - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_Add(Snpe_RuntimeList_Handle_t runtimeListHandle, Snpe_Runtime_t runtime); - -/** - * @brief Removes the runtime from the list - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @param[in] runtime to be removed - * - * @return Error code. Ruturns SNPE_SUCCESS If the runtime removed successfully - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_Remove(Snpe_RuntimeList_Handle_t runtimeListHandle, Snpe_Runtime_t runtime) ; - -/** - * @brief Returns the number of runtimes in the list - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @return number of entries in the runtimeList. - */ -SNPE_API -size_t Snpe_RuntimeList_Size(Snpe_RuntimeList_Handle_t runtimeListHandle) ; - -/** - * @brief Returns 1 if the list is empty - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @return 1 if list empty, 0 otherwise. - */ -SNPE_API -int Snpe_RuntimeList_Empty(Snpe_RuntimeList_Handle_t runtimeListHandle) ; - -/** - * @brief . - * - * Removes all runtime from the list - * - * @param[in] runtimeListHandle: Handle needed to access the runtimeList - * - * @return Error code. Returns SNPE_SUCCESS if runtime list is cleared successfully. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeList_Clear(Snpe_RuntimeList_Handle_t runtimeListHandle); - -/** - * @brief Get a StringList of names from the runtime list in order of precedence - * - * @param runtimeListHandle Handle to a RuntimeList - * - * @return Handle to a StringList - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_RuntimeList_GetRuntimeListNames(Snpe_RuntimeList_Handle_t runtimeListHandle); - -/** - * @brief . - * - * @param[in] runtime const char* - * Returns a Runtime enum corresponding to the in param string - * - */ -SNPE_API -Snpe_Runtime_t Snpe_RuntimeList_StringToRuntime(const char* str); - -/** - * @brief . 
- * - * @param[in] runtime - * Returns a const char* corresponding to the in param runtime enum - * - */ -SNPE_API -const char* Snpe_RuntimeList_RuntimeToString(Snpe_Runtime_t runtime); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_RUNTIME_LIST_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/RuntimeList.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/RuntimeList.hpp deleted file mode 100644 index a2abf2b7..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/RuntimeList.hpp +++ /dev/null @@ -1,115 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "StringList.hpp" -#include "DlEnums.hpp" -#include "DlSystem/RuntimeList.h" - - - - - - -namespace DlSystem { - -class RuntimeList : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_RuntimeList_Delete}; - - static Runtime_t GetRuntime(HandleType handle, size_t idx){ - return static_cast(Snpe_RuntimeList_GetRuntime(handle, int(idx))); - } - static Snpe_ErrorCode_t SetRuntime(HandleType handle, size_t idx, Runtime_t runtime){ - return Snpe_RuntimeList_SetRuntime(handle, idx, static_cast(runtime)); - } - -private: - using RuntimeReference = WrapperDetail::MemberIndexedReference; - friend RuntimeReference; - -public: - - RuntimeList() - : BaseType(Snpe_RuntimeList_Create()) - { } - RuntimeList(const RuntimeList& other) - : BaseType(Snpe_RuntimeList_CreateCopy(other.handle())) - { } - RuntimeList(RuntimeList&& other) noexcept - : BaseType(std::move(other)) - { } - - RuntimeList(const Runtime_t& runtime) - : BaseType(Snpe_RuntimeList_Create()) - { - Snpe_RuntimeList_Add(handle(), static_cast(runtime)); - } - - RuntimeList& operator=(const RuntimeList& other){ - if(this != &other){ - Snpe_RuntimeList_Assign(other.handle(), handle()); - } - return *this; - } - - RuntimeList& operator=(RuntimeList&& other) noexcept{ - return moveAssign(std::move(other)); - } - - Runtime_t operator[](size_t idx) const{ - return GetRuntime(handle(), idx); - } - - RuntimeReference operator[](size_t idx) noexcept{ - return {*this, idx}; - } - - bool add(const Runtime_t& runtime){ - return SNPE_SUCCESS == Snpe_RuntimeList_Add(handle(), static_cast(runtime)); - } - - void remove(Runtime_t runtime) noexcept{ - Snpe_RuntimeList_Remove(handle(), static_cast(runtime)); - } - - size_t size() const noexcept{ - return Snpe_RuntimeList_Size(handle()); - } - - bool empty() const noexcept{ - return Snpe_RuntimeList_Empty(handle()); - } - - void clear() noexcept{ - Snpe_RuntimeList_Clear(handle()); - } - - StringList getRuntimeListNames() const{ - return moveHandle(Snpe_RuntimeList_GetRuntimeListNames(handle())); - } - - static Runtime_t stringToRuntime(const char* runtimeStr){ - return static_cast(Snpe_RuntimeList_StringToRuntime(runtimeStr)); - } - static const char* runtimeToString(Runtime_t runtime){ - return Snpe_RuntimeList_RuntimeToString(static_cast(runtime)); - } - -}; - - -} // ns DlSystem - - 
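// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original headers or patch):
// building a runtime fallback order with the DlSystem::RuntimeList wrapper
// declared above. Assumes DlSystem::Runtime_t (from DlEnums.hpp, not shown in
// this hunk) exposes DSP/GPU/CPU enumerators as in the SNPE SDK.
// ---------------------------------------------------------------------------
#include <cstdio>
#include "DlSystem/RuntimeList.hpp"

static DlSystem::RuntimeList makeFallbackList()
{
    DlSystem::RuntimeList runtimes;             // empty list
    runtimes.add(DlSystem::Runtime_t::DSP);     // preferred runtime first
    runtimes.add(DlSystem::Runtime_t::GPU);     // then GPU
    runtimes.add(DlSystem::Runtime_t::CPU);     // CPU as the last resort

    // Order of precedence is the order of insertion (see Snpe_RuntimeList_Add).
    for (const char* name : runtimes.getRuntimeListNames()) {
        std::printf("runtime: %s\n", name);
    }
    return runtimes;
}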
-ALIAS_IN_ZDL_NAMESPACE(DlSystem, RuntimeList) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/SnpeApiExportDefine.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/SnpeApiExportDefine.h deleted file mode 100644 index 62c6718f..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/SnpeApiExportDefine.h +++ /dev/null @@ -1,34 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -// Macro controlling visibility of SNPE API - -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef SNPE_API -#define SNPE_API -#endif - -#ifdef __cplusplus -} // extern "C" -#endif diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/String.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/String.hpp deleted file mode 100644 index 85b2ef22..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/String.hpp +++ /dev/null @@ -1,70 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - - -#include - - -#include "Wrapper.hpp" - -namespace DlSystem{ - - -// Just a backwards compatible wrapper for std::string -class String{ -public: - String() = delete; - explicit String(const std::string& str) - : m_String(str) - { } - explicit String(std::string&& str) noexcept - : m_String(std::move(str)) - { } - - explicit String(const char* str) - : m_String(str) - { } - - String(String&& other) noexcept = default; - String(const String& other) = delete; - - - String& operator=(String&& other) noexcept = default; - String& operator=(const String& other) = delete; - - bool operator<(const String& rhs) const noexcept{ return m_String < rhs.m_String; } - bool operator>(const String& rhs) const noexcept{ return m_String > rhs.m_String; } - bool operator<=(const String& rhs) const noexcept{ return m_String <= rhs.m_String; } - bool operator>=(const String& rhs) const noexcept{ return m_String >= rhs.m_String; } - bool operator==(const String& rhs) const noexcept{ return m_String == rhs.m_String; } - bool operator!=(const String& rhs) const noexcept{ return m_String != rhs.m_String; } - - - bool operator<(const std::string& rhs) const noexcept{ return m_String < rhs; } - bool operator>(const std::string& rhs) const noexcept{ return m_String > rhs; } - bool operator<=(const std::string& rhs) const noexcept{ return m_String <= rhs; } - bool operator>=(const std::string& rhs) const noexcept{ return m_String >= rhs; } - bool operator==(const std::string& rhs) const noexcept{ return m_String == rhs; } - bool operator!=(const std::string& rhs) const noexcept{ return m_String != rhs; } - - - const char* c_str() const noexcept{ return m_String.c_str(); } - - explicit operator std::string&() noexcept{ return m_String; } - explicit operator const std::string&() const noexcept{ return m_String; } - -private: - std::string m_String; -}; - - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, String) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/StringList.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/StringList.h deleted file mode 100644 index faa793b3..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/StringList.h +++ /dev/null @@ -1,154 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_STRING_LIST_H -#define DL_SYSTEM_STRING_LIST_H - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * A typedef to indicate a SNPE StringList handle - */ -typedef void* Snpe_StringList_Handle_t; - -/** - * Constructs a StringList and returns a handle to it - * - * @return the handle to the created StringList - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_StringList_Create(); - -/** - * Constructs a StringList and returns a handle to it - * - * @param[in] size : size of list - * - * @return the handle to the created StringList - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_StringList_CreateSize(size_t size); - -/** - * Constructs a StringList and returns a handle to it - * - * @param[in] other : StringList handle to be copied from - * - * @return the handle to the created StringList - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_StringList_CreateCopy(Snpe_StringList_Handle_t other); - -/** - * Destroys/frees a StringList - * - * @param[in] stringListHandle : Handle to access the stringList - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_StringList_Delete(Snpe_StringList_Handle_t stringListHandle); - - -/** - * Append a string to the list. - * - * @param[in] stringListHandle : Handle to access the stringList - * @param[in] str Null-terminated ASCII string to append to the list. - * - * @return SNPE_SUCCESS if Append operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_StringList_Append(Snpe_StringList_Handle_t stringListHandle, const char* string); - -/** - * Returns the string at the indicated position, - * or an empty string if the positions is greater than the size - * of the list. - * - * @param[in] stringListHandle : Handle to access the stringList - * @param[in] idx Position in the list of the desired string - * - * @return the string at the indicated position - */ -SNPE_API -const char* Snpe_StringList_At(Snpe_StringList_Handle_t stringListHandle, size_t idx); - -/** - * Pointer to the first string in the list. - * Can be used to iterate through the list. - * - * @param[in] stringListHandle : Handle to access the stringList - * - * @return Pointer to the first string in the list. - */ -SNPE_API -const char** Snpe_StringList_Begin(Snpe_StringList_Handle_t stringListHandle); - -/** - * Pointer to one after the last string in the list. - * Can be used to iterate through the list. - * - * @param[in] stringListHandle : Handle to access the stringList - * - * @return Pointer to one after the last string in the list - */ -SNPE_API -const char** Snpe_StringList_End(Snpe_StringList_Handle_t stringListHandle); - -/** - * Return the number of valid string pointers held by this list. 
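// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original headers or patch):
// exercising the Snpe_StringList_* C API declared above. Assumes the SNPE C
// library is linked; the string values appended are arbitrary examples and
// error handling is reduced to a minimum.
// ---------------------------------------------------------------------------
#include <stdio.h>
#include "DlSystem/StringList.h"

static void listOutputNames(void)
{
    Snpe_StringList_Handle_t names = Snpe_StringList_Create();

    /* Append returns SNPE_SUCCESS on success (see Snpe_StringList_Append). */
    Snpe_StringList_Append(names, "boxes");
    Snpe_StringList_Append(names, "scores");

    for (size_t i = 0; i < Snpe_StringList_Size(names); ++i) {
        printf("output[%zu] = %s\n", i, Snpe_StringList_At(names, i));
    }

    Snpe_StringList_Delete(names);   /* the owner must free the handle */
}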
- * - * @param[in] stringListHandle : Handle to access the stringList - * - * @return The size of the StringList - */ -SNPE_API -size_t Snpe_StringList_Size(Snpe_StringList_Handle_t stringListHandle); - -/** - * Copy-assigns the contents of src into dst - * - * @param src Source StringList handle - * @param dst Destination StringList handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_StringList_Assign(Snpe_StringList_Handle_t src, Snpe_StringList_Handle_t dst); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_STRING_LIST_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/StringList.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/StringList.hpp deleted file mode 100644 index 2fd84bf1..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/StringList.hpp +++ /dev/null @@ -1,73 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "DlSystem/DlError.hpp" - -#include "DlSystem/StringList.h" - - -namespace DlSystem { - -class StringList : public Wrapper{ - friend BaseType; - using BaseType::BaseType; - static constexpr DeleteFunctionType DeleteFunction = Snpe_StringList_Delete; - -public: - StringList() - : BaseType(Snpe_StringList_Create()) - { } - explicit StringList(size_t length) - : BaseType(Snpe_StringList_CreateSize(length)) - { } - StringList(const StringList& other) - : BaseType(Snpe_StringList_CreateCopy(other.handle())) - { } - StringList(StringList&& other) noexcept - : BaseType(std::move(other)) - { } - - - StringList& operator=(const StringList& other){ - if(this != &other){ - Snpe_StringList_Assign(other.handle(), handle()); - } - return *this; - } - StringList& operator=(StringList&& other) noexcept{ - return moveAssign(std::move(other)); - } - - - DlSystem::ErrorCode append(const char* str){ - return static_cast(Snpe_StringList_Append(handle(), str)); - } - - const char* at(size_t idx) const noexcept{ - return Snpe_StringList_At(handle(), idx); - } - - const char** begin() const noexcept{ - return Snpe_StringList_Begin(handle()); - } - const char** end() const noexcept{ - return Snpe_StringList_End(handle()); - } - - size_t size() const noexcept{ - return Snpe_StringList_Size(handle()); - } - -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, StringList) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorMap.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorMap.h deleted file mode 100644 index aa367eda..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorMap.h +++ /dev/null @@ -1,154 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_TENSORMAP_H -#define DL_SYSTEM_TENSORMAP_H - -#include "DlSystem/ITensor.h" -#include "DlSystem/StringList.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * A typedef to indicate a SNPE Tensor Map handle - */ -typedef void* Snpe_TensorMap_Handle_t; - - -/** - * Constructs a TensorMap and returns a handle to it - * - * @return the handle to the created TensorMap - */ -SNPE_API -Snpe_TensorMap_Handle_t Snpe_TensorMap_Create(); - - -/** - * Copy-Constructs a TensorMap and returns a handle to it - * - * @param tensorMapHandle the other TensorMap to copy - * - * @return the handle to the created TensorMap - */ -SNPE_API -Snpe_TensorMap_Handle_t Snpe_TensorMap_CreateCopy(Snpe_TensorMap_Handle_t tensorMapHandle); - -/** - * Copy-assigns the contents of srcHandle into dstHandle - * - * @param src Source TensorMap handle - * - * @param dst Destination TensorMap handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorMap_Assign(Snpe_TensorMap_Handle_t srcHandle, Snpe_TensorMap_Handle_t dstHandle); - - -/** - * Destroys/frees Tensor Map - * - * @param[in] handle : handle to tensorMap - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorMap_Delete(Snpe_TensorMap_Handle_t handle); - -/** - * @brief Adds a name and the corresponding tensor pointer - * to the map - * - * @param[in] handle : Handle to tensorMap - * @param[in] name : The name of the tensor - * @param[in] tensorHandle : Handle to access ITensor - * - * @note If a tensor with the same name already exists, the - * tensor is replaced with the existing tensor. - */ -SNPE_API -void Snpe_TensorMap_Add(Snpe_TensorMap_Handle_t handle, const char *name, Snpe_ITensor_Handle_t tensorHandle); - -/** - * @brief Removes a mapping of tensor and its name by its name - * - * @param[in] handle : Handle to tensorMap - * @param[in] name : The name of tensor to be removed - * - * @note If no tensor with the specified name is found, nothing - * is done. - */ -SNPE_API -void Snpe_TensorMap_Remove(Snpe_TensorMap_Handle_t handle, const char *name); - -/** - * @brief Returns the number of tensors in the map - * - * @param[in] handle : Handle to tensorMap - * - * @return Number of tensors in the map - */ -SNPE_API -size_t Snpe_TensorMap_Size(Snpe_TensorMap_Handle_t handle); - -/** - * @brief . - * - * @param[in] handle : Handle to tensorMap - * Removes all tensors from the map - */ -SNPE_API -void Snpe_TensorMap_Clear(Snpe_TensorMap_Handle_t handle); - -/** - * @brief Returns the tensor given its name. - * - * @param[in] handle : Handle to tensorMap - * @param[in] name : The name of the tensor to get. - * - * @return nullptr if no tensor with the specified name is - * found; otherwise, a valid pointer to the tensor. - */ -SNPE_API -Snpe_ITensor_Handle_t Snpe_TensorMap_GetTensor_Ref(Snpe_TensorMap_Handle_t handle, const char *name); - -/** - * @brief . 
- * - * @param[in] handle : Handle to tensorMap - * - * @return A StringList of the names of all tensors - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_TensorMap_GetTensorNames(Snpe_TensorMap_Handle_t handle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_TENSOR_MAP_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorMap.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorMap.hpp deleted file mode 100644 index 20a6c21f..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorMap.hpp +++ /dev/null @@ -1,81 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "DlSystem/ITensor.hpp" -#include "DlSystem/StringList.hpp" -#include "DlSystem/DlError.hpp" - -#include "DlSystem/TensorMap.h" - -namespace DlSystem { - -class TensorMap : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_TensorMap_Delete}; -public: - - TensorMap() - : BaseType(Snpe_TensorMap_Create()) - { } - - TensorMap(const TensorMap& other) - : BaseType(Snpe_TensorMap_CreateCopy(other.handle())) - { } - - TensorMap(TensorMap&& other) noexcept - : BaseType(std::move(other)) - { } - - TensorMap& operator=(const TensorMap& other){ - if(this != &other){ - Snpe_TensorMap_Assign(other.handle(), handle()); - } - return *this; - } - TensorMap& operator=(TensorMap&& other) noexcept{ - return moveAssign(std::move(other)); - } - - DlSystem::ErrorCode add(const char* name, ITensor* tensor){ - if(!tensor) return DlSystem::ErrorCode::SNPE_CAPI_BAD_ARGUMENT; - Snpe_TensorMap_Add(handle(), name, getHandle(*tensor)); - return DlSystem::ErrorCode::NONE; - } - - void remove(const char* name) noexcept{ - Snpe_TensorMap_Remove(handle(), name); - } - - size_t size() const noexcept{ - return Snpe_TensorMap_Size(handle()); - } - - void clear() noexcept{ - Snpe_TensorMap_Clear(handle()); - } - - - ITensor* getTensor(const char* name) const noexcept{ - return makeReference(Snpe_TensorMap_GetTensor_Ref(handle(), name)); - } - - StringList getTensorNames() const{ - return moveHandle(Snpe_TensorMap_GetTensorNames(handle())); - } - -}; - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, TensorMap) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorShape.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorShape.h deleted file mode 100644 index 1fde628c..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorShape.h +++ /dev/null @@ -1,174 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
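// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original headers or patch):
// associating a tensor name with an ITensor pointer via the DlSystem::TensorMap
// wrapper declared above. Creating the ITensor itself (via the SDK's tensor
// factory) is outside this hunk, so the tensor is passed in; the name
// "input:0" is an arbitrary example.
// ---------------------------------------------------------------------------
#include <cstdio>
#include "DlSystem/TensorMap.hpp"

static bool registerInput(DlSystem::TensorMap& inputs, DlSystem::ITensor* tensor)
{
    // add() rejects null tensors with SNPE_CAPI_BAD_ARGUMENT (see the wrapper above).
    if (inputs.add("input:0", tensor) != DlSystem::ErrorCode::NONE) {
        return false;
    }

    for (const char* name : inputs.getTensorNames()) {
        std::printf("mapped tensor: %s (total %zu)\n", name, inputs.size());
    }
    return inputs.getTensor("input:0") != nullptr;
}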
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_TENSOR_SHAPE_H -#define DL_SYSTEM_TENSOR_SHAPE_H - -#include - -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE TensorShape handle - */ -typedef void* Snpe_TensorShape_Handle_t; - - -/** - * @brief . - * - * Creates a new shape with a list of dims specified in array - * - * @param[in] dims The dimensions are specified in which the last - * element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying, etc. - * - * @param[in] size Size of the array. - * - * @return the handle to the created TensorShape - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_TensorShape_CreateDimsSize(const size_t *dims, size_t size); - -/** - * Constructs a TensorShape and returns a handle to it - * - * @return the handle to the created TensorShape - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_TensorShape_Create(); - -/** - * @brief . - * - * copy constructor. - * @param[in] other object to copy. - * - * @return the handle to the created TensorShape. - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_TensorShape_CreateCopy(Snpe_TensorShape_Handle_t other); - -/** - * Destroys/frees Tensor Shape - * - * @param[in] handle : handle to tensorShape - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShape_Delete(Snpe_TensorShape_Handle_t tensorShapeHandle); - -/** - * Copy-assigns the contents of srcHandle into dstHandle - * - * @param srcHandle Source TensorShape handle - * @param dstHandle Destination TensorShape handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShape_Assign(Snpe_TensorShape_Handle_t srcHandle, Snpe_TensorShape_Handle_t dstHandle); - -/** - * @brief . - * - * Concatenates additional dimensions specified in - * the array to the existing dimensions. - * - * @param[in] handle : handle to tensorShape - * @param[in] dims The dimensions are specified in which the last - * element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying, etc. - * - * @param[in] size Size of the array. - * - */ -SNPE_API -void Snpe_TensorShape_Concatenate(Snpe_TensorShape_Handle_t tensorShape, const size_t *dims, size_t size); - -/** - * @brief . - * - * @param[in] handle : handle to tensorShape - * - * Retrieves the rank i.e. number of dimensions. - * - * @return The rank - */ -SNPE_API -size_t Snpe_TensorShape_Rank(Snpe_TensorShape_Handle_t tensorShape); - -/** - * @brief . - * - * @param[in] handle : handle to tensorShape - * - * @param[in] index : Position in the dimension array. 
- * - * @return The dimension value in tensor shape - */ -SNPE_API -size_t Snpe_TensorShape_At(Snpe_TensorShape_Handle_t tensorShapeHandle, size_t index); - -/** - * @brief Set a value in a TensorShape at the provided index - * - * @param[in] handle : handle to tensorShape - * - * @param[in] index : Position in the dimension array. - * - * @param[in] value : Dimension value to set - * - * @return SNPE_SUCCESS on success - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShape_Set(Snpe_TensorShape_Handle_t tensorShapeHandle, size_t index, size_t value); - -/** - * @brief . - * - * Retrieves a pointer to the first dimension of shape - * - * @param[in] handle : handle to tensorShape - * - * @return nullptr if no dimension exists; otherwise, points to - * the first dimension. - * - */ -SNPE_API -const size_t* Snpe_TensorShape_GetDimensions(Snpe_TensorShape_Handle_t tensorShape); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_TENSOR_SHAPE_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorShape.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorShape.hpp deleted file mode 100644 index 776637c7..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorShape.hpp +++ /dev/null @@ -1,104 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include -#include -#include - -#include "Wrapper.hpp" - -#include "DlSystem/TensorShape.h" - -namespace DlSystem { - - -using Dimension = size_t; - - - -class TensorShape : public Wrapper { - friend BaseType; - using BaseType::BaseType; - -protected: - static constexpr DeleteFunctionType DeleteFunction{Snpe_TensorShape_Delete}; - -private: - using DimensionReference = WrapperDetail::MemberIndexedReference; - friend DimensionReference; - -public: - - TensorShape() - : BaseType(Snpe_TensorShape_Create()) - { } - - TensorShape(const TensorShape& other) - : BaseType(Snpe_TensorShape_CreateCopy(other.handle())) - { } - - TensorShape(TensorShape&& other) noexcept - : BaseType(std::move(other)) - { } - - TensorShape(std::initializer_list dims) - : BaseType(Snpe_TensorShape_CreateDimsSize(dims.begin(), dims.size())) - { } - - TensorShape& operator=(const TensorShape& other) noexcept{ - if(this != &other){ - Snpe_TensorShape_Assign(other.handle(), handle()); - } - return *this; - } - - TensorShape& operator=(TensorShape&& other) noexcept{ - return moveAssign(std::move(other)); - } - - TensorShape(const size_t *dims, size_t size) - : BaseType(Snpe_TensorShape_CreateDimsSize(dims, size)) - { } - - TensorShape(const std::vector& dims) - : TensorShape(dims.data(), dims.size()) - { } - - - void concatenate(const size_t *dims, size_t size){ - Snpe_TensorShape_Concatenate(handle(), dims, size); - } - - void concatenate(const size_t &dim){ - return concatenate(&dim, 1); - } - - size_t operator[](size_t idx) const{ - return Snpe_TensorShape_At(handle(), idx); - } - - DimensionReference operator[](size_t idx){ - return {*this, idx}; - } - - size_t rank() const{ - return Snpe_TensorShape_Rank(handle()); - } - - const size_t* getDimensions() const{ - return Snpe_TensorShape_GetDimensions(handle()); - } - - -}; - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, 
Dimension) -ALIAS_IN_ZDL_NAMESPACE(DlSystem, TensorShape) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.h deleted file mode 100644 index 520fa5ab..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.h +++ /dev/null @@ -1,163 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - - -/** - * @file - */ - -#ifndef _SNPE_TENSOR_SHAPE_MAP_H_ -#define _SNPE_TENSOR_SHAPE_MAP_H_ - - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" - -#include "DlSystem/TensorShape.h" -#include "DlSystem/StringList.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE TensorShapeMap handle - */ -typedef void* Snpe_TensorShapeMap_Handle_t; - -/** - * Constructs a TensorShapeMap and returns a handle to it - * - * @return the handle to the created TensorShapeMap - */ -SNPE_API -Snpe_TensorShapeMap_Handle_t Snpe_TensorShapeMap_Create(); - -/** - * @brief . - * - * copy constructor. - * - * @param[in] tsmHandle : Handle to the other object to copy. - * @return the handle to the created TensorShapeMap - */ -SNPE_API -Snpe_TensorShapeMap_Handle_t Snpe_TensorShapeMap_CreateCopy(Snpe_TensorShapeMap_Handle_t tsmHandle); - -/** - * Destroys/frees Tensor Shape Map - * - * @param[in] tsmhandle : handle to access Tensor Shape Map - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShapeMap_Delete(Snpe_TensorShapeMap_Handle_t tsmHandle); - -/** - * @brief . - * - * assignment operator. Copy-assigns from srcHandle to dstHandle - * @param[in] srcHandle : handle to source Tensor Shape Map object - * @param[out] dstHandle : handle to destination Tensor Shape Map object - * - * @return Returns SNPE_SUCCESS if Assignment successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShapeMap_Assign(Snpe_TensorShapeMap_Handle_t srcHandle, Snpe_TensorShapeMap_Handle_t dstHandle); - -/** - * @brief Adds a name and the corresponding tensor pointer - * to the map - * - * @param[in] tsmhandle : handle to access Tensor Shape Map - * @param[in] name The name of the tensor - * @param[in] tsHandle : Handle to access Tensor Shape - * - * @return Returns SNPE_SUCCESS if Add operation successful - * @note If a tensor with the same name already exists, no new - * tensor is added. 
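// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original headers or patch):
// constructing and inspecting a DlSystem::TensorShape using the TensorShape.hpp
// wrapper above. The dimension values are arbitrary examples.
// ---------------------------------------------------------------------------
#include <cstdio>
#include "DlSystem/TensorShape.hpp"

static void describeShape()
{
    // The last element is the fastest-varying dimension (see the C API comments above).
    DlSystem::TensorShape shape{1, 320, 320};   // e.g. N, H, W without channels

    const size_t channels = 3;
    shape.concatenate(channels);                // append a channel dimension -> 1x320x320x3

    const size_t* dims = shape.getDimensions();
    for (size_t i = 0; i < shape.rank(); ++i) {
        std::printf("dim[%zu] = %zu\n", i, dims[i]);
    }
}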
- */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShapeMap_Add(Snpe_TensorShapeMap_Handle_t tsmHandle, const char* name, Snpe_TensorShape_Handle_t tsHandle); - -/** - * @brief Removes a mapping of tensor and its name by its name - * - * @param[in] tsmhandle : handle to access Tensor Shape Map - * @param[in] name The name of tensor to be removed - * @return Returns SNPE_SUCCESS if Remove operation successful - * - * @note If no tensor with the specified name is found, nothing - * is done. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShapeMap_Remove(Snpe_TensorShapeMap_Handle_t tsmHandle, const char* name); - -/** - * @brief Returns the number of tensors in the map - * @param[in] tsmhandle : handle to access Tensor Shape Map - * @return Returns number entries in TensorShapeMap - */ -SNPE_API -size_t Snpe_TensorShapeMap_Size(Snpe_TensorShapeMap_Handle_t tsmHandle); - -/** - * @brief . - * - * Removes all tensors from the map - * @param[in] tsmhandle : handle to access Tensor Shape Map - * @return Returns SNPE_SUCCESS if Clear operation successful - */ -SNPE_API -Snpe_ErrorCode_t Snpe_TensorShapeMap_Clear(Snpe_TensorShapeMap_Handle_t tsmHandle); - -/** - * @brief Returns the tensor given its name. - * - * @param[in] tsmhandle : handle to access Tensor Shape Map - * @param[in] name The name of the tensor to get. - * - * @return nullptr if no tensor with the specified name is - * found; otherwise, a valid Tensor Shape Handle. - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_TensorShapeMap_GetTensorShape(Snpe_TensorShapeMap_Handle_t tsmHandle, const char* name); - -/** - * @brief . - * - * @param[in] tsmHandle : handle to access Tensor Shape Map - * @return A stringList Handle to access names of all tensor shapes - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_TensorShapeMap_GetTensorShapeNames(Snpe_TensorShapeMap_Handle_t tsmHandle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_TENSOR_SHAPE_MAP_H_ diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.hpp deleted file mode 100644 index 8b79a6e2..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/TensorShapeMap.hpp +++ /dev/null @@ -1,77 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include "DlSystem/StringList.hpp" -#include "DlSystem/TensorShape.hpp" -#include "DlSystem/DlError.hpp" - -#include "DlSystem/TensorShapeMap.h" - -namespace DlSystem { - -class TensorShapeMap : public Wrapper { - friend BaseType; - using BaseType::BaseType; - static constexpr DeleteFunctionType DeleteFunction{Snpe_TensorShapeMap_Delete}; - -public: - TensorShapeMap() - : BaseType(Snpe_TensorShapeMap_Create()) - { } - TensorShapeMap(const TensorShapeMap& other) - : BaseType(Snpe_TensorShapeMap_CreateCopy(other.handle())) - { } - TensorShapeMap(TensorShapeMap&& other) noexcept - : BaseType(std::move(other)) - { } - - TensorShapeMap& operator=(const TensorShapeMap& other){ - if(this != &other){ - Snpe_TensorShapeMap_Assign(other.handle(), handle()); - } - return *this; - } - TensorShapeMap& operator=(TensorShapeMap&& other) noexcept{ - return moveAssign(std::move(other)); - } - - DlSystem::ErrorCode add(const char *name, const TensorShape& tensorShape){ - return static_cast( - Snpe_TensorShapeMap_Add(handle(), name, getHandle(tensorShape)) - ); - } - - DlSystem::ErrorCode remove(const char* name) noexcept{ - return static_cast(Snpe_TensorShapeMap_Remove(handle(), name)); - } - - size_t size() const noexcept{ - return Snpe_TensorShapeMap_Size(handle()); - } - - DlSystem::ErrorCode clear() noexcept{ - return static_cast(Snpe_TensorShapeMap_Clear(handle())); - } - - TensorShape getTensorShape(const char* name) const noexcept{ - return moveHandle(Snpe_TensorShapeMap_GetTensorShape(handle(), name)); - } - - StringList getTensorShapeNames() const{ - return moveHandle(Snpe_TensorShapeMap_GetTensorShapeNames(handle())); - } - -}; - -} // ns DlSystem - - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, TensorShapeMap) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.h deleted file mode 100644 index 2da1c792..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.h +++ /dev/null @@ -1,151 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_USER_BUFFER_MAP_H -#define DL_SYSTEM_USER_BUFFER_MAP_H - -#include "DlSystem/StringList.h" -#include "DlSystem/IUserBuffer.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE UserBufferMap handle - */ -typedef void* Snpe_UserBufferMap_Handle_t; - -/** - * @brief . - * - * Creates a new empty UserBuffer map - */ -SNPE_API -Snpe_UserBufferMap_Handle_t Snpe_UserBufferMap_Create(); - -/** - * copy constructor. - * @param[in] other : Handle to the other userBufferMap to be copied from. 
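// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original headers or patch):
// registering an input shape override with the DlSystem::TensorShapeMap wrapper
// declared above. The tensor name and dimensions are arbitrary examples.
// ---------------------------------------------------------------------------
#include <cstdio>
#include "DlSystem/TensorShape.hpp"
#include "DlSystem/TensorShapeMap.hpp"

static DlSystem::TensorShapeMap makeInputShapeMap()
{
    DlSystem::TensorShapeMap inputShapes;
    DlSystem::TensorShape nhwc{1, 640, 640, 3};

    if (inputShapes.add("image_tensor", nhwc) == DlSystem::ErrorCode::NONE) {
        std::printf("registered %zu input shape(s), rank %zu\n",
                    inputShapes.size(),
                    inputShapes.getTensorShape("image_tensor").rank());
    }
    return inputShapes;
}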
- */ -SNPE_API -Snpe_UserBufferMap_Handle_t Snpe_UserBufferMap_CreateCopy(Snpe_UserBufferMap_Handle_t other); - - -/** - * @brief Adds a name and the corresponding UserBuffer pointer - * to the map - * - * @param[in] handle : Handle to access UserBufferMap - * @param[in] name : The name of the UserBuffer - * @param[in] bufferHandle : Handle to access UserBuffer - * - * @note If a UserBuffer with the same name already exists, the new - * UserBuffer pointer would be updated. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferMap_Add(Snpe_UserBufferMap_Handle_t handle, const char *name, Snpe_IUserBuffer_Handle_t bufferHandle); - -/** - * @brief Removes a mapping of one UserBuffer and its name by its name - * - * @param[in] handle : Handle to access UserBufferMap - * - * @param[in] name : The name of UserBuffer to be removed - * - * @note If no UserBuffer with the specified name is found, nothing - * is done. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferMap_Remove(Snpe_UserBufferMap_Handle_t handle, const char *name); - -/** - * @brief Returns the number of UserBuffers in the map - * @param[in] handle : Handle to access UserBufferMap - */ -SNPE_API -size_t Snpe_UserBufferMap_Size(Snpe_UserBufferMap_Handle_t handle); - -/** - * @brief . - * - * @param[in] handle : Handle to access UserBufferMap - * Removes all UserBuffers from the map - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferMap_Clear(Snpe_UserBufferMap_Handle_t handle); - -/** - * @brief Returns the UserBuffer given its name. - * - * @param[in] handle : Handle to access UserBufferMap - * - * @param[in] name : The name of the UserBuffer to get. - * - * @return nullptr if no UserBuffer with the specified name is - * found; otherwise, a valid pointer to the UserBuffer. - */ -SNPE_API -Snpe_IUserBuffer_Handle_t Snpe_UserBufferMap_GetUserBuffer_Ref(Snpe_UserBufferMap_Handle_t handle , const char *name); - -/** - * @brief . - * - * Returns the names of all UserBuffers - * - * @param[in] handle : Handle to access UserBufferMap - * - * @return A list of UserBuffer names. - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_UserBufferMap_GetUserBufferNames(Snpe_UserBufferMap_Handle_t handle); - -/** - * Copy-assigns the contents of srcHandle into dstHandle - * - * @param src Source UserBufferMap handle - * @param dst Destination UserBufferMap handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferMap_Assign(Snpe_UserBufferMap_Handle_t srcHandle, Snpe_UserBufferMap_Handle_t dstHandle); - -/** - * Destroys/frees UserBuffer Map - * - * @param[in] handle : Handle to access UserBuffer Map - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferMap_Delete(Snpe_UserBufferMap_Handle_t handle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_USER_BUFFER_MAP_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.hpp deleted file mode 100644 index acf3207c..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/UserBufferMap.hpp +++ /dev/null @@ -1,80 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include - -#include "Wrapper.hpp" -#include "DlSystem/DlError.hpp" -#include "DlSystem/StringList.hpp" -#include "DlSystem/IUserBuffer.hpp" - -#include "DlSystem/UserBufferMap.h" - -namespace DlSystem { - -class UserBufferMap : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserBufferMap_Delete}; - -public: - UserBufferMap() - : BaseType(Snpe_UserBufferMap_Create()) - { } - - UserBufferMap(const UserBufferMap& other) - : BaseType(Snpe_UserBufferMap_CreateCopy(other.handle())) - { } - UserBufferMap(UserBufferMap&& other) noexcept - : BaseType(std::move(other)) - { } - - UserBufferMap& operator=(const UserBufferMap& other){ - if(this != &other){ - Snpe_UserBufferMap_Assign(other.handle(), handle()); - } - return *this; - } - UserBufferMap& operator=(UserBufferMap&& other) noexcept{ - return moveAssign(std::move(other)); - } - - DlSystem::ErrorCode add(const char* name, IUserBuffer* buffer){ - if(!buffer) return ErrorCode::SNPE_CAPI_BAD_ARGUMENT; - return static_cast(Snpe_UserBufferMap_Add(handle(), name, getHandle(*buffer))); - } - - DlSystem::ErrorCode remove(const char* name) noexcept{ - return static_cast(Snpe_UserBufferMap_Remove(handle(), name)); - } - - size_t size() const noexcept{ - return Snpe_UserBufferMap_Size(handle()); - } - - DlSystem::ErrorCode clear() noexcept{ - return static_cast(Snpe_UserBufferMap_Clear(handle())); - } - - IUserBuffer* getUserBuffer(const char* name) const noexcept{ - return makeReference(Snpe_UserBufferMap_GetUserBuffer_Ref(handle(), name)); - } - - StringList getUserBufferNames() const{ - return moveHandle(Snpe_UserBufferMap_GetUserBufferNames(handle())); - } - -}; - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserBufferMap) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.h deleted file mode 100644 index c927d33e..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.h +++ /dev/null @@ -1,156 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef DL_SYSTEM_USER_MEMORY_MAP_H -#define DL_SYSTEM_USER_MEMORY_MAP_H - -#include "DlSystem/StringList.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE User Memory handle - */ -typedef void* Snpe_UserMemoryMap_Handle_t; - -/** - * @brief . 
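// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original headers or patch):
// wiring a caller-owned IUserBuffer into the DlSystem::UserBufferMap wrapper
// declared above. Creating the IUserBuffer (via the SDK's user-buffer factory)
// is outside this hunk, so it is passed in; the name is an arbitrary example.
// ---------------------------------------------------------------------------
#include <cstdio>
#include "DlSystem/UserBufferMap.hpp"

static bool attachOutputBuffer(DlSystem::UserBufferMap& outputs,
                               DlSystem::IUserBuffer* buffer)
{
    // add() returns SNPE_CAPI_BAD_ARGUMENT for a null buffer (see the wrapper above).
    if (outputs.add("detection_out", buffer) != DlSystem::ErrorCode::NONE) {
        return false;
    }
    std::printf("user buffers registered: %zu\n", outputs.size());
    return outputs.getUserBuffer("detection_out") != nullptr;
}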
- * - * Creates a new empty UserMemory map - */ -SNPE_API -Snpe_UserMemoryMap_Handle_t Snpe_UserMemoryMap_Create(); - -/** - * copy constructor. - * @param[in] other : Handle to the other object to copy. - */ -SNPE_API -Snpe_UserMemoryMap_Handle_t Snpe_UserMemoryMap_Copy(Snpe_UserMemoryMap_Handle_t other); - -/** - * Copy-assigns the contents of srcHandle into dstHandle - * - * @param[in] srcHandle Source UserMemoryMap handle - * - * @param[out] dstHandle Destination UserMemoryMap handle - * - * @return SNPE_SUCCESS on successful copy-assignment - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserMemoryMap_Assign(Snpe_UserMemoryMap_Handle_t srcHandle, Snpe_UserMemoryMap_Handle_t dstHandle); - -/** - * Destroys/frees UserMemory Map - * - * @param[in] handle : Handle to access UserMemory Map - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserMemoryMap_Delete(Snpe_UserMemoryMap_Handle_t handle); - -/** - * @brief Adds a name and the corresponding buffer address - * to the map - * - * @param[in] handle : Handle to access UserMemory Map - * @param[in] name : The name of the UserMemory - * @param[in] address : The pointer to the Buffer Memory - * - * @note If a UserBuffer with the same name already exists, the new - * address would be updated. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserMemoryMap_Add(Snpe_UserMemoryMap_Handle_t handle, const char *name, void *address); - -/** - * @brief Removes a mapping of one Buffer address and its name by its name - * - * @param[in] handle : Handle to access UserMemory Map - * @param[in] name : The name of Memory address to be removed - * - * @note If no UserBuffer with the specified name is found, nothing - * is done. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserMemoryMap_Remove(Snpe_UserMemoryMap_Handle_t handle, const char *name); - -/** - * @brief Returns the number of User Memory addresses in the map - * @param[in] handle : Handle to access UserMemory Map - */ -SNPE_API -size_t Snpe_UserMemoryMap_Size(Snpe_UserMemoryMap_Handle_t handle); - -/** - * @brief . - * - * Removes all User Memory from the map - * @param[in] handle : Handle to access UserMemory Map - */ -SNPE_API -Snpe_ErrorCode_t Snpe_UserMemoryMap_Clear(Snpe_UserMemoryMap_Handle_t handle); - -/** - * @brief . - * Returns the names of all User Memory - * - * @param[in] handle : Handle to access UserMemory Map - * - * @return Returns a handle to the stringList. 
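// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original headers or patch):
// registering a caller-owned memory block with the Snpe_UserMemoryMap_* C API
// declared above. The buffer name and size are arbitrary examples; it is
// assumed the map stores only the address and does not take ownership.
// ---------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include "DlSystem/UserMemoryMap.h"

static void registerUserMemory(void)
{
    void* block = malloc(640 * 640 * 3);                 /* caller-owned memory */
    Snpe_UserMemoryMap_Handle_t map = Snpe_UserMemoryMap_Create();

    Snpe_UserMemoryMap_Add(map, "image_tensor", block);  /* map name -> address */
    printf("mapped addresses: %zu\n", Snpe_UserMemoryMap_Size(map));

    Snpe_UserMemoryMap_Remove(map, "image_tensor");
    Snpe_UserMemoryMap_Delete(map);                      /* frees the map, not 'block' (assumed) */
    free(block);
}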
- */ -SNPE_API -Snpe_StringList_Handle_t Snpe_UserMemoryMap_GetUserBufferNames(Snpe_UserMemoryMap_Handle_t handle); - -/** - * @brief Returns the no of UserMemory addresses mapped to the buffer - * - * @param[in] handle : Handle to access UserMemory Map - * @param[in] name : The name of the UserMemory - * - */ -SNPE_API -size_t Snpe_UserMemoryMap_GetUserMemoryAddressCount(Snpe_UserMemoryMap_Handle_t handle, const char *name); - -/** - * @brief Returns address at a specified index corresponding to a UserMemory buffer name - * - * @param[in] handle : Handle to access UserMemory Map - * @param[in] name : The name of the buffer - * @param[in] index : The index in the list of addresses - * - */ -SNPE_API -void* Snpe_UserMemoryMap_GetUserMemoryAddressAtIndex(Snpe_UserMemoryMap_Handle_t handle, const char *name, uint32_t index); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // DL_SYSTEM_USER_MEMORY_MAP_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.hpp deleted file mode 100644 index 36e9cd37..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/DlSystem/UserMemoryMap.hpp +++ /dev/null @@ -1,76 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "DlSystem/DlError.hpp" -#include "DlSystem/StringList.hpp" - -#include "DlSystem/UserMemoryMap.h" - -namespace DlSystem { - -class UserMemoryMap : public Wrapper { - friend BaseType; -// Use this to get free move Ctor and move assignment operator, provided this class does not specify -// as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserMemoryMap_Delete}; -public: - UserMemoryMap() - : BaseType(Snpe_UserMemoryMap_Create()) - { } - UserMemoryMap(const UserMemoryMap& other) - : BaseType(Snpe_UserMemoryMap_Copy(other.handle())) - { } - UserMemoryMap(UserMemoryMap&& other) noexcept - : BaseType(std::move(other)) - { } - - UserMemoryMap& operator=(const UserMemoryMap& other){ - if(this != &other){ - Snpe_UserMemoryMap_Assign(handle(), other.handle()); - } - return *this; - } - - DlSystem::ErrorCode add(const char* name, void* address) noexcept{ - return static_cast(Snpe_UserMemoryMap_Add(handle(), name, address)); - } - - DlSystem::ErrorCode remove(const char* name){ - return static_cast(Snpe_UserMemoryMap_Remove(handle(), name)); - } - - size_t size() const noexcept{ - return Snpe_UserMemoryMap_Size(handle()); - } - - DlSystem::ErrorCode clear() noexcept{ - return static_cast(Snpe_UserMemoryMap_Clear(handle())); - } - - StringList getUserBufferNames() const{ - return moveHandle(Snpe_UserMemoryMap_GetUserBufferNames(handle())); - } - - size_t getUserMemoryAddressCount(const char* name) const noexcept{ - return Snpe_UserMemoryMap_GetUserMemoryAddressCount(handle(), name); - } - - void* getUserMemoryAddressAtIndex(const char* name, uint32_t index) const noexcept{ - return Snpe_UserMemoryMap_GetUserMemoryAddressAtIndex(handle(), name, index); - } - -}; - - -} // ns DlSystem - -ALIAS_IN_ZDL_NAMESPACE(DlSystem, UserMemoryMap) diff --git 
a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.h deleted file mode 100644 index 282ee547..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.h +++ /dev/null @@ -1,107 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _PLATFORM_VALIDATOR_H_ -#define _PLATFORM_VALIDATOR_H_ - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" -#include "DlSystem/DlEnums.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A typedef to indicate a SNPE PlatformValidator handle - */ -typedef void* Snpe_PlatformValidator_Handle_t; - -/** - * @brief . - * - * Creates a new Platform Validator - * - */ -SNPE_API -Snpe_PlatformValidator_Handle_t Snpe_PlatformValidator_Create(); - - -/** - * Destroys/frees Platform Validator - * - * @param[in] handle : Handle to access Platform Validator - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PlatformValidator_Delete(Snpe_PlatformValidator_Handle_t handle); - -/** - * @brief Sets the runtime processor for compatibility check - * - * @return Void - */ -SNPE_API -void Snpe_PlatformValidator_SetRuntime(Snpe_PlatformValidator_Handle_t handle, - Snpe_Runtime_t runtime, - bool unsignedPD=true); - -/** - * @brief Checks if the Runtime prerequisites for SNPE are available. - * - * @return 1 if the Runtime prerequisites are available, else 0. - */ -SNPE_API -int Snpe_PlatformValidator_IsRuntimeAvailable(Snpe_PlatformValidator_Handle_t handle, - bool unsignedPD=true); - -/** - * @brief Returns the core version for the Runtime selected. - * - * @return char* which contains the actual core version value - */ -SNPE_API -const char* Snpe_PlatformValidator_GetCoreVersion(Snpe_PlatformValidator_Handle_t handle); - -/** - * @brief Returns the library version for the Runtime selected. - * - * @return char* which contains the actual lib version value - */ -SNPE_API -const char* Snpe_PlatformValidator_GetLibVersion(Snpe_PlatformValidator_Handle_t handle); - -/** - * @brief Runs a small program on the runtime and Checks if SNPE is supported for Runtime. - * - * @return If 1, the device is ready for SNPE execution, else return 0. 
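A brief sketch of how the PlatformValidator C API above could be used to probe a runtime, assuming the SNPE library is linked; the Snpe_Runtime_t value is passed in by the caller because its constants are defined in DlSystem/DlEnums.h, which is not shown here.

// Illustrative only: checks whether a given runtime is usable on this device.
#include <cstdio>
#include "PlatformValidator/PlatformValidator.h"

static bool isRuntimeUsable(Snpe_Runtime_t runtime) {
    Snpe_PlatformValidator_Handle_t pv = Snpe_PlatformValidator_Create();
    Snpe_PlatformValidator_SetRuntime(pv, runtime, true);

    bool available = (Snpe_PlatformValidator_IsRuntimeAvailable(pv, true) == 1);
    if (available) {
        std::printf("core: %s, lib: %s\n",
                    Snpe_PlatformValidator_GetCoreVersion(pv),
                    Snpe_PlatformValidator_GetLibVersion(pv));
    }
    Snpe_PlatformValidator_Delete(pv);
    return available;
}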
- */ -SNPE_API -int Snpe_PlatformValidator_RuntimeCheck(Snpe_PlatformValidator_Handle_t handle, - bool unsignedPD=true); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _PLATFORM_VALIDATOR_H_ diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.hpp deleted file mode 100644 index de52635c..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/PlatformValidator/PlatformValidator.hpp +++ /dev/null @@ -1,57 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include - -#include "Wrapper.hpp" - -#include "DlSystem/DlEnums.hpp" - - -#include "PlatformValidator/PlatformValidator.h" - - -namespace SNPE { - -class PlatformValidator : public Wrapper { - friend BaseType; - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_PlatformValidator_Delete}; - -public: - PlatformValidator() - : BaseType(Snpe_PlatformValidator_Create()) - { } - - void setRuntime(DlSystem::Runtime_t runtime, bool unsignedPD=true){ - Snpe_PlatformValidator_SetRuntime(handle(), static_cast(runtime), unsignedPD); - } - - bool isRuntimeAvailable(bool unsignedPD=true){ - return Snpe_PlatformValidator_IsRuntimeAvailable(handle(), unsignedPD); - } - - std::string getCoreVersion(){ - return Snpe_PlatformValidator_GetCoreVersion(handle()); - } - - std::string getLibVersion(){ - return Snpe_PlatformValidator_GetLibVersion(handle()); - } - - bool runtimeCheck(bool unsignedPD=true){ - return Snpe_PlatformValidator_RuntimeCheck(handle(), unsignedPD); - } - -}; - -} // ns SNPE - -ALIAS_IN_ZDL_NAMESPACE(SNPE, PlatformValidator) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.h deleted file mode 100644 index 8a2bb7d2..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.h +++ /dev/null @@ -1,85 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
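The same check through the SNPE::PlatformValidator C++ wrapper defined above; a hedged sketch that uses DlSystem::Runtime_t::CPU_FLOAT32 purely as an example value.

// Illustrative only: wrapper-based runtime validation.
#include <iostream>
#include "PlatformValidator/PlatformValidator.hpp"

static bool checkCpuRuntime() {
    SNPE::PlatformValidator validator;
    validator.setRuntime(DlSystem::Runtime_t::CPU_FLOAT32);

    if (!validator.isRuntimeAvailable()) return false;

    std::cout << "core version: " << validator.getCoreVersion() << "\n"
              << "lib version:  " << validator.getLibVersion()  << "\n";

    // runtimeCheck() additionally runs a small program on the selected runtime.
    return validator.runtimeCheck();
}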
-// -//============================================================================== - -#ifndef _SNPE_APPLICATION_BUFFER_MAP_H_ -#define _SNPE_APPLICATION_BUFFER_MAP_H_ - - -#ifdef __cplusplus -#include -#else -#include -#endif - - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" -#include "DlSystem/StringList.h" - - -#ifdef __cplusplus -extern "C" { -#endif - - -typedef void* Snpe_ApplicationBufferMap_Handle_t; - -SNPE_API -Snpe_ApplicationBufferMap_Handle_t Snpe_ApplicationBufferMap_Create(); - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_Delete(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle); - - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_Add(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle, - const char* name, - const uint8_t* buff, - size_t size); - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_AddFloat(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle, - const char* name, - const float* buff, - size_t size); - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_Remove(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle, - const char* name); - -SNPE_API -size_t Snpe_ApplicationBufferMap_Size(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_Clear(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle); - - -SNPE_API -Snpe_StringList_Handle_t Snpe_ApplicationBufferMap_GetUserBufferNames(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle); - - -SNPE_API -Snpe_ErrorCode_t Snpe_ApplicationBufferMap_GetUserBuffer(Snpe_ApplicationBufferMap_Handle_t applicationBufferMapHandle, - const char* name, - size_t* size, - const uint8_t** data); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_APPLICATION_BUFFER_MAP_H_ diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.hpp deleted file mode 100644 index 6ad745bb..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/ApplicationBufferMap.hpp +++ /dev/null @@ -1,90 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
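A short, illustrative round trip through the ApplicationBufferMap C API declared above; the buffer name "output:0" and the 256-byte payload are placeholders.

// Illustrative only: add a named byte buffer, then read it back.
#include <cstdint>
#include <cstdio>
#include <vector>
#include "SNPE/ApplicationBufferMap.h"

static void applicationBufferMapSketch() {
    Snpe_ApplicationBufferMap_Handle_t abm = Snpe_ApplicationBufferMap_Create();

    std::vector<uint8_t> payload(256, 0);
    Snpe_ApplicationBufferMap_Add(abm, "output:0", payload.data(), payload.size());

    size_t size = 0;
    const uint8_t* data = nullptr;
    Snpe_ApplicationBufferMap_GetUserBuffer(abm, "output:0", &size, &data);
    std::printf("buffers: %zu, output:0 bytes: %zu\n",
                Snpe_ApplicationBufferMap_Size(abm), size);
    if (size) std::printf("first byte: %u\n", data[0]);

    Snpe_ApplicationBufferMap_Delete(abm);
}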
-// -//============================================================================= -#pragma once - -#include -#include -#include -#include - -#include "Wrapper.hpp" -#include "DlSystem/StringList.hpp" - -#include "SNPE/ApplicationBufferMap.h" - -namespace PSNPE { - -class ApplicationBufferMap : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_ApplicationBufferMap_Delete}; -public: - ApplicationBufferMap() - : BaseType(Snpe_ApplicationBufferMap_Create()){} - - explicit ApplicationBufferMap(const std::unordered_map> &buffer) - : ApplicationBufferMap(){ - for(const auto &kv: buffer){ - add(kv.first.c_str(), kv.second); - } - } - - void add(const char *name, const std::vector &buff){ - Snpe_ApplicationBufferMap_Add(handle(), name, buff.data(), buff.size()); - } - - void add(const char *name, const std::vector &buff){ - Snpe_ApplicationBufferMap_Add(handle(), name, reinterpret_cast(buff.data()), buff.size()*sizeof(float)); - } - - void remove(const char *name) noexcept{ - Snpe_ApplicationBufferMap_Remove(handle(), name); - } - - size_t size() const noexcept{ - return Snpe_ApplicationBufferMap_Size(handle()); - } - - void clear() noexcept{ - Snpe_ApplicationBufferMap_Clear(handle()); - } - - std::vector getUserBuffer(const char *name) const{ - size_t size{}; - const uint8_t *data{}; - Snpe_ApplicationBufferMap_GetUserBuffer(handle(), name, &size, &data); - - return std::vector(data, data + size); - } - - std::vector operator[](const char *name) const{ - return getUserBuffer(name); - } - - DlSystem::StringList getUserBufferNames() const{ - return moveHandle(Snpe_ApplicationBufferMap_GetUserBufferNames(handle())); - } - - std::unordered_map> getUserBuffer() const{ - std::unordered_map> toret; - for(auto name: getUserBufferNames()){ - toret.emplace(name, getUserBuffer(name)); - } - - return toret; - } - -}; - -} // ns PSNPE - - -ALIAS_IN_ZDL_NAMESPACE(PSNPE, ApplicationBufferMap) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/PSNPE.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/PSNPE.h deleted file mode 100644 index 2358d535..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/PSNPE.h +++ /dev/null @@ -1,898 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022,2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
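An equivalent sketch with the PSNPE::ApplicationBufferMap wrapper above, which stores float data as raw bytes internally; buffer names and sizes are illustrative.

// Illustrative only: wrapper-based buffer bookkeeping.
#include <cstdint>
#include <cstdio>
#include <vector>
#include "SNPE/ApplicationBufferMap.hpp"

static void applicationBufferMapWrapperSketch() {
    PSNPE::ApplicationBufferMap bufferMap;

    bufferMap.add("input:0", std::vector<uint8_t>(224 * 224 * 3, 0));  // raw image bytes
    bufferMap.add("scores:0", std::vector<float>(1000, 0.0f));         // floats, stored as bytes

    for (const char* name : bufferMap.getUserBufferNames()) {
        std::printf("%s -> %zu bytes\n", name, bufferMap[name].size());
    }
}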
-// -//============================================================================== - -/** - * @file - */ - -#ifndef _SNPE_PSNPE_H_ -#define _SNPE_PSNPE_H_ - - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "DlContainer/DlContainer.h" -#include "SNPE/ApplicationBufferMap.h" -#include "SNPE/RuntimeConfigList.h" -#include "SNPE/UserBufferList.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/IBufferAttributes.h" - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" - -#include "DlSystem/UserMemoryMap.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * A typedef to indicate the callback PSNPE handle of Async Output mode - */ -typedef void* Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t; - -//SNPE_API -//Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t Snpe_PSNPE_OutputAsyncCallbackParam_Create(size_t index, -// int status, -// const char* errorMsg); -// -//SNPE_API -//Snpe_ErrorCode_t Snpe_PSNPE_OutputAsyncCallbackParam_Delete(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t oacpHandle); - -// NOTE: we don't need _{Create,Delete} functions because the user does not create or delete these handles -// They're passed in to the callback functions they created - -/** - * @brief Get the data index of an output async PSNPE object - * - * @param[in] oacpHandle Handle to access the PSNPE object of output async mode - * - * @return The data idx for output async mode - */ -SNPE_API -size_t Snpe_PSNPE_OutputAsyncCallbackParam_GetDataIdx(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t oacpHandle); - -/** - * @brief Execute an output async PSNPE object - * - * @param[in] oacpHandle Handle to access the PSNPE object of output async mode - * - * @return True if executed successfully with outputAsync mode - */ -SNPE_API -int Snpe_PSNPE_OutputAsyncCallbackParam_GetExecuteStatus(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t oacpHandle); - -/** - * @brief Get the error message during the execution of PSNPE output async mode - * - * @param[in] oacpHandle Handle to access the PSNPE object of output async mode - * - * @return Error message - */ -SNPE_API -const char* Snpe_PSNPE_OutputAsyncCallbackParam_GetErrorMsg(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t oacpHandle); - -/** - * @brief Get the ID of an output async PSNPE object - * - * @param[in] oacpHandle Handle to access the PSNPE object of output async mode - * - * @return The id of an PSNPE object for output async mode - */ -SNPE_API -size_t Snpe_PSNPE_OutputAsyncCallbackParam_GetID(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t oacpHandle); - - - -/** - * A typedef to indicate the output callback of PSNPE handle of input-output async mode - */ -typedef void* Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t; - -/** - * @brief Get the data index of an input-output async PSNPE object - * - * @param[in] oacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return The data index for input-output async mode - */ -SNPE_API -size_t Snpe_PSNPE_InputOutputAsyncCallbackParam_GetDataIdx(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * @brief Execute an input-output async PSNPE object - * - * @param[in] oacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return True if executed successfully with input-output async mode - */ -SNPE_API -int Snpe_PSNPE_InputOutputAsyncCallbackParam_GetExecuteStatus(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * @brief Get the error message during the execution of PSNPE input-output async 
mode - * - * @param[in] oacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return error message - */ -SNPE_API -const char* Snpe_PSNPE_InputOutputAsyncCallbackParam_GetErrorMsg(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * @brief Get the names of output buffers to the network - * - * @param[in] ioacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return Handle of output buffer name list - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_PSNPE_InputOutputAsyncCallbackParam_GetUserBufferNames(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * @brief Get the output buffer map of PSNPE object for input-output async mode - * - * @param[in] ioacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return The reference handle of output ApplicationBufferMap - */ -SNPE_API -Snpe_ApplicationBufferMap_Handle_t Snpe_PSNPE_InputOutputAsyncCallbackParam_GetOutputMap_Ref(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * @brief Get the id of the output callback for input-output async mode - * - * @param[in] oacpHandle Handle to access the PSNPE object of input-output async mode - * - * @return The id for output callback for input-output async mode - */ -SNPE_API -size_t Snpe_PSNPE_InputOutputAsyncCallbackParam_GetID(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle); - -/** - * A typedef to indicate the input callback of PSNPE handle of input-output async mode - */ -typedef void* Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t; - -/** - * @brief Get the input list for input callback of input-output async mode - * - * @param[in] ioacpHandle Handle to access the object of input callback of input-output async mode - * - * @return List the inputs - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_PSNPE_InputOutputInputAsyncCallbackParam_GetInputs(Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t ioiacpHandle); - -/** - * @brief Get the input names for input callback of input-output async mode - * - * @param[in] ioacpHandle Handle to access the object of input callback of input-output async mode - * - * @return List the names of input - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_PSNPE_InputOutputInputAsyncCallbackParam_GetInputNames(Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t ioiacpHandle); - -/** - * @brief Get the id of the input callback for input-output async mode - * - * @param[in] oacpHandle Handle to access the object of input-output async mode - * - * @return The id of input callback for input-output async mode - */ -SNPE_API -size_t Snpe_PSNPE_InputOutputInputAsyncCallbackParam_GetID(Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t ioiacpHandle); - -/** - * @brief A struct to indicate userbuffer data type in output callback of input-output async mode - */ -typedef struct{ - /// data for the one output - const uint8_t* data; - /// the data size of this output - size_t size; -} Snpe_UserBufferData_t; - -/** - * @brief Get the output data of the output callback for input-output async mode - * - * @param[in] oacpHandle Handle to access the object of output callback of input-output async mode - * - * @param[in] name The output name of output callback of input-output async mode - * - * @return The output data of output callback for input-output async mode - */ -SNPE_API -Snpe_UserBufferData_t Snpe_PSNPE_InputOutputAsyncCallbackParam_GetUserBuffer(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t ioacpHandle, - const 
char* name); -/** - * A typedef to indicate build configuration - */ -typedef void* Snpe_BuildConfig_Handle_t; - -/** - * A typedef to indicate a PSNPE object - */ -typedef void* Snpe_PSNPE_Handle_t; - -/** - * A typedef to indicate if PSNPE object is built in serial or parallel, default = 0 - */ -typedef enum SNPE_API { - SNPE_PSNPE_BUILDMODE_SERIAL = 0, - SNPE_PSNPE_BUILDMODE_PARALLEL = 1 -} Snpe_PSNPE_BuildMode_t; - -/** - * A typedef to indicate if PSNPE objects are executed in sync mode or output async mode or input-output async mode, default = 0 - */ -typedef enum SNPE_API { - SNPE_PSNPE_INPUTOUTPUTTRANSMISSIONMODE_SYNC = 0, - SNPE_PSNPE_INPUTOUTPUTTRANSMISSIONMODE_OUTPUTASYNC = 1, - SNPE_PSNPE_INPUTOUTPUTTRANSMISSIONMODE_INPUTOUTPUTASYNC = 2 -} Snpe_PSNPE_InputOutputTransmissionMode_t; - -// BuildConfig -/** - * @brief Create the object of snpe build config - * - * @return the SNPE build handle - */ -SNPE_API -Snpe_BuildConfig_Handle_t Snpe_BuildConfig_Create(); - -/** - * @brief Release the object of snpe build config - * - * @param[in] buildConfigHandle Handle to access the object of snpe buid config - * - * @return The error of build config result - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_Delete(Snpe_BuildConfig_Handle_t buildConfigHandle); - -/** - * @brief Get the mode of build snpe object, serial or parallel - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The value of Snpe_PSNPE_BuildMode_t - */ -SNPE_API -Snpe_PSNPE_BuildMode_t Snpe_BuildConfig_GetBuildMode(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set the mode of build snpe object, serial or parallel - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] buildMode A typedef of Snpe_PSNPE_BuildMode_t - * - * @return The result of setting mode - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetBuildMode(Snpe_BuildConfig_Handle_t bcHandle, Snpe_PSNPE_BuildMode_t buildMode); - -/** - * @brief Set the dlc model - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] dlcHandle A handle of snpe DLC container - * - * @return The result of setting dlc model - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetContainer(Snpe_BuildConfig_Handle_t bcHandle, Snpe_DlContainer_Handle_t dlcHandle); - -/** - * @brief Get dlc container in snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The reference handle of DLC container - */ -SNPE_API -Snpe_DlContainer_Handle_t Snpe_BuildConfig_GetContainer_Ref(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set output buffer names in snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] slHandle A handle of the output layer name list - * - * @return The result of setting output names - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetOutputBufferNames(Snpe_BuildConfig_Handle_t bcHandle, Snpe_StringList_Handle_t slHandle); - -/** - * @brief Get output buffer names in snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The reference handle of output buffer name list. - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_BuildConfig_GetOutputBufferNames_Ref(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set output buffer names in snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] slHandle List of tensor names to output. 
An empty list will result in producing output for the final output tensor of the model. The list will be copied - * - * @return The result of setting output tensors - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetOutputTensors(Snpe_BuildConfig_Handle_t bcHandle, Snpe_StringList_Handle_t slHandle); - -/** - * @brief Get output tensors in snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The reference handle of output tensor list - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_BuildConfig_GetOutputTensors_Ref(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set runtime config list for snpe buildConfig - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] rclHandle Handle to access the object of runtime config list - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetRuntimeConfigList(Snpe_BuildConfig_Handle_t bcHandle, Snpe_RuntimeConfigList_Handle_t rclHandle); - -/** - * @brief Get runtime config list for snpe buildConfig - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The reference handle of runtime config list - */ -SNPE_API -Snpe_RuntimeConfigList_Handle_t Snpe_BuildConfig_GetRuntimeConfigList_Ref(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Get input thread number of input data for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The number of input thread - */ -SNPE_API -size_t Snpe_BuildConfig_GetInputThreadNumbers(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set input thread number of input data for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] threadNumbers The number of input thread for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputThreadNumbers(Snpe_BuildConfig_Handle_t bcHandle, size_t threadNumbers); - -/** - * @brief Get output thread number of output data for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The number of output thread - */ -SNPE_API -size_t Snpe_BuildConfig_GetOutputThreadNumbers(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set output thread number of output data for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] threadNumbers The number of output thread for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetOutputThreadNumbers(Snpe_BuildConfig_Handle_t bcHandle, size_t threadNumbers); - -/** - * @brief Set output callback for output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] callbackFunc The ouutput callback function for output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetOutputCallback(Snpe_BuildConfig_Handle_t bcHandle, - void (*callbackFunc)(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t)); -/** - * @brief Set the id of output callback function for output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] id The id of output callback function - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t 
Snpe_BuildConfig_SetOutputCallbackID(Snpe_BuildConfig_Handle_t bcHandle, size_t id); - -/** - * @brief Set the inside output callback handle to NULL for output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_ClearOutputCallback(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set output callback for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] callbackFunc The output callback function for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputOutputCallback(Snpe_BuildConfig_Handle_t bcHandle, - void (*callbackFunc)(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t)); - -/** - * @brief Set the id of output callback function for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] id The id of output callback function for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputOutputCallbackID(Snpe_BuildConfig_Handle_t bcHandle, size_t id); - -/** - * @brief Set the inside output callback handle to NULL for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_ClearInputOutputCallback(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set input callback for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] callbackFunc The input callback function for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputOutputInputCallback(Snpe_BuildConfig_Handle_t bcHandle, - Snpe_ApplicationBufferMap_Handle_t (*callbackFunc)( - Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t - ) - ); - -/** - * @brief Set the id of input callback function for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] id The id of input callback function for input-output async mode - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputOutputInputCallbackID(Snpe_BuildConfig_Handle_t bcHandle, size_t id); - -/** - * @brief Set the inside input callback handle to NULL for input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_ClearInputOutputInputCallback(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set the input and output transmission mode including sync mode, output async mode and input-output async mode, defult is sync mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] iotMode The typedef of Snpe_PSNPE_InputOutputTransmissionMode_t - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetInputOutputTransmissionMode(Snpe_BuildConfig_Handle_t bcHandle, - Snpe_PSNPE_InputOutputTransmissionMode_t iotMode); - -/** - * @brief Get the input and output transmission mode including sync mode, output async mode and input-output async mode - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - 
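For the output-async path, the build configuration takes a plain C callback plus a caller-chosen callback id. A hedged sketch using only the setters and getters declared above; the id value 1 is arbitrary.

// Illustrative only: configure output-async execution with a logging callback.
#include <cstdio>
#include "SNPE/PSNPE.h"

static void onOutputReady(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t param) {
    if (Snpe_PSNPE_OutputAsyncCallbackParam_GetExecuteStatus(param)) {
        std::printf("output ready for data index %zu\n",
                    Snpe_PSNPE_OutputAsyncCallbackParam_GetDataIdx(param));
    } else {
        std::printf("execution failed: %s\n",
                    Snpe_PSNPE_OutputAsyncCallbackParam_GetErrorMsg(param));
    }
}

static void configureOutputAsync(Snpe_BuildConfig_Handle_t bcHandle) {
    Snpe_BuildConfig_SetInputOutputTransmissionMode(
        bcHandle, SNPE_PSNPE_INPUTOUTPUTTRANSMISSIONMODE_OUTPUTASYNC);
    Snpe_BuildConfig_SetOutputCallbackID(bcHandle, 1);     // arbitrary caller-chosen id
    Snpe_BuildConfig_SetOutputCallback(bcHandle, onOutputReady);
}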
* @return The typedef of Snpe_PSNPE_InputOutputTransmissionMode_t - */ -SNPE_API -Snpe_PSNPE_InputOutputTransmissionMode_t Snpe_BuildConfig_GetInputOutputTransmissionMode(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set the profiling level for PSNPE build config, default is SNPE_PROFILING_LEVEL_OFF - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] profilingLevel The typedef of Snpe_ProfilingLevel_t - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetProfilingLevel(Snpe_BuildConfig_Handle_t bcHandle, Snpe_ProfilingLevel_t profilingLevel); - -/** - * @brief Get the profiling level for PSNPE build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The typedef of Snpe_ProfilingLevel_t - */ -SNPE_API -Snpe_ProfilingLevel_t Snpe_BuildConfig_GetProfilingLevel(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief To be deprecated, set the encode value when you want to divide one image into 2 or 4 parts to run, default is 0 which means the input don't need dividing. - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] encode0 The uint64 value of encode0 - * - * @param[in] encode1 The uint64 value of encode1 - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetEncode(Snpe_BuildConfig_Handle_t bcHandle, uint64_t encode0, uint64_t encode1); - -/** - * @brief To be deprecated, set the encode0 value for snpe build config which is a special feature used in SM8250 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] encode0 The uint64 value of encode0 - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetEncode0(Snpe_BuildConfig_Handle_t bcHandle, uint64_t encode0); - -/** - * @brief To be deprecated, set the encode1 value for snpe build config which is a special feature used in SM8250 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] encode1 The uint64 value of encode1 - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetEncode1(Snpe_BuildConfig_Handle_t bcHandle, uint64_t encode1); - -/** - * @brief To be deprecated, get the encode0 and encode1 value for snpe build config which is a special feature used in SM8250 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The uint64 value of encode - */ -SNPE_API -uint64_t* Snpe_BuildConfig_GetEncode(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief To be deprecated, get the encode0 value for snpe build config which is a special feature used in SM8250 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The uint64 value of encode0 - */ -SNPE_API -uint64_t Snpe_BuildConfig_GetEncode0(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief To be deprecated, get the encode1 value for snpe build config which is a special feature used in SM8250 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The uint64 value of encode1 - */ -SNPE_API -uint64_t Snpe_BuildConfig_GetEncode1(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set true or false for enabling init cache for snpe build config, enabling init cache = 1 - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] enableInitCache True for enabing init cache - * 
- * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetEnableInitCache(Snpe_BuildConfig_Handle_t bcHandle, int enableInitCache); - -/** - * @brief Get the satus of enabling init cache for snpe build config, enabling init cache = 1. - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] enableInitCache True for enabing init cache - * - * @return 1 or 0 for enabling init cache - */ -SNPE_API -int Snpe_BuildConfig_GetEnableInitCache(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Handle needed to access the platformConfig. - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] platformOptions Options as a const char* - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetPlatformOptions(Snpe_BuildConfig_Handle_t bcHandle, const char* platformOptions); - -/** - * @brief Get the optional platform features for snpe build config - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return Options as a const char* - */ -SNPE_API -const char* Snpe_BuildConfig_GetPlatformOptions(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Set the path directory of output diag log you want to save - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @param[in] diaglogOutputDir The string directory - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_BuildConfig_SetDiaglogOutputDir(Snpe_BuildConfig_Handle_t bcHandle, const char* diaglogOutputDir); - -/** - * @brief Get the path of output diag log - * - * @param[in] bcHandle Handle to access the object of snpe buid config - * - * @return The string directory - */ -SNPE_API -const char* Snpe_BuildConfig_GetDiaglogOutputDir(Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Create the handle of PSNPE object - * - * @return The handle of PSNPE object - */ -SNPE_API -Snpe_PSNPE_Handle_t Snpe_PSNPE_Create(); - -/** - * @brief Release the handle of PSNPE object - * - * @param[in] psnpeHandle Handle to access the PSNPE object - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_Delete(Snpe_PSNPE_Handle_t psnpeHandle); - -/** - * @brief Build the instance of PSNPE object accorading of snpe build config - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_Build(Snpe_PSNPE_Handle_t psnpeHandle, Snpe_BuildConfig_Handle_t bcHandle); - -/** - * @brief Execute PSNPE object for sync mode. 
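Putting the BuildConfig and PSNPE creation calls above together, a minimal C-API build sequence might look as follows; the DLC container handle and runtime config list are assumed to come from the DlContainer and RuntimeConfigList APIs, which are only included by this header.

// Illustrative only: build a PSNPE instance from a prepared container and runtime list.
#include <cstddef>
#include "SNPE/PSNPE.h"

static Snpe_PSNPE_Handle_t buildPsnpe(Snpe_DlContainer_Handle_t container,
                                      Snpe_RuntimeConfigList_Handle_t runtimes) {
    Snpe_BuildConfig_Handle_t bc = Snpe_BuildConfig_Create();
    Snpe_BuildConfig_SetContainer(bc, container);
    Snpe_BuildConfig_SetRuntimeConfigList(bc, runtimes);
    Snpe_BuildConfig_SetBuildMode(bc, SNPE_PSNPE_BUILDMODE_PARALLEL);
    Snpe_BuildConfig_SetEnableInitCache(bc, 1);       // 1 enables the init cache

    Snpe_PSNPE_Handle_t psnpe = Snpe_PSNPE_Create();
    Snpe_ErrorCode_t status = Snpe_PSNPE_Build(psnpe, bc);
    Snpe_BuildConfig_Delete(bc);                      // config handle is no longer needed

    if (status != SNPE_SUCCESS) {
        Snpe_PSNPE_Delete(psnpe);
        return nullptr;
    }
    return psnpe;                                     // caller releases with Snpe_PSNPE_Delete
}

The create/build/delete ordering here mirrors what the C++ wrapper's PSNPE::build() does further down in this diff, which also releases the BuildConfig handle immediately after Snpe_PSNPE_Build.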
- * - * @param[in] psnpeHandle Handle to access the PSNPE object - * - * @param[in] inputBufferListHandle Handle to access the input user buffer list - * - * @param[in] outputBufferListHandle Handle to access the output user buffer list - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_Execute(Snpe_PSNPE_Handle_t psnpeHandle, - Snpe_UserBufferList_Handle_t inputBufferListHandle, - Snpe_UserBufferList_Handle_t outputBufferListHandle); - -/** - * @brief Execute PSNPE object for input-output async mode - * - * @param[in] psnpeHandle Handle to access the PSNPE object - * - * @param[in] inputMapHandle Handle to access the input buffer map - * - * @param[in] dataIndex The index of input data - * - * @param[in] isTF8buff If the input buffer is TF8 - * - * @param[in] isTF8Outputbuff If the output buffer is TF8 - * - * @return The result error message - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_ExecuteInputOutputAsync(Snpe_PSNPE_Handle_t psnpeHandle, - Snpe_StringList_Handle_t inputMapHandle, - size_t dataIndex, - int isTF8buff, - int isTF8Outputbuff); - -/** - * @brief Get the input tensor names for PSNPE object. - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @return The string list of input tensor names - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_PSNPE_GetInputTensorNames(Snpe_PSNPE_Handle_t psnpeHandle); - -/** - * @brief Get the output tensor names for PSNPE object - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @return The string list of output tensor names - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_PSNPE_GetOutputTensorNames(Snpe_PSNPE_Handle_t psnpeHandle); - -/** - * @brief Get the input dimension shape for PSNPE object - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @return The tensor shape of input dimension - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_PSNPE_GetInputDimensions(Snpe_PSNPE_Handle_t psnpeHandle); - -/** - * @brief Get the input dimension shape for the specific input name for PSNPE object - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @param[in] name The name of input data - * - * @return The tensor shape of a specific input name - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_PSNPE_GetInputDimensions_Name(Snpe_PSNPE_Handle_t psnpeHandle, const char* name); - -/** - * @brief Get the number of elements in each dimension for input and output buffer - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @param[in] name The name of input and output buffer - * - * @return Dimension size - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_PSNPE_GetBufferAttributesDims(Snpe_PSNPE_Handle_t psnpeHandle, const char* name); - -/* To be deprecated, please use new api Snpe_PSNPE_RegisterUserMemoryMappedBuffers */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_RegisterIonBuffers(Snpe_PSNPE_Handle_t psnpeHandle, Snpe_UserMemoryMap_Handle_t ionBufferMapHandle); - -/* To be deprecated, please use new api Snpe_PSNPE_DeregisterUserMemoryMappedBuffers */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_DeregisterIonBuffers(Snpe_PSNPE_Handle_t psnpeHandle, Snpe_StringList_Handle_t ionBufferNames); - -/** - * @brief Register Client Memory-Mapped Buffers (Example ION buffers in Android) - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] bufferMapHandle A UserMemoryMap of virtual addresses - * - * @note UserBuffer type passed for registration must match the data type of the tensor in the dlc - * For regular UserBuffers SNPE performs an online data 
conversion (quantization or - * dequantization etc). This is not possible for memory mapped buffers hence can lead to - * issues during execution or accuracy degradation - * - * @return SNPE_SUCCESS upon successful memory mapped buffer registration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_RegisterUserMemoryMappedBuffers(Snpe_PSNPE_Handle_t psnpeHandle, Snpe_UserMemoryMap_Handle_t bufferMapHandle); - -/** - * @brief Deregister Client Memory-Mapped Buffers (Example ION buffers in Android) - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] bufferNamesHandle A StringList of memory mapped buffer names - * - * @return SNPE_SUCCESS upon successful memory mapped buffer deregistration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_PSNPE_DeregisterUserMemoryMappedBuffers(Snpe_PSNPE_Handle_t psnpeHandle, Snpe_StringList_Handle_t bufferNamesHandle); - -/** - * @brief Get the error message during the failed execution - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @return The error message - */ -SNPE_API -const char* Snpe_PSNPE_GetLastErrorString(Snpe_PSNPE_Handle_t psnpeHandle); - -/** - * @brief Get the handle of IBufferAttributes - * - * @param[in] bcHandle Handle to access the PSNPE object - * - * @param[in] name The name of attribute buffer - * - * @return Handle to access IBufferAttributes - */ -SNPE_API -Snpe_IBufferAttributes_Handle_t Snpe_PSNPE_GetInputOutputBufferAttributes(Snpe_PSNPE_Handle_t psnpeHandle, const char *name); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_PSNPE_H_ diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/PSNPE.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/PSNPE.hpp deleted file mode 100644 index bd3af1ac..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/PSNPE.hpp +++ /dev/null @@ -1,537 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
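The registration functions above combine with the UserMemoryMap from DlSystem/UserMemoryMap.h. A hedged sketch; the tensor name and mapped address are placeholders, and releasing the map handle right after registration is an assumption rather than documented behavior.

// Illustrative only: register and deregister caller-owned memory-mapped buffers.
#include "SNPE/PSNPE.h"
#include "DlSystem/UserMemoryMap.h"

static bool registerMappedInput(Snpe_PSNPE_Handle_t psnpe, void* mappedAddress) {
    Snpe_UserMemoryMap_Handle_t memMap = Snpe_UserMemoryMap_Create();
    Snpe_UserMemoryMap_Add(memMap, "input:0", mappedAddress);   // hypothetical tensor name

    Snpe_ErrorCode_t status = Snpe_PSNPE_RegisterUserMemoryMappedBuffers(psnpe, memMap);
    Snpe_UserMemoryMap_Delete(memMap);                          // assumed safe once registered
    return status == SNPE_SUCCESS;
}

static bool deregisterBuffers(Snpe_PSNPE_Handle_t psnpe, Snpe_StringList_Handle_t names) {
    return Snpe_PSNPE_DeregisterUserMemoryMappedBuffers(psnpe, names) == SNPE_SUCCESS;
}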
-// -//============================================================================= -#pragma once - -#include -#include -#include -#include -#include - - -#include "Wrapper.hpp" - - -#include "DlSystem/DlEnums.hpp" -#include "DlSystem/DlVersion.hpp" -#include "DlSystem/StringList.hpp" -#include "DlSystem/DlOptional.hpp" -#include "DlSystem/IBufferAttributes.hpp" -#include "DlSystem/UserMemoryMap.hpp" - -#include "SNPE/UserBufferList.hpp" -#include "SNPE/ApplicationBufferMap.hpp" -#include "SNPE/RuntimeConfigList.hpp" -#include "DlContainer/IDlContainer.hpp" - -#include "SNPE/RuntimeConfigList.hpp" - - -#include "SNPE/PSNPE.h" - -namespace PSNPE{ - -enum BuildMode { - SERIAL = 0, - PARALLEL = 1 -}; -/** - * @brief Input and output transmission mode - */ -enum InputOutputTransmissionMode { - sync = 0, - outputAsync = 1, - inputOutputAsync = 2 -}; - - -struct OutputAsyncCallbackParam : public Wrapper { -private: - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{NoOpDeleter}; - - - template - using DataIndexReference = WrapperDetail::GenericConstMemberReference - ; - - - template - using ExecuteStatusReference = WrapperDetail::GenericConstMemberReference - >; - - - static std::string ErrMsgGetter(Snpe_DlVersion_Handle_t handle){ - return Snpe_PSNPE_OutputAsyncCallbackParam_GetErrorMsg(handle); - } - template - using ErrorMsgReference = WrapperDetail::GenericConstMemberReference - ; - - template - using CallbackIDReference = WrapperDetail::GenericConstMemberReference - ; - - - - -public: - OutputAsyncCallbackParam() = delete; - OutputAsyncCallbackParam(OutputAsyncCallbackParam&& other) noexcept - : BaseType(std::move(other)) - { } - - DataIndexReference dataIndex{*this}; - ExecuteStatusReference executeStatus{*this}; - ErrorMsgReference errorMsg{*this}; - - CallbackIDReference callbackID{*this}; -}; - - - -struct InputOutputInputAsyncCallbackParam : public Wrapper { -private: - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{NoOpDeleter}; - - - static std::vector GetInputs(HandleType handle){ - DlSystem::StringList inputs(moveHandle(Snpe_PSNPE_InputOutputInputAsyncCallbackParam_GetInputs(handle))); - - return std::vector(inputs.begin(), inputs.end()); - } - - template - using InputsReference = WrapperDetail::GenericConstMemberReference - ; - - - static DlSystem::StringList GetInputNames(HandleType handle){ - return moveHandle(Snpe_PSNPE_InputOutputInputAsyncCallbackParam_GetInputNames(handle)); - } - template - using InputNamesReference = WrapperDetail::GenericConstMemberReference - ; - - template - using CallbackIDReference = WrapperDetail::GenericConstMemberReference - ; - - -public: - InputOutputInputAsyncCallbackParam() = delete; - InputOutputInputAsyncCallbackParam(InputOutputInputAsyncCallbackParam&& other) noexcept - : BaseType(std::move(other)) - { } - - InputsReference> inputs{*this}; - InputNamesReference inputNames{*this}; - CallbackIDReference callbackID{*this}; - -}; - - - - - -struct InputOutputAsyncCallbackParam : public Wrapper { -private: - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment 
operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{NoOpDeleter}; - - template - using DataIndexReference = WrapperDetail::GenericConstMemberReference - ; - - static bool GetExecuteStatus(HandleType handle){ - return Snpe_PSNPE_InputOutputAsyncCallbackParam_GetExecuteStatus(handle); - } - template - using ExecuteStatusReference = WrapperDetail::GenericConstMemberReference - ; - - static std::string ErrMsgGetter(Snpe_DlVersion_Handle_t handle){ - return Snpe_PSNPE_OutputAsyncCallbackParam_GetErrorMsg(handle); - } - template - using ErrorMsgReference = WrapperDetail::GenericConstMemberReference - ; - - - - // This should work - static ApplicationBufferMap GetOutputMap(HandleType handle){ - return moveHandle(Snpe_PSNPE_InputOutputAsyncCallbackParam_GetOutputMap_Ref(handle), true); - } - - template - using OutputMapReference = WrapperDetail::GenericConstMemberReference - ; - - template - using CallbackIDReference = WrapperDetail::GenericConstMemberReference - ; - -public: - - InputOutputAsyncCallbackParam(InputOutputAsyncCallbackParam&& other) noexcept - : BaseType(std::move(other)) - { } - - DataIndexReference dataIndex{*this}; - OutputMapReference outputMap{*this}; /// OOOH, this will be super tricky to not have a copy every time - ExecuteStatusReference executeStatus{*this}; - ErrorMsgReference errorMsg{*this}; - CallbackIDReference callbackID{*this}; -}; - -/** - * @brief This callback is called when the output data is ready, only use for Output Async mode - */ -using OutputAsyncCallbackFunc = std::function; -/** - * @brief This callback is called when the output data is ready, only use for Output-Input Async mode - */ -using InputOutputAsyncCallbackFunc = std::function; -/** - * @brief This callback is called when the input data is ready,only use for Output-Input Async mode - */ -using InputOutputAsyncInputCallback = std::function(InputOutputInputAsyncCallbackParam)>; - - -struct BuildConfig final { - BuildMode buildMode = BuildMode::SERIAL; ///< Specify build in serial mode or parallel mode - zdl::DlContainer::IDlContainer* container;///< The opened container ptr - zdl::DlSystem::StringList outputBufferNames;///< Specify the output layer name - zdl::DlSystem::StringList outputTensors;///< Specify the output layer name - RuntimeConfigList runtimeConfigList;///< The runtime config list for PSNPE, @see RuntimeConfig - size_t inputThreadNumbers = 1;///< Specify the number of threads used in the execution phase to process input data, only used in inputOutputAsync mode - size_t outputThreadNumbers = 1;///< Specify the number of threads used in the execution phase to process output data, only used in inputOutputAsync and outputAsync mode - OutputAsyncCallbackFunc outputCallback;///< The callback to deal with output data ,only used in outputAsync mode - InputOutputAsyncCallbackFunc inputOutputCallback;///< The callback to deal with output data ,only used in inputOutputAsync mode - InputOutputAsyncInputCallback inputOutputInputCallback;///< The callback to deal with input data ,only used in inputOutputAsync mode - InputOutputTransmissionMode inputOutputTransmissionMode = InputOutputTransmissionMode::sync;///< Specify execution mode - zdl::DlSystem::ProfilingLevel_t profilingLevel = zdl::DlSystem::ProfilingLevel_t::OFF;///< Specify profiling level for Diaglog - uint64_t encode[2] = {0, 0}; - bool enableInitCache = false; - std::string platformOptions; - std::string diaglogOutputDir = "./diaglogs/"; ///< Specify a diaglog output directory to save 
the generated Diaglog files. - - size_t callbackID{}; -}; - - - - - -class PSNPE : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_PSNPE_Delete}; -// struct BuildConfigInternal : public Wrapper{ -// -// }; -public: - PSNPE() - : BaseType(Snpe_PSNPE_Create()) - { } - -private: - - template - static std::unordered_map& getCallbackMap(){ - static std::unordered_map toret; - return toret; - } - template - static std::mutex& getCallbackMapMutex(){ - static std::mutex mtx; - return mtx; - } - - static void outputCallbackTrampoline(Snpe_PSNPE_OutputAsyncCallbackParam_Handle_t paramHandle){ - OutputAsyncCallbackParam param(moveHandle(paramHandle)); - std::function callback; - { - std::lock_guard lk(getCallbackMapMutex()); - callback = getCallbackMap()[param.callbackID]; - } - callback(std::move(param)); - } - static void inputOutputCallbackTrampoline(Snpe_PSNPE_InputOutputAsyncCallbackParam_Handle_t paramHandle){ - InputOutputAsyncCallbackParam param(moveHandle(paramHandle)); - std::function callback; - { - std::lock_guard lk(getCallbackMapMutex()); - callback = getCallbackMap()[param.callbackID]; - } - callback(std::move(param)); - } - - static Snpe_ApplicationBufferMap_Handle_t inputOutputInputCallbackTrampoline( - Snpe_PSNPE_InputOutputInputAsyncCallbackParam_Handle_t paramHandle - ){ - InputOutputInputAsyncCallbackParam param(moveHandle(paramHandle)); - - std::function(InputOutputInputAsyncCallbackParam)> callback; - { - std::lock_guard lk(getCallbackMapMutex()); - callback = getCallbackMap()[param.callbackID]; - } - auto abm = callback(std::move(param)); - return WrapperDetail::HandleReleaser::release(*abm); - } - - template - class CallbackIdManager{ - public: - ~CallbackIdManager(){ - clear(); - } - std::pair registerCallback(WrapperCallbackType func){ - size_t id = get(); - - std::lock_guard lk(getCallbackMapMutex()); - getCallbackMap()[id] = std::move(func); - return {id, CapiCallback}; - } - private: - size_t m_CallbackId{}; - - void clear(){ - if(m_CallbackId){ - std::lock_guard lk(getCallbackMapMutex()); - getCallbackMap().erase(m_CallbackId); - } - } - - size_t get(){ - static std::atomic id{0}; - clear(); - m_CallbackId = ++id; - return m_CallbackId; - } - - }; - CallbackIdManager outputCallbackIdManager; - - CallbackIdManager inputOutputCallbackIdManager; - - CallbackIdManager inputOutputInputCallbackIdManager; - - -public: - - - - bool build(BuildConfig& buildConfig) noexcept{ - // Copy the BuildConfig across the CAPI boundary - - Snpe_BuildConfig_Handle_t bcHandle = Snpe_BuildConfig_Create(); - - Snpe_BuildConfig_SetBuildMode(bcHandle, static_cast(buildConfig.buildMode)); - Snpe_BuildConfig_SetContainer(bcHandle, getHandle(buildConfig.container)); - Snpe_BuildConfig_SetOutputBufferNames(bcHandle, getHandle(buildConfig.outputBufferNames)); - Snpe_BuildConfig_SetOutputTensors(bcHandle, getHandle(buildConfig.outputTensors)); - Snpe_BuildConfig_SetRuntimeConfigList(bcHandle, getHandle(buildConfig.runtimeConfigList)); - - Snpe_BuildConfig_SetInputThreadNumbers(bcHandle, buildConfig.inputThreadNumbers); - Snpe_BuildConfig_SetOutputThreadNumbers(bcHandle, buildConfig.outputThreadNumbers); - - - if(buildConfig.outputCallback){ - auto id_callback = outputCallbackIdManager.registerCallback(buildConfig.outputCallback); - Snpe_BuildConfig_SetOutputCallbackID(bcHandle, 
id_callback.first); - Snpe_BuildConfig_SetOutputCallback(bcHandle, id_callback.second); - } - - if(buildConfig.inputOutputCallback){ - auto id_callback = inputOutputCallbackIdManager.registerCallback(buildConfig.inputOutputCallback); - Snpe_BuildConfig_SetInputOutputCallbackID(bcHandle, id_callback.first); - Snpe_BuildConfig_SetInputOutputCallback(bcHandle, id_callback.second); - } - - if(buildConfig.inputOutputInputCallback){ - auto id_callback = inputOutputInputCallbackIdManager.registerCallback(buildConfig.inputOutputInputCallback); - Snpe_BuildConfig_SetInputOutputInputCallbackID(bcHandle, id_callback.first); - Snpe_BuildConfig_SetInputOutputInputCallback(bcHandle, id_callback.second); - } - - - Snpe_BuildConfig_SetInputOutputTransmissionMode(bcHandle, - static_cast(buildConfig.inputOutputTransmissionMode)); - - Snpe_BuildConfig_SetProfilingLevel(bcHandle, static_cast(buildConfig.profilingLevel)); - Snpe_BuildConfig_SetEncode(bcHandle, buildConfig.encode[0], buildConfig.encode[1]); - Snpe_BuildConfig_SetEnableInitCache(bcHandle, buildConfig.enableInitCache); - Snpe_BuildConfig_SetPlatformOptions(bcHandle, buildConfig.platformOptions.c_str()); - Snpe_BuildConfig_SetDiaglogOutputDir(bcHandle, buildConfig.diaglogOutputDir.c_str()); - - - auto status = Snpe_PSNPE_Build(handle(), bcHandle); - Snpe_BuildConfig_Delete(bcHandle); - - - return status == SNPE_SUCCESS; - } - - /** - * @brief Execute snpe instances in Async Output mode and Sync mode - * - * @param[in] inputBufferList A list of user buffers that contains the input data - * - * @param[in,out] outputBufferList A list of user buffers that will hold the output data - * - */ - bool execute(UserBufferList& inputBufferList, UserBufferList& outputBufferList) noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_Execute(handle(), getHandle(inputBufferList), getHandle(outputBufferList)); - } - - /** - * @brief Execute snpe instances in Async Input/Output mode - * - * @param[in]inputMap A map of input buffers that contains input data. The names of buffers - * need to be matched with names retrived through getInputTensorNames() - * - * @param dataIndex Index of the input data - * - * @param isTF8buff Whether prefer to using 8 bit quantized element for inference - * - * @return True if executed successfully; flase, otherwise. - */ - bool executeInputOutputAsync(const DlSystem::StringList& inputMap, size_t dataIndex, bool isTF8buff, bool isTF8Outputbuff) noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_ExecuteInputOutputAsync(handle(), getHandle(inputMap), dataIndex, isTF8buff, isTF8Outputbuff); - } - bool executeInputOutputAsync(const std::vector& inputMap, size_t dataIndex, bool isTF8buff, bool isTF8Outputbuff) noexcept{ - DlSystem::StringList sl(inputMap.size()); - for(auto&& e : inputMap) sl.append(e.c_str()); - return executeInputOutputAsync(sl, dataIndex, isTF8buff, isTF8Outputbuff); - } - - bool executeInputOutputAsync(const DlSystem::StringList& inputMap, size_t dataIndex, bool isTF8buff) noexcept{ - return executeInputOutputAsync(inputMap, dataIndex, isTF8buff, isTF8buff); - } - bool executeInputOutputAsync(const std::vector& inputMap, size_t dataIndex, bool isTF8buff) noexcept{ - return executeInputOutputAsync(inputMap, dataIndex, isTF8buff, isTF8buff); - } - - - - /** - * @brief Returns the input layer names of the network. 
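A sketch of the same flow through the C++ wrapper: populate a BuildConfig, build, then run synchronously. The container, runtime list, and user-buffer lists are assumed to be prepared elsewhere (IDlContainer, RuntimeConfigList, UserBufferList), the PSNPE-namespace spelling of UserBufferList is inferred from the execute() signature above, and moving the runtime list relies on the move support the wrapper advertises.

// Illustrative only: wrapper-based build + synchronous execute.
#include <iostream>
#include <utility>
#include "SNPE/PSNPE.hpp"

static bool runSync(zdl::DlContainer::IDlContainer* container,
                    PSNPE::RuntimeConfigList&& runtimes,
                    PSNPE::UserBufferList& inputs,
                    PSNPE::UserBufferList& outputs) {
    PSNPE::BuildConfig config;
    config.buildMode = PSNPE::BuildMode::PARALLEL;
    config.container = container;
    config.runtimeConfigList = std::move(runtimes);   // move-assign, per the wrapper's base class
    config.inputOutputTransmissionMode = PSNPE::InputOutputTransmissionMode::sync;
    config.enableInitCache = true;

    PSNPE::PSNPE psnpe;
    if (!psnpe.build(config)) return false;

    for (const char* name : psnpe.getInputTensorNames())
        std::cout << "input tensor: " << name << "\n";

    return psnpe.execute(inputs, outputs);
}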
- * - * @return StringList which contains the input layer names - */ - const DlSystem::StringList getInputTensorNames() const noexcept{ - return moveHandle(Snpe_PSNPE_GetInputTensorNames(handle())); - } - - /** - * @brief Returns the output layer names of the network. - * - * @return StringList which contains the output layer names - */ - const DlSystem::StringList getOutputTensorNames() const noexcept{ - return moveHandle(Snpe_PSNPE_GetOutputTensorNames(handle())); - } - - /** - * @brief Returns the input tensor dimensions of the network. - * - * @return TensorShape which contains the dimensions. - */ - const DlSystem::TensorShape getInputDimensions() const noexcept{ - return moveHandle(Snpe_PSNPE_GetInputDimensions(handle())); - } - - const zdl::DlSystem::TensorShape getInputDimensions(const char *name) const noexcept{ - return moveHandle(Snpe_PSNPE_GetInputDimensions_Name(handle(), name)); - } - - /** - * @brief Returns attributes of buffers. - * - * @see zdl::SNPE - * - * @return BufferAttributes of input/output tensor named. - */ - zdl::DlSystem::TensorShape getBufferAttributesDims(const char *name) const noexcept{ - return moveHandle(Snpe_PSNPE_GetBufferAttributesDims(handle(), name)); - } - - DlSystem::Optional getInputOutputBufferAttributes(const char *name) const noexcept{ - return { - new DlSystem::IBufferAttributes(moveHandle(Snpe_PSNPE_GetInputOutputBufferAttributes(handle(), name))), - DlSystem::Optional::LIFECYCLE::POINTER_OWNED - }; - } - /* To be deprecated, please use new api registerMemoryMappedBuffers */ - bool registerIonBuffers(const DlSystem::UserMemoryMap& ionBufferMap) const noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_RegisterIonBuffers(handle(), getHandle(ionBufferMap)); - } - /* To be deprecated, please use new api deregisterMemoryMappedBuffers */ - bool deregisterIonBuffers(const DlSystem::StringList& ionBufferNames) const noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_DeregisterIonBuffers(handle(), getHandle(ionBufferNames)); - } - - bool registerMemoryMappedBuffers(const DlSystem::UserMemoryMap& memoryMappedBufferMap) noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_RegisterUserMemoryMappedBuffers(handle(), getHandle(memoryMappedBufferMap)); - } - - bool deregisterMemoryMappedBuffers(const DlSystem::StringList& bufferNames) noexcept{ - return SNPE_SUCCESS == Snpe_PSNPE_DeregisterUserMemoryMappedBuffers(handle(), getHandle(bufferNames)); - } - - const char* getLastErrorString(){ - return Snpe_PSNPE_GetLastErrorString(handle()); - } - -private: - PSNPE(const PSNPE&) = delete; - PSNPE& operator=(const PSNPE&) = delete; - -}; - -} // ns PSNPE - - - -ALIAS_IN_ZDL_NAMESPACE(PSNPE, BuildMode) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, InputOutputTransmissionMode) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, OutputAsyncCallbackParam) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, InputOutputAsyncCallbackParam) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, InputOutputInputAsyncCallbackParam) - -ALIAS_IN_ZDL_NAMESPACE(PSNPE, OutputAsyncCallbackFunc) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, InputOutputAsyncCallbackFunc) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, InputOutputAsyncInputCallback) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, BuildConfig) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, PSNPE) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.h deleted file mode 100644 index 59295d59..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.h +++ /dev/null @@ -1,118 +0,0 @@ -// -*- 
mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef _SNPE_RUNTIME_CONFIG_LIST_H_ -#define _SNPE_RUNTIME_CONFIG_LIST_H_ - - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" - -#include "DlSystem/DlEnums.h" -#include "DlSystem/RuntimeList.h" -#include "DlSystem/TensorShapeMap.h" - - -#ifdef __cplusplus -extern "C" { -#endif - -typedef void* Snpe_RuntimeConfig_Handle_t; - -SNPE_API -Snpe_RuntimeConfig_Handle_t Snpe_RuntimeConfig_Create(); - -SNPE_API -Snpe_RuntimeConfig_Handle_t Snpe_RuntimeConfig_CreateCopy(Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_Delete(Snpe_RuntimeConfig_Handle_t rcHandle); - - -SNPE_API -Snpe_Runtime_t Snpe_RuntimeConfig_GetRuntime(Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_SetRuntime(Snpe_RuntimeConfig_Handle_t rcHandle, Snpe_Runtime_t runtime); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_SetRuntimeList(Snpe_RuntimeConfig_Handle_t rcHandle, Snpe_RuntimeList_Handle_t rlHandle); - -SNPE_API -Snpe_RuntimeList_Handle_t Snpe_RuntimeConfig_GetRuntimeList_Ref(Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_PerformanceProfile_t Snpe_RuntimeConfig_GetPerformanceProfile(Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_SetPerformanceProfile(Snpe_RuntimeConfig_Handle_t rcHandle, Snpe_PerformanceProfile_t perfProfile); - -SNPE_API -int Snpe_RuntimeConfig_GetEnableCPUFallback(Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_SetEnableCPUFallback(Snpe_RuntimeConfig_Handle_t rcHandle, int enableCpuFallback); - - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfig_SetInputDimensionsMap(Snpe_RuntimeConfig_Handle_t rcHandle, Snpe_TensorShapeMap_Handle_t tsmHandle); - -SNPE_API -Snpe_TensorShapeMap_Handle_t Snpe_RuntimeConfig_GetInputDimensionsMap_Ref(Snpe_RuntimeConfig_Handle_t rcHandle); - - - -typedef void* Snpe_RuntimeConfigList_Handle_t; - -SNPE_API -Snpe_RuntimeConfigList_Handle_t Snpe_RuntimeConfigList_Create(); - -SNPE_API -Snpe_RuntimeConfigList_Handle_t Snpe_RuntimeConfigList_CreateSize(size_t size); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfigList_Delete(Snpe_RuntimeConfigList_Handle_t rclHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfigList_PushBack(Snpe_RuntimeConfigList_Handle_t rclHandle, Snpe_RuntimeConfig_Handle_t rcHandle); - -SNPE_API -Snpe_RuntimeConfig_Handle_t Snpe_RuntimeConfigList_At_Ref(Snpe_RuntimeConfigList_Handle_t rclHandle, size_t idx); - -SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfigList_Assign(Snpe_RuntimeConfigList_Handle_t rclSrcHandle, Snpe_RuntimeConfigList_Handle_t rclDstHandle); - -SNPE_API -size_t Snpe_RuntimeConfigList_Size(Snpe_RuntimeConfigList_Handle_t rclHandle); - -SNPE_API -size_t Snpe_RuntimeConfigList_Capacity(Snpe_RuntimeConfigList_Handle_t rclHandle); - 
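// --- Editor's illustrative sketch (not part of the deleted header) ---
// Builds a one-entry runtime configuration list using only the C functions declared
// above. Runtime and performance-profile enumerators live in DlSystem/DlEnums.h,
// which is not reproduced here, so this sketch only toggles the integer
// CPU-fallback flag and leaves the other fields at their defaults.
#include <cstdio>
#include "SNPE/RuntimeConfigList.h"

Snpe_RuntimeConfigList_Handle_t makeDefaultConfigList() {
    Snpe_RuntimeConfig_Handle_t rc = Snpe_RuntimeConfig_Create();
    Snpe_RuntimeConfig_SetEnableCPUFallback(rc, 1);      // allow CPU fallback

    Snpe_RuntimeConfigList_Handle_t list = Snpe_RuntimeConfigList_Create();
    Snpe_RuntimeConfigList_PushBack(list, rc);
    // The C++ wrapper below pushes stack-owned configs, which suggests the list
    // stores its own copy; the local handle can therefore be released here.
    Snpe_RuntimeConfig_Delete(rc);

    std::printf("runtime configs: %zu\n", Snpe_RuntimeConfigList_Size(list));
    return list;   // caller releases with Snpe_RuntimeConfigList_Delete
}
// --- end sketch ---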
-SNPE_API -Snpe_ErrorCode_t Snpe_RuntimeConfigList_Clear(Snpe_RuntimeConfigList_Handle_t rclHandle); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_RUNTIME_CONFIG_LIST_H_ diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.hpp deleted file mode 100644 index faf052c5..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/RuntimeConfigList.hpp +++ /dev/null @@ -1,153 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - - -#include "DlSystem/DlEnums.hpp" -#include "DlSystem/RuntimeList.hpp" -#include "DlSystem/TensorShapeMap.hpp" - - -#include "SNPE/RuntimeConfigList.h" - -namespace PSNPE { - - - -struct RuntimeConfig : public Wrapper { -private: - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_RuntimeConfig_Delete}; - - template - using RuntimeReference = WrapperDetail::GenericMemberReference - , - CastingSetter >; - - - template - using RuntimeListReference = WrapperMemberReference< - RuntimeListType, - Snpe_RuntimeList_Handle_t, - Snpe_RuntimeConfig_GetRuntimeList_Ref, - Snpe_RuntimeConfig_SetRuntimeList - >; - - template - using InputDimensionsMapReference = WrapperMemberReference< - InputDimensionsMapType, - Snpe_TensorShapeMap_Handle_t, - Snpe_RuntimeConfig_GetInputDimensionsMap_Ref, - Snpe_RuntimeConfig_SetInputDimensionsMap - >; - - template - using PerfProfileReference = WrapperDetail::GenericMemberReference - , - CastingSetter >; - - template - using EnableCPUFallbackReference = WrapperDetail::GenericMemberReference - , - CastingSetter >; - -public: - RuntimeConfig() - : BaseType(Snpe_RuntimeConfig_Create()) - { } - RuntimeConfig(const RuntimeConfig& other) - : BaseType(Snpe_RuntimeConfig_CreateCopy(other.handle())) - { } - - RuntimeConfig(RuntimeConfig&& other) noexcept - : BaseType(std::move(other)) - { } - - RuntimeConfig& operator=(RuntimeConfig&& other) noexcept{ - return moveAssign(std::move(other)); - } - - - RuntimeReference runtime{*this, DlSystem::Runtime_t::CPU_FLOAT32}; - RuntimeListReference runtimeList{*this}; - PerfProfileReference perfProfile{*this, DlSystem::PerformanceProfile_t::HIGH_PERFORMANCE}; - InputDimensionsMapReference inputDimensionsMap{*this}; - EnableCPUFallbackReference enableCPUFallback{*this, false}; - -}; - - -class RuntimeConfigList : public Wrapper { -private: - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_RuntimeConfigList_Delete}; - -public: - RuntimeConfigList() - : BaseType(Snpe_RuntimeConfigList_Create()) - { } - RuntimeConfigList(size_t size) - : BaseType(Snpe_RuntimeConfigList_CreateSize(size)) - { } - - RuntimeConfigList(RuntimeConfigList&& other) noexcept - : BaseType(std::move(other)) - { } - - RuntimeConfigList& 
operator=(RuntimeConfigList&& other) noexcept{ - return moveAssign(std::move(other)); - } - RuntimeConfigList& operator=(const RuntimeConfigList& other){ - Snpe_RuntimeConfigList_Assign(other.handle(), handle()); - return *this; - } - - - - void push_back(const RuntimeConfig& runtimeConfig){ - Snpe_RuntimeConfigList_PushBack(handle(), getHandle(runtimeConfig)); - } - - RuntimeConfig& operator[](size_t index){ - return *makeReference(Snpe_RuntimeConfigList_At_Ref(handle(), index)); - } - const RuntimeConfig& operator[](size_t index) const{ - return *makeReference(Snpe_RuntimeConfigList_At_Ref(handle(), index)); - } - - size_t size() const noexcept{ - return Snpe_RuntimeConfigList_Size(handle()); - } - size_t capacity() const noexcept{ - return Snpe_RuntimeConfigList_Capacity(handle()); - } - - void clear() noexcept{ - Snpe_RuntimeConfigList_Clear(handle()); - } - -}; - -} // ns PSNPE - - -ALIAS_IN_ZDL_NAMESPACE(PSNPE, RuntimeConfig) -ALIAS_IN_ZDL_NAMESPACE(PSNPE, RuntimeConfigList) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPE.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPE.h deleted file mode 100644 index eb05473a..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPE.h +++ /dev/null @@ -1,336 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================= -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= - -/** - * @file - */ - -#ifndef _SNPE_SNPE_H_ -#define _SNPE_SNPE_H_ - - -#include "DlSystem/IBufferAttributes.h" -#include "DlSystem/ITensor.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/TensorMap.h" -#include "DlSystem/StringList.h" -#include "DlSystem/IUserBuffer.h" -#include "DlSystem/UserBufferMap.h" -#include "DlSystem/UserMemoryMap.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" - -#include "DiagLog/IDiagLog.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * A typedef to indicate a SNPE handle - */ -typedef void* Snpe_SNPE_Handle_t; - -/** - * Destroys/frees a SNPE object - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_Delete(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Gets the names of input tensors to the network - * - * To support multiple input scenarios, where multiple tensors are - * passed through execute() in a TensorMap, each tensor needs to - * be uniquely named. The names of tensors can be retrieved - * through this function. - * - * In the case of a single input, one name will be returned. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @return A StringList of input tensor names. 
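// --- Editor's illustrative sketch (not part of the deleted header) ---
// Wrapper-level counterpart of the RuntimeConfig/RuntimeConfigList classes shown
// above: the member-reference fields forward assignment to the C setters, and the
// enumerator values used are the defaults visible in the struct definition. The
// include path is an assumption based on the zdl/ layout.
#include "SNPE/RuntimeConfigList.hpp"   // assumed include path

PSNPE::RuntimeConfigList makeConfigList() {
    PSNPE::RuntimeConfig config;   // defaults: CPU_FLOAT32, HIGH_PERFORMANCE, no CPU fallback
    config.runtime = DlSystem::Runtime_t::CPU_FLOAT32;
    config.perfProfile = DlSystem::PerformanceProfile_t::HIGH_PERFORMANCE;
    config.enableCPUFallback = true;

    PSNPE::RuntimeConfigList configs;
    configs.push_back(config);     // the list keeps its own entry
    return configs;                // returned via the move constructor shown above
}
// --- end sketch ---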
- * - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_SNPE_GetInputTensorNames(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Gets the names of output tensors to the network - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @return List of output tensor names. - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_SNPE_GetOutputTensorNames(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Gets the names of output tensor from the input layer name - * - * @param[in] snpeHandle Handle to access the SNPE object - * @param[in] name Layer name - * - * @return Output tensor names. - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_SNPE_GetOutputTensorNamesByLayerName(Snpe_SNPE_Handle_t snpeHandle, const char* name); - - -/** - * @brief Processes the input data and returns the output - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] inputHandle A map of tensors that contains the input data for - * each input. The names of tensors needs to be - * matched with names retrieved through - * getInputTensorNames() - * - * @param[in,out] outputHandle An empty map of tensors that will contain the output - * data of potentially multiple layers (the key - * in the map is the layer name) upon return - * - * @note output TensorMap has to be empty. To forward propagate - * and get results in user-supplied tensors, use - * Snpe_SNPE_ExecuteUserBuffers(). - * - * @return SNPE_SUCCESS upon successful execution - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_ExecuteITensors(Snpe_SNPE_Handle_t snpeHandle, Snpe_TensorMap_Handle_t inputHandle, Snpe_TensorMap_Handle_t outputHandle); - -/** - * @brief Processes the input data and returns the output - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] inputHandle A single tensor contains the input data. - * - * @param[in,out] outputHandle An empty map of tensors that will contain the output - * data of potentially multiple layers (the key - * in the map is the layer name) upon return - * - * @note output TensorMap has to be empty. To forward propagate - * and get results in user-supplied tensors, use - * Snpe_SNPE_ExecuteUserBuffers. - * - * @return SNPE_SUCCESS upon successful execution - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_ExecuteITensor(Snpe_SNPE_Handle_t snpeHandle, Snpe_ITensor_Handle_t inputHandle, Snpe_TensorMap_Handle_t outputHandle); - -/** - * @brief Processes the input data and returns the output, using - * user-supplied buffers - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] inputHandle A map of UserBuffers that contains the input data for - * each input. The names of UserBuffers needs to be - * matched with names retrieved through - * getInputTensorNames() - * - * @param[in,out] outputHandle A map of UserBuffers that will hold the output - * data of potentially multiple layers (the key - * in the map is the UserBuffer name) - * - * @note input and output UserBuffer maps must be fully pre-populated. with - * dimensions matching what the network expects. - * For example, if there are 5 output UserBuffers they all have to be - * present in map. - * - * Caller must guarantee that for the duration of execute(), the buffer - * stored in UserBuffer would remain valid. For more detail on buffer - * ownership and lifetime requirements, please refer to zdl::DlSystem::UserBuffer - * documentation. 
- * - * @return SNPE_SUCCESS upon successful execution - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_ExecuteUserBuffers(Snpe_SNPE_Handle_t snpeHandle, Snpe_UserBufferMap_Handle_t inputHandle, Snpe_UserBufferMap_Handle_t outputHandle); - - -/** - * @brief Register Client ION Buffers - * - * @note To be deprecated, please use new api Snpe_SNPE_RegisterUserMemoryMappedBuffers - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] ionBufferMapHandle A UserMemoryMap of virtual addresses - * - * @return SNPE_SUCCESS upon successful ION Buffer registration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_RegisterIonBuffers(Snpe_SNPE_Handle_t snpeHandle, Snpe_UserMemoryMap_Handle_t ionBufferMapHandle); - -/** - * @brief Deregister Client ION Buffers - * - * @note To be deprecated, please use new api Snpe_SNPE_DeregisterUserMemoryMappedBuffers - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] ionBufferNamesHandle A StringList of ION Buffer names - * - * @return SNPE_SUCCESS upon successful ION Buffer deregistration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_DeregisterIonBuffers(Snpe_SNPE_Handle_t snpeHandle, Snpe_StringList_Handle_t ionBufferNamesHandle); - -/** - * @brief Register Client Memory-Mapped Buffers (Example ION buffers in Android) - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] bufferMapHandle A UserMemoryMap of virtual addresses - * - * @note UserBuffer type passed for registration must match the data type of the tensor in the dlc - * For regular UserBuffers SNPE performs an online data conversion (quantization or - * dequantization etc). This is not possible for memory mapped buffers hence can lead to - * issues during execution or accuracy degradation - * - * @return SNPE_SUCCESS upon successful memory mapped buffer registration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_RegisterUserMemoryMappedBuffers(Snpe_SNPE_Handle_t snpeHandle, Snpe_UserMemoryMap_Handle_t bufferMapHandle); - -/** - * @brief Deregister Client Memory-Mapped Buffers (Example ION buffers in Android) - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] bufferNamesHandle A StringList of memory mapped buffer names - * - * @return SNPE_SUCCESS upon successful memory mapped buffer deregistration - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPE_DeregisterUserMemoryMappedBuffers(Snpe_SNPE_Handle_t snpeHandle, Snpe_StringList_Handle_t bufferNamesHandle); - -/** - * @brief Returns the version string embedded at model conversion - * time. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @return Model version string, which is a free-form string - * supplied at the time of the conversion - * - */ -SNPE_API -const char* Snpe_SNPE_GetModelVersion(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Returns the dimensions of the input data to the model in the - * form of TensorShape. The dimensions in TensorShape corresponds to - * what the tensor dimensions would need to be for an input tensor to - * the model. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] name input name. - * - * @note Note that this function only makes sense for networks - * that have a fixed input size. For networks in which the - * input size varies with each call of Execute(), this - * function should not be used. - * - * @return a TensorShape that maintains dimensions, - * matching the tensor dimensions for input to the model, - * where the last entry is the fastest varying dimension, etc. 
- * - * @see Snpe_ITensor_Handle_t - * @see Snpe_TensorShape_Handle_t - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_SNPE_GetInputDimensions(Snpe_SNPE_Handle_t snpeHandle, const char* name); - -/** - * @brief Returns the dimensions of the first input's data to the model in the - * form of TensorShape. The dimensions in TensorShape corresponds to - * what the tensor dimensions would need to be for an input tensor to - * the model. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @note Note that this function only makes sense for networks - * that have a fixed input size. For networks in which the - * input size varies with each call of Execute(), this - * function should not be used. - * - * @return a TensorShape that maintains dimensions, - * matching the tensor dimensions for first input to the model, - * where the last entry is the fastest varying dimension, etc. - * - * @see Snpe_ITensor_Handle_t - * @see Snpe_TensorShape_Handle_t - */ -SNPE_API -Snpe_TensorShape_Handle_t Snpe_SNPE_GetInputDimensionsOfFirstTensor(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Gets the output layer(s) for the network. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @note The output layers returned by this function may be - * different than those specified when the network was created - * via the @ref CAPI_SNPEBuilder "SNPEBuilder". For example, if the - * network was created in debug mode with no explicit output - * layers specified, this will contain all layers. - * - * - * @return A StringList of output layer names. - */ -SNPE_API -Snpe_StringList_Handle_t Snpe_SNPE_GetOutputLayerNames(Snpe_SNPE_Handle_t snpeHandle); - -/** - * @brief Returns attributes of buffers used to feed input tensors and receive result from output tensors. - * - * @param[in] snpeHandle Handle to access the SNPE object - * - * @param[in] name Tensor name. - * - * @return BufferAttributes of input/output tensor named - */ -SNPE_API -Snpe_IBufferAttributes_Handle_t Snpe_SNPE_GetInputOutputBufferAttributes(Snpe_SNPE_Handle_t snpeHandle, const char *name); - -/** - * @brief . - * - * Get the diagnostic logging interface - * - * @param[in] snpeHandle Handle to access the SNPE object - * - */ -SNPE_API -Snpe_IDiagLog_Handle_t Snpe_SNPE_GetDiagLogInterface_Ref(Snpe_SNPE_Handle_t snpeHandle); - - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPE.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPE.hpp deleted file mode 100644 index d4ad18df..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPE.hpp +++ /dev/null @@ -1,125 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
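// --- Editor's illustrative sketch (not part of the deleted header) ---
// Minimal C-API usage of the SNPE handle declared above: given an already created
// Snpe_SNPE_Handle_t and fully pre-populated user-buffer maps (as the
// Snpe_SNPE_ExecuteUserBuffers note requires), log the model version and run one
// inference. Creating the handle and the buffer maps is out of scope here.
#include <cstdio>
#include "SNPE/SNPE.h"

bool executeOnce(Snpe_SNPE_Handle_t snpe,
                 Snpe_UserBufferMap_Handle_t inputs,
                 Snpe_UserBufferMap_Handle_t outputs) {
    const char* version = Snpe_SNPE_GetModelVersion(snpe);
    std::printf("model version: %s\n", version ? version : "(none)");
    return Snpe_SNPE_ExecuteUserBuffers(snpe, inputs, outputs) == SNPE_SUCCESS;
}
// --- end sketch ---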
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "DlSystem/StringList.hpp" -#include "DlSystem/TensorMap.hpp" -#include "DlSystem/UserBufferMap.hpp" -#include "DlSystem/UserMemoryMap.hpp" -#include "DlSystem/IBufferAttributes.hpp" -#include "DiagLog/IDiagLog.hpp" - -#include "DlSystem/DlOptional.hpp" - - -#include "SNPE/SNPE.h" - -namespace SNPE{ - -class SNPE : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_SNPE_Delete}; - - template - static DlSystem::Optional makeOptional(H handle){ - return DlSystem::Optional(T(moveHandle(handle))); - } -public: - - DlSystem::Optional getInputTensorNames() const noexcept{ - return makeOptional(Snpe_SNPE_GetInputTensorNames(handle())); - } - - DlSystem::Optional getOutputTensorNames() const noexcept{ - return makeOptional(Snpe_SNPE_GetOutputTensorNames(handle())); - } - - DlSystem::StringList getOutputTensorNamesByLayerName(const char *name) const noexcept{ - return moveHandle(Snpe_SNPE_GetOutputTensorNamesByLayerName(handle(), name)); - } - - bool execute(const DlSystem::TensorMap& input, DlSystem::TensorMap& output) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_ExecuteITensors(handle(), getHandle(input), getHandle(output)); - } - - - bool execute(const DlSystem::ITensor* input, DlSystem::TensorMap& output) noexcept{ - if(!input) return false; - return SNPE_SUCCESS == Snpe_SNPE_ExecuteITensor(handle(), getHandle(*input), getHandle(output)); - } - - bool execute(const DlSystem::UserBufferMap& input, const DlSystem::UserBufferMap& output) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_ExecuteUserBuffers(handle(), getHandle(input), getHandle(output)); - } - - - /* To be deprecated, please use new api registerMemoryMappedBuffers */ - bool registerIonBuffers(const DlSystem::UserMemoryMap& ionBufferMap) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_RegisterUserMemoryMappedBuffers(handle(), getHandle(ionBufferMap)); - } - - /* To be deprecated, please use new api deregisterMemoryMappedBuffers */ - bool deregisterIonBuffers(const DlSystem::StringList& ionBufferNames) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_DeregisterUserMemoryMappedBuffers(handle(), getHandle(ionBufferNames)); - } - - bool registerMemoryMappedBuffers(const DlSystem::UserMemoryMap& memoryMappedBufferMap) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_RegisterUserMemoryMappedBuffers(handle(), getHandle(memoryMappedBufferMap)); - } - - bool deregisterMemoryMappedBuffers(const DlSystem::StringList& bufferNames) noexcept{ - return SNPE_SUCCESS == Snpe_SNPE_DeregisterUserMemoryMappedBuffers(handle(), getHandle(bufferNames)); - } - - std::string getModelVersion() const{ - auto str = Snpe_SNPE_GetModelVersion(handle()); - return str ? 
str : ""; - } - - DlSystem::Optional getInputDimensions() const noexcept{ - return makeOptional(Snpe_SNPE_GetInputDimensionsOfFirstTensor(handle())); - } - - DlSystem::Optional getInputDimensions(const char* name) const noexcept{ - return makeOptional(Snpe_SNPE_GetInputDimensions(handle(), name)); - } - - DlSystem::Optional getOutputLayerNames() const noexcept{ - return makeOptional(Snpe_SNPE_GetOutputLayerNames(handle())); - } - - - DlSystem::Optional getInputOutputBufferAttributes(const char* name) const noexcept{ - return DlSystem::Optional( - new DlSystem::IBufferAttributes(moveHandle(Snpe_SNPE_GetInputOutputBufferAttributes(handle(), name))), - DlSystem::Optional::LIFECYCLE::POINTER_OWNED - ); - } - - DlSystem::Optional getDiagLogInterface() noexcept{ - auto diagLogHandle = Snpe_SNPE_GetDiagLogInterface_Ref(handle()); - if(!diagLogHandle) return {}; - // Bind lifespan of this reference to this object - auto toret = makeReference(diagLogHandle); - return {toret, DlSystem::Optional::LIFECYCLE::POINTER_NOT_OWNED}; - } - -private: - SNPE(const SNPE&) = delete; - SNPE& operator=(const SNPE&) = delete; - -}; - -} // ns SNPE - -ALIAS_IN_ZDL_NAMESPACE(SNPE, SNPE) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.h deleted file mode 100644 index 6adcebad..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.h +++ /dev/null @@ -1,334 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _SNPE_BUILDER_H_ -#define _SNPE_BUILDER_H_ - -#include "SNPE/SNPE.h" -#include "DlSystem/DlEnums.h" -#include "DlSystem/DlError.h" -#include "DlSystem/IOBufferDataTypeMap.h" -#include "DlSystem/TensorShapeMap.h" -#include "DlSystem/RuntimeList.h" -#include "DlSystem/PlatformConfig.h" -#include "DlContainer/DlContainer.h" - -#ifdef __cplusplus -extern "C" { -#endif - - - -/** - * A typedef to indicate a SNPEBuilder handle - */ -typedef void* Snpe_SNPEBuilder_Handle_t; - -/** - * The builder class for creating SNPE objects. - * Not meant to be extended. - */ - - -/** - * @brief Constructor of NeuralNetwork Builder ith a supplied model. - * - * @param[in] containerHandle A DlContainer holding the model. - * - * @return A new instance of a SNPEBuilder object - * that can be used to configure and build - * an instance of SNPE. - * - */ -SNPE_API -Snpe_SNPEBuilder_Handle_t Snpe_SNPEBuilder_Create(Snpe_DlContainer_Handle_t containerHandle); - -/** - * Destroys/frees a SNPEBuilder object - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @return SNPE_SUCCESS if Delete operation successful. - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_Delete(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle); - -/** - * @brief Requests a performance profile. 
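// --- Editor's illustrative sketch (not part of the deleted header) ---
// Wrapper-level counterpart of the C execute call sketched earlier, using the
// SNPE::SNPE class shown above. The SNPE object is assumed to be built elsewhere
// and both UserBufferMap objects populated; the include path is an assumption.
#include <cstdio>
#include "SNPE/SNPE.hpp"   // assumed include path

bool executeOnceWrapper(SNPE::SNPE& snpe,
                        const DlSystem::UserBufferMap& inputs,
                        const DlSystem::UserBufferMap& outputs) {
    std::printf("model version: %s\n", snpe.getModelVersion().c_str());
    return snpe.execute(inputs, outputs);
}
// --- end sketch ---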
- * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] performanceProfile The target performance profile. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetPerformanceProfile(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_PerformanceProfile_t performanceProfile); - -/** - * @brief Sets the profiling level. Default profiling level for - * SNPEBuilder is off. Off and basic only applies to DSP runtime. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] profilingLevel The target profiling level. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetProfilingLevel(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_ProfilingLevel_t profilingLevel); - -/** - * @brief Sets a preference for execution priority. - * - * This allows the caller to give coarse hint to SNPE runtime - * about the priority of the network. SNPE runtime is free to use - * this information to co-ordinate between different workloads - * that may or may not extend beyond SNPE. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] priority The target performance profile. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetExecutionPriorityHint(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_ExecutionPriorityHint_t priority); - -/** - * @brief Sets the layers that will generate output. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] outputLayerNames List of layer names to - * output. An empty list will - * result in only the final - * layer of the model being - * the output layer. The list - * will be copied. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetOutputLayers(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_StringList_Handle_t outputLayerNames); - -/** - * @brief Sets the output tensor names. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] outputTensorNames List of tensor names to - * output. An empty list will - * result in producing output for the final - * output tensor of the model. - * The list will be copied. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetOutputTensors(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_StringList_Handle_t outputTensorNames); - -/** - * @brief Sets whether this neural network will perform inference with - * input from user-supplied buffers, and write output to user-supplied - * buffers. Default behaviour is to use tensors created by - * ITensorFactory. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] bufferMode Boolean whether to use user-supplied buffer or not. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetUseUserSuppliedBuffers(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, int bufferMode); - -/** - * @brief Sets the debug mode of the runtime. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] debugMode This enables debug mode for the runtime. It - * does two things. For an empty - * outputLayerNames list, all layers will be - * output. It might also disable some internal - * runtime optimizations (e.g., some networks - * might be optimized by combining layers, - * etc.). 
- * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetDebugMode(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, int debugMode); - - - -/** - * @brief Sets network's input dimensions to enable resizing of - * the spatial dimensions of each layer for fully convolutional networks, - * and the batch dimension for all networks. - * - * @param[in] tensorShapeMapHandle : Handle to the map of input names and their new dimensions. - * The new dimensions overwrite the input dimensions - * embedded in the model and then resize each layer - * of the model. If the model contains - * layers whose dimensions cannot be resized e.g FullyConnected, - * exception will be thrown when SNPE instance is actually built. - * In general the batch dimension is always resizable. - * After resizing of layers' dimensions in model based - * on new input dimensions, the new model is revalidated - * against all runtime constraints, whose failures may - * result in cpu fallback situation. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetInputDimensions(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_TensorShapeMap_Handle_t inputDimensionsMapHandle); - -/** - * @brief Sets the mode of init caching functionality. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] mode Boolean. This flag enables/disables the functionality of init caching. - * When init caching functionality is enabled, a set of init caches - * will be created during network building/initialization process - * and will be added to DLC container. If such DLC container is saved - * by the user, in subsequent network building/initialization processes - * these init caches will be loaded from the DLC so as to reduce initialization time. - * In disable mode, no init caches will be added to DLC container. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetInitCacheMode(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, int cacheMode); - -/** - * @brief Returns an instance of SNPE based on the current parameters. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @return A new instance of a @ref CAPI_SNPE "SNPE" object that can be used - * to execute models or null if any errors occur. - */ -SNPE_API -Snpe_SNPE_Handle_t Snpe_SNPEBuilder_Build(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle); - -/** - * @brief Sets the platform configuration. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] platformConfig The platform configuration. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetPlatformConfig(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_PlatformConfig_Handle_t platformConfigHandle); - -/** - * @brief Sets network's runtime order of precedence. Example: - * CPU_FLOAT32, GPU_FLOAT16, AIP_FIXED8_TF - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] runtimeListHandle The list of runtime in order of precedence - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetRuntimeProcessorOrder(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_RuntimeList_Handle_t runtimeListHandle); - -/** - * @brief Sets the unconsumed tensors as output - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] setOutput Boolean. 
This enables unconsumed tensors (i.e) - * outputs which are not inputs to any - * layer (basically dead ends) to be marked - * for output - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetUnconsumedTensorsAsOutputs(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, int setOutput); - -/** - * @brief Execution terminated when exceeding time limit. - * Only valid for dsp runtime currently. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] timeout Time limit value in microseconds - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetTimeOut(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, uint64_t timeoutMicroSec); - - -/** - * @brief Sets the datatype of the buffer. - * Only valid for dsp runtime currently. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] dataTypeMapHandle Map of the buffer names and the datatype that needs to be set. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetBufferDataType(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, Snpe_IOBufferDataTypeMap_Handle_t dataTypeMapHandle); - -/** - * @brief Sets up the entire initialization callflow to - * happen on the user's thread - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] singleThreadedInit Flag indicating user's intent to perform initialization callflow - * on caller's thread. - * When set to 1, initialization will happen on the user's thread - * When set to 0, initialization will happen on a new thread. This is the default - * behavior (analogous to not calling this API) -*/ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetSingleThreadedInit(Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, int singleThreadedInit); - -/** - * @brief Sets the fixed point execution mode for CPU runtime. - * If a floating point DLC is executed with this option set, the program will be terminated with an exception. - * If a quantized DLC is executed without this option set, the execution will be in floating point mode in CPU. - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] cpuFxpMode Boolean If set to true, enables the fixed point mode. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetCpuFixedPointMode( - Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, bool cpuFxpMode); - -/** - * @brief Sets model name for logging - * - * @param[in] snpeBuilderHandle Handle to access the SNPEBuilder object - * - * @param[in] modelName String Model name for logging. - * - */ -SNPE_API -Snpe_ErrorCode_t Snpe_SNPEBuilder_SetModelName( - Snpe_SNPEBuilder_Handle_t snpeBuilderHandle, const char *modelName); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_BUILDER_H_ diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.hpp deleted file mode 100644 index 37995f4e..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPEBuilder.hpp +++ /dev/null @@ -1,136 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
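// --- Editor's illustrative sketch (not part of the deleted header) ---
// The C-API build flow documented above, using only functions declared in this
// header. Enumerator-typed options (performance profile, profiling level, etc.)
// are left at their defaults because their values are defined in
// DlSystem/DlEnums.h, which is not reproduced here.
#include "SNPE/SNPEBuilder.h"

Snpe_SNPE_Handle_t buildFromContainer(Snpe_DlContainer_Handle_t container) {
    Snpe_SNPEBuilder_Handle_t builder = Snpe_SNPEBuilder_Create(container);
    Snpe_SNPEBuilder_SetUseUserSuppliedBuffers(builder, 1);  // feed/collect data via user buffers
    Snpe_SNPEBuilder_SetInitCacheMode(builder, 1);           // add init caches to the DLC container
    Snpe_SNPEBuilder_SetSingleThreadedInit(builder, 1);      // initialize on the caller's thread

    Snpe_SNPE_Handle_t snpe = Snpe_SNPEBuilder_Build(builder);  // null on failure
    Snpe_SNPEBuilder_Delete(builder);
    return snpe;   // caller owns; release with Snpe_SNPE_Delete
}
// --- end sketch ---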
-// -//============================================================================= -#pragma once - -#include - - -#include "Wrapper.hpp" -#include "SNPE.hpp" -#include "DlSystem/RuntimeList.hpp" -#include "DlContainer/IDlContainer.hpp" -#include "DlSystem/PlatformConfig.hpp" -#include "DlSystem/TensorShapeMap.hpp" - -#include "DlSystem/DlEnums.hpp" - -#include "DlSystem/IOBufferDataTypeMap.hpp" - -#include "SNPE/SNPEBuilder.h" - - -namespace SNPE { - -class SNPEBuilder : public Wrapper { - friend BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_SNPEBuilder_Delete}; -public: - - explicit SNPEBuilder(DlContainer::IDlContainer *container) - : BaseType(Snpe_SNPEBuilder_Create(getHandle(container))) - { } - - - SNPEBuilder& setPerformanceProfile(DlSystem::PerformanceProfile_t performanceProfile){ - Snpe_SNPEBuilder_SetPerformanceProfile(handle(), static_cast(performanceProfile)); - return *this; - } - - SNPEBuilder& setProfilingLevel(DlSystem::ProfilingLevel_t profilingLevel){ - Snpe_SNPEBuilder_SetProfilingLevel(handle(), static_cast(profilingLevel)); - return *this; - } - - SNPEBuilder& setExecutionPriorityHint(DlSystem::ExecutionPriorityHint_t priority){ - Snpe_SNPEBuilder_SetExecutionPriorityHint(handle(), static_cast(priority)); - return *this; - } - - SNPEBuilder& setOutputLayers(const DlSystem::StringList& outputLayerNames){ - Snpe_SNPEBuilder_SetOutputLayers(handle(), getHandle(outputLayerNames)); - return *this; - } - - SNPEBuilder& setOutputTensors(const DlSystem::StringList& outputTensorNames){ - Snpe_SNPEBuilder_SetOutputTensors(handle(), getHandle(outputTensorNames)); - return *this; - } - - SNPEBuilder& setUseUserSuppliedBuffers(int bufferMode){ - Snpe_SNPEBuilder_SetUseUserSuppliedBuffers(handle(), bufferMode); - return *this; - } - - SNPEBuilder& setDebugMode(int debugMode){ - Snpe_SNPEBuilder_SetDebugMode(handle(), debugMode); - return *this; - } - - SNPEBuilder& setInputDimensions(const DlSystem::TensorShapeMap& inputDimensionsMap){ - Snpe_SNPEBuilder_SetInputDimensions(handle(), getHandle(inputDimensionsMap)); - return *this; - } - - SNPEBuilder& setInitCacheMode(int cacheMode){ - Snpe_SNPEBuilder_SetInitCacheMode(handle(), cacheMode); - return *this; - } - - SNPEBuilder& setPlatformConfig(const DlSystem::PlatformConfig& platformConfigHandle){ - Snpe_SNPEBuilder_SetPlatformConfig(handle(), getHandle(platformConfigHandle)); - return *this; - } - - SNPEBuilder& setRuntimeProcessorOrder(const DlSystem::RuntimeList& runtimeList){ - Snpe_SNPEBuilder_SetRuntimeProcessorOrder(handle(), getHandle(runtimeList)); - return *this; - } - - SNPEBuilder& setUnconsumedTensorsAsOutputs(int setOutput){ - Snpe_SNPEBuilder_SetUnconsumedTensorsAsOutputs(handle(), setOutput); - return *this; - } - - SNPEBuilder& setTimeOut(uint64_t timeoutMicroSec){ - Snpe_SNPEBuilder_SetTimeOut(handle(), timeoutMicroSec); - return *this; - } - - - SNPEBuilder& setBufferDataType(const DlSystem::IOBufferDataTypeMap& dataTypeMap){ - Snpe_SNPEBuilder_SetBufferDataType(handle(), getHandle(dataTypeMap)); - return *this; - } - - SNPEBuilder& setSingleThreadedInit(int singleThreadedInit){ - Snpe_SNPEBuilder_SetSingleThreadedInit(handle(), singleThreadedInit); - return *this; - } - - SNPEBuilder& setCpuFixedPointMode(bool cpuFxpMode){ - Snpe_SNPEBuilder_SetCpuFixedPointMode(handle(), cpuFxpMode); - return *this; - } - - SNPEBuilder& setModelName(DlSystem::String modelName){ - Snpe_SNPEBuilder_SetModelName(handle(), modelName.c_str()); - return *this; - } - - std::unique_ptr build() noexcept{ - auto 
h = Snpe_SNPEBuilder_Build(handle()); - return h ? makeUnique(h) : nullptr; - } - -}; - -} // ns SNPE - - -ALIAS_IN_ZDL_NAMESPACE(SNPE, SNPEBuilder) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPEFactory.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPEFactory.hpp deleted file mode 100644 index 6c2486ee..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPEFactory.hpp +++ /dev/null @@ -1,88 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" - -#include "DlSystem/DlEnums.hpp" -#include "DlSystem/DlVersion.hpp" -#include "DlSystem/ITensorFactory.hpp" -#include "DlSystem/IUserBufferFactory.hpp" - - -#include "SNPE/SNPEUtil.h" -#include "DlSystem/DlEnums.h" - -namespace SNPE { - - -class SNPEFactory { -public: - - - static bool isRuntimeAvailable(DlSystem::Runtime_t runtime){ - return Snpe_Util_IsRuntimeAvailable(static_cast(runtime)); - } - - static bool isRuntimeAvailable(DlSystem::Runtime_t runtime, DlSystem::RuntimeCheckOption_t option){ - return Snpe_Util_IsRuntimeAvailableCheckOption(static_cast(runtime), - static_cast(option)); - } - - static DlSystem::ITensorFactory& getTensorFactory(){ - static DlSystem::ITensorFactory iTensorFactory; - return iTensorFactory; - } - - static DlSystem::IUserBufferFactory& getUserBufferFactory(){ - static DlSystem::IUserBufferFactory iUserBufferFactory; - return iUserBufferFactory; - } - - static DlSystem::Version_t getLibraryVersion(){ - return WrapperDetail::moveHandle(Snpe_Util_GetLibraryVersion()); - } - - static bool setSNPEStorageLocation(const char* storagePath){ - return SNPE_SUCCESS == Snpe_Util_SetSNPEStorageLocation(storagePath); - } - - static bool addOpPackage(const std::string& regLibraryPath){ - return SNPE_SUCCESS == Snpe_Util_AddOpPackage(regLibraryPath.c_str()); - } - - static bool isGLCLInteropSupported(){ - return Snpe_Util_IsGLCLInteropSupported(); - } - - static const char* getLastError(){ - return Snpe_Util_GetLastError(); - } - - static bool initializeLogging(const DlSystem::LogLevel_t& level){ - return Snpe_Util_InitializeLogging(static_cast(level)); - } - - static bool initializeLogging(const DlSystem::LogLevel_t& level, const std::string& logPath){ - return Snpe_Util_InitializeLoggingPath(static_cast(level), logPath.c_str()); - } - - static bool setLogLevel(const DlSystem::LogLevel_t& level){ - return Snpe_Util_SetLogLevel(static_cast(level)); - } - - static bool terminateLogging(){ - return Snpe_Util_TerminateLogging(); - } -}; - - -} // ns SNPE - - -ALIAS_IN_ZDL_NAMESPACE(SNPE, SNPEFactory) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPEUtil.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPEUtil.h deleted file mode 100644 index a3e1d1e1..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/SNPEUtil.h +++ /dev/null @@ -1,354 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
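// --- Editor's illustrative sketch (not part of the deleted header) ---
// Wrapper-level build flow combining the SNPEBuilder and SNPEFactory classes shown
// above: check runtime availability, then use the fluent builder chain to obtain a
// unique_ptr-owned SNPE instance. The container is assumed to be loaded elsewhere,
// and the include paths are assumptions based on the zdl/ layout.
#include <memory>
#include "SNPE/SNPEBuilder.hpp"   // assumed include path
#include "SNPE/SNPEFactory.hpp"   // assumed include path

std::unique_ptr<SNPE::SNPE> buildNetwork(DlContainer::IDlContainer* container) {
    if (!SNPE::SNPEFactory::isRuntimeAvailable(DlSystem::Runtime_t::CPU_FLOAT32))
        return nullptr;   // report or fall back as appropriate

    SNPE::SNPEBuilder builder(container);
    return builder.setPerformanceProfile(DlSystem::PerformanceProfile_t::HIGH_PERFORMANCE)
                  .setUseUserSuppliedBuffers(true)   // int parameter; true converts to 1
                  .setInitCacheMode(true)
                  .build();                          // nullptr if the underlying build fails
}
// --- end sketch ---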
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -/** - * @file - */ - -#ifndef _SNPE_UTIL_H_ -#define _SNPE_UTIL_H_ - -#include "SNPE/SNPE.h" -#include "DlSystem/DlEnums.h" -#include "DlSystem/DlError.h" -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/IUserBuffer.h" -#include "DlSystem/ITensor.h" -#include "DlSystem/TensorShape.h" -#include "DlSystem/DlVersion.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * @brief Creates a UserBuffer - * - * @param[in] buffer Pointer to the buffer that the caller supplies - * - * @param[in] bufSize Buffer size, in bytes - * - * @param[in] stridesHandle Total number of bytes between elements in each dimension. - * E.g. A tightly packed tensor of floats with dimensions [4, 3, 2] would have strides of [24, 8, 4]. - * - * @param[in] userBufferEncodingHandle Handle to a UserBufferEncoding object - * - * @note Caller has to ensure that memory pointed to by buffer stays accessible - * for the lifetime of the object created - * - * @return Handle to the created UserBuffer - */ -SNPE_API -Snpe_IUserBuffer_Handle_t Snpe_Util_CreateUserBuffer(void *buffer, - size_t bufSize, - Snpe_TensorShape_Handle_t stridesHandle, - Snpe_IUserBuffer_Handle_t userBufferEncodingHandle); - -/** - * @brief Creates a UserBuffer with a provided UserBufferSource - * - * @param[in] buffer Pointer to the buffer that the caller supplies - * - * @param[in] bufSize Buffer size, in bytes - * - * @param[in] stridesHandle Total number of bytes between elements in each dimension. - * E.g. A tightly packed tensor of floats with dimensions [4, 3, 2] would have strides of [24, 8, 4]. - * - * @param[in] userBufferEncodingHandle Handle to a UserBufferEncoding object - * - * @param[in] userBufferSourceHandle Handle to a UserBufferSource object - * - * @return Handle to the created UserBuffer - */ -SNPE_API -Snpe_IUserBuffer_Handle_t Snpe_Util_CreateUserBufferFromSource(void *buffer, - size_t bufSize, - Snpe_TensorShape_Handle_t stridesHandle, - Snpe_IUserBuffer_Handle_t userBufferEncodingHandle, - Snpe_UserBufferSource_Handle_t userBufferSourceHandle); - -/** - * @brief Creates a UserBuffer - * - * @param[in] buffer Pointer to the buffer that the caller supplies - * - * @param[in] bufSize Buffer size, in bytes - * - * @param[in] stridesHandle Total number of bytes between elements in each dimension. - * E.g. A tightly packed tensor of floats with dimensions [4, 3, 2] would have strides of [24, 8, 4]. - * - * @param[in] userBufferEncodingHandle Reference to an UserBufferEncoding object - * - * @param[in] userBufferSourceHandle Reference to an UserBufferSource object - * - * @note Caller has to ensure that memory pointed to by buffer stays accessible - * for the lifetime of the object created - * - * @return the created UserBuffer - * - */ -SNPE_API -Snpe_IUserBuffer_Handle_t Snpe_Util_CreateUserGlBuffer(void *buffer, - size_t bufSize, - Snpe_TensorShape_Handle_t stridesHandle, - Snpe_IUserBuffer_Handle_t userBufferEncodingHandle, - Snpe_IUserBuffer_Handle_t userBufferSourceHandle); - -/** - * Creates a new ITensor with uninitialized data. 
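// --- Editor's illustrative sketch (not part of the deleted header) ---
// The stride convention described for Snpe_Util_CreateUserBuffer above can be
// computed mechanically. This small helper reproduces the documented example:
// float dimensions {4, 3, 2} with a 4-byte element size yield byte strides
// {24, 8, 4} for a tightly packed tensor.
#include <cstddef>
#include <vector>

std::vector<std::size_t> packedByteStrides(const std::vector<std::size_t>& dims,
                                           std::size_t elementSize) {
    std::vector<std::size_t> strides(dims.size());
    std::size_t stride = elementSize;            // innermost (fastest varying) dimension
    for (std::size_t i = dims.size(); i-- > 0; ) {
        strides[i] = stride;
        stride *= dims[i];
    }
    return strides;
}
// packedByteStrides({4, 3, 2}, sizeof(float)) == {24, 8, 4}, matching the example above.
// --- end sketch ---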
- * - * ITensor buffer size assumes float32 encoding for each element. - * (i.e., a tensor with dimensions (2,3) will be represented by (2 * 3) * 4 = 24 bytes in memory) - * - * The strides for the tensor will match the tensor dimensions - * (i.e., the tensor data is contiguous in memory). - * - * @param[in] shapeHandle The dimensions for the tensor in which the last - * element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying, etc. - * - * @return The created tensor - */ -SNPE_API -Snpe_ITensor_Handle_t Snpe_Util_CreateITensor(Snpe_TensorShape_Handle_t shapeHandle); - -/** - * Create a new ITensor with specific data. - * (i.e. the tensor data is contiguous in memory). This tensor is - * primarily used to create a tensor where tensor size can't be - * computed directly from dimension. One such example is - * NV21-formatted image, or any YUV formatted image - * - * @param[in] shapeHandle The dimensions for the tensor in which the last - * element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying, etc. - * - * @param[in] data The actual data with which the Tensor object is filled. - * - * @param[in] dataSize The size of data - * - * @return A handle to the created tensor - */ -SNPE_API -Snpe_ITensor_Handle_t Snpe_Util_CreateITensorDataSize(Snpe_TensorShape_Handle_t shapeHandle, const uint8_t* data, size_t dataSize); - -/** - * Create a new ITensor with specific data. - * (i.e. the tensor data is contiguous in memory). This tensor is - * primarily used to create a tensor where tensor size can't be - * computed directly from dimension. One such example is - * NV21-formatted image, or any YUV formatted image - * - * @param[in] shapeHandle The dimensions for the tensor in which the last - * element of the vector represents the fastest varying - * dimension and the zeroth element represents the slowest - * varying, etc. - * - * @param[in] data The actual data with which the Tensor object is filled. - * - * @param[in] dataSize The size of data - * - * @return the created tensor - */ -SNPE_API -Snpe_ITensor_Handle_t Snpe_Util_CreateITensor_NV21(Snpe_TensorShape_Handle_t shapeHandle, unsigned char *data, size_t dataSize); - -/** - * Indicates whether the supplied runtime is available on the - * current platform. - * - * @param[in] runtime The target runtime to check. - * - * @return Boolean: Non-zero if the supplied runtime is available; 0 otherwise - * - */ -SNPE_API -int Snpe_Util_IsRuntimeAvailable(Snpe_Runtime_t runtime); - -/** - * Indicates whether the supplied runtime is available on the - * current platform. - * - * @param[in] runtime The target runtime to check. - * - * @param[in] runtimeCheckOption Extent to perform runtime available check. - * - * @return Boolean: Non-zero if the supplied runtime is available; 0 otherwise - * - */ -SNPE_API -int Snpe_Util_IsRuntimeAvailableCheckOption(Snpe_Runtime_t runtime, Snpe_RuntimeCheckOption_t runtimeCheckOption); - - -/** - * Gets the version of the SNPE library. - * - * @return Version of the SNPE library. - * - */ -SNPE_API -Snpe_DlVersion_Handle_t Snpe_Util_GetLibraryVersion(); - -/** - * Set the SNPE storage location for all SNPE instances in this - * process. Note that this may only be called once, and if so - * must be called before creating any SNPE instances. - * - * @param[in] storagePath Absolute path to a directory which SNPE may - * use for caching and other storage purposes. 
- * - * @return Boolean: Non-zero if the supplied path was succesfully set as - * the SNPE storage location, 0 otherwise. - * - */ -SNPE_API -int Snpe_Util_SetSNPEStorageLocation(const char* storagePath); - -/** - * @brief Register a user-defined op package with SNPE. - * - * @param[in] regLibraryPath Path to the registration library - * that allows clients to register a set of operations that are - * part of the package, and share op info with SNPE - * - * @return Boolean: Non-zero if successful, 0 otherwise. - */ -SNPE_API -int Snpe_Util_AddOpPackage(const char* regLibraryPath ); - -/** - * Indicates whether the OpenGL and OpenCL interoperability is supported - * on GPU platform. - * - * @return Boolean: Non-zero if the OpenGL and OpenCl interop is supported; 0 otherwise - * - */ -SNPE_API -int Snpe_Util_IsGLCLInteropSupported(); - -/** - * @return A string description of the last error - */ -SNPE_API -const char* Snpe_Util_GetLastError(); - -/** - * Initializes logging with the specified log level. - * initializeLogging with level, is used on Android platforms - * and after successful initialization, SNPE - * logs are printed in android logcat logs. - * - * It is recommended to initializeLogging before creating any - * SNPE instances, in order to capture information related to - * core initialization. If this is called again after first - * time initialization, subsequent calls are ignored. - * Also, Logging can be re-initialized after a call to - * terminateLogging API by calling initializeLogging again. - * - * A typical usage of Logging life cycle can be - * initializeLogging() - * any other SNPE API like isRuntimeAvailable() - * * setLogLevel() - optional - can be called anytime - * between initializeLogging & terminateLogging - * SNPE instance creation, inference, destroy - * terminateLogging(). - * - * Please note, enabling logging can have performance impact. - * - * @param[in] level Log level (LOG_INFO, LOG_WARN, etc.). - * - * @return Boolean: non-zero if successful, 0 otherwise. - */ -SNPE_API -int Snpe_Util_InitializeLogging(Snpe_LogLevel_t level); - -/** - * Initializes logging with the specified log level and log path. - * initializeLogging with level & log path, is used on non Android - * platforms and after successful initialization, SNPE - * logs are printed in std output & into log files created in the - * log path. - * - * It is recommended to initializeLogging before creating any - * SNPE instances, in order to capture information related to - * core initialization. If this is called again after first - * time initialization, subsequent calls are ignored. - * Also, Logging can be re-initialized after a call to - * terminateLogging API by calling initializeLogging again. - * - * A typical usage of Logging life cycle can be - * initializeLogging() - * any other SNPE API like isRuntimeAvailable() - * * setLogLevel() - optional - can be called anytime - * between initializeLogging & terminateLogging - * SNPE instance creation, inference, destroy - * terminateLogging() - * - * Please note, enabling logging can have performance impact - * - * @param[in] level Log level (LOG_INFO, LOG_WARN, etc.). - * - * @param[in] logPath of directory to store logs. - * If path is empty, the default path is "./Log". - * For android, the log path is ignored. - * - * @return Boolean: non-zero if successful, 0 otherwise. - */ -SNPE_API -int Snpe_Util_InitializeLoggingPath(Snpe_LogLevel_t level, const char* logPath); - -/** - * Updates the current logging level with the specified level. 
- * setLogLevel is optional, called anytime after initializeLogging - * and before terminateLogging, to update the log level set. - * Log levels can be updated multiple times by calling setLogLevel - * A call to setLogLevel() is ignored if it is made before - * initializeLogging() or after terminateLogging() - * - * @param[in] level Log level (LOG_INFO, LOG_WARN, etc.). - * - * @return Boolean: non-zero if successful, 0 otherwise. - */ -SNPE_API -int Snpe_Util_SetLogLevel(Snpe_LogLevel_t level); - -/** - * Terminates logging. - * - * It is recommended to terminateLogging after initializeLogging - * in order to disable logging information. - * If this is called before initialization or after first time termination, - * calls are ignored. - * - * @warning Snpe_Util_TerminateLogging() must not be called while another thread is executing. - * In a multi-threaded use case, the individual threads must have a cooperative life cycle - * management strategy for the logger. - * - * @return Boolean: non-zero if successful, 0 otherwise. - */ -SNPE_API -int Snpe_Util_TerminateLogging(); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_UTIL_H_ diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/UserBufferList.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/UserBufferList.h deleted file mode 100644 index e6a42ddb..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/UserBufferList.h +++ /dev/null @@ -1,77 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2022,2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
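// --- Editor's illustrative sketch (not part of the deleted header) ---
// The logging life cycle described above, expressed with the C functions declared
// in this header. The concrete Snpe_LogLevel_t values live in DlSystem/DlEnums.h,
// so the level is taken as a parameter rather than hard-coded.
#include "SNPE/SNPEUtil.h"

void runWithLogging(Snpe_LogLevel_t level) {
    if (!Snpe_Util_InitializeLogging(level))   // ignored if logging is already initialized
        return;

    // ... create SNPE instances, run inference, optionally call
    //     Snpe_Util_SetLogLevel(level) to adjust verbosity ...

    Snpe_Util_TerminateLogging();   // must not race with other threads (see warning above)
}
// --- end sketch ---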
-// -//============================================================================== - -#ifndef _SNPE_USER_BUFFER_LIST_H_ -#define _SNPE_USER_BUFFER_LIST_H_ - - -#ifdef __cplusplus -#include -#else -#include -#endif - -#include "DlSystem/SnpeApiExportDefine.h" -#include "DlSystem/DlError.h" - -#include "DlSystem/UserBufferMap.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef void* Snpe_UserBufferList_Handle_t; - -SNPE_API -Snpe_UserBufferList_Handle_t Snpe_UserBufferList_Create(); - -SNPE_API -Snpe_UserBufferList_Handle_t Snpe_UserBufferList_CreateCopy(Snpe_UserBufferList_Handle_t userBufferListHandle); - -SNPE_API -Snpe_UserBufferList_Handle_t Snpe_UserBufferList_CreateSize(size_t size); - -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferList_Delete(Snpe_UserBufferList_Handle_t userBufferListHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferList_PushBack(Snpe_UserBufferList_Handle_t userBufferListHandle, - Snpe_UserBufferMap_Handle_t userBufferMapHandle); - -SNPE_API -Snpe_UserBufferMap_Handle_t Snpe_UserBufferList_At_Ref(Snpe_UserBufferList_Handle_t userBufferListHandle, - size_t idx); - -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferList_Assign(Snpe_UserBufferList_Handle_t srcUserBufferListHandle, - Snpe_UserBufferList_Handle_t dstUserBufferListHandle); - -SNPE_API -size_t Snpe_UserBufferList_Size(Snpe_UserBufferList_Handle_t userBufferListHandle); - -SNPE_API -size_t Snpe_UserBufferList_Capacity(Snpe_UserBufferList_Handle_t userBufferListHandle); - -SNPE_API -Snpe_ErrorCode_t Snpe_UserBufferList_Clear(Snpe_UserBufferList_Handle_t userBufferListHandle); - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // _SNPE_USER_BUFFER_LIST_H_ diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/UserBufferList.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/UserBufferList.hpp deleted file mode 100644 index fec82dbc..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SNPE/UserBufferList.hpp +++ /dev/null @@ -1,76 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= -#pragma once - -#include "Wrapper.hpp" -#include "DlSystem/UserBufferMap.hpp" - -#include "SNPE/UserBufferList.h" - - -namespace PSNPE { - -class UserBufferList : public Wrapper { - friend BaseType; - // Use this to get free move Ctor and move assignment operator, provided this class does not specify - // as copy assignment operator or copy Ctor - using BaseType::BaseType; - - static constexpr DeleteFunctionType DeleteFunction{Snpe_UserBufferList_Delete}; - -public: - UserBufferList() - : BaseType(Snpe_UserBufferList_Create()) - { } - explicit UserBufferList(size_t size) - : BaseType(Snpe_UserBufferList_CreateSize(size)) - { } - - UserBufferList(const UserBufferList& other) - : BaseType(Snpe_UserBufferList_CreateCopy(other.handle())) - { } - UserBufferList(UserBufferList&& other) noexcept - : BaseType(std::move(other)) - { } - - UserBufferList& operator=(const UserBufferList& other){ - if(this != &other){ - Snpe_UserBufferList_Assign(other.handle(), handle()); - } - return *this; - } - UserBufferList& operator=(UserBufferList&& other){ - return moveAssign(std::move(other)); - } - - - void push_back(const DlSystem::UserBufferMap& userBufferMap){ - Snpe_UserBufferList_PushBack(handle(), getHandle(userBufferMap)); - } - - DlSystem::UserBufferMap& operator[](size_t idx){ - return *makeReference(Snpe_UserBufferList_At_Ref(handle(), idx)); - } - - size_t size() const noexcept{ - return Snpe_UserBufferList_Size(handle()); - } - - size_t capacity() const noexcept{ - return Snpe_UserBufferList_Capacity(handle()); - } - - void clear() noexcept{ - Snpe_UserBufferList_Clear(handle()); - } -}; - - -} // ns PSNPE - -ALIAS_IN_ZDL_NAMESPACE(PSNPE, UserBufferList) diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SnpeUdo/UdoBase.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SnpeUdo/UdoBase.h deleted file mode 100644 index f7af604a..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SnpeUdo/UdoBase.h +++ /dev/null @@ -1,546 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2019-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef SNPE_UDO_BASE_H -#define SNPE_UDO_BASE_H - -#include - -// Provide values to use for API version. -#define API_VERSION_MAJOR 1 -#define API_VERSION_MINOR 6 -#define API_VERSION_TEENY 0 - -/** @addtogroup c_plus_plus_apis C++ -@{ */ - -// Defines a bitmask of enum values. -typedef uint32_t SnpeUdo_Bitmask_t; -typedef SnpeUdo_Bitmask_t Udo_Bitmask_t; - -// A string of characters, rather than an array of bytes. -// Assumed to be UTF-8. -typedef char* SnpeUdo_String_t; -typedef SnpeUdo_String_t Udo_String_t; - -// The maximum allowable length of a SnpeUdo_String_t in bytes, -// including null terminator. SNPE will truncate strings longer -// than this. 
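The UserBufferList wrapper above gives that C API std::vector-style ergonomics plus RAII ownership. A short usage sketch, assuming only that DlSystem::UserBufferMap (declared in the included UserBufferMap.hpp, not shown here) is default-constructible:

```cpp
// Sketch only: relies on the UserBufferList wrapper above; DlSystem::UserBufferMap's
// default constructor is assumed from the included UserBufferMap.hpp (not shown).
void demoUserBufferList() {
    PSNPE::UserBufferList lists;                  // wraps Snpe_UserBufferList_Create()

    DlSystem::UserBufferMap inputMap;             // assumed default-constructible
    lists.push_back(inputMap);                    // forwards to Snpe_UserBufferList_PushBack

    DlSystem::UserBufferMap& first = lists[0];    // reference owned by the list
    (void)first;

    auto n = lists.size();                        // expected: 1
    (void)n;
    lists.clear();
}                                                 // destructor runs Snpe_UserBufferList_Delete
```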
-#define SNPE_UDO_MAX_STRING_SIZE 1024 - -/** - * An enum which holds the various error types. - * The error types are divided to classes : - * 0 - 99 : generic errors - * 100 - 200 : errors related to configuration - * - */ -typedef enum -{ - /// No Error - SNPE_UDO_NO_ERROR = 0, UDO_NO_ERROR = 0, - /// Unsupported value for core type - SNPE_UDO_WRONG_CORE = 1, UDO_WRONG_CORE = 1, - /// Invalid attribute/argument passed into UDO API - SNPE_UDO_INVALID_ARGUMENT = 2, UDO_INVALID_ARGUMENT = 2, - /// Unsupported feature error - SNPE_UDO_UNSUPPORTED_FEATURE = 3, UDO_UNSUPPORTED_FEATURE = 3, - /// Error relating to memory allocation - SNPE_UDO_MEM_ALLOC_ERROR = 4, UDO_MEM_ALLOC_ERROR = 4, - /* Configuration Specific errors */ - /// No op with given attributes available in library - SNPE_UDO_WRONG_OPERATION = 100, UDO_WRONG_OPERATION = 100, - /// Unsupported value for core type in UDO configuration - SNPE_UDO_WRONG_CORE_TYPE = 101, UDO_WRONG_CORE_TYPE = 101, - /// Wrong number of params in UDO definition - SNPE_UDO_WRONG_NUM_OF_PARAMS = 102, UDO_WRONG_NUM_OF_PARAMS = 102, - /// Wrong number of dimensions for tensor(s) in UDO definition - SNPE_UDO_WRONG_NUM_OF_DIMENSIONS = 103, UDO_WRONG_NUM_OF_DIMENSIONS = 103, - /// Wrong number of input tensors in UDO definition - SNPE_UDO_WRONG_NUM_OF_INPUTS = 104, UDO_WRONG_NUM_OF_INPUTS = 104, - /// Wrong number of output tensors in UDO definition - SNPE_UDO_WRONG_NUM_OF_OUTPUTS = 105, UDO_WRONG_NUM_OF_OUTPUTS = 105, - SNPE_UDO_PROGRAM_CACHE_NOT_FOUND = 106, UDO_PROGRAM_CACHE_NOT_FOUND = 106, - SNPE_UDO_UNKNOWN_ERROR = 0xFFFFFFFF, UDO_UNKNOWN_ERROR = 0xFFFFFFFF -} SnpeUdo_ErrorType_t; - -typedef SnpeUdo_ErrorType_t Udo_ErrorType_t; - -/** - * An enum which holds the various data types. - * Designed to be used as single values or combined into a bitfield parameter - * (0x1, 0x2, 0x4, etc) - * \n FIXED_XX types are targeted for data in tensors. - * \n UINT / INT types are targeted for scalar params - */ -typedef enum -{ - /// data type: 16-bit floating point - SNPE_UDO_DATATYPE_FLOAT_16 = 0x01, UDO_DATATYPE_FLOAT_16 = 0x01, - /// data type: 32-bit floating point - SNPE_UDO_DATATYPE_FLOAT_32 = 0x02, UDO_DATATYPE_FLOAT_32 = 0x02, - /// data type: 4-bit fixed point - SNPE_UDO_DATATYPE_FIXED_4 = 0x04, UDO_DATATYPE_FIXED_4 = 0x04, - /// data type: 8-bit fixed point - SNPE_UDO_DATATYPE_FIXED_8 = 0x08, UDO_DATATYPE_FIXED_8 = 0x08, - /// data type: 16-bit fixed point - SNPE_UDO_DATATYPE_FIXED_16 = 0x10, UDO_DATATYPE_FIXED_16 = 0x10, - /// data type: 32-bit fixed point - SNPE_UDO_DATATYPE_FIXED_32 = 0x20, UDO_DATATYPE_FIXED_32 = 0x20, - /// data type: 8-bit unsigned integer - SNPE_UDO_DATATYPE_UINT_8 = 0x100, UDO_DATATYPE_UINT_8 = 0x100, - /// data type: 16-bit unsigned integer - SNPE_UDO_DATATYPE_UINT_16 = 0x200, UDO_DATATYPE_UINT_16 = 0x200, - /// data type: 32-bit unsigned integer - SNPE_UDO_DATATYPE_UINT_32 = 0x400, UDO_DATATYPE_UINT_32 = 0x400, - /// data type: 8-bit signed integer - SNPE_UDO_DATATYPE_INT_8 = 0x1000, UDO_DATATYPE_INT_8 = 0x1000, - /// data type: 16-bit signed integer - SNPE_UDO_DATATYPE_INT_16 = 0x2000, UDO_DATATYPE_INT_16 = 0x2000, - /// data type: 32-bit signed integer - SNPE_UDO_DATATYPE_INT_32 = 0x4000, UDO_DATATYPE_INT_32 = 0x4000, - SNPE_UDO_DATATYPE_LAST = 0xFFFFFFFF, UDO_DATATYPE_LAST = 0xFFFFFFFF -} SnpeUdo_DataType_t; - -typedef SnpeUdo_DataType_t Udo_DataType_t; - -/** - * An enum which holds the various layouts. 
- * Designed to be used as single values or combined into a bitfield parameter - * (0x1, 0x2, 0x4, etc) - */ -typedef enum -{ - /// data layout (4D): NHWC (batch-height-width-channel) - SNPE_UDO_LAYOUT_NHWC = 0x01, UDO_LAYOUT_NHWC = 0x01, - /// data layout (4D): NCHW (batch-channel-height-width) - SNPE_UDO_LAYOUT_NCHW = 0x02, UDO_LAYOUT_NCHW = 0x02, - /// data layout (5D): NDHWC (batch-depth-height-width-channel) - SNPE_UDO_LAYOUT_NDHWC = 0x04, UDO_LAYOUT_NDHWC = 0x04, - SNPE_UDO_LAYOUT_GPU_OPTIMAL1 = 0x08, UDO_LAYOUT_GPU_OPTIMAL1 = 0x08, - SNPE_UDO_LAYOUT_GPU_OPTIMAL2 = 0x10, UDO_LAYOUT_GPU_OPTIMAL2 = 0x10, - SNPE_UDO_LAYOUT_DSP_OPTIMAL1 = 0x11, UDO_LAYOUT_DSP_OPTIMAL1 = 0x11, - SNPE_UDO_LAYOUT_DSP_OPTIMAL2 = 0x12, UDO_LAYOUT_DSP_OPTIMAL2 = 0x12, - // Indicates no data will be allocated for this tensor. - // Used to specify optional inputs/outputs positionally. - SNPE_UDO_LAYOUT_NULL = 0x13, UDO_LAYOUT_NULL = 0x13, - SNPE_UDO_LAYOUT_LAST = 0xFFFFFFFF, UDO_LAYOUT_LAST = 0xFFFFFFFF -} SnpeUdo_TensorLayout_t; - -typedef SnpeUdo_TensorLayout_t Udo_TensorLayout_t; - -/** - * An enum which holds the UDO library Core type . - * Designed to be used as single values or combined into a bitfield parameter - * (0x1, 0x2, 0x4, etc) - */ -typedef enum -{ - /// Library target IP Core is undefined - SNPE_UDO_CORETYPE_UNDEFINED = 0x00, UDO_CORETYPE_UNDEFINED = 0x00, - /// Library target IP Core is CPU - SNPE_UDO_CORETYPE_CPU = 0x01, UDO_CORETYPE_CPU = 0x01, - /// Library target IP Core is GPU - SNPE_UDO_CORETYPE_GPU = 0x02, UDO_CORETYPE_GPU = 0x02, - /// Library target IP Core is DSP - SNPE_UDO_CORETYPE_DSP = 0x04, UDO_CORETYPE_DSP = 0x04, - SNPE_UDO_CORETYPE_LAST = 0xFFFFFFFF, UDO_CORETYPE_LAST = 0xFFFFFFFF -} SnpeUdo_CoreType_t; - -typedef SnpeUdo_CoreType_t Udo_CoreType_t; - -/** - * An enum to specify the parameter type : Scalar or Tensor - */ -typedef enum -{ - /// UDO static param type: scalar - SNPE_UDO_PARAMTYPE_SCALAR = 0x00, UDO_PARAMTYPE_SCALAR = 0x00, - /// UDO static param type: string - SNPE_UDO_PARAMTYPE_STRING = 0x01, UDO_PARAMTYPE_STRING = 0x01, - /// UDO static param type: tensor - SNPE_UDO_PARAMTYPE_TENSOR = 0x02, UDO_PARAMTYPE_TENSOR = 0x02, - SNPE_UDO_PARAMTYPE_LAST = 0xFFFFFFFF, UDO_PARAMTYPE_LAST = 0xFFFFFFFF -} SnpeUdo_ParamType_t; - -typedef SnpeUdo_ParamType_t Udo_ParamType_t; - -/** - * An enum to specify quantization type - */ -typedef enum -{ - /// Tensor Quantization type: NONE. Signifies unquantized tensor data - SNPE_UDO_QUANTIZATION_NONE = 0x00, UDO_QUANTIZATION_NONE = 0x00, - /// Tensor Quantization type: Tensorflow-style - SNPE_UDO_QUANTIZATION_TF = 0x01, UDO_QUANTIZATION_TF = 0x01, - SNPE_UDO_QUANTIZATION_QMN = 0x02, UDO_QUANTIZATION_QMN = 0x02, - SNPE_UDO_QUANTIZATION_LAST = 0xFFFFFFFF, UDO_QUANTIZATION_LAST = 0xFFFFFFFF -} SnpeUdo_QuantizationType_t; - -typedef SnpeUdo_QuantizationType_t Udo_QuantizationType_t; - -/** - * @brief A struct which is used to provide a version number using 3 values : major, minor, teeny - * - */ -typedef struct -{ - /// version field: major - for backward-incompatible changes - uint32_t major; - /// version field: minor - for backward-compatible feature updates - uint32_t minor; - /// version field: teeny - for minor bug-fixes and clean-up - uint32_t teeny; -} SnpeUdo_Version_t; - -typedef SnpeUdo_Version_t Udo_Version_t; - -/** - * @brief A struct returned from version query, contains the Library version and API version - * - */ -typedef struct -{ - /// Version of UDO library. 
Controlled by users - SnpeUdo_Version_t libVersion; - /// Version of SNPE UDO API used in compiling library. Determined by SNPE - SnpeUdo_Version_t apiVersion; -} SnpeUdo_LibVersion_t; - -/** - * @brief A struct returned from version query, contains the package version - * - */ -typedef struct -{ - /// Version of UDO API used in package. - Udo_Version_t apiVersion; -} Udo_PkgVersion_t; - -/** - * @brief A union to hold the value of a generic type. Allows defining a parameter struct - * in a generic way, with a "value" location that holds the data regardless of the type. - * - */ -typedef union -{ - /// value type: float - float floatValue; - /// value type: unsigned 32-bit integer - uint32_t uint32Value; - /// value type: signed 32-bit integer - int32_t int32Value; - /// value type: unsigned 16-bit integer - uint16_t uint16Value; - /// value type: signed 16-bit integer - int16_t int16Value; - /// value type: unsigned 8-bit integer - uint8_t uint8Value; - /// value type: signed 8-bit integer - int8_t int8Value; -} SnpeUdo_Value_t; - -typedef SnpeUdo_Value_t Udo_Value_t; - -/** - * @brief A struct which defines a scalar parameter : name, data type, and union of values - * - */ -typedef struct -{ - /// The parameter data type : float, int, etc. - SnpeUdo_DataType_t dataType; - /// a union of specified type which holds the data - SnpeUdo_Value_t dataValue; -} SnpeUdo_ScalarParam_t; - -typedef SnpeUdo_ScalarParam_t Udo_ScalarParam_t; - -/** - * @brief A struct which defines the quantization parameters in case of Tensorflow style quantization - * - */ -typedef struct -{ - /// minimum value of the quantization range of data - float minValue; - /// maximum value of the quantization range of data - float maxValue; -} SnpeUdo_TFQuantize_t; - -typedef SnpeUdo_TFQuantize_t Udo_TFQuantize_t; - -/** - * @brief A struct which defines the quantization type, and union of supported quantization structs - * - */ -typedef struct -{ - /// quantization type (only TF-style currently supported) - SnpeUdo_QuantizationType_t quantizeType; - union - { - /// TF-style min-max quantization ranges - SnpeUdo_TFQuantize_t TFParams; - }; -} SnpeUdo_QuantizeParams_t; - -typedef SnpeUdo_QuantizeParams_t Udo_QuantizeParams_t; - -/** - * @brief A struct which defines the datatype associated with a specified core-type - * This should be used to denote the datatypes for a single tensor info, depending - * on the intended execution core. - * - */ -typedef struct -{ - /// The IP Core - SnpeUdo_CoreType_t coreType; - /// The associated datatype for this coreType - SnpeUdo_DataType_t dataType; -} SnpeUdo_PerCoreDatatype_t; - -typedef SnpeUdo_PerCoreDatatype_t Udo_PerCoreDatatype_t; - -/** - * @brief A struct which defines a tensor parameter : name, data type, layout, quantization, more. - * Also holds a pointer to the tensor data. - * - */ -typedef struct -{ - /// The maximum allowable dimensions of the tensor. The memory held in - /// _tensorData_ is guaranteed to be large enough for this. - uint32_t* maxDimensions; - /// The current dimensions of the tensor. An operation may modify the current - /// dimensions of its output, to indicate cases where the output has been - /// "resized". - /// Note that for static parameters, the current and max dimensions must - /// match. - uint32_t* currDimensions; - /// Quantization params applicable to the tensor. Currently only supports - /// Tensorflow quantization style. - SnpeUdo_QuantizeParams_t quantizeParams; - /// Number of dimensions to the tensor: 3D, 4D, etc. 
- uint32_t tensorRank; - /// The parameter data type: float, int, etc. - SnpeUdo_DataType_t dataType; - /// The tensor layout type: NCHW, NHWC, etc. - SnpeUdo_TensorLayout_t layout; - /// Opaque pointer to tensor data. User may be required to re-interpret the pointer - /// based on core-specific definitions. - void* tensorData; -} SnpeUdo_TensorParam_t; - -typedef SnpeUdo_TensorParam_t Udo_TensorParam_t; - -/** - * @brief A struct which defines tensor information for activation tensors only - * - * It describes an activation tensor object using its name, the intended layout and the datatype - * it will take depending on the intended runtime core. The repeated field indicates that - * that the tensor info describes several input/output activation tensors, which all share the - * aforementioned properties. - */ -typedef struct -{ - /// The tensor name - SnpeUdo_String_t tensorName; - /// The tensor layout type: NCHW, NHWC, etc. - SnpeUdo_TensorLayout_t layout; - /// The per core datatype: {SNPE_UDO_DATATYPE, SNPE_UDO_CORE_TYPE} - SnpeUdo_PerCoreDatatype_t* perCoreDatatype; - /// A boolean field indicating that this tensorinfo will be repeated e.x for ops such as Concat or Split - bool repeated; - /// A boolean field indicating whether input is static or not. - bool isStatic; -} SnpeUdo_TensorInfo_t; - -typedef SnpeUdo_TensorInfo_t Udo_TensorInfo_t; - -/** - * @brief struct which defines a UDO parameter - a union of scalar, tensor and string parameters - * - */ -typedef struct -{ - /// Type is scalar or tensor - SnpeUdo_ParamType_t paramType; - /// The param name, for example : "offset", "activation_type" - SnpeUdo_String_t paramName; - union - { - /// scalar param value - SnpeUdo_ScalarParam_t scalarParam; - /// tensor param value - SnpeUdo_TensorParam_t tensorParam; - /// string param value - SnpeUdo_String_t stringParam; - }; -} SnpeUdo_Param_t; - -typedef SnpeUdo_Param_t Udo_Param_t; - -/** - * @brief A struct which defines Operation information which is specific for IP core (CPU, GPU, DSP ...) - * - */ -typedef struct -{ - /// The IP Core - SnpeUdo_CoreType_t udoCoreType; - /// Bitmask, defines supported internal calculation types (like FLOAT_32, etc) - /// Based on SnpeUdo_DataType - SnpeUdo_Bitmask_t operationCalculationTypes; -} SnpeUdo_OpCoreInfo_t; - -typedef SnpeUdo_OpCoreInfo_t Udo_OpCoreInfo_t; - -/** - * @brief A struct which defines the common and core-specific Operation information - * - */ -typedef struct -{ - /// Operation type - SnpeUdo_String_t operationType; - /// A bitmask describing which IP Cores (CPU, GPU, DSP ...) support this operation - /// Translated based on SnpeUdo_CoreType - SnpeUdo_Bitmask_t supportedByCores; - /// Number of static parameters defined by the op - uint32_t numOfStaticParams; - /// Array of static parameters. 
Can be scalar or tensor params - SnpeUdo_Param_t* staticParams; - /// Number of input tensors this op receives - uint32_t numOfInputs; - /// Array of input tensor names to this operation - SnpeUdo_String_t* inputNames; - /// Number of output tensors this op receives - uint32_t numOfOutputs; - /// Array of output tensor names to this operation - SnpeUdo_String_t* outputNames; - /// Number of cores that the op can execute on - uint32_t numOfCoreInfo; - /// Array of per-core information entries - SnpeUdo_OpCoreInfo_t* opPerCoreInfo; - /// Array of input tensor infos for this operation - SnpeUdo_TensorInfo_t* inputInfos; - /// Array of output tensor infos for this operation - SnpeUdo_TensorInfo_t* outputInfos; -} SnpeUdo_OperationInfo_t; - -typedef SnpeUdo_OperationInfo_t Udo_OperationInfo_t; - -/** - * @brief A struct which provides the implementation library info : type, name - * - */ -typedef struct -{ - /// Defines the IP Core that this implementation library is targeting - SnpeUdo_CoreType_t udoCoreType; - /// library name. will be looked at in the standard library path - SnpeUdo_String_t libraryName; -} SnpeUdo_LibraryInfo_t; - -typedef SnpeUdo_LibraryInfo_t Udo_LibraryInfo_t; - -/** - * @brief A struct returned by the registration library and contains information on the UDO package : - * name, operations, libraries, etc. - * - */ -typedef struct -{ - /// A string containing the package name - SnpeUdo_String_t packageName; - /// A bitmask describing supported IP cores (CPU, GPU, DSP ...) - /// Translated based on SnpeUdo_CoreType - SnpeUdo_Bitmask_t supportedCoreTypes; - /// The number of implementation libraries in the package - uint32_t numOfImplementationLib; - /// Array of implementation libraries names/types - SnpeUdo_LibraryInfo_t* implementationLib; - /// A string containing all operation types separated by space - SnpeUdo_String_t operationsString; - /// Number of supported operations - uint32_t numOfOperations; - /// Array of Operation info structs. Each entry describes one - /// Operation (name, params, inputs, outputs) - SnpeUdo_OperationInfo_t* operationsInfo; -} SnpeUdo_RegInfo_t; - -typedef SnpeUdo_RegInfo_t Udo_RegInfo_t; - -/** -* @brief A struct returned by the implementation library and contains information on the -* specific library: name, IP Core, operations, etc. -* -*/ -typedef struct -{ - /// Defines the IP Core that this implementation library is targeting - SnpeUdo_CoreType_t udoCoreType; - /// A string containing the package name - SnpeUdo_String_t packageName; - /// A string containing all operation types separated by space - SnpeUdo_String_t operationsString; - /// Number of supported operations - uint32_t numOfOperations; -} SnpeUdo_ImpInfo_t; - -typedef SnpeUdo_ImpInfo_t Udo_ImpInfo_t; - -/** - * @brief This struct defines an operation. It is used for validation - * or creation of an operation. - * In case of using it for creation, the static params which are tensors - * contain pointers to the real data (weights, for example), and input/output - * tensors also include pointers to the buffers used. - */ -typedef struct -{ - /// The IP Core that the operation is defined for - CPU, GPU, DSP... - SnpeUdo_CoreType_t udoCoreType; - /// Operation type - SnpeUdo_String_t operationType; - /// The number of static parameters provided in the staticParams array. 
- /// this number has to match the number provided by the UDO Registration library information - uint32_t numOfStaticParams; - /// Array of static parameters - SnpeUdo_Param_t* staticParams; - /// The number of input parameters provided in inputs array. - /// this number has to match the number provided by the UDO Registration library information - uint32_t numOfInputs; - /// Array of input tensors, providing layout, data type, sizes, etc - /// When used to create an operation, also contains the initial location of the data - SnpeUdo_TensorParam_t* inputs; - /// The number of output parameters provided in inputs array. - /// this number has to match the number provided by the UDO Registration library information - uint32_t numOfOutputs; - /// Array of output tensors, providing layout, data type, sizes, etc - /// When used to create an operation, also contains the initial location of the data - SnpeUdo_TensorParam_t* outputs; -} SnpeUdo_OpDefinition_t; - -typedef SnpeUdo_OpDefinition_t Udo_OpDefinition_t; - -/** @} */ /* end_addtogroup c_plus_plus_apis C++ */ - -#endif //SNPE_UDO_BASE_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SnpeUdo/UdoReg.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SnpeUdo/UdoReg.h deleted file mode 100644 index 2166be59..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SnpeUdo/UdoReg.h +++ /dev/null @@ -1,117 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2019-2020 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef SNPE_UDO_REG_H -#define SNPE_UDO_REG_H - -#include "SnpeUdo/UdoShared.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** @addtogroup c_plus_plus_apis C++ -@{ */ - -/** - * @brief Initialize the shared library's data structures. Calling any other - * library function before this one will result in an error being returned. - * - * @return Error code - */ -SnpeUdo_ErrorType_t -SnpeUdo_initRegLibrary(void); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_InitRegLibraryFunction_t)(void); - -/** - * @brief A function to query the API version of the UDO registration library. - * The function populates a SnpeUdo_LibVersion_t struct, which contains a SnpeUdo_Version_t - * struct for API version and library version. - * - * @param[in, out] version A pointer to struct which contains major, minor, teeny information for - * library and api versions. - * - * @return Error code - */ -SnpeUdo_ErrorType_t -SnpeUdo_getRegLibraryVersion(SnpeUdo_LibVersion_t** version); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_getRegLibraryVersion_t)(SnpeUdo_LibVersion_t** version); - -/** - * @brief Release the shared library's data structures, and invalidate any - * handles returned by the library. The behavior of any outstanding - * asynchronous calls made to this library when this function is called - * are undefined. 
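The function-pointer typedefs that accompany each entry point (SnpeUdo_InitRegLibraryFunction_t, SnpeUdo_getRegLibraryVersion_t, and so on) exist so that a host process can resolve a UDO registration library at runtime. A sketch of that pattern, assuming a POSIX dlopen/dlsym environment and a hypothetical library name; the symbols, types, and error codes are the ones declared in these headers:

```cpp
#include <dlfcn.h>   // POSIX dynamic loading, assumed available on the target

// Sketch only: "libUdoExamplePackageReg.so" is a hypothetical library name; the
// symbol names and function-pointer typedefs come from UdoReg.h above.
SnpeUdo_ErrorType_t initRegistrationLibrary() {
    void* lib = dlopen("libUdoExamplePackageReg.so", RTLD_NOW | RTLD_LOCAL);
    if (!lib) return SNPE_UDO_INVALID_ARGUMENT;   // reusing an error code from UdoBase.h above

    auto initFn = reinterpret_cast<SnpeUdo_InitRegLibraryFunction_t>(
        dlsym(lib, "SnpeUdo_initRegLibrary"));
    auto versionFn = reinterpret_cast<SnpeUdo_getRegLibraryVersion_t>(
        dlsym(lib, "SnpeUdo_getRegLibraryVersion"));
    if (!initFn || !versionFn) return SNPE_UDO_INVALID_ARGUMENT;

    // initRegLibrary must be the first call into the library.
    SnpeUdo_ErrorType_t err = initFn();
    if (err != SNPE_UDO_NO_ERROR) return err;

    SnpeUdo_LibVersion_t* version = nullptr;
    return versionFn(&version);                   // populates library and API versions
}
```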
All library functions (except SnpeUdo_InitRegLibrary) will - * return an error after this function has been successfully called. - * - * It should be possible to call SnpeUdo_InitRegLibrary after calling this - * function, and re-initialize the library. - * - * @return Error code - */ -SnpeUdo_ErrorType_t -SnpeUdo_terminateRegLibrary(void); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_TerminateRegLibraryFunction_t)(void); - - -/** - * @brief A function to query the info on the UDO set. - * The function populates a structure which contains information about - * the package and operations contained in it. - * - * @param[in, out] registrationInfo A struct which contains information on the set of UDOs - * - * @return Error code - * - */ -SnpeUdo_ErrorType_t -SnpeUdo_getRegInfo(SnpeUdo_RegInfo_t** registrationInfo); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_GetRegInfoFunction_t)(SnpeUdo_RegInfo_t** registrationInfo); - -/** - * @brief A function to validate that a set of params is supported by an operation - * The function receives an operation definition struct, and returns if this configuration is - * supported (e.g. if an operation can be created using this configuration) - * - * @param[in] opDefinition A struct of SnpeUdo_OpDefinition type, containing the information needed to - * validate that an operation can be created with this configuration. - * - * @return Error code, indicating is the operation can be created on this set or not. - * - */ -SnpeUdo_ErrorType_t -SnpeUdo_validateOperation(SnpeUdo_OpDefinition_t* opDefinition); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_ValidateOperationFunction_t)(SnpeUdo_OpDefinition_t* opDefinition); - -/** @} */ /* end_addtogroup c_plus_plus_apis C++ */ - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif //SNPE_UDO_REG_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SnpeUdo/UdoShared.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SnpeUdo/UdoShared.h deleted file mode 100644 index 816a8a74..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/SnpeUdo/UdoShared.h +++ /dev/null @@ -1,57 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -//============================================================================== -// -// Copyright (c) 2019-2021 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#ifndef SNPE_UDO_SHARED_H -#define SNPE_UDO_SHARED_H - -#include "SnpeUdo/UdoBase.h" - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** @addtogroup c_plus_plus_apis C++ -@{ */ - -/** - * @brief A function to return the various versions as they relate to the UDO - * The function returns a struct containing the the following: - * libVersion: the version of the implementation library compiled for the UDO. Set by user - * apiVersion: the version of the UDO API used in compiling the implementation library. 
- * Set by SNPE - * - * @param[in, out] version A pointer to Version struct of type SnpeUdo_LibVersion_t - * - * @return Error code - * - */ -SnpeUdo_ErrorType_t -SnpeUdo_getVersion (SnpeUdo_LibVersion_t** version); - -typedef SnpeUdo_ErrorType_t -(*SnpeUdo_GetVersionFunction_t) (SnpeUdo_LibVersion_t** version); - -typedef SnpeUdo_GetVersionFunction_t Udo_GetVersionFunction_t; - -#ifdef __cplusplus -} // extern "C" -#endif - -/** @} */ /* end_addtogroup c_plus_plus_apis C++ */ - -#endif // SNPE_UDO_SHARED_H diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/Wrapper.hpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/Wrapper.hpp deleted file mode 100644 index 5f908f15..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inc/zdl/Wrapper.hpp +++ /dev/null @@ -1,449 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================= -#pragma once - -#define SNPE_WRAPPER_TYPES - -#include -#include -#include -#include - -#include - -#include - - -#include "DlSystem/DlError.h" - -// Put type aliases in zdl::namespace -#define ALIAS_IN_ZDL_NAMESPACE(ns, type) namespace zdl{ namespace ns { using type = ::ns::type; }} - - -// Uncomment to print info from the Wrapper base class -//#define WRAPPER_DEBUG_PRINTS - - -#ifdef WRAPPER_DEBUG_PRINTS - -#ifdef _MSC_VER -#define WRAPPER_FUNCTION_NAME __FUNCTION__ -#define WRAPPER_TRACE() std::cout << __LINE__ << ":\t" << WRAPPER_FUNCTION_NAME << std::endl -#define WRAPPER_ETRACE() std::cout << __LINE__ << ":\t" << WRAPPER_FUNCTION_NAME << std::endl -#else -#define WRAPPER_FUNCTION_NAME __PRETTY_FUNCTION__ -#define WRAPPER_TRACE() std::cout << "\e[33m" << __LINE__ << ":\t" << WRAPPER_FUNCTION_NAME << "\e[0m" << std::endl -#define WRAPPER_ETRACE() std::cout << "\e[31m" << __LINE__ << ":\t" << WRAPPER_FUNCTION_NAME << "\e[0m" << std::endl -#endif - -#include -#else -#define WRAPPER_TRACE() do{}while(0) -#define WRAPPER_ETRACE() do{}while(0) -#endif - - -namespace WrapperDetail { - - -template -using GetterFuncType = MemberType(*)(HandleType); - -template -using SetterFuncType = Snpe_ErrorCode_t(*)(HandleType, MemberType); - - - -// Allow Wrappers to have members that require CAPI calls for access -template GetterFunc, - SetterFuncType SetterFunc -> -class GenericMemberReference{ - OwnerType& owner; -public: - - - ~GenericMemberReference() = default; - GenericMemberReference() = delete; - - GenericMemberReference(const GenericMemberReference&) = delete; - GenericMemberReference(GenericMemberReference&&) noexcept = default; - - GenericMemberReference(OwnerType& owner) - : owner{owner} - { } - explicit GenericMemberReference(OwnerType& owner, MemberType member) - : owner{owner} - { - operator=(member); - } - GenericMemberReference& operator=(MemberType member){ - SetterFunc(owner.handle(), member); - return *this; - } - - operator MemberType() const{ - return GetterFunc(owner.handle()); - } - - GenericMemberReference& - operator=(const GenericMemberReference& other){ - return operator=(other.operator MemberType()); - } - - MemberType operator()() const{ - return operator MemberType(); - } - -}; - -// Allow Wrappers to have members that require CAPI calls for access -template GetterFunc -> -class GenericConstMemberReference{ - - OwnerType& owner; - -public: 
- ~GenericConstMemberReference() = default; - GenericConstMemberReference() = delete; - - GenericConstMemberReference(const GenericConstMemberReference&) = delete; - GenericConstMemberReference(GenericConstMemberReference&&) noexcept = default; - - GenericConstMemberReference(OwnerType& owner) - : owner{owner} - { } - - operator MemberType() const{ - return GetterFunc(owner.handle()); - } - - - template::value,int>::Type=0> - operator const char*() const{ - thread_local std::string tlss; - tlss = operator MemberType(); - return tlss.c_str(); - } - - MemberType operator()() const{ - return operator MemberType(); - } - -}; - - - -// Allows returning references to literals through the CAPI's _Get and _Set functions -template -using GetterIndexedFuncType = MemberType(*)(HandleType, IndexType); - -template -using SetterIndexedFuncType = Snpe_ErrorCode_t(*)(HandleType, IndexType, MemberType); - -template GetterFunc, - SetterIndexedFuncType SetterFunc -> -class MemberIndexedReference{ - OwnerType& owner; - IndexType idx; - -public: - MemberIndexedReference(OwnerType& owner, IndexType idx) - : owner{owner}, - idx{idx} - { } - MemberIndexedReference(const MemberIndexedReference&) noexcept = default; - MemberIndexedReference(MemberIndexedReference&&) noexcept = default; - - MemberIndexedReference& operator=(const MemberIndexedReference&) noexcept = default; - MemberIndexedReference& operator=(MemberIndexedReference&&) noexcept = default; - - MemberIndexedReference operator=(MemberType member){ - SetterFunc(owner.handle(), idx, member); - return *this; - } - - operator MemberType() const{ - return GetterFunc(owner.handle(), idx); - } - -}; - - - -// Allow moving ownership of handles -template -struct HandleMover { - Handle handle; - bool isReference; -}; - -template -HandleMover moveHandle(Handle handle, bool isReference = false){ - return {handle, isReference}; -} - -// Virtual base class to allow for WrapperStorage to hold pointers to any Wrapper type -class WrapperBase{ -public: - virtual ~WrapperBase() = default; -}; - -// Storage type for Wrappers. 
Will have a set if the CAPI type is capable of creating reference handles -template -struct WrapperStorage{ - Handle handle; - bool isReference; - constexpr WrapperStorage(Handle handle = {}, bool isReference = false) noexcept - : handle{handle}, - isReference{isReference} - { } -}; - -template -struct WrapperStorage{ - Handle handle; - bool isReference; - mutable std::set> referencedObjects; - WrapperStorage(Handle handle = {}, bool isReference = false) noexcept - : handle{handle}, - isReference{isReference} - { } -}; - -// Allow a handle to be unbound from a Wrapper -struct HandleReleaser{ - template - static typename WrapperType::HandleType release(WrapperType& wrapper){ - auto toret = wrapper.m_Storage.handle; - wrapper.m_Storage.handle = {}; - return toret; - } -}; - -} // ns WrapperDetail - - - -// The base class for all Wrappers around the CAPI -// NOTE: This Wrapper class leverages the Curiously Recurring Template Pattern (CRTP) -template -class Wrapper : public WrapperDetail::WrapperBase{ - friend struct WrapperDetail::HandleReleaser; - // Allow certain types to access getHandle() and handle() - template - friend class Wrapper; - - template, - WrapperDetail::SetterIndexedFuncType> - friend class WrapperDetail::MemberIndexedReference; - - template> - friend class WrapperDetail::GenericConstMemberReference; - - template, WrapperDetail::SetterFuncType> - friend class WrapperDetail::GenericMemberReference; - - - -protected: - using HandleType = Handle; - using BaseType = Wrapper; - using DeleteFunctionType = Snpe_ErrorCode_t(*)(Handle); - - using StorageType = WrapperDetail::WrapperStorage; - - - template Getter> - static WrapperValueType CastingGetter(HandleType handle){ - return static_cast(Getter(handle)); - } - template Setter> - static Snpe_ErrorCode_t CastingSetter(HandleType handle, WrapperValueType value){ - return Setter(handle,static_cast(value)); - } - - - template - struct WrapperMemberReference{ - Derived& owner; - - WrapperMemberReference(Derived& owner) - : owner{owner} - { } - WrapperMemberReference(Derived& owner, const RlType& other) - : owner{owner} - { - operator=(other); - } - - WrapperMemberReference& operator=(const RlType& rl){ - Setter(getHandle(owner), getHandle(rl)); - return *this; - } - - operator RlType&() { - return *owner.template makeReference( Getter(getHandle(owner)) ); - } - operator RlType&() const { - return *owner.template makeReference( Getter(getHandle(owner)) ); - } - - RlType& operator()(){ - return operator RlType&(); - } - const RlType& operator()() const{ - return operator RlType&(); - } - }; - - // For Factory/Singleton types, we need a way for the deleter to do nothing - static Snpe_ErrorCode_t NoOpDeleter(Handle){ - return SNPE_SUCCESS; - } - - // Simplify calls to WrapperDetail::moveHandle. 
Can be removed, but will require updating all calls to moveHandle - template - static WrapperDetail::HandleMover moveHandle(H handle, bool isReference = false){ - return WrapperDetail::moveHandle(handle, isReference); - } - - - HandleType& handle() noexcept{ return m_Storage.handle; } - const HandleType& handle() const noexcept{ return m_Storage.handle; } - - bool isReference() const noexcept{ return m_Storage.isReference; } - - void Dtor(){ - if(!isReference() && !handle()){ - if(Derived::DeleteFunction != NoOpDeleter){ - WRAPPER_ETRACE(); - } - } - if(!isReference() && handle()){ - WRAPPER_TRACE(); -#ifdef WRAPPER_DEBUG_PRINTS - auto status = Derived::DeleteFunction(handle()); - if(status != SNPE_SUCCESS){ - WRAPPER_ETRACE(); - } -#else - Derived::DeleteFunction(handle()); -#endif - - handle() = nullptr; - } else { - WRAPPER_TRACE(); - } - } - -protected: - - // Only compile these if the class creates references. This will save memory and time - template::type=0> - void addReference(WrapperBase* wrapperBase) const{ // accesses mutable member - if(!wrapperBase){ - WRAPPER_ETRACE(); - } - m_Storage.referencedObjects.insert(std::unique_ptr(wrapperBase)); - } - - template::type=0> - T* makeReference(H referenceHandle) const{ - if(!referenceHandle){ - WRAPPER_ETRACE(); - return nullptr; - } - auto refObj = new T(moveHandle(referenceHandle, true)); - addReference(refObj); - return refObj; - } - - // This will be used to access another Wrapped type's handles once handle() is made protected - template - static OtherHandle getHandle(const Wrapper& otherObject){ - return otherObject.handle(); - } - - template - static OtherHandle getHandle(const Wrapper* otherObject){ - if(!otherObject) return {}; - return getHandle(*otherObject); - } - - template - static std::unique_ptr makeUnique(H handle){ - if(!handle) return {}; - return std::unique_ptr(new T(moveHandle(handle))); - } - - -public: - ~Wrapper(){ - Dtor(); - } -protected: - // Only derived types should have access to this - Wrapper(HandleType handle, bool isReference = false) - : m_Storage{handle, isReference} - { WRAPPER_TRACE(); } - -public: - // We should never have an empty wrapper - Wrapper() = delete; - - // Move semantics are essentially free for all wrapper types - Wrapper(Wrapper&& other) noexcept - : m_Storage{std::move(other.m_Storage)} - { - WRAPPER_TRACE(); - other.handle() = nullptr; - } - Wrapper(const Wrapper&) = delete; - - - Wrapper& operator=(Wrapper&& other) noexcept{ - WRAPPER_TRACE(); - if(this != &other){ - std::swap(m_Storage, other.m_Storage); - other.Dtor(); - } - return *this; - } - Wrapper& operator=(const Wrapper&) = delete; - - - // Allow a CAPI handle to be taken over by a Wrapper - Wrapper(WrapperDetail::HandleMover handleMover) noexcept - : Wrapper(handleMover.handle, handleMover.isReference) - { WRAPPER_TRACE(); } - -protected: - // Simplify Derived's move assignment operators - Derived& moveAssign(Derived&& other) noexcept{ WRAPPER_TRACE(); - return static_cast(operator=(std::move(other))); - } - - -private: - StorageType m_Storage; - -}; diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inference.cpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inference.cpp deleted file mode 100644 index d0cd64da..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inference.cpp +++ /dev/null @@ -1,194 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of 
Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "android/log.h" - -#include "hpp/CheckRuntime.hpp" -#include "hpp/SetBuilderOptions.hpp" -#include "hpp/Util.hpp" -#include "LoadContainer.hpp" -#include "CreateUserBuffer.hpp" -#include "LoadInputTensor.hpp" - -#include -#include -#include - -std::unique_ptr snpe_HRNET; -std::unique_ptr snpe_BB; - -std::mutex mtx; -static zdl::DlSystem::Runtime_t runtime = zdl::DlSystem::Runtime_t::CPU; -static zdl::DlSystem::RuntimeList runtimeList; -bool useUserSuppliedBuffers = true; -bool useIntBuffer = false; - -zdl::DlSystem::UserBufferMap inputMap, outputMap; -std::vector > snpeUserBackedInputBuffers, snpeUserBackedOutputBuffers; -std::unordered_map > applicationOutputBuffers; -std::unordered_map > applicationInputBuffers; -int bitWidth = 32; - - -#include -#include - -std::string build_network_BB(const uint8_t * dlc_buffer_BB, const size_t dlc_size_BB, const char runtime_arg, ModelName modelName) -{ - std::string outputLogger; - bool usingInitCaching = false; //shubham: TODO check with true - - std::unique_ptr container_BB = nullptr ; - - container_BB = loadContainerFromBuffer(dlc_buffer_BB, dlc_size_BB); - - if (container_BB == nullptr) { - LOGE("Error while opening the container file."); - return "Error while opening the container file.\n"; - } - - runtimeList.clear(); - LOGI("runtime arg %c",runtime_arg); - zdl::DlSystem::Runtime_t runtime = zdl::DlSystem::Runtime_t::CPU; - if (runtime_arg == 'D'){ - runtime = zdl::DlSystem::Runtime_t::DSP; - LOGI("Added DSP"); - } - else if (runtime_arg == 'G') - { - runtime = zdl::DlSystem::Runtime_t::GPU_FLOAT32_16_HYBRID; //can be written as GPU - LOGI("Added GPU"); - } - - if(runtime != zdl::DlSystem::Runtime_t::UNSET) - { - bool ret = runtimeList.add(checkRuntime(runtime)); - if(ret == false){ - LOGE("Cannot set runtime"); - return outputLogger + "\nCannot set runtime"; - } - } else { - return outputLogger + "\nCannot set runtime"; - } - - - mtx.lock(); - snpe_BB = setBuilderOptions(container_BB, runtime, runtimeList, useUserSuppliedBuffers, usingInitCaching, modelName); - mtx.unlock(); - - if (snpe_BB == nullptr) { - LOGE("SNPE Prepare failed: Builder option failed for BB"); - outputLogger += "Model Prepare failed for BB"; - return outputLogger + "SNPE Prepare failed for BB"; - } - - outputLogger += "\nBB Model Network Prepare success !!!\n"; - - //Creating Buffer - createInputBufferMap(inputMap, applicationInputBuffers, snpeUserBackedInputBuffers, snpe_BB, useIntBuffer, bitWidth); - createOutputBufferMap(outputMap, applicationOutputBuffers, snpeUserBackedOutputBuffers, snpe_BB, useIntBuffer, bitWidth); - return outputLogger; -} - - - -bool executeDLC(cv::Mat &img, int orig_width, int orig_height, int &numberofobj, std::vector> &BB_coords, std::vector &BB_names, Model *modelobj) { - - LOGI("execute_net_BB"); - ATrace_beginSection("preprocessing"); - - struct timeval start_time, end_time; - float milli_time, seconds, useconds; - - mtx.lock(); - assert(snpe_BB!=nullptr); - - if(!loadInputUserBuffer(applicationInputBuffers, snpe_BB, img, inputMap, bitWidth, modelobj)) - { - LOGE("Failed to load Input UserBuffer"); - mtx.unlock(); - return false; - } - - //std::string name_out_boxes = "885"; - //std::string name_out_classes = "877"; - - // get 
output tensor names of the network that need to be populated - const auto &outputNamesOpt = snpe_BB->getOutputTensorNames(); - if (!outputNamesOpt) throw std::runtime_error("Error obtaining output tensor names"); - const zdl::DlSystem::StringList &outputNames = *outputNamesOpt; - assert(outputNames.size() > 0); - - if (outputNames.size()) LOGI("Preprocessing and loading in application Output Buffer for BB"); - - std::string name_out_boxes; - - //YoloX is using only single output tensor - if (modelobj->model_name != YoloX) { - name_out_boxes = outputNames.at(1); - LOGI("Filling %s buffer name_out_boxes", name_out_boxes.c_str()); - } - - - std::string name_out_classes = outputNames.at(0); - LOGI("Filling %s buffer name_out_classes", name_out_classes.c_str()); - - ATrace_endSection(); - gettimeofday(&start_time, NULL); - ATrace_beginSection("inference time"); - - bool execStatus = snpe_BB->execute(inputMap, outputMap); - ATrace_endSection(); - ATrace_beginSection("postprocessing time"); - gettimeofday(&end_time, NULL); - seconds = end_time.tv_sec - start_time.tv_sec; //seconds - useconds = end_time.tv_usec - start_time.tv_usec; //milliseconds - milli_time = ((seconds) * 1000 + useconds/1000.0); - //LOGI("Inference time %f ms", milli_time); - - if(execStatus== true){ - LOGI("Exec BB status is true"); - } - else{ - LOGE("Exec BB status is false"); - mtx.unlock(); - return false; - } - - std::vector BBout_boxcoords; - - //YoloX is using only single output tensor - if (modelobj->model_name != YoloX) { - LOGI("reading output name_out_boxes"); - BBout_boxcoords = applicationOutputBuffers.at(name_out_boxes); - } - - LOGI("reading output name_out_classes"); - std::vector BBout_class = applicationOutputBuffers.at(name_out_classes); - //LOGI("reading output done. Calling postprocess"); - - modelobj->postprocess(orig_width, orig_height, numberofobj, BB_coords, BB_names, BBout_boxcoords, BBout_class, milli_time); - - ATrace_endSection(); - mtx.unlock(); - return true; -} - diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inference_helper.cpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inference_helper.cpp deleted file mode 100644 index bef75104..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/inference_helper.cpp +++ /dev/null @@ -1,288 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -#include -#include -#include -#include -#include -#include "android/log.h" - -#include "zdl/SNPE/SNPE.hpp" -#include "zdl/SNPE/SNPEFactory.hpp" -#include "zdl/DlSystem/DlVersion.hpp" -#include "zdl/DlSystem/DlEnums.hpp" -#include "zdl/DlSystem/String.hpp" -#include "zdl/DlContainer/IDlContainer.hpp" -#include "zdl/SNPE/SNPEBuilder.hpp" -#include "zdl/DlSystem/ITensor.hpp" -#include "zdl/DlSystem/StringList.hpp" -#include "zdl/DlSystem/TensorMap.hpp" -#include "zdl/DlSystem/TensorShape.hpp" -#include "DlSystem/ITensorFactory.hpp" - -#include "hpp/LoadInputTensor.hpp" -#include "hpp/Util.hpp" -#include "inference.h" - -bool SetAdspLibraryPath(std::string nativeLibPath) { - nativeLibPath += ";/data/local/tmp/mv_dlc;/vendor/lib/rfsa/adsp;/vendor/dsp/cdsp;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp"; - - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "ADSP Lib Path = %s \n", nativeLibPath.c_str()); - std::cout << "ADSP Lib Path = " << nativeLibPath << std::endl; - - return setenv("ADSP_LIBRARY_PATH", nativeLibPath.c_str(), 1 /*override*/) == 0; -} - - -std::unique_ptr loadContainerFromBuffer(const uint8_t * buffer, const size_t size) -{ - std::unique_ptr container; - container = zdl::DlContainer::IDlContainer::open(buffer, size); - return container; -} - - -zdl::DlSystem::Runtime_t checkRuntime(zdl::DlSystem::Runtime_t runtime) -{ - static zdl::DlSystem::Version_t Version = zdl::SNPE::SNPEFactory::getLibraryVersion(); - - LOGI("SNPE Version = %s", Version.asString().c_str()); //Print Version number - - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(runtime)) { - LOGE("Selected runtime not present. Falling back to GPU."); - runtime = zdl::DlSystem::Runtime_t::GPU; - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(runtime)){ - LOGE("Selected runtime not present. Falling back to CPU."); - runtime = zdl::DlSystem::Runtime_t::CPU; - } - } - - return runtime; -} - -std::unique_ptr setBuilderOptions(std::unique_ptr & container, - zdl::DlSystem::Runtime_t runtime, - zdl::DlSystem::RuntimeList runtimeList, - bool useUserSuppliedBuffers, - bool useCaching, - ModelName modelName) -{ - std::unique_ptr snpe; - zdl::SNPE::SNPEBuilder snpeBuilder(container.get()); - - if(runtimeList.empty()) - { - runtimeList.add(runtime); - } - - std::string platformOptionStr = "useAdaptivePD:ON"; -// if (isSignedStatus == UNSIGNED_PD) { - // use unsignedPD feature for untrusted app. 
- // platformOptionStr += "unsignedPD:ON"; -// } - zdl::DlSystem::PlatformConfig platformConfig; - bool setSuccess = platformConfig.setPlatformOptions(platformOptionStr); - if (!setSuccess) - LOGE("=========> failed to set platformconfig: %s", platformOptionStr.c_str()); - else - LOGI("=========> platformconfig set: %s", platformOptionStr.c_str()); - - bool isValid = platformConfig.isOptionsValid(); - if (!isValid) - LOGE("=========> platformconfig option is invalid"); - else - LOGI("=========> platformconfig option: valid"); - - - zdl::DlSystem::StringList stringruntime = runtimeList.getRuntimeListNames(); - for (const char *name : stringruntime) - LOGI("runtime sh %s", name); - - snpe = snpeBuilder.setOutputLayers({}) - .setPerformanceProfile(zdl::DlSystem::PerformanceProfile_t::BURST) - .setExecutionPriorityHint( - zdl::DlSystem::ExecutionPriorityHint_t::HIGH) - .setRuntimeProcessorOrder(runtimeList) - .setUseUserSuppliedBuffers(useUserSuppliedBuffers) - .setPlatformConfig(platformConfig) - .setInitCacheMode(useCaching) - .setUnconsumedTensorsAsOutputs(true) - .build(); - - return snpe; - -} - -// ==============================User Buffer func=================================== // -// ================================================================================= // - - -//CreateUserbuffer INPUT/OUTPUT for BB -void createUserBuffer(zdl::DlSystem::UserBufferMap& userBufferMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - const char * name, - const bool isTfNBuffer, - int bitWidth) -{ - - auto bufferAttributesOpt = snpe->getInputOutputBufferAttributes(name); - if (!bufferAttributesOpt) throw std::runtime_error(std::string("Error obtaining attributes for input tensor ") + name); - - // calculate the size of buffer required by the input tensor - const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); - - size_t bufferElementSize = 0; - if (isTfNBuffer) { - bufferElementSize = bitWidth / 8; - } - else { - bufferElementSize = sizeof(float); - } - - // Calculate the stride based on buffer strides. - // Note: Strides = Number of bytes to advance to the next element in each dimension. - // For example, if a float tensor of dimension 2x4x3 is tightly packed in a buffer of 96 bytes, then the strides would be (48,12,4) - // Note: Buffer stride is usually known and does not need to be calculated. 
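The figures in that comment can be checked in isolation before reading the loop that follows: the innermost stride equals the element size, and each outer stride is the next-inner stride multiplied by the next-inner dimension. A self-contained sketch of that arithmetic:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Standalone check of the worked example above: a tightly packed 2x4x3 float
// tensor occupies 96 bytes and has byte strides (48, 12, 4).
std::vector<std::size_t> packedStrides(const std::vector<std::size_t>& dims,
                                       std::size_t elementSize) {
    std::vector<std::size_t> strides(dims.size());
    std::size_t stride = elementSize;            // innermost dimension advances by one element
    for (std::size_t i = dims.size(); i-- > 0;) {
        strides[i] = stride;
        stride *= dims[i];                       // accumulate toward the outer dimensions
    }
    return strides;
}

int main() {
    const auto s = packedStrides({2, 4, 3}, sizeof(float));
    assert((s == std::vector<std::size_t>{48, 12, 4}));   // matches the comment above
    return 0;
}
```

The createUserBuffer code below applies the same recurrence directly to bufferShape, whose leading dimension is the batch size noted in its comment.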
- - int num_dims = bufferShape.rank(); //bufferShape rank is generally 1 more than expected, as it add 1 for batchSize, so 320x320x3 will look like 1x320x320x3 - std::vector strides(num_dims); - strides[strides.size() - 1] = bufferElementSize; - size_t stride = strides[strides.size() - 1]; - for (size_t i = num_dims - 1; i > 0; i--) { - stride *= bufferShape[i]; - strides[i - 1] = stride; - //LOGI("\nstrides[%d]: %d",i,stride); - //LOGI("\nbuffershape[%d]: %d",i,bufferShape[i]); - } - - size_t bufSize=bufferElementSize; - for(int i=0;i userBufferEncoding; - if (isTfNBuffer) - userBufferEncoding = std::unique_ptr( - new zdl::DlSystem::UserBufferEncodingTfN(0, 1.0, bitWidth)); - else - userBufferEncoding = std::unique_ptr( - new zdl::DlSystem::UserBufferEncodingFloat()); - - // create user-backed storage to load input data onto it - applicationBuffers.emplace(name, std::vector(bufSize)); - - // create SNPE user buffer from the user-backed buffer - zdl::DlSystem::IUserBufferFactory &ubFactory = zdl::SNPE::SNPEFactory::getUserBufferFactory(); - snpeUserBackedBuffers.push_back( - ubFactory.createUserBuffer(applicationBuffers.at(name).data(), - bufSize, - strides, - userBufferEncoding.get())); - if (snpeUserBackedBuffers.back() == nullptr) - throw std::runtime_error(std::string("Error while creating user buffer.")); - - // add the user-backed buffer to the inputMap, which is later on fed to the network for execution - userBufferMap.add(name, snpeUserBackedBuffers.back().get()); - -} - -/* - Cretae OutPut Buffer Map for BB - */ -void createOutputBufferMap(zdl::DlSystem::UserBufferMap& outputMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - bool isTfNBuffer, - int bitWidth) -{ - //LOGI("Creating Output Buffer for BB"); - const auto& outputNamesOpt = snpe->getOutputTensorNames(); - if (!outputNamesOpt) throw std::runtime_error("Error obtaining output tensor names"); - const zdl::DlSystem::StringList& outputNames = *outputNamesOpt; - - // create SNPE user buffers for each application storage buffer - for (const char *name : outputNames) { - LOGI("Creating output buffer %s", name); - createUserBuffer(outputMap, applicationBuffers, snpeUserBackedBuffers, snpe, name, isTfNBuffer, bitWidth); - } - -} -/* - * Create Input Buffer Map for BB - */ -void createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe, - bool isTfNBuffer, - int bitWidth) { - //LOGI("Creating Input Buffer for BB"); - const auto &inputNamesOpt = snpe->getInputTensorNames(); - if (!inputNamesOpt) throw std::runtime_error("Error obtaining input tensor names"); - const zdl::DlSystem::StringList &inputNames = *inputNamesOpt; - assert(inputNames.size() > 0); - - // create SNPE user buffers for each application storage buffer - for (const char *name: inputNames) { - LOGI("Creating Input Buffer = %s", name); - createUserBuffer(inputMap, applicationBuffers, snpeUserBackedBuffers, snpe, name, - isTfNBuffer, bitWidth); - } -} - -//Preprocessing and loading in application Input Buffer for BB -bool loadInputUserBuffer(std::unordered_map>& applicationBuffers, - std::unique_ptr& snpe, - cv::Mat &img, - zdl::DlSystem::UserBufferMap& inputMap, - int bitWidth, Model *modelobj) { - - // get input tensor names of the network that need to be populated - const auto &inputNamesOpt = snpe->getInputTensorNames(); - if (!inputNamesOpt) throw std::runtime_error("Error obtaining input 
tensor names"); - const zdl::DlSystem::StringList &inputNames = *inputNamesOpt; - assert(inputNames.size() > 0); - - if (inputNames.size()) LOGI("Preprocessing and loading in application Input Buffer for BB"); - - - for (size_t j = 0; j < inputNames.size(); j++) { - const char *name = inputNames.at(j); - LOGI("Filling %s buffer ", name); - - if(bitWidth == 8 || bitWidth == 16) { - LOGE("bitwidth 8 and 16 are NOT DEFINED"); - return false; - } else { - - std::vector dims; - auto bufferAttributesOpt = snpe->getInputOutputBufferAttributes(name); - if (!bufferAttributesOpt) throw std::runtime_error(std::string("Error obtaining attributes for input tensor ") + name); - - const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); - int num_dims = bufferShape.rank(); - for(int i=0;ipreprocess(applicationBuffers.at(name),img, dims); //functions loads data in applicationBuffer - - } - } - return true; -} diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/objectdetectionYoloNas.cpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/objectdetectionYoloNas.cpp deleted file mode 100644 index c53cfa54..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/objectdetectionYoloNas.cpp +++ /dev/null @@ -1,195 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -using namespace cv; -#include -#include -#include -#include -#include - -#include "hpp/inference.h" -#include "hpp/Util.hpp" - -#include "zdl/SNPE/SNPE.hpp" -#include "zdl/SNPE/SNPEFactory.hpp" - -#include "YOLONAS_Model.h" -#include "SSDMobileNetV2_Model.h" -#include "YOLO_X_Model.h" - -Model *modelobj; - -extern "C" JNIEXPORT jstring JNICALL -Java_com_qcom_aistack_1objdetect_SNPEHelper_queryRuntimes( - JNIEnv* env, - jobject /* this */, - jstring native_dir_path) { - const char *cstr = env->GetStringUTFChars(native_dir_path, nullptr); - env->ReleaseStringUTFChars(native_dir_path, cstr); - - std::string runT_Status; - std::string nativeLibPath = std::string(cstr); - -// runT_Status += "\nLibs Path : " + nativeLibPath + "\n"; - - if (!SetAdspLibraryPath(nativeLibPath)) { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "Failed to set ADSP Library Path\n"); - - runT_Status += "\nFailed to set ADSP Library Path\nTerminating"; - return env->NewStringUTF(runT_Status.c_str()); - } - - // ====================================================================================== // - runT_Status = "Querying Runtimes : \n\n"; - // DSP unsignedPD check - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(zdl::DlSystem::Runtime_t::DSP,zdl::DlSystem::RuntimeCheckOption_t::UNSIGNEDPD_CHECK)) { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "UnsignedPD DSP runtime : Absent\n"); - runT_Status += "UnsignedPD DSP runtime : Absent\n"; - } - else { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "UnsignedPD DSP runtime : Present\n"); - runT_Status += "UnsignedPD DSP runtime : Present\n"; - } - // DSP signedPD check - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(zdl::DlSystem::Runtime_t::DSP)) { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "DSP runtime : Absent\n"); - runT_Status += "DSP runtime : Absent\n"; - } - else { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", 
"DSP runtime : Present\n"); - runT_Status += "DSP runtime : Present\n"; - } - // GPU check - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(zdl::DlSystem::Runtime_t::GPU)) { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "GPU runtime : Absent\n"); - runT_Status += "GPU runtime : Absent\n"; - } - else { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "GPU runtime : Present\n"); - runT_Status += "GPU runtime : Present\n"; - } - // CPU check - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(zdl::DlSystem::Runtime_t::CPU)) { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "CPU runtime : Absent\n"); - runT_Status += "CPU runtime : Absent\n"; - } - else { - __android_log_print(ANDROID_LOG_INFO, "SNPE ", "CPU runtime : Present\n"); - runT_Status += "CPU runtime : Present\n"; - } - - return env->NewStringUTF(runT_Status.c_str()); -} - - - -//initializing network -extern "C" -JNIEXPORT jstring JNICALL -Java_com_qcom_aistack_1objdetect_SNPEHelper_initSNPE(JNIEnv *env, jobject thiz, jobject asset_manager, jchar runtime, jstring jdlc_name) { - LOGI("Reading SNPE DLC ..."); - std::string result; - - //AAssetManager* mgr = AAssetManager_fromJava(env, asset_manager); - //AAsset* asset_BB = AAssetManager_open(mgr, "Quant_yoloNas_s_320.dlc", AASSET_MODE_UNKNOWN); - - const char *cstr = env->GetStringUTFChars(jdlc_name, 0); - AAssetManager* mgr = AAssetManager_fromJava(env, asset_manager); - AAsset* asset_BB = AAssetManager_open(mgr, cstr, AASSET_MODE_UNKNOWN); - - if(strcmp(cstr,"Quant_yoloNas_s_320.dlc")==0) { - LOGI("Quant_yoloNas_s_320 dlc"); - } - - modelobj= new YOLONAS_Model(); - - //Changing PrePost for different models - if (strcmp(cstr,"ssd_mobilenetV2_without_ABP-NMS_Q.dlc")==0){ - LOGI("ssd_mobilenetV2_without_ABP-NMS_Q dlc"); - modelobj = new SSDMobileNetV2_Model(); - modelobj->msg(); - } - else if(strcmp(cstr,"yolox_x_212_Q.dlc")==0){ - LOGI("YOLO_X dlc"); - modelobj = new YOLO_X_Model(); - modelobj->msg(); - } - - env->ReleaseStringUTFChars(jdlc_name, cstr); - - - if (NULL == asset_BB) { - LOGE("Failed to load ASSET, needed to load DLC\n"); - result = "Failed to load ASSET, needed to load DLC\n"; - return env->NewStringUTF(result.c_str()); - } - - long dlc_size_BB = AAsset_getLength(asset_BB); - LOGI("DLC BB Size = %ld MB\n", dlc_size_BB / (1024*1024)); - result += "DLC BB Size = " + std::to_string(dlc_size_BB); - char* dlc_buffer_BB = (char*) malloc(sizeof(char) * dlc_size_BB); - AAsset_read(asset_BB, dlc_buffer_BB, dlc_size_BB); - - result += "\n\nBuilding Models DLC Network:\n"; - result += build_network_BB(reinterpret_cast(dlc_buffer_BB), dlc_size_BB,runtime, modelobj->model_name); - - return env->NewStringUTF(result.c_str()); -} - - -//inference -extern "C" -JNIEXPORT jint JNICALL -Java_com_qcom_aistack_1objdetect_SNPEHelper_inferSNPE(JNIEnv *env, jobject thiz, jlong inputMat, jint actual_width, jint actual_height, - jobjectArray jboxcoords, jobjectArray objnames) { - - LOGI("infer SNPE S"); - - cv::Mat &img = *(cv::Mat*) inputMat; - std::string bs; - int numberofobj = 0; - std::vector> BB_coords; - std::vector BB_names; - - bool status = executeDLC(img,actual_width, actual_height, numberofobj, BB_coords, BB_names, modelobj); - - if(numberofobj ==0) - { - LOGI("No object detected"); - } - else if (numberofobj == -1){ - LOGE("ERROR in loading model properly"); - return -1; - } - else if(status == false) - { - LOGE("fatal ERROR"); - return 0; - } - else { - //LOGI("number of detected objects: %d",numberofobj); - - for (int z = 0; z < numberofobj; z++){ - jfloatArray boxcoords = 
(jfloatArray) env->GetObjectArrayElement(jboxcoords, z); - env->SetObjectArrayElement(objnames, z,env->NewStringUTF(BB_names[z].data())); - - - float tempbox[5]; //4 coords and 1 processing time - for(int k=0;k<5;k++) - tempbox[k]=BB_coords[z][k]; - env->SetFloatArrayRegion(boxcoords,0,5,tempbox); - } - //LOGI("executeDLC_returned successfully"); - } - //LOGD("infer SNPE E"); - return numberofobj; - -} \ No newline at end of file diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/ic_launcher-playstore.png b/ai-solutions/android/03-ObjectDetection/app/src/main/ic_launcher-playstore.png deleted file mode 100644 index 58cdfd91..00000000 Binary files a/ai-solutions/android/03-ObjectDetection/app/src/main/ic_launcher-playstore.png and /dev/null differ diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/java/com/qcom/aistack_objdetect/CameraFragment.java b/ai-solutions/android/03-ObjectDetection/app/src/main/java/com/qcom/aistack_objdetect/CameraFragment.java deleted file mode 100644 index 4534ddf7..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/java/com/qcom/aistack_objdetect/CameraFragment.java +++ /dev/null @@ -1,1131 +0,0 @@ -// -*- mode: java -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -package com.qcom.aistack_objdetect; - -import android.Manifest; -import android.app.ProgressDialog; -import android.content.Context; -import android.content.pm.PackageManager; -import android.graphics.Bitmap; -import android.graphics.Matrix; -import android.graphics.SurfaceTexture; -import android.hardware.camera2.CameraAccessException; -import android.hardware.camera2.CameraCaptureSession; -import android.hardware.camera2.CameraDevice; -import android.hardware.camera2.CameraManager; -import android.hardware.camera2.CaptureFailure; -import android.hardware.camera2.CaptureRequest; -import android.hardware.camera2.CaptureResult; -import android.hardware.camera2.TotalCaptureResult; -import android.os.Bundle; -import android.os.Handler; -import android.os.HandlerThread; -import android.support.annotation.NonNull; -import android.support.v4.app.Fragment; -import android.support.v4.content.ContextCompat; -import android.view.LayoutInflater; -import android.view.Surface; -import android.view.TextureView; -import android.view.View; -import android.view.ViewGroup; - -import java.util.ArrayList; -import java.util.Arrays; - - -import android.app.Activity; -import android.app.AlertDialog; -import android.app.Dialog; -import android.content.DialogInterface; -import android.graphics.ImageFormat; -import android.graphics.Point; -import android.graphics.RectF; -import android.hardware.camera2.CameraCharacteristics; -import android.hardware.camera2.CameraMetadata; -import android.hardware.camera2.params.StreamConfigurationMap; -import android.media.Image; -import android.media.ImageReader; -import android.support.v4.app.DialogFragment; -import android.util.Size; -import android.util.SparseIntArray; -import android.widget.Toast; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.Comparator; -import java.util.List; -import java.util.concurrent.Semaphore; -import 
java.util.concurrent.TimeUnit; - - - -public class CameraFragment extends Fragment -{ - - /** - * Conversion from screen rotation to JPEG orientation. - */ - private SNPEHelper mSnpeHelper; - public long tic = 0,tic2=0; - private boolean mNetworkLoaded; - - private ProgressDialog dialog_model_error; - - private FragmentRender mFragmentRender; - - public int fps=0,frame_count =-1; - public static char runtime_var; - - public static String dlc_name_var; - - private String mCameraId; - private static final SparseIntArray ORIENTATIONS = new SparseIntArray(); - private static final int REQUEST_CAMERA_PERMISSION = 1; - private static final String FRAGMENT_DIALOG = "dialog"; - - static { - ORIENTATIONS.append(Surface.ROTATION_0, 90); - ORIENTATIONS.append(Surface.ROTATION_90, 0); - ORIENTATIONS.append(Surface.ROTATION_180, 270); - ORIENTATIONS.append(Surface.ROTATION_270, 180); - } - - - /** - * Camera state: Showing camera preview. - */ - private static final int STATE_PREVIEW = 0; - - /** - * Camera state: Waiting for the focus to be locked. - */ - private static final int STATE_WAITING_LOCK = 1; - - /** - * Camera state: Waiting for the exposure to be precapture state. - */ - private static final int STATE_WAITING_PRECAPTURE = 2; - - /** - * Camera state: Waiting for the exposure state to be something other than precapture. - */ - private static final int STATE_WAITING_NON_PRECAPTURE = 3; - - /** - * Camera state: Picture was taken. - */ - private static final int STATE_PICTURE_TAKEN = 4; - - /** - * Max preview width that is guaranteed by Camera2 API - */ - private static final int MAX_PREVIEW_WIDTH = 1920; - - /** - * Max preview height that is guaranteed by Camera2 API - */ - private static final int MAX_PREVIEW_HEIGHT = 1080; - - private TextureView mTextureView; - /** - * {@link TextureView.SurfaceTextureListener} handles several lifecycle events on a - * {@link TextureView}. - - */ - private final TextureView.SurfaceTextureListener mSurfaceTextureListener - = new TextureView.SurfaceTextureListener() { - - @Override - public void onSurfaceTextureAvailable(SurfaceTexture texture, int width, int height) { -// System.out.println("Textureiew widthxheight: "+mTextureView.getWidth()+ mTextureView.getHeight()); - openCamera(width, height); - } - - @Override - public void onSurfaceTextureSizeChanged(SurfaceTexture texture, int width, int height) { - configureTransform(width, height); - } - - @Override - public boolean onSurfaceTextureDestroyed(SurfaceTexture texture) { - return true; - } - - @Override - public void onSurfaceTextureUpdated(SurfaceTexture texture) { - } - - }; - - /** - * A {@link CameraCaptureSession } for camera preview. - */ - private CameraCaptureSession mCaptureSession; - - /** - * A reference to the opened {@link CameraDevice}. - */ - private CameraDevice mCameraDevice; - - /** - * The {@link android.util.Size} of camera preview. - */ - private Size mPreviewSize; - - /** - * {@link CameraDevice.StateCallback} is called when {@link CameraDevice} changes its state. - */ - private final CameraDevice.StateCallback mStateCallback = new CameraDevice.StateCallback() { - - @Override - public void onOpened(@NonNull CameraDevice cameraDevice) { - // This method is called when the camera is opened. We start camera preview here. 
- mCameraOpenCloseLock.release(); - mCameraDevice = cameraDevice; - createCameraPreviewSession(); - } - - @Override - public void onDisconnected(@NonNull CameraDevice cameraDevice) { - mCameraOpenCloseLock.release(); - cameraDevice.close(); - mCameraDevice = null; - } - - @Override - public void onError(@NonNull CameraDevice cameraDevice, int error) { - mCameraOpenCloseLock.release(); - cameraDevice.close(); - mCameraDevice = null; - Activity activity = getActivity(); - if (null != activity) { - activity.finish(); - } - } - - }; - - public static CameraFragment create(Bundle SavedInstanceState) { - System.out.println("==>CameraFragment"); - final CameraFragment fragment = new CameraFragment(); - runtime_var = SavedInstanceState.getChar("key"); - dlc_name_var = SavedInstanceState.getString("selected_dlc_name"); - System.out.println("CameraFragment class got data runtime_var="+runtime_var); - System.out.println("CameraFragment class got data selected_dlc_name="+dlc_name_var); - return fragment; - } - /** - * An additional thread for running tasks that shouldn't block the UI. - */ - private HandlerThread mBackgroundThread; - - /** - * A {@link Handler} for running tasks in the background. - */ - private Handler mBackgroundHandler; - - /** - * An {@link ImageReader} that handles still image capture. - */ - private ImageReader mImageReader; - - /** - * This is the output file for our picture. - */ - private File mFile; - - /** - * This a callback object for the {@link ImageReader}. "onImageAvailable" will be called when a - * still image is ready to be saved. - */ - private final ImageReader.OnImageAvailableListener mOnImageAvailableListener - = new ImageReader.OnImageAvailableListener() { - - @Override - public void onImageAvailable(ImageReader reader) { - mBackgroundHandler.post(new ImageSaver(reader.acquireNextImage(), mFile)); - } - - }; - - /** - * {@link CaptureRequest.Builder} for the camera preview - */ - private CaptureRequest.Builder mPreviewRequestBuilder; - - /** - * {@link CaptureRequest} generated by {@link #mPreviewRequestBuilder} - */ - private CaptureRequest mPreviewRequest; - - /** - * The current state of camera state for taking pictures. - * - * @see #mCaptureCallback - */ - private int mState = STATE_PREVIEW; - - /** - * A {@link Semaphore} to prevent the app from exiting before closing the camera. - */ - private Semaphore mCameraOpenCloseLock = new Semaphore(1); - - /** - * Whether the current camera device supports Flash or not. - */ - private boolean mFlashSupported; - - /** - * Orientation of the camera sensor - */ - private int mSensorOrientation; - /** - * A {@link CameraCaptureSession.CaptureCallback} that handles events related to JPEG capture. - */ - private CameraCaptureSession.CaptureCallback mCaptureCallback - = new CameraCaptureSession.CaptureCallback() { - - private void process(CaptureResult result) { - switch (mState) { - case STATE_PREVIEW: { - // We have nothing to do when the camera preview is working normally. 
- break; - } - case STATE_WAITING_LOCK: { - Integer afState = result.get(CaptureResult.CONTROL_AF_STATE); - if (afState == null) { - captureStillPicture(); - } else if (CaptureResult.CONTROL_AF_STATE_FOCUSED_LOCKED == afState || - CaptureResult.CONTROL_AF_STATE_NOT_FOCUSED_LOCKED == afState) { - // CONTROL_AE_STATE can be null on some devices - Integer aeState = result.get(CaptureResult.CONTROL_AE_STATE); - if (aeState == null || - aeState == CaptureResult.CONTROL_AE_STATE_CONVERGED) { - mState = STATE_PICTURE_TAKEN; - captureStillPicture(); - } else { - runPrecaptureSequence(); - } - } - break; - } - case STATE_WAITING_PRECAPTURE: { - // CONTROL_AE_STATE can be null on some devices - Integer aeState = result.get(CaptureResult.CONTROL_AE_STATE); - if (aeState == null || - aeState == CaptureResult.CONTROL_AE_STATE_PRECAPTURE || - aeState == CaptureRequest.CONTROL_AE_STATE_FLASH_REQUIRED) { - mState = STATE_WAITING_NON_PRECAPTURE; - } - break; - } - case STATE_WAITING_NON_PRECAPTURE: { - // CONTROL_AE_STATE can be null on some devices - Integer aeState = result.get(CaptureResult.CONTROL_AE_STATE); - if (aeState == null || aeState != CaptureResult.CONTROL_AE_STATE_PRECAPTURE) { - mState = STATE_PICTURE_TAKEN; - captureStillPicture(); - } - break; - } - } - } - - @Override - public void onCaptureProgressed(@NonNull CameraCaptureSession session, - @NonNull CaptureRequest request, - @NonNull CaptureResult partialResult) { - process(partialResult); - } - - @Override - public void onCaptureCompleted(@NonNull CameraCaptureSession session, - @NonNull CaptureRequest request, - @NonNull TotalCaptureResult result) { - process(result); - } - - }; - - - /** - * Shows a {@link Toast} on the UI thread. - * - * @param text The message to show - */ - private void showToast(final String text) { - final Activity activity = getActivity(); - if (activity != null) { - activity.runOnUiThread(new Runnable() { - @Override - public void run() { - Toast.makeText(activity, text, Toast.LENGTH_SHORT).show(); - } - }); - } - } - - /** - * Given {@code choices} of {@code Size}s supported by a camera, choose the smallest one that - * is at least as large as the respective texture view size, and that is at most as large as the - * respective max size, and whose aspect ratio matches with the specified value. If such size - * doesn't exist, choose the largest one that is at most as large as the respective max size, - * and whose aspect ratio matches with the specified value. 
- * - * @param choices The list of sizes that the camera supports for the intended output - * class - * @param textureViewWidth The width of the texture view relative to sensor coordinate - * @param textureViewHeight The height of the texture view relative to sensor coordinate - * @param maxWidth The maximum width that can be chosen - * @param maxHeight The maximum height that can be chosen - * @param aspectRatio The aspect ratio - * @return The optimal {@code Size}, or an arbitrary one if none were big enough - */ - private static Size chooseOptimalSize(Size[] choices, int textureViewWidth, - int textureViewHeight, int maxWidth, int maxHeight, Size aspectRatio) { - - // Collect the supported resolutions that are at least as big as the preview Surface - List bigEnough = new ArrayList<>(); - // Collect the supported resolutions that are smaller than the preview Surface - List notBigEnough = new ArrayList<>(); - int w = aspectRatio.getWidth(); - int h = aspectRatio.getHeight(); - for (Size option : choices) { - if (option.getWidth() <= maxWidth && option.getHeight() <= maxHeight && - option.getHeight() == option.getWidth() * h / w) { - if (option.getWidth() >= textureViewWidth && - option.getHeight() >= textureViewHeight) { - bigEnough.add(option); - } else { - notBigEnough.add(option); - } - } - } - - // Pick the smallest of those big enough. If there is no one big enough, pick the - // largest of those not big enough. - if (bigEnough.size() > 0) { - return Collections.min(bigEnough, new CompareSizesByArea()); - } else if (notBigEnough.size() > 0) { - return Collections.max(notBigEnough, new CompareSizesByArea()); - } else { - return choices[0]; - } - } - - public static CameraFragment newInstance() { - return new CameraFragment(); - } - - @Override - public View onCreateView(LayoutInflater inflater, ViewGroup container, - Bundle savedInstanceState) { - return inflater.inflate(R.layout.fragment_camera, container, false); - } - - @Override - public void onViewCreated(final View view, Bundle savedInstanceState) { - super.onViewCreated(view, savedInstanceState); - mTextureView = view.findViewById(R.id.surface); - mTextureView.setSurfaceTextureListener(mSurfaceTextureListener); - mFragmentRender = view.findViewById(R.id.fragmentRender); - - } - - @Override - public void onResume() { - super.onResume(); - - startBackgroundThread(); - - // When the screen is turned off and turned back on, the SurfaceTexture is already - // available, and "onSurfaceTextureAvailable" will not be called. In that case, we can open - // a camera and start preview from here (otherwise, we wait until the surface is ready in - // the SurfaceTextureListener). 
- if (mTextureView.isAvailable()) { - openCamera(mTextureView.getWidth(), mTextureView.getHeight()); - } else { - mTextureView.setSurfaceTextureListener(mSurfaceTextureListener); - } - - ensureNetCreated(); - - } - - @Override - public void onPause() { - closeCamera(); - stopBackgroundThread(); - super.onPause(); - } - - @Override - public void onDestroy() { - stopBackgroundThread(); - closeCamera(); - super.onDestroy(); - } - - private void requestCameraPermission() { - if (shouldShowRequestPermissionRationale(Manifest.permission.CAMERA)) { - new ConfirmationDialog().show(getChildFragmentManager(), FRAGMENT_DIALOG); - } else { - requestPermissions(new String[]{Manifest.permission.CAMERA}, REQUEST_CAMERA_PERMISSION); - } - } - - @Override - public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, - @NonNull int[] grantResults) { - if (requestCode == REQUEST_CAMERA_PERMISSION) { - if (grantResults.length != 1 || grantResults[0] != PackageManager.PERMISSION_GRANTED) { - ErrorDialog.newInstance(getString(R.string.request_permission)) - .show(getChildFragmentManager(), FRAGMENT_DIALOG); - } - } else { - super.onRequestPermissionsResult(requestCode, permissions, grantResults); - } - } - - - /** - * Sets up member variables related to camera. - * - * @param width The width of available size for camera preview - * @param height The height of available size for camera preview - */ - @SuppressWarnings("SuspiciousNameCombination") - private void setUpCameraOutputs(int width, int height) { - Activity activity = getActivity(); - CameraManager mCameraManager = (CameraManager) activity.getSystemService(Context.CAMERA_SERVICE); - try { - for (String cameraId : mCameraManager.getCameraIdList()) { - CameraCharacteristics characteristics - = mCameraManager.getCameraCharacteristics(cameraId); - - // We don't use a front facing camera in this sample. - Integer facing = characteristics.get(CameraCharacteristics.LENS_FACING); - if (facing != null && facing == CameraCharacteristics.LENS_FACING_FRONT) { - continue; - } - - StreamConfigurationMap map = characteristics.get( - CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP); - if (map == null) { - continue; - } - - // For still image captures, we use the largest available size. - Size largest = Collections.max( - Arrays.asList(map.getOutputSizes(ImageFormat.JPEG)), - new CompareSizesByArea()); - mImageReader = ImageReader.newInstance(largest.getWidth(), largest.getHeight(), - ImageFormat.JPEG, /*maxImages*/2); - mImageReader.setOnImageAvailableListener( - mOnImageAvailableListener, mBackgroundHandler); - - // Find out if we need to swap dimension to get the preview size relative to sensor - // coordinate. 
- int displayRotation = activity.getWindowManager().getDefaultDisplay().getRotation(); - //noinspection ConstantConditions - mSensorOrientation = characteristics.get(CameraCharacteristics.SENSOR_ORIENTATION); - boolean swappedDimensions = false; - switch (displayRotation) { - case Surface.ROTATION_0: - case Surface.ROTATION_180: - if (mSensorOrientation == 90 || mSensorOrientation == 270) { - swappedDimensions = true; - } - break; - case Surface.ROTATION_90: - case Surface.ROTATION_270: - if (mSensorOrientation == 0 || mSensorOrientation == 180) { - swappedDimensions = true; - } - break; - default: - System.out.println("Display rotation is invalid: " + displayRotation); - } - - Point displaySize = new Point(); - activity.getWindowManager().getDefaultDisplay().getSize(displaySize); - int rotatedPreviewWidth = width; - int rotatedPreviewHeight = height; - int maxPreviewWidth = displaySize.x; - int maxPreviewHeight = displaySize.y; - - if (swappedDimensions) { - rotatedPreviewWidth = height; - rotatedPreviewHeight = width; - maxPreviewWidth = displaySize.y; - maxPreviewHeight = displaySize.x; - } - - if (maxPreviewWidth > MAX_PREVIEW_WIDTH) { - maxPreviewWidth = MAX_PREVIEW_WIDTH; - } - - if (maxPreviewHeight > MAX_PREVIEW_HEIGHT) { - maxPreviewHeight = MAX_PREVIEW_HEIGHT; - } - - // Danger, W.R.! Attempting to use too large a preview size could exceed the camera - // bus' bandwidth limitation, resulting in gorgeous previews but the storage of - // garbage capture data. - mPreviewSize = chooseOptimalSize(map.getOutputSizes(SurfaceTexture.class), - rotatedPreviewWidth, rotatedPreviewHeight, maxPreviewWidth, - maxPreviewHeight, largest); - - // Check if the flash is supported. - Boolean available = characteristics.get(CameraCharacteristics.FLASH_INFO_AVAILABLE); - mFlashSupported = available == null ? false : available; - - mCameraId = cameraId; - return; - } - } catch (CameraAccessException e) { - e.printStackTrace(); - } catch (NullPointerException e) { - // Currently an NPE is thrown when the Camera2API is used but not supported on the - // device this code runs. - ErrorDialog.newInstance(getString(R.string.camera_error)) - .show(getChildFragmentManager(), FRAGMENT_DIALOG); - } - } - - /** - * Opens the camera specified by {@link CameraFragment#mCameraId}. - */ - private void openCamera(int width, int height) { - if (ContextCompat.checkSelfPermission(getActivity(), Manifest.permission.CAMERA) - != PackageManager.PERMISSION_GRANTED) { - requestCameraPermission(); - return; - } - setUpCameraOutputs(width, height); - configureTransform(width, height); - Activity activity = getActivity(); - CameraManager manager = (CameraManager) activity.getSystemService(Context.CAMERA_SERVICE); - try { - if (!mCameraOpenCloseLock.tryAcquire(2500, TimeUnit.MILLISECONDS)) { - throw new RuntimeException("Time out waiting to lock camera opening."); - } - manager.openCamera(mCameraId, mStateCallback, mBackgroundHandler); - } catch (CameraAccessException e) { - e.printStackTrace(); - } catch (InterruptedException e) { - throw new RuntimeException("Interrupted while trying to lock camera opening.", e); - } - } - - /** - * Closes the current {@link CameraDevice}. 
- */ - private void closeCamera() { - try { - mCameraOpenCloseLock.acquire(); - if (null != mCaptureSession) { - mCaptureSession.close(); - mCaptureSession = null; - } - if (null != mCameraDevice) { - mCameraDevice.close(); - mCameraDevice = null; - } - if (null != mImageReader) { - mImageReader.close(); - mImageReader = null; - } - } catch (InterruptedException e) { - throw new RuntimeException("Interrupted while trying to lock camera closing.", e); - } finally { - mCameraOpenCloseLock.release(); - } - } - - /** - * Starts a background thread and its {@link Handler}. - */ - private void startBackgroundThread() { - mBackgroundThread = new HandlerThread("CameraBackground"); - mBackgroundThread.start(); - mBackgroundHandler = new Handler(mBackgroundThread.getLooper()); - } - - /** - * Stops the background thread and its {@link Handler}. - */ - private void stopBackgroundThread() { - - if (mBackgroundThread!=null) { - - mBackgroundThread.quitSafely(); - - mBackgroundThread = null; - mBackgroundHandler = null; - - } - } - - /** - * Creates a new {@link CameraCaptureSession} for camera preview. - */ - private void createCameraPreviewSession() { - try { - SurfaceTexture texture = mTextureView.getSurfaceTexture(); - assert texture != null; - - // We configure the size of default buffer to be the size of camera preview we want. - texture.setDefaultBufferSize(mPreviewSize.getWidth(), mPreviewSize.getHeight()); - - // This is the output Surface we need to start preview. - Surface surface = new Surface(texture); - - // We set up a CaptureRequest.Builder with the output Surface. - mPreviewRequestBuilder - = mCameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW); - mPreviewRequestBuilder.addTarget(surface); - - //Shubham - try { - mCameraDevice.createCaptureSession(Arrays.asList(surface), new CameraCapture(), - null); - } catch (CameraAccessException e) { - e.printStackTrace(); - } - } catch (CameraAccessException e) { - e.printStackTrace(); - } - } - - /** - * Configures the necessary {@link android.graphics.Matrix} transformation to `mTextureView`. - * This method should be called after the camera preview size is determined in - * setUpCameraOutputs and also the size of `mTextureView` is fixed. - * - * @param viewWidth The width of `mTextureView` - * @param viewHeight The height of `mTextureView` - */ - private void configureTransform(int viewWidth, int viewHeight) { - Activity activity = getActivity(); - if (null == mTextureView || null == mPreviewSize || null == activity) { - return; - } - int rotation = activity.getWindowManager().getDefaultDisplay().getRotation(); - Matrix matrix = new Matrix(); - RectF viewRect = new RectF(0, 0, viewWidth, viewHeight); - RectF bufferRect = new RectF(0, 0, mPreviewSize.getHeight(), mPreviewSize.getWidth()); - float centerX = viewRect.centerX(); - float centerY = viewRect.centerY(); - if (Surface.ROTATION_90 == rotation || Surface.ROTATION_270 == rotation) { - bufferRect.offset(centerX - bufferRect.centerX(), centerY - bufferRect.centerY()); - matrix.setRectToRect(viewRect, bufferRect, Matrix.ScaleToFit.FILL); - float scale = Math.max( - (float) viewHeight / mPreviewSize.getHeight(), - (float) viewWidth / mPreviewSize.getWidth()); - matrix.postScale(scale, scale, centerX, centerY); - matrix.postRotate(90 * (rotation - 2), centerX, centerY); - } else if (Surface.ROTATION_180 == rotation) { - matrix.postRotate(180, centerX, centerY); - } - mTextureView.setTransform(matrix); - } - - /** - * Lock the focus as the first step for a still image capture. 
- */ - private void lockFocus() { - try { - // This is how to tell the camera to lock focus. - mPreviewRequestBuilder.set(CaptureRequest.CONTROL_AF_TRIGGER, - CameraMetadata.CONTROL_AF_TRIGGER_START); - // Tell #mCaptureCallback to wait for the lock. - mState = STATE_WAITING_LOCK; - mCaptureSession.capture(mPreviewRequestBuilder.build(), mCaptureCallback, - mBackgroundHandler); - } catch (CameraAccessException e) { - e.printStackTrace(); - } - } - - /** - * Run the precapture sequence for capturing a still image. This method should be called when - * we get a response in {@link #mCaptureCallback} from {@link #lockFocus()}. - */ - private void runPrecaptureSequence() { - try { - // This is how to tell the camera to trigger. - mPreviewRequestBuilder.set(CaptureRequest.CONTROL_AE_PRECAPTURE_TRIGGER, - CaptureRequest.CONTROL_AE_PRECAPTURE_TRIGGER_START); - // Tell #mCaptureCallback to wait for the precapture sequence to be set. - mState = STATE_WAITING_PRECAPTURE; - mCaptureSession.capture(mPreviewRequestBuilder.build(), mCaptureCallback, - mBackgroundHandler); - } catch (CameraAccessException e) { - e.printStackTrace(); - } - } - - /** - * Capture a still picture. This method should be called when we get a response in - * {@link #mCaptureCallback} from both {@link #lockFocus()}. - */ - private void captureStillPicture() { - try { - final Activity activity = getActivity(); - if (null == activity || null == mCameraDevice) { - return; - } - // This is the CaptureRequest.Builder that we use to take a picture. - final CaptureRequest.Builder captureBuilder = - mCameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_STILL_CAPTURE); - captureBuilder.addTarget(mImageReader.getSurface()); - - // Use the same AE and AF modes as the preview. - captureBuilder.set(CaptureRequest.CONTROL_AF_MODE, - CaptureRequest.CONTROL_AF_MODE_CONTINUOUS_PICTURE); - setAutoFlash(captureBuilder); - - // Orientation - int rotation = activity.getWindowManager().getDefaultDisplay().getRotation(); - captureBuilder.set(CaptureRequest.JPEG_ORIENTATION, getOrientation(rotation)); - - CameraCaptureSession.CaptureCallback CaptureCallback - = new CameraCaptureSession.CaptureCallback() { - - @Override - public void onCaptureCompleted(@NonNull CameraCaptureSession session, - @NonNull CaptureRequest request, - @NonNull TotalCaptureResult result) { - showToast("Saved: " + mFile); - unlockFocus(); - } - }; - - mCaptureSession.stopRepeating(); - mCaptureSession.abortCaptures(); - mCaptureSession.capture(captureBuilder.build(), CaptureCallback, null); - } catch (CameraAccessException e) { - e.printStackTrace(); - } - } - - /** - * Retrieves the JPEG orientation from the specified screen rotation. - * - * @param rotation The screen rotation. - * @return The JPEG orientation (one of 0, 90, 270, and 360) - */ - private int getOrientation(int rotation) { - // Sensor orientation is 90 for most devices, or 270 for some devices (eg. Nexus 5X) - // We have to take that into account and rotate JPEG properly. - // For devices with orientation of 90, we simply return our mapping from ORIENTATIONS. - // For devices with orientation of 270, we need to rotate the JPEG 180 degrees. - return (ORIENTATIONS.get(rotation) + mSensorOrientation + 270) % 360; - } - - /** - * Unlock the focus. This method should be called when still image capture sequence is - * finished. 
- */ - private void unlockFocus() { - try { - // Reset the auto-focus trigger - mPreviewRequestBuilder.set(CaptureRequest.CONTROL_AF_TRIGGER, - CameraMetadata.CONTROL_AF_TRIGGER_CANCEL); - setAutoFlash(mPreviewRequestBuilder); - mCaptureSession.capture(mPreviewRequestBuilder.build(), mCaptureCallback, - mBackgroundHandler); - // After this, the camera will go back to the normal state of preview. - mState = STATE_PREVIEW; - mCaptureSession.setRepeatingRequest(mPreviewRequest, mCaptureCallback, - mBackgroundHandler); - } catch (CameraAccessException e) { - e.printStackTrace(); - } - } - - private void setAutoFlash(CaptureRequest.Builder requestBuilder) { - if (mFlashSupported) { - requestBuilder.set(CaptureRequest.CONTROL_AE_MODE, - CaptureRequest.CONTROL_AE_MODE_ON_AUTO_FLASH); - } - } - - /** - * Saves a JPEG {@link Image} into the specified {@link File}. - */ - private static class ImageSaver implements Runnable { - - /** - * The JPEG image - */ - private final Image mImage; - /** - * The file we save the image into. - */ - private final File mFile; - - ImageSaver(Image image, File file) { - mImage = image; - mFile = file; - } - - @Override - public void run() { - ByteBuffer buffer = mImage.getPlanes()[0].getBuffer(); - byte[] bytes = new byte[buffer.remaining()]; - buffer.get(bytes); - FileOutputStream output = null; - try { - output = new FileOutputStream(mFile); - output.write(bytes); - } catch (IOException e) { - e.printStackTrace(); - } finally { - mImage.close(); - if (null != output) { - try { - output.close(); - } catch (IOException e) { - e.printStackTrace(); - } - } - } - } - - } - - /** - * Compares two {@code Size}s based on their areas. - */ - static class CompareSizesByArea implements Comparator { - - @Override - public int compare(Size lhs, Size rhs) { - // We cast here to ensure the multiplications won't overflow - return Long.signum((long) lhs.getWidth() * lhs.getHeight() - - (long) rhs.getWidth() * rhs.getHeight()); - } - - } - - /** - * Shows an error message dialog. - */ - public static class ErrorDialog extends DialogFragment { - - private static final String ARG_MESSAGE = "message"; - - public static ErrorDialog newInstance(String message) { - ErrorDialog dialog = new ErrorDialog(); - Bundle args = new Bundle(); - args.putString(ARG_MESSAGE, message); - dialog.setArguments(args); - return dialog; - } - - @NonNull - @Override - public Dialog onCreateDialog(Bundle savedInstanceState) { - final Activity activity = getActivity(); - return new AlertDialog.Builder(activity) - .setMessage(getArguments().getString(ARG_MESSAGE)) - .setPositiveButton(android.R.string.ok, new DialogInterface.OnClickListener() { - @Override - public void onClick(DialogInterface dialogInterface, int i) { - activity.finish(); - } - }) - .create(); - } - - } - - /** - * Shows OK/Cancel confirmation dialog about camera permission. 
- */ - public static class ConfirmationDialog extends DialogFragment { - - @NonNull - @Override - public Dialog onCreateDialog(Bundle savedInstanceState) { - final Fragment parent = getParentFragment(); - return new AlertDialog.Builder(getActivity()) - .setMessage(R.string.request_permission) - .setPositiveButton(android.R.string.ok, new DialogInterface.OnClickListener() { - @Override - public void onClick(DialogInterface dialog, int which) { - parent.requestPermissions(new String[]{Manifest.permission.CAMERA}, - REQUEST_CAMERA_PERMISSION); - } - }) - .setNegativeButton(android.R.string.cancel, - new DialogInterface.OnClickListener() { - @Override - public void onClick(DialogInterface dialog, int which) { - Activity activity = parent.getActivity(); - if (activity != null) { - activity.finish(); - } - } - }) - .create(); - } - } - - public class CameraCapture extends android.hardware.camera2.CameraCaptureSession.StateCallback { - @Override - public void onConfigured(@NonNull android.hardware.camera2.CameraCaptureSession - cameraCaptureSession) { - mCaptureSession = cameraCaptureSession; - - try { - // Auto focus should be continuous for camera preview. - mPreviewRequestBuilder.set(CaptureRequest.CONTROL_AF_MODE, - CaptureRequest.CONTROL_AF_MODE_CONTINUOUS_PICTURE); - // Flash is automatically enabled when necessary. - setAutoFlash(mPreviewRequestBuilder); - // Finally, we start displaying the camera preview. - mPreviewRequest = mPreviewRequestBuilder.build(); - cameraCaptureSession.setRepeatingRequest(mPreviewRequestBuilder.build(), new CameraSession(), - mBackgroundHandler); - } catch (Exception e) { - e.printStackTrace(); - } - } - - @Override - public void onConfigureFailed(@NonNull android.hardware.camera2.CameraCaptureSession - cameraCaptureSession) { - } - - } - - private class CameraSession extends android.hardware.camera2.CameraCaptureSession.CaptureCallback { - - @Override - public void onCaptureCompleted(@NonNull CameraCaptureSession session, @NonNull - CaptureRequest request, @NonNull TotalCaptureResult result) { - - super.onCaptureCompleted(session, request, result); -// int rotation = getActivity().getWindowManager().getDefaultDisplay().getRotation(); - frame_count+=1; - - try { - if (frame_count == 0) { - tic = System.currentTimeMillis(); - } else { - tic2 = System.currentTimeMillis(); - fps = (int) (1000 / (tic2 - tic)); - tic = System.currentTimeMillis(); - } - } - catch (Exception e) { - e.printStackTrace(); - } - - System.out.println("mNetworkLoaded: "+mNetworkLoaded +" runtime_var: "+runtime_var+" dlc_name_var: "+dlc_name_var); - - if (mNetworkLoaded == true ) { - - Bitmap mBitmap = mTextureView.getBitmap(mTextureView.getWidth(),mTextureView.getHeight()); - - ArrayList BBlist = new ArrayList<>(); - System.out.println("calling inference"); - - int infer_result = mSnpeHelper.snpeInference(mBitmap, fps, BBlist); - - //sanjeev - added temp dialogue info box to graceful exit if model not loaded properly - if (infer_result == -1) - { - if (dialog_model_error==null) { - - dialog_model_error = new ProgressDialog(getActivity()); - - getActivity().runOnUiThread(new Runnable() { - - @Override - public void run() { - - if (dialog_model_error!=null) { - try { - dialog_model_error.setMessage(getString(R.string.model_loading_error)); - dialog_model_error.show(); - }catch (Exception e){ - e.printStackTrace(); - Toast.makeText(getContext(),getString(R.string.model_loading_error), Toast.LENGTH_SHORT).show(); - } - } - } - }); - - } - - } - - mFragmentRender.setCoordsList(BBlist); - } - } - 
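(For orientation, the capture callback above boils down to a small per-frame loop: estimate FPS from the inter-frame interval, grab the current preview frame from the TextureView as a Bitmap, run native SNPE inference through SNPEHelper, and hand the resulting boxes to FragmentRender for drawing. What follows is only a condensed Java sketch of that flow, not the code being removed; the helper name onFrameAvailable and the lastFrameMs field are invented for the sketch, while mNetworkLoaded, mTextureView, mSnpeHelper.snpeInference(...) and mFragmentRender.setCoordsList(...) are the names used in the deleted class.)

    // Condensed sketch of the per-frame flow in CameraSession.onCaptureCompleted (simplified error handling).
    private long lastFrameMs = 0;   // hypothetical field standing in for the tic/tic2/frame_count bookkeeping

    private void onFrameAvailable() {   // hypothetical helper; the real code runs inline in onCaptureCompleted
        long now = System.currentTimeMillis();
        int fps = (lastFrameMs == 0) ? 0 : (int) (1000 / Math.max(1, now - lastFrameMs));
        lastFrameMs = now;

        if (!mNetworkLoaded) return;    // model not built yet; skip this frame

        // Grab the current preview frame and run native SNPE inference on it.
        Bitmap frame = mTextureView.getBitmap(mTextureView.getWidth(), mTextureView.getHeight());
        ArrayList<RectangleBox> boxes = new ArrayList<>();
        int detections = mSnpeHelper.snpeInference(frame, fps, boxes);   // returns -1 if the model failed to load

        if (detections >= 0) {
            mFragmentRender.setCoordsList(boxes);   // overlay the boxes on the camera preview
        }
    }

(The deleted code additionally shows a ProgressDialog with the model-loading error string when snpeInference returns -1, rather than silently dropping the frame.)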
- @Override - public void onCaptureFailed(@NonNull CameraCaptureSession session, @NonNull - CaptureRequest request, @NonNull CaptureFailure failure) { - super.onCaptureFailed(session, request, failure); - } - - @Override - public void onCaptureProgressed(@NonNull CameraCaptureSession session, @NonNull - CaptureRequest request, @NonNull CaptureResult partialResult) { - super.onCaptureProgressed(session, request, partialResult); - } - - @Override - public void onCaptureStarted(@NonNull CameraCaptureSession session, @NonNull - CaptureRequest request, long timestamp, long frameNumber) { - super.onCaptureStarted(session, request, timestamp, frameNumber); - - } - - } - - /** - * Method to ensure if neural network is loaded - * - * @return - */ - private boolean ensureNetCreated() { - if (mSnpeHelper == null) { - // load the neural network for object detection with SNPE - mSnpeHelper = new SNPEHelper(getActivity().getApplication()); - - // Show UI dialog till the model is not loaded yet. - ProgressDialog dialog=new ProgressDialog(getActivity()); - dialog.setMessage("Loading Model.."); - dialog.show(); - - //TODO for time being disabling - new Thread() { - public void run() { - mNetworkLoaded = mSnpeHelper.loadingMODELS(runtime_var, dlc_name_var); - //dismiss dialog when the model is loaded - dialog.dismiss(); - } - }.start() ; - - } - return mNetworkLoaded; - } - -} diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/java/com/qcom/aistack_objdetect/FragmentRender.java b/ai-solutions/android/03-ObjectDetection/app/src/main/java/com/qcom/aistack_objdetect/FragmentRender.java deleted file mode 100644 index a353b005..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/java/com/qcom/aistack_objdetect/FragmentRender.java +++ /dev/null @@ -1,104 +0,0 @@ -// -*- mode: java -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -package com.qcom.aistack_objdetect; - -import android.content.Context; -import android.graphics.Canvas; -import android.graphics.Color; -import android.graphics.Paint; -import android.graphics.Typeface; -import android.support.annotation.Nullable; -import android.util.AttributeSet; -import android.view.View; - - -import java.util.ArrayList; -import java.util.concurrent.locks.ReentrantLock; - -/** - * FragmentRender class is utility for making boxes on camera frames. 
- * FragmentRender has utility in fragment_camera.xml and CameraFragment Class - */ -public class FragmentRender extends View { - - private ReentrantLock mLock = new ReentrantLock(); - private ArrayList boxlist = new ArrayList<>(); - - private Paint mTextColor= new Paint(); - private Paint mBorderColor= new Paint(); - - public FragmentRender(Context context, @Nullable AttributeSet attrs) { - super(context, attrs); - init(); - } - - - public void setCoordsList(ArrayList t_boxlist) { - mLock.lock(); - postInvalidate(); - - if (boxlist==null) - { - mLock.unlock(); - return; - } - boxlist.clear(); - for(int j=0;j parent) { - System.out.println("Nothing"); - } - }); - - } - - public void startManinCameraActivity(View v) - { - Intent i =new Intent(this, MainActivity.class); - - Bundle args = new Bundle(); - args.putChar("key", runtime_var); - args.putCharSequence("selected_dlc_name", dlc_name); - i.putExtras(args); - - startActivity(i); - } - -} diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/java/com/qcom/aistack_objdetect/MainActivity.java b/ai-solutions/android/03-ObjectDetection/app/src/main/java/com/qcom/aistack_objdetect/MainActivity.java deleted file mode 100644 index ee572868..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/java/com/qcom/aistack_objdetect/MainActivity.java +++ /dev/null @@ -1,138 +0,0 @@ -// -*- mode: java -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -package com.qcom.aistack_objdetect; - -import android.Manifest; -import android.content.Intent; -import android.content.pm.PackageManager; -import android.os.Build; -import android.support.v4.app.FragmentTransaction; -import android.support.v7.app.AppCompatActivity; -import android.os.Bundle; -import android.view.WindowManager; -import android.widget.TextView; -import org.opencv.android.OpenCVLoader; -import java.util.HashMap; -import java.util.Map; - -/** - * MainActivity class displays the info of Selected Model,Runtime, classes supported by the model on UI through main_activity.xml - * It also passes the selected model and runtime info to the CameraFragment for making inference using selected Model and Runtime. 
- */ -public class MainActivity extends AppCompatActivity { - - static final Map model_name=new HashMap(); - static Map runtime_name=new HashMap(); - static Map class_count=new HashMap(); - - static final String[] modeloptions = {"YOLONAS", "SSDMobilenetV2", "YoloX"}; - static final String[] modeldlcname = {"Quant_yoloNas_s_320.dlc", "ssd_mobilenetV2_without_ABP-NMS_Q.dlc", "yolox_x_212_Q.dlc"}; - - static final char[] runtimeoptions = {'C', 'G', 'D'}; - static final String[] runtimename = {"CPU", "GPU", "DSP"}; - - static final String[] classcount = {"80", "21", "80"}; - - static { - //System.loadLibrary("objectdetectionYoloNas"); - - for (int i=0;i= Build.VERSION_CODES.M) { - passToFragment = MainActivity.this.checkSelfPermission(Manifest.permission.CAMERA) == PackageManager.PERMISSION_GRANTED; - } - else{ - passToFragment = true; - } - if (passToFragment) { - FragmentTransaction transaction = getSupportFragmentManager().beginTransaction(); - Bundle args = new Bundle(); - args.putChar("key", runtime_value); - args.putCharSequence("selected_dlc_name", selected_dlc_name); - transaction.add(R.id.main_content, CameraFragment.create(args)); - transaction.commit(); - } else { - cameraPermission(); - } - } - - @Override - protected void onResume() { - super.onResume(); - overToCamera(runtime_var, dlc_name); - } - - @Override - protected void onStop() { - super.onStop(); - } -} \ No newline at end of file diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/java/com/qcom/aistack_objdetect/RectangleBox.java b/ai-solutions/android/03-ObjectDetection/app/src/main/java/com/qcom/aistack_objdetect/RectangleBox.java deleted file mode 100644 index c0dcb7d0..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/java/com/qcom/aistack_objdetect/RectangleBox.java +++ /dev/null @@ -1,36 +0,0 @@ -// -*- mode: java -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -package com.qcom.aistack_objdetect; - -import java.util.ArrayList; -/** - * RectangleBox class defines the property associated with each box like coordinates - * labels, confidence etc. - * Can also create copy of boxes. - */ -public class RectangleBox { - - public float top; - public float bottom; - public float left; - public float right; - - public int fps; - public String processing_time; - public String label; - public static ArrayList createBoxes(int num) { - final ArrayList boxes; - boxes = new ArrayList<>(); - for (int i = 0; i < num; ++i) { - boxes.add(new RectangleBox()); - } - return boxes; - } -} diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/java/com/qcom/aistack_objdetect/SNPEHelper.java b/ai-solutions/android/03-ObjectDetection/app/src/main/java/com/qcom/aistack_objdetect/SNPEHelper.java deleted file mode 100644 index fa16fa86..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/java/com/qcom/aistack_objdetect/SNPEHelper.java +++ /dev/null @@ -1,104 +0,0 @@ -// -*- mode: java -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -package com.qcom.aistack_objdetect; - -import android.app.Application; -import android.content.res.AssetManager; -import android.graphics.Bitmap; -import android.util.Log; -import org.opencv.android.Utils; -import org.opencv.core.Mat; -import java.util.ArrayList; - -public class SNPEHelper { - private final Application mApplication; - private AssetManager assetManager; - - // Constructor - public SNPEHelper(Application application) { - mApplication = application; - } - - //Native functions - public native String queryRuntimes(String a); - public native String initSNPE(AssetManager assetManager, char a, String dlc_name); - public native int inferSNPE(long inputmataddress, int width,int height, float[][]boxcoords, String[] classname); - - - /** - * This method loads ML models on selected runtime - */ - public boolean loadingMODELS(char runtime_var, String dlc_name) { - - assetManager = mApplication.getAssets(); - String nativeDirPath = mApplication.getApplicationInfo().nativeLibraryDir; - String res_query = queryRuntimes(nativeDirPath); - System.out.println(res_query); - String tt = initSNPE(assetManager, runtime_var, dlc_name); - System.out.println("RESULT:"+tt); - - int success_count = tt.split("success", -1).length -1; - - if(success_count==1) - { - System.out.println("Model built successfully"); - return true; - } - - return false; - } - - /* - This method makes inference on bitmap. - */ - public int snpeInference(Bitmap modelInputBitmap, int fps, ArrayList BBlist) { - - int result=0; - - try{ - - Mat inputMat = new Mat(); - Utils.bitmapToMat(modelInputBitmap, inputMat); - - float[][] boxCoords = new float[100][5]; //Stores box coords for all person, MAXLIMIT is 100, last coords i.e. boxCoords[k][4] stores confidence value <-- IMP - String[] boxnames = new String[100]; - - //System.out.println("call inferSNPE input_Width="+modelInputBitmap.getWidth()+" input_Height="+modelInputBitmap.getHeight()); - - int numhuman = inferSNPE(inputMat.getNativeObjAddr(), modelInputBitmap.getWidth(), modelInputBitmap.getHeight(), boxCoords,boxnames); - - if (numhuman == -1) - { - Log.e("SNPEHelper", "Error loading model properly. 
Return error.."); - return -1; - } - - for(int k=0;k - - - - - - - - - - diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/res/drawable/ic_launcher_background.xml b/ai-solutions/android/03-ObjectDetection/app/src/main/res/drawable/ic_launcher_background.xml deleted file mode 100644 index 50ae786e..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/res/drawable/ic_launcher_background.xml +++ /dev/null @@ -1,74 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/res/layout/activity_home_screen.xml b/ai-solutions/android/03-ObjectDetection/app/src/main/res/layout/activity_home_screen.xml deleted file mode 100644 index 14e99e71..00000000 --- a/ai-solutions/android/03-ObjectDetection/app/src/main/res/layout/activity_home_screen.xml +++ /dev/null @@ -1,78 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/ai-solutions/ubuntu/electron-gui/electron_app_ui/index_sr.html b/ai-solutions/ubuntu/electron-gui/electron_app_ui/index_sr.html deleted file mode 100644 index 0b8c532a..00000000 --- a/ai-solutions/ubuntu/electron-gui/electron_app_ui/index_sr.html +++ /dev/null @@ -1,509 +0,0 @@ - - - - - AI Solutions - - - - - -
- [Super Resolution page: "Input Dims : 128 x 128", "Output Dims : ----"]
- - - - - - - - \ No newline at end of file diff --git a/ai-solutions/ubuntu/electron-gui/electron_app_ui/main.js b/ai-solutions/ubuntu/electron-gui/electron_app_ui/main.js deleted file mode 100644 index 36e9debb..00000000 --- a/ai-solutions/ubuntu/electron-gui/electron_app_ui/main.js +++ /dev/null @@ -1,158 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// Modules to control application life and create native browser window -const { app, BrowserWindow } = require('electron') -const path = require('path') -var processes = []; //global list to hold PID(s) - -//Module to kill child processes -const killSubprocesses = (main_pid) => { - let cleanup_completed = false; - const psTree = require("ps-tree"); - console.log("killSubprocesses: "); - psTree(main_pid, function (err, children) { - let child_pids_array = [main_pid].concat(children.map(function (p){ - console.log("PID: ",p.PID); - return p.PID})); - child_pids_array.forEach(function (pid) { - console.log("Killing PIDS: ", pid); - process.kill(pid); - }); - cleanup_completed= true; - }); - return new Promise(function (resolve, reject) { - (function waitForSubProcessCleanup() { - if (cleanup_completed) return resolve(); - setTimeout(waitForSubProcessCleanup, 30); - })(); - }); -}; - -function createWindow () { - // Create the browser window. - const mainWindow = new BrowserWindow({ - width: 800, - height: 600, - webPreferences: { - preload: path.join(__dirname, 'preload.js') - } - }) - - mainWindow.maximize() - // and load the index.html of the app. - mainWindow.loadFile('index_sr.html') - console.log("Opened") - // Open the DevTools. - // mainWindow.webContents.openDevTools() -} - -// This method will be called when Electron has finished -// initialization and is ready to create browser windows. -// Some APIs can only be used after this event occurs. 
-app.whenReady().then(() => { - - - console.log("APP ready") - - server_exe_path = path.join( - __dirname, - 'dist-python', - 'server' - ); - - // console.log("EXE path:", server_exe_path) - - //Run Flask Server - const execFile = require("child_process").spawn(server_exe_path); - processes.push(execFile); - execFile.stdout.on('data', (data) => { - console.log(`stdout: ${data}`); - }); - - execFile.stderr.on('data', (data) => { - console.error(`stderr: ${data}`); - }); - - execFile.on('close', (code) => { - console.log(`child process exited with code ${code}`); - }); - - execFile.on('exit', function(code, signal) { - console.log(`EXITING CHILD PROCESS ${code} ${signal} ${execFile.pid}`); - }); - - execFile.on('error', function(err) { - console.log('Exe Not present at specified path (Use npm run package to make .exe) and paste it at ' + server_exe_path); - processes = processes.filter(function (iter_el) { - return iter_el != execFile; - }); - }); - - //Run SNPE exe - cpp_exe_path = path.join( - __dirname, - 'Release', - 'snpe-sample' - ); - - // console.log("cpp_exe_path path:", cpp_exe_path) - const cppexecFile = require("child_process").spawn(cpp_exe_path); - processes.push(cppexecFile); - cppexecFile.stdout.on('data', (data) => { - console.log(`stdout: ${data}`); - }); - - cppexecFile.stderr.on('data', (data) => { - console.error(`stderr: ${data}`); - }); - - cppexecFile.on('close', (code) => { - console.log(`child process exited with code ${code}`); - }); - - cppexecFile.on('exit', function(code, signal) { - console.log(`EXITING CHILD PROCESS ${code} ${signal} ${cppexecFile.pid}`); - }); - - cppexecFile.on('error', function(err) { - console.log('Exe Not present at specified path (Use npm run package to make .exe) and paste Release folder from SNPE_CPP_CODE at ' + cpp_exe_path); - processes = processes.filter(function (iter_el) { - return iter_el != cppexecFile; - }); - }); - createWindow() - - - app.on('activate', function () { - // On macOS it's common to re-create a window in the app when the - // dock icon is clicked and there are no other windows open. - if (BrowserWindow.getAllWindows().length === 0) createWindow() - }) - -}); - -// Quit when all windows are closed, except on macOS. There, it's common -// for applications and their menu bar to stay active until the user quits -// explicitly with Cmd + Q. 
-app.on('window-all-closed', function () { - if (process.platform !== 'darwin'){ - console.log("Inside not darwin"); - if(processes.length!=0){ - processes.forEach(function(proc) { - killSubprocesses(proc.pid).then(()=>{app.quit(); - }); - }); - } - else - { - app.quit(); - } - } -}); diff --git a/ai-solutions/ubuntu/electron-gui/electron_app_ui/package-lock.json b/ai-solutions/ubuntu/electron-gui/electron_app_ui/package-lock.json deleted file mode 100644 index 1ddc4852..00000000 --- a/ai-solutions/ubuntu/electron-gui/electron_app_ui/package-lock.json +++ /dev/null @@ -1,3032 +0,0 @@ -{ - "name": "AI-SOLUTIONS", - "version": "1.0.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "AI-SOLUTIONS", - "version": "1.0.0", - "license": "BSD", - "dependencies": { - "node-cmd": "^5.0.0", - "ps-tree": "^1.2.0" - }, - "devDependencies": { - "electron": "^25.5.0", - "electron-builder": "^24.6.3" - } - }, - "node_modules/@develar/schema-utils": { - "version": "2.6.5", - "resolved": "https://registry.npmjs.org/@develar/schema-utils/-/schema-utils-2.6.5.tgz", - "integrity": "sha512-0cp4PsWQ/9avqTVMCtZ+GirikIA36ikvjtHweU4/j8yLtgObI0+JUPhYFScgwlteveGB1rt3Cm8UhN04XayDig==", - "dev": true, - "dependencies": { - "ajv": "^6.12.0", - "ajv-keywords": "^3.4.1" - }, - "engines": { - "node": ">= 8.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/@electron/asar": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/@electron/asar/-/asar-3.2.4.tgz", - "integrity": "sha512-lykfY3TJRRWFeTxccEKdf1I6BLl2Plw81H0bbp4Fc5iEc67foDCa5pjJQULVgo0wF+Dli75f3xVcdb/67FFZ/g==", - "dev": true, - "dependencies": { - "chromium-pickle-js": "^0.2.0", - "commander": "^5.0.0", - "glob": "^7.1.6", - "minimatch": "^3.0.4" - }, - "bin": { - "asar": "bin/asar.js" - }, - "engines": { - "node": ">=10.12.0" - } - }, - "node_modules/@electron/asar/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/@electron/asar/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/@electron/get": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@electron/get/-/get-2.0.2.tgz", - "integrity": "sha512-eFZVFoRXb3GFGd7Ak7W4+6jBl9wBtiZ4AaYOse97ej6mKj5tkyO0dUnUChs1IhJZtx1BENo4/p4WUTXpi6vT+g==", - "dev": true, - "dependencies": { - "debug": "^4.1.1", - "env-paths": "^2.2.0", - "fs-extra": "^8.1.0", - "got": "^11.8.5", - "progress": "^2.0.3", - "semver": "^6.2.0", - "sumchecker": "^3.0.1" - }, - "engines": { - "node": ">=12" - }, - "optionalDependencies": { - "global-agent": "^3.0.0" - } - }, - "node_modules/@electron/notarize": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@electron/notarize/-/notarize-1.2.4.tgz", - "integrity": "sha512-W5GQhJEosFNafewnS28d3bpQ37/s91CDWqxVchHfmv2dQSTWpOzNlUVQwYzC1ay5bChRV/A9BTL68yj0Pa+TSg==", - "dev": true, - "dependencies": { - "debug": "^4.1.1", - "fs-extra": 
"^9.0.1" - }, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@electron/notarize/node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "dev": true, - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@electron/notarize/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@electron/notarize/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@electron/osx-sign": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@electron/osx-sign/-/osx-sign-1.0.4.tgz", - "integrity": "sha512-xfhdEcIOfAZg7scZ9RQPya1G1lWo8/zMCwUXAulq0SfY7ONIW+b9qGyKdMyuMctNYwllrIS+vmxfijSfjeh97g==", - "dev": true, - "dependencies": { - "compare-version": "^0.1.2", - "debug": "^4.3.4", - "fs-extra": "^10.0.0", - "isbinaryfile": "^4.0.8", - "minimist": "^1.2.6", - "plist": "^3.0.5" - }, - "bin": { - "electron-osx-flat": "bin/electron-osx-flat.js", - "electron-osx-sign": "bin/electron-osx-sign.js" - }, - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/@electron/osx-sign/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@electron/osx-sign/node_modules/isbinaryfile": { - "version": "4.0.10", - "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-4.0.10.tgz", - "integrity": "sha512-iHrqe5shvBUcFbmZq9zOQHBoeOhZJu6RQGrDpBgenUm/Am+F3JM2MgQj+rK3Z601fzrL5gLZWtAPH2OBaSVcyw==", - "dev": true, - "engines": { - "node": ">= 8.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/gjtorikian/" - } - }, - "node_modules/@electron/osx-sign/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@electron/osx-sign/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@electron/universal": { - "version": "1.3.4", - "resolved": 
"https://registry.npmjs.org/@electron/universal/-/universal-1.3.4.tgz", - "integrity": "sha512-BdhBgm2ZBnYyYRLRgOjM5VHkyFItsbggJ0MHycOjKWdFGYwK97ZFXH54dTvUWEfha81vfvwr5On6XBjt99uDcg==", - "dev": true, - "dependencies": { - "@electron/asar": "^3.2.1", - "@malept/cross-spawn-promise": "^1.1.0", - "debug": "^4.3.1", - "dir-compare": "^3.0.0", - "fs-extra": "^9.0.1", - "minimatch": "^3.0.4", - "plist": "^3.0.4" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/@electron/universal/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/@electron/universal/node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "dev": true, - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@electron/universal/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@electron/universal/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/@electron/universal/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@malept/cross-spawn-promise": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@malept/cross-spawn-promise/-/cross-spawn-promise-1.1.1.tgz", - "integrity": "sha512-RTBGWL5FWQcg9orDOCcp4LvItNzUPcyEU9bwaeJX0rJ1IQxzucC48Y0/sQLp/g6t99IQgAlGIaesJS+gTn7tVQ==", - "dev": true, - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/malept" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/subscription/pkg/npm-.malept-cross-spawn-promise?utm_medium=referral&utm_source=npm_fund" - } - ], - "dependencies": { - "cross-spawn": "^7.0.1" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/@malept/flatpak-bundler": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/@malept/flatpak-bundler/-/flatpak-bundler-0.4.0.tgz", - "integrity": "sha512-9QOtNffcOF/c1seMCDnjckb3R9WHcG34tky+FHpNKKCW0wc/scYLwMtO+ptyGUfMW0/b/n4qRiALlaFHc9Oj7Q==", - "dev": true, - "dependencies": { - "debug": "^4.1.1", - "fs-extra": "^9.0.0", - "lodash": "^4.17.15", - "tmp-promise": "^3.0.2" - }, - "engines": { - "node": ">= 10.0.0" - } - }, - 
"node_modules/@malept/flatpak-bundler/node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "dev": true, - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@malept/flatpak-bundler/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@malept/flatpak-bundler/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@sindresorhus/is": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.6.0.tgz", - "integrity": "sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sindresorhus/is?sponsor=1" - } - }, - "node_modules/@szmarczak/http-timer": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-4.0.6.tgz", - "integrity": "sha512-4BAffykYOgO+5nzBWYwE3W90sBgLJoUPRWWcL8wlyiM8IB8ipJz3UMJ9KXQd1RKQXpKp8Tutn80HZtWsu2u76w==", - "dev": true, - "dependencies": { - "defer-to-connect": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@tootallnate/once": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", - "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", - "dev": true, - "engines": { - "node": ">= 10" - } - }, - "node_modules/@types/cacheable-request": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/@types/cacheable-request/-/cacheable-request-6.0.3.tgz", - "integrity": "sha512-IQ3EbTzGxIigb1I3qPZc1rWJnH0BmSKv5QYTalEwweFvyBDLSAe24zP0le/hyi7ecGfZVlIVAg4BZqb8WBwKqw==", - "dev": true, - "dependencies": { - "@types/http-cache-semantics": "*", - "@types/keyv": "^3.1.4", - "@types/node": "*", - "@types/responselike": "^1.0.0" - } - }, - "node_modules/@types/debug": { - "version": "4.1.8", - "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.8.tgz", - "integrity": "sha512-/vPO1EPOs306Cvhwv7KfVfYvOJqA/S/AXjaHQiJboCZzcNDb+TIJFN9/2C9DZ//ijSKWioNyUxD792QmDJ+HKQ==", - "dev": true, - "dependencies": { - "@types/ms": "*" - } - }, - "node_modules/@types/fs-extra": { - "version": "9.0.13", - "resolved": "https://registry.npmjs.org/@types/fs-extra/-/fs-extra-9.0.13.tgz", - "integrity": "sha512-nEnwB++1u5lVDM2UI4c1+5R+FYaKfaAzS4OococimjVm3nQw3TuzH5UNsocrcTBbhnerblyHj4A49qXbIiZdpA==", - "dev": true, - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/http-cache-semantics": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.1.tgz", - "integrity": 
"sha512-SZs7ekbP8CN0txVG2xVRH6EgKmEm31BOxA07vkFaETzZz1xh+cbt8BcI0slpymvwhx5dlFnQG2rTlPVQn+iRPQ==", - "dev": true - }, - "node_modules/@types/keyv": { - "version": "3.1.4", - "resolved": "https://registry.npmjs.org/@types/keyv/-/keyv-3.1.4.tgz", - "integrity": "sha512-BQ5aZNSCpj7D6K2ksrRCTmKRLEpnPvWDiLPfoGyhZ++8YtiK9d/3DBKPJgry359X/P1PfruyYwvnvwFjuEiEIg==", - "dev": true, - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/ms": { - "version": "0.7.31", - "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.31.tgz", - "integrity": "sha512-iiUgKzV9AuaEkZqkOLDIvlQiL6ltuZd9tGcW3gwpnX8JbuiuhFlEGmmFXEXkN50Cvq7Os88IY2v0dkDqXYWVgA==", - "dev": true - }, - "node_modules/@types/node": { - "version": "18.15.11", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.11.tgz", - "integrity": "sha512-E5Kwq2n4SbMzQOn6wnmBjuK9ouqlURrcZDVfbo9ftDDTFt3nk7ZKK4GMOzoYgnpQJKcxwQw+lGaBvvlMo0qN/Q==", - "dev": true - }, - "node_modules/@types/plist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/plist/-/plist-3.0.2.tgz", - "integrity": "sha512-ULqvZNGMv0zRFvqn8/4LSPtnmN4MfhlPNtJCTpKuIIxGVGZ2rYWzFXrvEBoh9CVyqSE7D6YFRJ1hydLHI6kbWw==", - "dev": true, - "optional": true, - "dependencies": { - "@types/node": "*", - "xmlbuilder": ">=11.0.1" - } - }, - "node_modules/@types/responselike": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@types/responselike/-/responselike-1.0.0.tgz", - "integrity": "sha512-85Y2BjiufFzaMIlvJDvTTB8Fxl2xfLo4HgmHzVBz08w4wDePCTjYw66PdrolO0kzli3yam/YCgRufyo1DdQVTA==", - "dev": true, - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/verror": { - "version": "1.10.6", - "resolved": "https://registry.npmjs.org/@types/verror/-/verror-1.10.6.tgz", - "integrity": "sha512-NNm+gdePAX1VGvPcGZCDKQZKYSiAWigKhKaz5KF94hG6f2s8de9Ow5+7AbXoeKxL8gavZfk4UquSAygOF2duEQ==", - "dev": true, - "optional": true - }, - "node_modules/@types/yauzl": { - "version": "2.10.0", - "resolved": "https://registry.npmjs.org/@types/yauzl/-/yauzl-2.10.0.tgz", - "integrity": "sha512-Cn6WYCm0tXv8p6k+A8PvbDG763EDpBoTzHdA+Q/MF6H3sapGjCm9NzoaJncJS9tUKSuCoDs9XHxYYsQDgxR6kw==", - "dev": true, - "optional": true, - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@xmldom/xmldom": { - "version": "0.8.10", - "resolved": "https://registry.npmjs.org/@xmldom/xmldom/-/xmldom-0.8.10.tgz", - "integrity": "sha512-2WALfTl4xo2SkGCYRt6rDTFfk9R1czmBvUQy12gK2KuRKIpWEhcbbzy8EZXtz/jkRqHX8bFEc6FC1HjX4TUWYw==", - "dev": true, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/7zip-bin": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/7zip-bin/-/7zip-bin-5.1.1.tgz", - "integrity": "sha512-sAP4LldeWNz0lNzmTird3uWfFDWWTeg6V/MsmyyLR9X1idwKBWIgt/ZvinqQldJm3LecKEs1emkbquO6PCiLVQ==", - "dev": true - }, - "node_modules/agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", - "dev": true, - "dependencies": { - "debug": "4" - }, - "engines": { - "node": ">= 6.0.0" - } - }, - "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": 
"^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "dev": true, - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/app-builder-bin": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/app-builder-bin/-/app-builder-bin-4.0.0.tgz", - "integrity": "sha512-xwdG0FJPQMe0M0UA4Tz0zEB8rBJTRA5a476ZawAqiBkMv16GRK5xpXThOjMaEOFnZ6zabejjG4J3da0SXG63KA==", - "dev": true - }, - "node_modules/app-builder-lib": { - "version": "24.6.3", - "resolved": "https://registry.npmjs.org/app-builder-lib/-/app-builder-lib-24.6.3.tgz", - "integrity": "sha512-++0Zp7vcCHfXMBGVj7luFxpqvMPk5mcWeTuw7OK0xNAaNtYQTTN0d9YfWRsb1MvviTOOhyHeULWz1CaixrdrDg==", - "dev": true, - "dependencies": { - "@develar/schema-utils": "~2.6.5", - "@electron/notarize": "^1.2.3", - "@electron/osx-sign": "^1.0.4", - "@electron/universal": "1.3.4", - "@malept/flatpak-bundler": "^0.4.0", - "@types/fs-extra": "9.0.13", - "7zip-bin": "~5.1.1", - "async-exit-hook": "^2.0.1", - "bluebird-lst": "^1.0.9", - "builder-util": "24.5.0", - "builder-util-runtime": "9.2.1", - "chromium-pickle-js": "^0.2.0", - "debug": "^4.3.4", - "ejs": "^3.1.8", - "electron-publish": "24.5.0", - "form-data": "^4.0.0", - "fs-extra": "^10.1.0", - "hosted-git-info": "^4.1.0", - "is-ci": "^3.0.0", - "isbinaryfile": "^5.0.0", - "js-yaml": "^4.1.0", - "lazy-val": "^1.0.5", - "minimatch": "^5.1.1", - "read-config-file": "6.3.2", - "sanitize-filename": "^1.6.3", - "semver": "^7.3.8", - "tar": "^6.1.12", - "temp-file": "^3.4.0" - }, - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/app-builder-lib/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/app-builder-lib/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/app-builder-lib/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", 
- "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", - "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/app-builder-lib/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, - "node_modules/assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==", - "dev": true, - "optional": true, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/astral-regex": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", - "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==", - "dev": true, - "optional": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/async": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/async/-/async-3.2.4.tgz", - "integrity": "sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ==", - "dev": true - }, - "node_modules/async-exit-hook": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/async-exit-hook/-/async-exit-hook-2.0.1.tgz", - "integrity": "sha512-NW2cX8m1Q7KPA7a5M2ULQeZ2wR5qI5PAbw5L0UOMxdioVk9PMZ0h1TmyZEkPYrCvYjDlFICusOu1dlEKAAeXBw==", - "dev": true, - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", - "dev": true - }, - "node_modules/at-least-node": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", - "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", - "dev": true, - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/bluebird": { - "version": "3.7.2", - "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", - 
"integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==", - "dev": true - }, - "node_modules/bluebird-lst": { - "version": "1.0.9", - "resolved": "https://registry.npmjs.org/bluebird-lst/-/bluebird-lst-1.0.9.tgz", - "integrity": "sha512-7B1Rtx82hjnSD4PGLAjVWeYH3tHAcVUmChh85a3lltKQm6FresXh9ErQo6oAv6CqxttczC3/kEg8SY5NluPuUw==", - "dev": true, - "dependencies": { - "bluebird": "^3.5.5" - } - }, - "node_modules/boolean": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/boolean/-/boolean-3.2.0.tgz", - "integrity": "sha512-d0II/GO9uf9lfUHH2BQsjxzRJZBdsjgsBiW4BvhWk/3qoKwQFjIDVN19PfX8F2D/r9PCMTtLWjYVCFrpeYUzsw==", - "dev": true, - "optional": true - }, - "node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/buffer": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", - "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "optional": true, - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.1.13" - } - }, - "node_modules/buffer-crc32": { - "version": "0.2.13", - "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", - "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/buffer-equal": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/buffer-equal/-/buffer-equal-1.0.1.tgz", - "integrity": "sha512-QoV3ptgEaQpvVwbXdSO39iqPQTCxSF7A5U99AxbHYqUdCizL/lH2Z0A2y6nbZucxMEOtNyZfG2s6gsVugGpKkg==", - "dev": true, - "engines": { - "node": ">=0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/buffer-from": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", - "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", - "dev": true - }, - "node_modules/builder-util": { - "version": "24.5.0", - "resolved": "https://registry.npmjs.org/builder-util/-/builder-util-24.5.0.tgz", - "integrity": "sha512-STnBmZN/M5vGcv01u/K8l+H+kplTaq4PAIn3yeuufUKSpcdro0DhJWxPI81k5XcNfC//bjM3+n9nr8F9uV4uAQ==", - "dev": true, - "dependencies": { - "@types/debug": "^4.1.6", - "7zip-bin": "~5.1.1", - "app-builder-bin": "4.0.0", - "bluebird-lst": "^1.0.9", - "builder-util-runtime": "9.2.1", - "chalk": "^4.1.2", - "cross-spawn": "^7.0.3", - "debug": "^4.3.4", - "fs-extra": "^10.1.0", - "http-proxy-agent": "^5.0.0", - "https-proxy-agent": "^5.0.1", - "is-ci": "^3.0.0", - "js-yaml": "^4.1.0", - "source-map-support": "^0.5.19", - "stat-mode": "^1.0.0", - "temp-file": "^3.4.0" - } - }, - "node_modules/builder-util-runtime": { - "version": "9.2.1", - "resolved": "https://registry.npmjs.org/builder-util-runtime/-/builder-util-runtime-9.2.1.tgz", - "integrity": 
"sha512-2rLv/uQD2x+dJ0J3xtsmI12AlRyk7p45TEbE/6o/fbb633e/S3pPgm+ct+JHsoY7r39dKHnGEFk/AASRFdnXmA==", - "dev": true, - "dependencies": { - "debug": "^4.3.4", - "sax": "^1.2.4" - }, - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/builder-util/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/builder-util/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/builder-util/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/cacheable-lookup": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-5.0.4.tgz", - "integrity": "sha512-2/kNscPhpcxrOigMZzbiWF7dz8ilhb/nIHU3EyZiXWXpeq/au8qJ8VhdftMkty3n7Gj6HIGalQG8oiBNB3AJgA==", - "dev": true, - "engines": { - "node": ">=10.6.0" - } - }, - "node_modules/cacheable-request": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-7.0.2.tgz", - "integrity": "sha512-pouW8/FmiPQbuGpkXQ9BAPv/Mo5xDGANgSNXzTzJ8DrKGuXOssM4wIQRjfanNRh3Yu5cfYPvcorqbhg2KIJtew==", - "dev": true, - "dependencies": { - "clone-response": "^1.0.2", - "get-stream": "^5.1.0", - "http-cache-semantics": "^4.0.0", - "keyv": "^4.0.0", - "lowercase-keys": "^2.0.0", - "normalize-url": "^6.0.1", - "responselike": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/chownr": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", - "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/chromium-pickle-js": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/chromium-pickle-js/-/chromium-pickle-js-0.2.0.tgz", - "integrity": "sha512-1R5Fho+jBq0DDydt+/vHWj5KJNJCKdARKOCwZUen84I5BreWoLqRLANH1U87eJy1tiASPtMnGqJJq0ZsLoRPOw==", - "dev": true - }, - "node_modules/ci-info": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", - "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==", - "dev": true, - "funding": [ - { - "type": "github", 
- "url": "https://github.com/sponsors/sibiraj-s" - } - ], - "engines": { - "node": ">=8" - } - }, - "node_modules/cli-truncate": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-2.1.0.tgz", - "integrity": "sha512-n8fOixwDD6b/ObinzTrp1ZKFzbgvKZvuz/TvejnLn1aQfC6r52XEx85FmuC+3HI+JM7coBRXUvNqEU2PHVrHpg==", - "dev": true, - "optional": true, - "dependencies": { - "slice-ansi": "^3.0.0", - "string-width": "^4.2.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cliui": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", - "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", - "dev": true, - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.1", - "wrap-ansi": "^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/clone-response": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.3.tgz", - "integrity": "sha512-ROoL94jJH2dUVML2Y/5PEDNaSHgeOdSDicUyS7izcF63G6sTc/FTjLub4b8Il9S8S0beOfYt0TaA5qvFK+w0wA==", - "dev": true, - "dependencies": { - "mimic-response": "^1.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dev": true, - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/commander": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", - "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", - "dev": true, - "engines": { - "node": ">= 6" - } - }, - "node_modules/compare-version": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/compare-version/-/compare-version-0.1.2.tgz", - "integrity": "sha512-pJDh5/4wrEnXX/VWRZvruAGHkzKdr46z11OlTPN+VrATlWWhSKewNCJ1futCO5C7eJB3nPMFZA1LeYtcFboZ2A==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "dev": true - }, - "node_modules/config-file-ts": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/config-file-ts/-/config-file-ts-0.2.4.tgz", - "integrity": "sha512-cKSW0BfrSaAUnxpgvpXPLaaW/umg4bqg4k3GO1JqlRfpx+d5W0GDXznCMkWotJQek5Mmz1MJVChQnz3IVaeMZQ==", - "dev": true, - "dependencies": { - "glob": 
"^7.1.6", - "typescript": "^4.0.2" - } - }, - "node_modules/core-util-is": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==", - "dev": true, - "optional": true - }, - "node_modules/crc": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/crc/-/crc-3.8.0.tgz", - "integrity": "sha512-iX3mfgcTMIq3ZKLIsVFAbv7+Mc10kxabAGQb8HvjA1o3T1PIYprbakQ65d3I+2HGHt6nSKkM9PYjgoJO2KcFBQ==", - "dev": true, - "optional": true, - "dependencies": { - "buffer": "^5.1.0" - } - }, - "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dev": true, - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/decompress-response": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", - "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", - "dev": true, - "dependencies": { - "mimic-response": "^3.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/decompress-response/node_modules/mimic-response": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", - "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/defer-to-connect": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", - "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/define-properties": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.4.tgz", - "integrity": "sha512-uckOqKcfaVvtBdsVkdPv3XjveQJsNQqmhXgRi8uhvWWuPYZCNlzT8qAyblUgNoXdHdjMTzAqeGjAoli8f+bzPA==", - "dev": true, - "optional": true, - "dependencies": { - "has-property-descriptors": "^1.0.0", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", - "dev": true, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/detect-node": { - "version": "2.1.0", - "resolved": 
"https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", - "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==", - "dev": true, - "optional": true - }, - "node_modules/dir-compare": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/dir-compare/-/dir-compare-3.3.0.tgz", - "integrity": "sha512-J7/et3WlGUCxjdnD3HAAzQ6nsnc0WL6DD7WcwJb7c39iH1+AWfg+9OqzJNaI6PkBwBvm1mhZNL9iY/nRiZXlPg==", - "dev": true, - "dependencies": { - "buffer-equal": "^1.0.0", - "minimatch": "^3.0.4" - } - }, - "node_modules/dir-compare/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/dir-compare/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/dmg-builder": { - "version": "24.6.3", - "resolved": "https://registry.npmjs.org/dmg-builder/-/dmg-builder-24.6.3.tgz", - "integrity": "sha512-O7KNT7OKqtV54fMYUpdlyTOCP5DoPuRMLqMTgxxV2PO8Hj/so6zOl5o8GTs8pdDkeAhJzCFOUNB3BDhgXbUbJg==", - "dev": true, - "dependencies": { - "app-builder-lib": "24.6.3", - "builder-util": "24.5.0", - "builder-util-runtime": "9.2.1", - "fs-extra": "^10.1.0", - "iconv-lite": "^0.6.2", - "js-yaml": "^4.1.0" - }, - "optionalDependencies": { - "dmg-license": "^1.0.11" - } - }, - "node_modules/dmg-builder/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/dmg-builder/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/dmg-builder/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/dmg-license": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/dmg-license/-/dmg-license-1.0.11.tgz", - "integrity": "sha512-ZdzmqwKmECOWJpqefloC5OJy1+WZBBse5+MR88z9g9Zn4VY+WYUkAyojmhzJckH5YbbZGcYIuGAkY5/Ys5OM2Q==", - "dev": true, - "optional": true, - "os": [ - "darwin" - ], - "dependencies": { - "@types/plist": "^3.0.1", - "@types/verror": "^1.10.3", - "ajv": "^6.10.0", - "crc": "^3.8.0", - "iconv-corefoundation": "^1.1.7", - "plist": "^3.0.4", - "smart-buffer": "^4.0.2", - "verror": "^1.10.0" - }, - "bin": { - 
"dmg-license": "bin/dmg-license.js" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/dotenv": { - "version": "9.0.2", - "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-9.0.2.tgz", - "integrity": "sha512-I9OvvrHp4pIARv4+x9iuewrWycX6CcZtoAu1XrzPxc5UygMJXJZYmBsynku8IkrJwgypE5DGNjDPmPRhDCptUg==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/dotenv-expand": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/dotenv-expand/-/dotenv-expand-5.1.0.tgz", - "integrity": "sha512-YXQl1DSa4/PQyRfgrv6aoNjhasp/p4qs9FjJ4q4cQk+8m4r6k4ZSiEyytKG8f8W9gi8WsQtIObNmKd+tMzNTmA==", - "dev": true - }, - "node_modules/duplexer": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", - "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" - }, - "node_modules/ejs": { - "version": "3.1.9", - "resolved": "https://registry.npmjs.org/ejs/-/ejs-3.1.9.tgz", - "integrity": "sha512-rC+QVNMJWv+MtPgkt0y+0rVEIdbtxVADApW9JXrUVlzHetgcyczP/E7DJmWJ4fJCZF2cPcBk0laWO9ZHMG3DmQ==", - "dev": true, - "dependencies": { - "jake": "^10.8.5" - }, - "bin": { - "ejs": "bin/cli.js" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/electron": { - "version": "25.8.1", - "resolved": "https://registry.npmjs.org/electron/-/electron-25.8.1.tgz", - "integrity": "sha512-GtcP1nMrROZfFg0+mhyj1hamrHvukfF6of2B/pcWxmWkd5FVY1NJib0tlhiorFZRzQN5Z+APLPr7aMolt7i2AQ==", - "dev": true, - "hasInstallScript": true, - "dependencies": { - "@electron/get": "^2.0.0", - "@types/node": "^18.11.18", - "extract-zip": "^2.0.1" - }, - "bin": { - "electron": "cli.js" - }, - "engines": { - "node": ">= 12.20.55" - } - }, - "node_modules/electron-builder": { - "version": "24.6.3", - "resolved": "https://registry.npmjs.org/electron-builder/-/electron-builder-24.6.3.tgz", - "integrity": "sha512-O6PqhRXwfxCNTXI4BlhELSeYYO6/tqlxRuy+4+xKBokQvwDDjDgZMMoSgAmanVSCuzjE7MZldI9XYrKFk+EQDw==", - "dev": true, - "dependencies": { - "app-builder-lib": "24.6.3", - "builder-util": "24.5.0", - "builder-util-runtime": "9.2.1", - "chalk": "^4.1.2", - "dmg-builder": "24.6.3", - "fs-extra": "^10.1.0", - "is-ci": "^3.0.0", - "lazy-val": "^1.0.5", - "read-config-file": "6.3.2", - "simple-update-notifier": "2.0.0", - "yargs": "^17.6.2" - }, - "bin": { - "electron-builder": "cli.js", - "install-app-deps": "install-app-deps.js" - }, - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/electron-builder/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/electron-builder/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/electron-builder/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": 
"sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/electron-publish": { - "version": "24.5.0", - "resolved": "https://registry.npmjs.org/electron-publish/-/electron-publish-24.5.0.tgz", - "integrity": "sha512-zwo70suH15L15B4ZWNDoEg27HIYoPsGJUF7xevLJLSI7JUPC8l2yLBdLGwqueJ5XkDL7ucYyRZzxJVR8ElV9BA==", - "dev": true, - "dependencies": { - "@types/fs-extra": "^9.0.11", - "builder-util": "24.5.0", - "builder-util-runtime": "9.2.1", - "chalk": "^4.1.2", - "fs-extra": "^10.1.0", - "lazy-val": "^1.0.5", - "mime": "^2.5.2" - } - }, - "node_modules/electron-publish/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/electron-publish/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/electron-publish/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, - "node_modules/end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "dev": true, - "dependencies": { - "once": "^1.4.0" - } - }, - "node_modules/env-paths": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", - "integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/es6-error": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/es6-error/-/es6-error-4.1.1.tgz", - "integrity": "sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg==", - "dev": true, - "optional": true - }, - "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", 
- "dev": true, - "optional": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/event-stream": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/event-stream/-/event-stream-3.3.4.tgz", - "integrity": "sha512-QHpkERcGsR0T7Qm3HNJSyXKEEj8AHNxkY3PK8TS2KJvQ7NiSHe3DDpwVKKtoYprL/AreyzFBeIkBIWChAqn60g==", - "dependencies": { - "duplexer": "~0.1.1", - "from": "~0", - "map-stream": "~0.1.0", - "pause-stream": "0.0.11", - "split": "0.3", - "stream-combiner": "~0.0.4", - "through": "~2.3.1" - } - }, - "node_modules/extract-zip": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-2.0.1.tgz", - "integrity": "sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==", - "dev": true, - "dependencies": { - "debug": "^4.1.1", - "get-stream": "^5.1.0", - "yauzl": "^2.10.0" - }, - "bin": { - "extract-zip": "cli.js" - }, - "engines": { - "node": ">= 10.17.0" - }, - "optionalDependencies": { - "@types/yauzl": "^2.9.1" - } - }, - "node_modules/extsprintf": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.4.1.tgz", - "integrity": "sha512-Wrk35e8ydCKDj/ArClo1VrPVmN8zph5V4AtHwIuHhvMXsKf73UT3BOD+azBIW+3wOJ4FhEH7zyaJCFvChjYvMA==", - "dev": true, - "engines": [ - "node >=0.6.0" - ], - "optional": true - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true - }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", - "dev": true - }, - "node_modules/fd-slicer": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", - "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==", - "dev": true, - "dependencies": { - "pend": "~1.2.0" - } - }, - "node_modules/filelist": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/filelist/-/filelist-1.0.4.tgz", - "integrity": "sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q==", - "dev": true, - "dependencies": { - "minimatch": "^5.0.1" - } - }, - "node_modules/form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", - "dev": true, - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/from": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/from/-/from-0.1.7.tgz", - "integrity": "sha512-twe20eF1OxVxp/ML/kq2p1uc6KvFK/+vs8WjEbeKmV2He22MKm7YF2ANIt+EOqhJ5L3K/SuuPhk0hWQDjOM23g==" - }, - "node_modules/fs-extra": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz", - "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==", - "dev": true, - "dependencies": { - "graceful-fs": 
"^4.2.0", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" - }, - "engines": { - "node": ">=6 <7 || >=8" - } - }, - "node_modules/fs-minipass": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", - "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", - "dev": true, - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/fs-minipass/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "dev": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "dev": true - }, - "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", - "dev": true, - "optional": true - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "dev": true, - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, - "node_modules/get-intrinsic": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", - "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", - "dev": true, - "optional": true, - "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "dev": true, - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "dev": true, - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/glob/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/glob/node_modules/minimatch": { - 
"version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/global-agent": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/global-agent/-/global-agent-3.0.0.tgz", - "integrity": "sha512-PT6XReJ+D07JvGoxQMkT6qji/jVNfX/h364XHZOWeRzy64sSFr+xJ5OX7LI3b4MPQzdL4H8Y8M0xzPpsVMwA8Q==", - "dev": true, - "optional": true, - "dependencies": { - "boolean": "^3.0.1", - "es6-error": "^4.1.1", - "matcher": "^3.0.0", - "roarr": "^2.15.3", - "semver": "^7.3.2", - "serialize-error": "^7.0.1" - }, - "engines": { - "node": ">=10.0" - } - }, - "node_modules/global-agent/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", - "dev": true, - "optional": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/globalthis": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz", - "integrity": "sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==", - "dev": true, - "optional": true, - "dependencies": { - "define-properties": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/got": { - "version": "11.8.5", - "resolved": "https://registry.npmjs.org/got/-/got-11.8.5.tgz", - "integrity": "sha512-o0Je4NvQObAuZPHLFoRSkdG2lTgtcynqymzg2Vupdx6PorhaT5MCbIyXG6d4D94kk8ZG57QeosgdiqfJWhEhlQ==", - "dev": true, - "dependencies": { - "@sindresorhus/is": "^4.0.0", - "@szmarczak/http-timer": "^4.0.5", - "@types/cacheable-request": "^6.0.1", - "@types/responselike": "^1.0.0", - "cacheable-lookup": "^5.0.3", - "cacheable-request": "^7.0.2", - "decompress-response": "^6.0.0", - "http2-wrapper": "^1.0.0-beta.5.2", - "lowercase-keys": "^2.0.0", - "p-cancelable": "^2.0.0", - "responselike": "^2.0.0" - }, - "engines": { - "node": ">=10.19.0" - }, - "funding": { - "url": "https://github.com/sindresorhus/got?sponsor=1" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.10", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", - "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==", - "dev": true - }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dev": true, - "optional": true, - "dependencies": { - "function-bind": "^1.1.1" - }, - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/has-property-descriptors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz", - "integrity": 
"sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==", - "dev": true, - "optional": true, - "dependencies": { - "get-intrinsic": "^1.1.1" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", - "dev": true, - "optional": true, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/hosted-git-info": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.1.0.tgz", - "integrity": "sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==", - "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/http-cache-semantics": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", - "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==", - "dev": true - }, - "node_modules/http-proxy-agent": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", - "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", - "dev": true, - "dependencies": { - "@tootallnate/once": "2", - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/http2-wrapper": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-1.0.3.tgz", - "integrity": "sha512-V+23sDMr12Wnz7iTcDeJr3O6AIxlnvT/bmaAAAP/Xda35C90p9599p0F1eHR/N1KILWSoWVAiOMFjBBXaXSMxg==", - "dev": true, - "dependencies": { - "quick-lru": "^5.1.1", - "resolve-alpn": "^1.0.0" - }, - "engines": { - "node": ">=10.19.0" - } - }, - "node_modules/https-proxy-agent": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", - "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", - "dev": true, - "dependencies": { - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/iconv-corefoundation": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/iconv-corefoundation/-/iconv-corefoundation-1.1.7.tgz", - "integrity": "sha512-T10qvkw0zz4wnm560lOEg0PovVqUXuOFhhHAkixw8/sycy7TJt7v/RrkEKEQnAw2viPSJu6iAkErxnzR0g8PpQ==", - "dev": true, - "optional": true, - "os": [ - "darwin" - ], - "dependencies": { - "cli-truncate": "^2.1.0", - "node-addon-api": "^1.6.3" - }, - "engines": { - "node": "^8.11.2 || >=10" - } - }, - "node_modules/iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "dev": true, - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": 
"sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "optional": true - }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "dev": true, - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true - }, - "node_modules/is-ci": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-3.0.1.tgz", - "integrity": "sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==", - "dev": true, - "dependencies": { - "ci-info": "^3.2.0" - }, - "bin": { - "is-ci": "bin.js" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/isbinaryfile": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-5.0.0.tgz", - "integrity": "sha512-UDdnyGvMajJUWCkib7Cei/dvyJrrvo4FIrsvSFWdPpXSUorzXrDJ0S+X5Q4ZlasfPjca4yqCNNsjbCeiy8FFeg==", - "dev": true, - "engines": { - "node": ">= 14.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/gjtorikian/" - } - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true - }, - "node_modules/jake": { - "version": "10.8.7", - "resolved": "https://registry.npmjs.org/jake/-/jake-10.8.7.tgz", - "integrity": "sha512-ZDi3aP+fG/LchyBzUM804VjddnwfSfsdeYkwt8NcbKRvo4rFkjhs456iLFn3k2ZUWvNe4i48WACDbza8fhq2+w==", - "dev": true, - "dependencies": { - "async": "^3.2.3", - "chalk": "^4.0.2", - "filelist": "^1.0.4", - "minimatch": "^3.1.2" - }, - "bin": { - "jake": "bin/cli.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/jake/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/jake/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": 
"https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/json-buffer": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", - "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", - "dev": true - }, - "node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true - }, - "node_modules/json-stringify-safe": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==", - "dev": true, - "optional": true - }, - "node_modules/json5": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", - "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", - "dev": true, - "bin": { - "json5": "lib/cli.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==", - "dev": true, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/keyv": { - "version": "4.5.2", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.2.tgz", - "integrity": "sha512-5MHbFaKn8cNSmVW7BYnijeAVlE4cYA/SVkifVgrh7yotnfhKmjuXpDKjrABLnT0SfHWV21P8ow07OGfRrNDg8g==", - "dev": true, - "dependencies": { - "json-buffer": "3.0.1" - } - }, - "node_modules/lazy-val": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/lazy-val/-/lazy-val-1.0.5.tgz", - "integrity": "sha512-0/BnGCCfyUMkBpeDgWihanIAF9JmZhHBgUhEqzvf+adhNGLoP6TaiI5oF8oyb3I45P+PcnrqihSf01M0l0G5+Q==", - "dev": true - }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true - }, - "node_modules/lowercase-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", - "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/map-stream": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/map-stream/-/map-stream-0.1.0.tgz", - "integrity": "sha512-CkYQrPYZfWnu/DAmVCpTSX/xHpKZ80eKh2lAkyA6AJTef6bW+6JpbQZN5rofum7da+SyN1bi5ctTm+lTfcCW3g==" - }, - "node_modules/matcher": { - 
"version": "3.0.0", - "resolved": "https://registry.npmjs.org/matcher/-/matcher-3.0.0.tgz", - "integrity": "sha512-OkeDaAZ/bQCxeFAozM55PKcKU0yJMPGifLwV4Qgjitu+5MoAfSQN4lsLJeXZ1b8w0x+/Emda6MZgXS1jvsapng==", - "dev": true, - "optional": true, - "dependencies": { - "escape-string-regexp": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/mime": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-2.6.0.tgz", - "integrity": "sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg==", - "dev": true, - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "dev": true, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "dev": true, - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mimic-response": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", - "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/minimatch": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", - "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", - "dev": true, - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/minimist": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", - "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", - "dev": true, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/minipass": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", - "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/minizlib": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", - "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", - "dev": true, - "dependencies": { - "minipass": "^3.0.0", - "yallist": "^4.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/minizlib/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "dev": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": 
"sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", - "dev": true, - "bin": { - "mkdirp": "bin/cmd.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/node-addon-api": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-1.7.2.tgz", - "integrity": "sha512-ibPK3iA+vaY1eEjESkQkM0BbCqFOaZMiXRTtdB0u7b4djtY6JnsjvPdUHVMg6xQt3B8fpTTWHI9A+ADjM9frzg==", - "dev": true, - "optional": true - }, - "node_modules/node-cmd": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/node-cmd/-/node-cmd-5.0.0.tgz", - "integrity": "sha512-4sQTJmsS5uZKAPz/Df9fnIbmvOySfGdW+UreH4X5NcAOOpKjaE+K5wf4ehNBbZVPo0vQ36RkRnhhsXXJAT+Syw==", - "engines": { - "node": ">=6.4.0" - } - }, - "node_modules/normalize-url": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz", - "integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "dev": true, - "optional": true, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "dev": true, - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/p-cancelable": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-2.1.1.tgz", - "integrity": "sha512-BZOr3nRQHOntUjTrH8+Lh54smKHoHyur8We1V8DSMVrl5A2malOOwuJRnKRDjSnkoeBh4at6BwEnb5I7Jl31wg==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/pause-stream": { - "version": "0.0.11", - "resolved": "https://registry.npmjs.org/pause-stream/-/pause-stream-0.0.11.tgz", - "integrity": "sha512-e3FBlXLmN/D1S+zHzanP4E/4Z60oFAa3O051qt1pxa7DEJWKAyil6upYVXCWadEnuoqa4Pkc9oUx9zsxYeRv8A==", - "dependencies": { - "through": "~2.3" - } - }, - "node_modules/pend": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", - "integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==", - "dev": true - }, - "node_modules/plist": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/plist/-/plist-3.1.0.tgz", - "integrity": 
"sha512-uysumyrvkUX0rX/dEVqt8gC3sTBzd4zoWfLeS29nb53imdaXVvLINYXTI2GNqzaMuvacNx4uJQ8+b3zXR0pkgQ==", - "dev": true, - "dependencies": { - "@xmldom/xmldom": "^0.8.8", - "base64-js": "^1.5.1", - "xmlbuilder": "^15.1.1" - }, - "engines": { - "node": ">=10.4.0" - } - }, - "node_modules/progress": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", - "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", - "dev": true, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/ps-tree": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/ps-tree/-/ps-tree-1.2.0.tgz", - "integrity": "sha512-0VnamPPYHl4uaU/nSFeZZpR21QAWRz+sRv4iW9+v/GS/J5U5iZB5BNN6J0RMoOvdx2gWM2+ZFMIm58q24e4UYA==", - "dependencies": { - "event-stream": "=3.3.4" - }, - "bin": { - "ps-tree": "bin/ps-tree.js" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "dev": true, - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "node_modules/punycode": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", - "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/quick-lru": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", - "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/read-config-file": { - "version": "6.3.2", - "resolved": "https://registry.npmjs.org/read-config-file/-/read-config-file-6.3.2.tgz", - "integrity": "sha512-M80lpCjnE6Wt6zb98DoW8WHR09nzMSpu8XHtPkiTHrJ5Az9CybfeQhTJ8D7saeBHpGhLPIVyA8lcL6ZmdKwY6Q==", - "dev": true, - "dependencies": { - "config-file-ts": "^0.2.4", - "dotenv": "^9.0.2", - "dotenv-expand": "^5.1.0", - "js-yaml": "^4.1.0", - "json5": "^2.2.0", - "lazy-val": "^1.0.4" - }, - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/resolve-alpn": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", - "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==", - "dev": true - }, - "node_modules/responselike": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/responselike/-/responselike-2.0.1.tgz", - "integrity": "sha512-4gl03wn3hj1HP3yzgdI7d3lCkF95F21Pz4BPGvKHinyQzALR5CapwC8yIi0Rh58DEMQ/SguC03wFj2k0M/mHhw==", - "dev": true, - "dependencies": { - "lowercase-keys": "^2.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": 
"sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "dev": true, - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/roarr": { - "version": "2.15.4", - "resolved": "https://registry.npmjs.org/roarr/-/roarr-2.15.4.tgz", - "integrity": "sha512-CHhPh+UNHD2GTXNYhPWLnU8ONHdI+5DI+4EYIAOaiD63rHeYlZvyh8P+in5999TTSFgUYuKUAjzRI4mdh/p+2A==", - "dev": true, - "optional": true, - "dependencies": { - "boolean": "^3.0.1", - "detect-node": "^2.0.4", - "globalthis": "^1.0.1", - "json-stringify-safe": "^5.0.1", - "semver-compare": "^1.0.0", - "sprintf-js": "^1.1.2" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", - "dev": true - }, - "node_modules/sanitize-filename": { - "version": "1.6.3", - "resolved": "https://registry.npmjs.org/sanitize-filename/-/sanitize-filename-1.6.3.tgz", - "integrity": "sha512-y/52Mcy7aw3gRm7IrcGDFx/bCk4AhRh2eI9luHOQM86nZsqwiRkkq2GekHXBBD+SmPidc8i2PqtYZl+pWJ8Oeg==", - "dev": true, - "dependencies": { - "truncate-utf8-bytes": "^1.0.0" - } - }, - "node_modules/sax": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==", - "dev": true - }, - "node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/semver-compare": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/semver-compare/-/semver-compare-1.0.0.tgz", - "integrity": "sha512-YM3/ITh2MJ5MtzaM429anh+x2jiLVjqILF4m4oyQB18W7Ggea7BfqdH/wGMK7dDiMghv/6WG7znWMwUDzJiXow==", - "dev": true, - "optional": true - }, - "node_modules/serialize-error": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/serialize-error/-/serialize-error-7.0.1.tgz", - "integrity": "sha512-8I8TjW5KMOKsZQTvoxjuSIa7foAwPWGOts+6o7sgjz41/qMD9VQHEDxi6PBvK2l0MXUmqZyNpUK+T2tQaaElvw==", - "dev": true, - "optional": true, - "dependencies": { - "type-fest": "^0.13.1" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/simple-update-notifier": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/simple-update-notifier/-/simple-update-notifier-2.0.0.tgz", - "integrity": 
"sha512-a2B9Y0KlNXl9u/vsW6sTIu9vGEpfKu2wRV6l1H3XEas/0gUIzGzBoP/IouTcUQbm9JWZLH3COxyn03TYlFax6w==", - "dev": true, - "dependencies": { - "semver": "^7.5.3" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/simple-update-notifier/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", - "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/slice-ansi": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-3.0.0.tgz", - "integrity": "sha512-pSyv7bSTC7ig9Dcgbw9AuRNUb5k5V6oDudjZoMBSr13qpLBG7tB+zgCkARjq7xIUgdz5P1Qe8u+rSGdouOOIyQ==", - "dev": true, - "optional": true, - "dependencies": { - "ansi-styles": "^4.0.0", - "astral-regex": "^2.0.0", - "is-fullwidth-code-point": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/smart-buffer": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", - "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", - "dev": true, - "optional": true, - "engines": { - "node": ">= 6.0.0", - "npm": ">= 3.0.0" - } - }, - "node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-support": { - "version": "0.5.21", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", - "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", - "dev": true, - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "node_modules/split": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/split/-/split-0.3.3.tgz", - "integrity": "sha512-wD2AeVmxXRBoX44wAycgjVpMhvbwdI2aZjCkvfNcH1YqHQvJVa1duWc73OyVGJUc05fhFaTZeQ/PYsrmyH0JVA==", - "dependencies": { - "through": "2" - }, - "engines": { - "node": "*" - } - }, - "node_modules/sprintf-js": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz", - "integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==", - "dev": true, - "optional": true - }, - "node_modules/stat-mode": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/stat-mode/-/stat-mode-1.0.0.tgz", - "integrity": "sha512-jH9EhtKIjuXZ2cWxmXS8ZP80XyC3iasQxMDV8jzhNJpfDb7VbQLVW4Wvsxz9QZvzV+G4YoSfBUVKDOyxLzi/sg==", - "dev": true, - "engines": { - "node": ">= 6" - } - }, - "node_modules/stream-combiner": { - "version": "0.0.4", - "resolved": "https://registry.npmjs.org/stream-combiner/-/stream-combiner-0.0.4.tgz", - "integrity": "sha512-rT00SPnTVyRsaSz5zgSPma/aHSOic5U1prhYdRy5HS2kTZviFpmDgzilbtsJsxiroqACmayynDN/9VzIbX5DOw==", - "dependencies": { - "duplexer": "~0.1.1" - } - }, - "node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - 
"dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/sumchecker": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/sumchecker/-/sumchecker-3.0.1.tgz", - "integrity": "sha512-MvjXzkz/BOfyVDkG0oFOtBxHX2u3gKbMHIF/dXblZsgD3BWOFLmHovIpZY7BykJdAjcqRCBi1WYBNdEC9yI7vg==", - "dev": true, - "dependencies": { - "debug": "^4.1.0" - }, - "engines": { - "node": ">= 8.0" - } - }, - "node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/tar": { - "version": "6.1.15", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.15.tgz", - "integrity": "sha512-/zKt9UyngnxIT/EAGYuxaMYgOIJiP81ab9ZfkILq4oNLPFX50qyYmu7jRj9qeXoxmJHjGlbH0+cm2uy1WCs10A==", - "dev": true, - "dependencies": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^5.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/temp-file": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/temp-file/-/temp-file-3.4.0.tgz", - "integrity": "sha512-C5tjlC/HCtVUOi3KWVokd4vHVViOmGjtLwIh4MuzPo/nMYTV/p1urt3RnMz2IWXDdKEGJH3k5+KPxtqRsUYGtg==", - "dev": true, - "dependencies": { - "async-exit-hook": "^2.0.1", - "fs-extra": "^10.0.0" - } - }, - "node_modules/temp-file/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/temp-file/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/temp-file/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==" - }, - "node_modules/tmp": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz", - "integrity": 
"sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==", - "dev": true, - "dependencies": { - "rimraf": "^3.0.0" - }, - "engines": { - "node": ">=8.17.0" - } - }, - "node_modules/tmp-promise": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/tmp-promise/-/tmp-promise-3.0.3.tgz", - "integrity": "sha512-RwM7MoPojPxsOBYnyd2hy0bxtIlVrihNs9pj5SUvY8Zz1sQcQG2tG1hSr8PDxfgEB8RNKDhqbIlroIarSNDNsQ==", - "dev": true, - "dependencies": { - "tmp": "^0.2.0" - } - }, - "node_modules/truncate-utf8-bytes": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/truncate-utf8-bytes/-/truncate-utf8-bytes-1.0.2.tgz", - "integrity": "sha512-95Pu1QXQvruGEhv62XCMO3Mm90GscOCClvrIUwCM0PYOXK3kaF3l3sIHxx71ThJfcbM2O5Au6SO3AWCSEfW4mQ==", - "dev": true, - "dependencies": { - "utf8-byte-length": "^1.0.1" - } - }, - "node_modules/type-fest": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.13.1.tgz", - "integrity": "sha512-34R7HTnG0XIJcBSn5XhDd7nNFPRcXYRZrBB2O2jdKqYODldSzBAqzsWoZYYvduky73toYS/ESqxPvkDf/F0XMg==", - "dev": true, - "optional": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/typescript": { - "version": "4.9.5", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz", - "integrity": "sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==", - "dev": true, - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=4.2.0" - } - }, - "node_modules/universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", - "dev": true, - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dev": true, - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/utf8-byte-length": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/utf8-byte-length/-/utf8-byte-length-1.0.4.tgz", - "integrity": "sha512-4+wkEYLBbWxqTahEsWrhxepcoVOJ+1z5PGIjPZxRkytcdSUaNjIjBM7Xn8E+pdSuV7SzvWovBFA54FO0JSoqhA==", - "dev": true - }, - "node_modules/verror": { - "version": "1.10.1", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.1.tgz", - "integrity": "sha512-veufcmxri4e3XSrT0xwfUR7kguIkaxBeosDg00yDWhk49wdwkSUrvvsm7nc75e1PUyvIeZj6nS8VQRYz2/S4Xg==", - "dev": true, - "optional": true, - "dependencies": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - }, - "engines": { - "node": ">=0.6.0" - } - }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": 
"sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", - "dev": true - }, - "node_modules/xmlbuilder": { - "version": "15.1.1", - "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-15.1.1.tgz", - "integrity": "sha512-yMqGBqtXyeN1e3TGYvgNgDVZ3j84W4cwkOXQswghol6APgZWaff9lnbvN7MHYJOiXsvGPXtjTYJEiC9J2wv9Eg==", - "dev": true, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - }, - "node_modules/yargs": { - "version": "17.7.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", - "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", - "dev": true, - "dependencies": { - "cliui": "^8.0.1", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.1.1" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/yargs-parser": { - "version": "21.1.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", - "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", - "dev": true, - "engines": { - "node": ">=12" - } - }, - "node_modules/yauzl": { - "version": "2.10.0", - "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", - "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==", - "dev": true, - "dependencies": { - "buffer-crc32": "~0.2.3", - "fd-slicer": "~1.1.0" - } - } - } -} diff --git a/ai-solutions/ubuntu/electron-gui/electron_app_ui/package.json b/ai-solutions/ubuntu/electron-gui/electron_app_ui/package.json deleted file mode 100644 index 8978c35e..00000000 --- a/ai-solutions/ubuntu/electron-gui/electron_app_ui/package.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "name": "AI-SOLUTIONS", - "version": "1.0.0", - "description": "Application to showcase various AI Models", - "main": "main.js", - "scripts": { - "test": "echo \"Error: no test specified\" && exit 1", - "start": "electron . 
--no-sandbox", - "package": "node package_snpe_cpp.js && node package_python.js && npm run -s package-electron", - "package-electron": "electron-builder --publish=never --linux deb" - }, - "build": { - "appId": "com.ai-solutions.ai-demos", - "productName": "AI-SOLUTIONS", - "asar": false, - "asarUnpack": [ - "**/*.node" - ], - "mac": { - "category": "public.app-category.utilities" - }, - "files": [ - "assets", - "main.js", - "index_sr.html", - "index_ie.html", - "icon.png", - "preload.js", - "node_modules/**/*" - ], - "extraResources": [ - { - "from": "dist-python/", - "to": "app/dist-python", - "filter": [ - "**/*" - ] - }, - { - "from": "../SNPE_CPP_Code/build/Release/", - "to": "app/Release", - "filter": [ - "**/*" - ] - } - ] - }, - "keywords": [ - "Solutions", - "Electron", - "Qualcomm", - "AI", - "ai-demos", - "AI-SOLUTIONS", - "demo" - ], - "author": "Qualcomm < >", - "license": "BSD", - "devDependencies": { - "electron": "^25.5.0", - "electron-builder": "^24.6.3" - }, - - "homepage": "https://github.qualcomm.com/qualcomm-model-zoo-public-mirror/ai-solutions/tree/main/windows#readme", - "dependencies": { - "node-cmd": "^5.0.0", - "ps-tree": "^1.2.0" - } -} diff --git a/ai-solutions/ubuntu/electron-gui/electron_app_ui/package_python.js b/ai-solutions/ubuntu/electron-gui/electron_app_ui/package_python.js deleted file mode 100644 index afbbe8b0..00000000 --- a/ai-solutions/ubuntu/electron-gui/electron_app_ui/package_python.js +++ /dev/null @@ -1,39 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -const path = require("path"); - -const spawn = require("child_process").spawn, - ls = spawn( - "pyinstaller", - [ - "-w", - "--onefile", - `--add-data ../python_flask_server/templates${path.delimiter}templates`, - `--add-data ../python_flask_server/static${path.delimiter}static`, - `--add-data ../python_flask_server/DLC${path.delimiter}DLC`, - "--distpath dist-python", - "../python_flask_server/server.py", - ], - { - shell: true, - } - ); - -ls.stdout.on("data", function (data) { - // stream output of build process - console.log("INFO: ", data.toString()); -}); - -ls.stderr.on("data", function (data) { - console.log( data.toString()); -}); -ls.on("exit", function (code) { - console.log("pyinstaller process exited with code " + code.toString()); -}); diff --git a/ai-solutions/ubuntu/electron-gui/electron_app_ui/package_snpe_cpp.js b/ai-solutions/ubuntu/electron-gui/electron_app_ui/package_snpe_cpp.js deleted file mode 100644 index 256531ca..00000000 --- a/ai-solutions/ubuntu/electron-gui/electron_app_ui/package_snpe_cpp.js +++ /dev/null @@ -1,19 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -const nodeCmd = require('node-cmd') - -let command = String.raw `mkdir -p Release && cd ../SNPE_CPP_Code && mkdir -p build && cd build && mkdir -p Release && cmake ../ && cmake --build . 
&& cp Release/snpe-sample ../../electron_app_ui/Release/snpe-sample`; - -nodeCmd.runSync(command, (err, data, stderr) => { -if(data) { - return res.json(data); -} -return err; -}); \ No newline at end of file diff --git a/ai-solutions/ubuntu/electron-gui/electron_app_ui/preload.js b/ai-solutions/ubuntu/electron-gui/electron_app_ui/preload.js deleted file mode 100644 index f464ed3d..00000000 --- a/ai-solutions/ubuntu/electron-gui/electron_app_ui/preload.js +++ /dev/null @@ -1,26 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -/** - * The preload script runs before. It has access to web APIs - * as well as Electron's renderer process modules and some - * polyfilled Node.js functions. - * - * https://www.electronjs.org/docs/latest/tutorial/sandbox - */ -window.addEventListener('DOMContentLoaded', () => { - const replaceText = (selector, text) => { - const element = document.getElementById(selector) - if (element) element.innerText = text - } - - for (const type of ['chrome', 'node', 'electron']) { - replaceText(`${type}-version`, process.versions[type]) - } -}) diff --git a/ai-solutions/ubuntu/electron-gui/electron_app_ui/styles.css b/ai-solutions/ubuntu/electron-gui/electron_app_ui/styles.css deleted file mode 100644 index ed8a34f4..00000000 --- a/ai-solutions/ubuntu/electron-gui/electron_app_ui/styles.css +++ /dev/null @@ -1,3 +0,0 @@ -/* styles.css */ - -/* Add styles here to customize the appearance of your app */ \ No newline at end of file diff --git a/ai-solutions/ubuntu/electron-gui/python_flask_server/DLC/enhancement/README.md b/ai-solutions/ubuntu/electron-gui/python_flask_server/DLC/enhancement/README.md deleted file mode 100644 index 23bf9bdb..00000000 --- a/ai-solutions/ubuntu/electron-gui/python_flask_server/DLC/enhancement/README.md +++ /dev/null @@ -1 +0,0 @@ -Place enhancement dlc files in this folder diff --git a/ai-solutions/ubuntu/electron-gui/python_flask_server/DLC/superresolution/README.md b/ai-solutions/ubuntu/electron-gui/python_flask_server/DLC/superresolution/README.md deleted file mode 100644 index 142a5fab..00000000 --- a/ai-solutions/ubuntu/electron-gui/python_flask_server/DLC/superresolution/README.md +++ /dev/null @@ -1 +0,0 @@ -Place superresolution dlc files in this folder diff --git a/ai-solutions/ubuntu/electron-gui/python_flask_server/ImageEnhancement_blueprint.py b/ai-solutions/ubuntu/electron-gui/python_flask_server/ImageEnhancement_blueprint.py deleted file mode 100644 index 5a2f680d..00000000 --- a/ai-solutions/ubuntu/electron-gui/python_flask_server/ImageEnhancement_blueprint.py +++ /dev/null @@ -1,213 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -from flask import Blueprint -from flask import request, jsonify, make_response, send_file -from PIL import Image -from empatches import EMPatches -import io, os -import cv2 -import numpy as np -import time -import functools -import zmq -import sys - -time_taken_model = "" -upscaled_img_dims = "" -old_runtime = "" -old_model_name = "" - -imageEnhance_bp = Blueprint("ImageEnhance",__name__) - - -def pyinstaller_absolute_path(relative_path): - """ For PyInstaller, getting absolute path of resources""" - base_path = getattr( sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__))) - abs_path = os.path.join(base_path, relative_path) - return abs_path - - -def buildnetwork_ie(socket, model_name, run_time): - - print("BUILDING NETWORK Low light") - print("Model name: ",model_name) - first_str = b"networkbuild" - - runtime_name_decoder={'DSP':b"DSP",'GPU':b"GPU", 'CPU':b"CPU"} - # dlc_name_decoder={'EnhancementGAN':'quant_enhancement_240_320_8350.dlc', 'MBLLEN':'quant_mbllen_214.dlc', 'RUAS':'quant_ruas_214.dlc','SCI':'quant_sci_214.dlc','StableLLVE':'quant_StableLLVE_214.dlc','Zero-DCE':'quant_zerodce_80_214.dlc','Zero-DCE++':'quant_zerodce++_214.dlc'} - # dlc_name_decoder={'MBLLEN':'quant_mbllen_214.dlc', 'RUAS':'quant_ruas_214.dlc','SCI':'quant_sci_214.dlc','StableLLVE':'quant_StableLLVE_214.dlc','Zero-DCE':'quant_zerodce_80_214.dlc'} - dlc_name_decoder={'Enlighten':'enlighten_quantized.dlc', 'RUAS':'ruas_quantized.dlc','SCI':'sci_quantized.dlc','StableLLVE':'StableLLVE_quantized.dlc','Zero-DCE':'zero_dce_quantized.dlc'} - dlc_path = bytes(pyinstaller_absolute_path(os.path.join("DLC","enhancement", dlc_name_decoder.get(model_name))),'utf-8') - - socket.send_multipart([first_str,dlc_path, runtime_name_decoder.get(run_time)]) - - print("Messages sent for building network, waiting for reply") - message_build = socket.recv() - print(message_build) - - -def runmodel_ie(socket, patch, model_name, run_time, scaling_factor=4 ): - - try: - print("LOW LIGHT MODEL") - - ## PREPROC ## - if model_name=='RUAS': - patch = cv2.resize(patch, (640,480)) - patch = patch/255 - elif model_name=='SCI': - patch = cv2.resize(patch, (640,480)) - patch = patch/255 - elif model_name=='StableLLVE': - patch = cv2.resize(patch, (640,480)) - patch = patch/255 - elif model_name=='Zero-DCE': - patch = cv2.resize(patch, (640,480)) - patch = patch/255 - else: - print("Out of Context: Model Specified is wrong") - - - img = np.array(patch) - img = img.astype(np.float32) - img = img.tobytes() - - print("Preproc done") - - socket.send_multipart([b"infer",img]) - - print("Messages Image sent, waiting for reply") - message_img_out = socket.recv() - - prediction = np.frombuffer(message_img_out, dtype=np.float32) - print("Message received from server :: Shape: ", prediction.shape) #," data: ", prediction) - - socket.send(b"get_infer_time") - message_infer_time = socket.recv() - print("message_infer_time", message_infer_time.decode('UTF-8')) - elapsed_time = 0.0 - elapsed_time = float(message_infer_time.decode('UTF-8'))/1000 - - print("post start") - - if model_name=='RUAS': - prediction = prediction.reshape(480,640,3) - prediction = prediction*255 - elif model_name=='SCI': - prediction = prediction.reshape(480,640,3) - prediction = prediction*255 - elif model_name=='StableLLVE': - prediction = prediction.reshape(480,640,3) - prediction = prediction*255 - elif model_name=='Zero-DCE': - 
prediction = prediction.reshape(480,640,3) - prediction = prediction*255 - else: - print("Out of Context: Model Specified is wrong") - - # for all other models, post proc is same # - # prediction = prediction*255 - - upscaled_patch = np.clip(prediction, 0, 255).astype(np.uint8) - - except Exception as e: - print("Exception",str(e)) - - return upscaled_patch, elapsed_time - - -# Endpoint for super resolution -@imageEnhance_bp.route('/image_enhancement', methods=['POST']) -def image_enhancement(): - try: - print("Image enhancement blueprint") - - ## GETTING DATA FROM ELECTRON ## - print("Fetching image data from the POST request") - image_data = request.files['imageData'] - - model_name = request.form['model_name'] - print("MODEL NAME:",model_name) - - runtime = request.form['runtime'] - print("RUN TIME:",runtime) - - print("load as PIL IMG") - image_data = Image.open(image_data) - #image_data.save("input_img.png") - width, height = image_data.size - print(f"Received img height = {height} ; width = {width}") - - - ## MAKING CONNECTION WITH SNPE EXE ## - context = zmq.Context() - - # Create a REQ (request) socket - socket = context.socket(zmq.REQ) - server_address = "tcp://localhost:5555" # Replace with your server's address - socket.connect(server_address) - - - ## BUILDING NETWORK ## - global old_model_name - global old_runtime - - if model_name != old_model_name or runtime != old_runtime: - print("___________________BUILDINGNETWORK________________") - print("old_model_name: ", old_model_name, "::model_name: ",model_name) - print("old_runtime: ", old_runtime, "::runtime: ",runtime) - buildnetwork_ie(socket, model_name, runtime) ##build network when there is some change other than image - old_model_name = model_name - old_runtime = runtime - - - ## INFERENCING ON NETWORK ## - - # Step 1: Read Image and Extract 128x128 patches from the image - image_np = np.array(image_data) - - merged_img, time_taken = runmodel_ie(socket, image_np, model_name, runtime) - - print("Received Enhanced Image") - - global time_taken_model - global upscaled_img_dims - time_taken_model = str(f'{time_taken*1000:.2f}')+" ms" - - - # Step 3: Getting image dimensions - - upscaled_img_dims = str(merged_img.shape[1]) + " x " +str(merged_img.shape[0]); - print("upscaled_img_dims: ",upscaled_img_dims) - merged_img = Image.fromarray(np.uint8(merged_img)) - # merged_img.save("upscaled_lowlight.png") - - # Convert the upscaled image to a binary response - output_buffer = io.BytesIO() - - merged_img.save(output_buffer, format='PNG') - - print("Sending enhanced image as output to electron ...") - output_buffer.seek(0) - return send_file(output_buffer, mimetype='image/png') - - except Exception as e: - print("#############EXCEPTION####################") - print(str(e)) - return jsonify({'error': str(e)}), 400 - -# Endpoint for super resolution -@imageEnhance_bp.route('/low-light/timer_string', methods=['POST']) -def timer_string(): - output_new = { - "infertime": time_taken_model, - "outputdims": upscaled_img_dims, - } - return jsonify(output_new), 200 \ No newline at end of file diff --git a/ai-solutions/ubuntu/electron-gui/python_flask_server/SuperResolution_blueprint.py b/ai-solutions/ubuntu/electron-gui/python_flask_server/SuperResolution_blueprint.py deleted file mode 100644 index 3050232c..00000000 --- a/ai-solutions/ubuntu/electron-gui/python_flask_server/SuperResolution_blueprint.py +++ /dev/null @@ -1,217 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# 
@@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -from flask import Blueprint -from flask import request, jsonify, make_response, send_file, render_template -from PIL import Image -from empatches import EMPatches -import io, os -import cv2 -import numpy as np -import time -import functools -import zmq -import sys - -time_taken_model = "" -upscaled_img_dims = "" -old_runtime = "" -old_model_name = "" - -superRes_bp = Blueprint("SuperRes",__name__) - - -def pyinstaller_absolute_path(relative_path): - """ For PyInstaller, getting absolute path of resources""" - base_path = getattr( sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__))) - abs_path = os.path.join(base_path, relative_path) - return abs_path - - -def buildnetwork(socket, model_name, run_time): - - print("BUILDING NETWORK") - first_str = b"networkbuild" - - runtime_name_decoder={'DSP':b"DSP",'GPU':b"GPU", 'CPU':b"CPU"} - dlc_name_decoder={'ESRGAN':'esrgan_quantized.dlc', 'SRGAN':'srgan_quantized.dlc', 'SESR':'sesr_quantized.dlc','QuickSR_large':'quicksrnet_large_quantized.dlc','QuickSR_medium':'quicksrnet_medium_quantized.dlc','QuickSR_small':'quicksrnet_small_quantized.dlc','XLSR':'xlsr_quantized.dlc'} - dlc_path = bytes(pyinstaller_absolute_path(os.path.join("DLC","superresolution", dlc_name_decoder.get(model_name))),'utf-8') - socket.send_multipart([first_str,dlc_path, runtime_name_decoder.get(run_time)]) - - print("Messages sent for building network, waiting for reply") - message_build = socket.recv() - print(message_build) - -def upscale_patch(socket, patch, model_name, run_time, scaling_factor=4 ): - - try: - print("MODEL::::::::::::::::::::::") - runtime_name_decoder={'DSP':"--use_dsp",'GPU':"--use_gpu", 'CPU':""} - - ## PREPROC ## - if model_name=='ESRGAN': - # do nothing # - print("no preproc needed---Only resize") - - else: - patch = patch/255 - - img = np.array(patch) - img = img.astype(np.float32) - img = img.tobytes() - - socket.send_multipart([b"infer",img]) - - print("Messages Image sent, waiting for reply") - message_img_out = socket.recv() - - prediction = np.frombuffer(message_img_out, dtype=np.float32) - - prediction = prediction.reshape(512,512,3) - - socket.send(b"get_infer_time") - message_infer_time = socket.recv() - print("message_infer_time", message_infer_time.decode('UTF-8')) - elapsed_time = 0.0 - elapsed_time = float(message_infer_time.decode('UTF-8'))/1000 - - ## POSTPROC ## - if model_name=='ESRGAN': - # do nothing # - print("no postproc needed for ESRGAN") - else: - # for all other models, post proc is same # - prediction = prediction*255 - - upscaled_patch = np.clip(prediction, 0, 255).astype(np.uint8) - - except Exception as e: - print("Exception",str(e)) - - return upscaled_patch, elapsed_time - -# Serve INDEX HTML file -@superRes_bp.route('/') -def index(): - return render_template('index.html') - -# Endpoint for super resolution -@superRes_bp.route('/timer_string', methods=['POST']) -def timer_string(): - output_new = { - "infertime": time_taken_model, - "outputdims": upscaled_img_dims, - } - return jsonify(output_new), 200 - -# Endpoint for super resolution -@superRes_bp.route('/super_resolution', methods=['POST']) -def super_resolution(): - try: - - ## GETTING DATA FROM ELECTRON ## - print("Fetching image data from the POST request") - image_data = request.files['imageData'] - - 
model_name = request.form['model_name'] - print("MODEL NAME:",model_name) - - runtime = request.form['runtime'] - print("RUN TIME:",runtime) - - print("load as PIL IMG") - image_data = Image.open(image_data) - #image_data.save("input_img.png") - width, height = image_data.size - print(f"Received img height = {height} ; width = {width}") - - - ## MAKING CONNECTION WITH SNPE EXE ## - context = zmq.Context() - # Create a REQ (request) socket - socket = context.socket(zmq.REQ) - server_address = "tcp://localhost:5555" # Replace with your server's address - socket.connect(server_address) - - - ## BUILDING NETWORK ## - global old_model_name - global old_runtime - - if model_name != old_model_name or runtime != old_runtime: - print("___________________BUILDINGNETWORK________________") - print("old_model_name: ", old_model_name, "::model_name: ",model_name) - print("old_runtime: ", old_runtime, "::runtime: ",runtime) - buildnetwork(socket, model_name, runtime) ##build network when there is some change other than image - old_model_name = model_name - old_runtime = runtime - - - ## INFERENCING ON NETWORK ## - - - # Step 0: Set upscaling params - patch_size = 128 - overlap_factor = 0.1 - scaling_factor= 4 - - - # Step 1: Read Image and Extract 128x128 patches from the image - image_np = np.array(image_data) - - # Dividing image into small patches - emp = EMPatches() - img_patches, indices = emp.extract_patches(image_np, patchsize=patch_size, overlap=overlap_factor) - print(f"Num of patches of 128 = {len(img_patches)}") - - - # Step 2: Upscale each patch by a factor of 4 - upscaled_patches= [] - infer_time_list = [] - time_taken = 0 - for patch in img_patches: - pt, single_infer_time = upscale_patch(socket, patch, model_name, runtime) - upscaled_patches.append(pt) - time_taken = time_taken + single_infer_time ##Adding time for all patches - - print("Received upscaled patches") - - global time_taken_model - global upscaled_img_dims - time_taken_model = str(f'{time_taken*1000:.2f}')+" ms" - - - - # Step 3: Stitch back the upscaled patches into a single image - - # Calculate the upscaled stiching indices - up_img = np.zeros((image_np.shape[0]*scaling_factor, image_np.shape[1]*scaling_factor, image_np.shape[2]), np.uint8) - _, new_indices = emp.extract_patches(up_img, patchsize=patch_size*scaling_factor, overlap=overlap_factor) - - # merge with new_indices - merged_img = emp.merge_patches(upscaled_patches, new_indices, mode='min') - upscaled_img_dims = str(merged_img.shape[1]) + " x " +str(merged_img.shape[0]); - print("upscaled_img_dims: ",upscaled_img_dims) - - merged_img = Image.fromarray(np.uint8(merged_img)) - # merged_img.save("upscaled_model.png") - - # Convert the upscaled image to a binary response - output_buffer = io.BytesIO() - - merged_img.save(output_buffer, format='PNG') - - print("Sending upscaled image as output to electron ...") - output_buffer.seek(0) - return send_file(output_buffer, mimetype='image/png') - - except Exception as e: - print("#############EXCEPTION####################") - print(str(e)) - return jsonify({'error': str(e)}), 400 diff --git a/ai-solutions/ubuntu/electron-gui/python_flask_server/server.py b/ai-solutions/ubuntu/electron-gui/python_flask_server/server.py deleted file mode 100644 index 26221ce4..00000000 --- a/ai-solutions/ubuntu/electron-gui/python_flask_server/server.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of 
Qualcomm Innovation Center, Inc. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -from flask import Flask, render_template, request, jsonify, make_response, send_file -from flask_cors import CORS -from PIL import Image -from empatches import EMPatches -import io, os -import cv2 -import numpy as np -import time -import functools -import zmq -import sys - -from ImageEnhancement_blueprint import imageEnhance_bp -from SuperResolution_blueprint import superRes_bp -app = Flask(__name__, - static_url_path='', - static_folder='static') -CORS(app) - -time_taken_model = "" -upscaled_img_dims = "" -old_runtime = "" -old_model_name = "" - -app.register_blueprint(imageEnhance_bp) -app.register_blueprint(superRes_bp) - - -if __name__ == '__main__': - app.run(host='0.0.0.0', port=9081, debug=True) diff --git a/ai-solutions/ubuntu/electron-gui/python_flask_server/static/assets/test_face_lite.jpg b/ai-solutions/ubuntu/electron-gui/python_flask_server/static/assets/test_face_lite.jpg deleted file mode 100644 index f0ef1300..00000000 Binary files a/ai-solutions/ubuntu/electron-gui/python_flask_server/static/assets/test_face_lite.jpg and /dev/null differ diff --git a/ai-solutions/ubuntu/electron-gui/python_flask_server/static/assets/test_face_lr.jpg b/ai-solutions/ubuntu/electron-gui/python_flask_server/static/assets/test_face_lr.jpg deleted file mode 100644 index f0ef1300..00000000 Binary files a/ai-solutions/ubuntu/electron-gui/python_flask_server/static/assets/test_face_lr.jpg and /dev/null differ diff --git a/ai-solutions/ubuntu/electron-gui/python_flask_server/templates/index.html b/ai-solutions/ubuntu/electron-gui/python_flask_server/templates/index.html deleted file mode 100644 index b0ff9c08..00000000 --- a/ai-solutions/ubuntu/electron-gui/python_flask_server/templates/index.html +++ /dev/null @@ -1,283 +0,0 @@ - - - - - Slider Example - - - - -
- - - - - - \ No newline at end of file diff --git a/ai-solutions/ubuntu/gstreamer-cli/CMake/FindGStreamer.cmake b/ai-solutions/ubuntu/gstreamer-cli/CMake/FindGStreamer.cmake deleted file mode 100644 index 3e4148c0..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/CMake/FindGStreamer.cmake +++ /dev/null @@ -1,5 +0,0 @@ -find_package(PkgConfig) -pkg_search_module(GLIB REQUIRED glib-2.0) -pkg_check_modules(GSTREAMER REQUIRED gstreamer-1.0) -pkg_check_modules(GST_APP REQUIRED gstreamer-app-1.0) -pkg_check_modules(GST_VIDEO REQUIRED gstreamer-video-1.0) \ No newline at end of file diff --git a/ai-solutions/ubuntu/gstreamer-cli/CMakeLists.txt b/ai-solutions/ubuntu/gstreamer-cli/CMakeLists.txt deleted file mode 100644 index 62db1972..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/CMakeLists.txt +++ /dev/null @@ -1,25 +0,0 @@ -# CMake lowest version requirement -cmake_minimum_required(VERSION 3.5.1) - -# project information -project(AI-SOLUTIONS) - -include(FindPkgConfig) -pkg_check_modules(JSONCPP REQUIRED jsoncpp) -pkg_check_modules(JSON REQUIRED json-glib-1.0) -pkg_check_modules(GFLAGS REQUIRED gflags) - -set(PROJECT_ROOT ${CMAKE_CURRENT_LIST_DIR}) -set(CMAKE_MODULE_PATH ${PROJECT_ROOT}/CMake) -set(CMAKE_CXX_STANDARD 17) - -find_package(GStreamer REQUIRED) -find_package(OpenCV REQUIRED ) - -add_subdirectory("./src") - -link_directories( - ${JSONCPP_LIBRARY_DIRS} - ${JSON_LIBRARY_DIRS} - ${GFLAGS_LIBRARY_DIRS} -) diff --git a/ai-solutions/ubuntu/gstreamer-cli/README.md b/ai-solutions/ubuntu/gstreamer-cli/README.md deleted file mode 100644 index 52ac62f5..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/README.md +++ /dev/null @@ -1,123 +0,0 @@ -## Prepare Executable - Ubuntu - -### Install dependencies -```bash -apt update -apt install build-essential cmake unzip git pkg-config -apt install libjpeg-dev libpng-dev -apt-get install libjsoncpp-dev libjson-glib-dev libgflags-dev libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev libgstreamer-plugins-bad1.0-dev gstreamer1.0-plugins-base gstreamer1.0-plugins-good gstreamer1.0-plugins-bad gstreamer1.0-plugins-ugly gstreamer1.0-libav gstreamer1.0-tools gstreamer1.0-x gstreamer1.0-alsa gstreamer1.0-gl gstreamer1.0-gtk3 gstreamer1.0-qt5 gstreamer1.0-pulseaudio -apt install libavcodec-dev libavformat-dev libswscale-dev libv4l-dev -apt install libxvidcore-dev libx264-dev -apt install libgtk-3-dev -apt install libatlas-base-dev gfortran -``` - -### Installing OpenCV 4.5.5 -```bash -adb shell -wget https://codeload.github.com/opencv/opencv/tar.gz/refs/tags/4.5.5 -O opencv-4.5.5.tar.gz -tar -zxvf opencv-4.5.5.tar.gz -cd ./opencv-4.5.5 -mkdir build && cd build -cmake -D CMAKE_BUILD_TYPE=RELEASE \ - -D CMAKE_INSTALL_PREFIX=/usr/local/opencv4.5 \ - -D OPENCV_ENABLE_NONFREE=ON \ - -D OPENCV_GENERATE_PKGCONFIG=YES \ - -D WITH_QT=ON \ - -D WITH_OPENGL=ON \ - -D BUILD_EXAMPLES=OFF \ - -D INSTALL_PYTHON_EXAMPLES=OFF \ - .. -make -j8 -make install -``` - -### Setup environment -```bash -cp -r /include/SNPE/Dl* /include/ -cp -r /include/SNPE/DiagLog /include/ -cp -r /include/SNPE/Wrapper.hpp /include/ -cp -r /include/SNPE/SNPE/ /include/ -``` - -```bash -adb shell -cd -cp -r lib/aarch64-ubuntu-gcc7.5/* /usr/lib/ -cp bin/aarch64-ubuntu-gcc7.5/snpe-net-run /usr/bin/ -cp -r lib/hexagon-v66/unsigned/lib* /usr/lib/rfsa/adsp/ -chmod +x /usr/bin/snpe-net-run -snpe-net-run --version -``` -Expected output: SNPE v2.12.0.230626174329_59328 - -### Building the application -```bash -adb shell -cd -mkdir build -cd build -cmake .. 
-make -j8 -``` - -### Running the application -```bash -export XDG_RUNTIME_DIR=/run/user/root -cd build -``` -#### To run inference on input image -NOTE: Make sure "input-config-name":"image" in data/config.json -```bash -./out/ai-solutions -c ../data/config.json -i Sample1.jpg -o output.jpg -``` -#### To run inference on camera stream -NOTE: Make sure "input-config-name":"camera" in data/config.json -```bash -./out/ai-solutions -c ../data/config.json -``` - -#### Details on Input arguments: - -##### Sample config.json -model-config: -```json -"model-configs":[ - - "model-name":"QSrnet-medium", -> model name which is used while enabling solution - "model-type":"superresolution", -> To specify the use case such superresolution or detection or segmentation etc.. - "model-path":"../models/quicksrnet_medium_quantized.dlc", -> Path at which model is located on target - "runtime":"DSP", -> Select Runtime either CPU or DSP - "input-layers":[ -> Input layer of the model - "t.1" - ], - "output-layers":[ - "depth_to_space#1" -> Output layer of the model - ], - "output-tensors":[ - "65" -> Output node for post processing - ] -] -``` - -solution-config: -```json -"solution-configs":[ - { - "solution-name":"AI-Solutions", -> To identify Solution - "model-name":"mobilenet-ssd", -> Specify model name to be executed - "input-config-name":"camera", -> To read input from camera stream - "Enable":1, -> Enable specific solution - "output-type":"wayland" -> To display output on monitor - }, - { - "solution-name":"AI-Solutions", - "model-name":"mobilenet-ssd", - "input-config-name":"image", -> To read input from image - "Enable":0, - "output-type":"wayland" - } -] - - -``` \ No newline at end of file diff --git a/ai-solutions/ubuntu/gstreamer-cli/data/config.json b/ai-solutions/ubuntu/gstreamer-cli/data/config.json deleted file mode 100644 index 6839fe0a..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/data/config.json +++ /dev/null @@ -1,177 +0,0 @@ -{ - "input-configs":[ - { - "input-config-name":"camera", - "stream-type":"camera", - "stream-width":1280, - "stream-height":720, - "SkipFrame":1, - "fps-n":30, - "fps-d":1 - }, - { - "input-config-name":"image", - "stream-type":"image" - } - ], - "model-configs":[ - { - "model-name":"ssd-mobilenet-v2", - "model-type":"detection", - "model-path":"../models/ssd_mobilenetV2_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.3, - "conf-threshold":0.7, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "Softmax_350", - "Concat_397" - ], - "output-tensors":[ - "935", - "986" - ], - "global-threshold":0.2 - }, - { - "model-name":"yolo-nas", - "model-type":"detection", - "model-path":"../models/yolo_nas_s_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/heads/Sigmoid", - "/heads/Mul" - ], - "output-tensors":[ - "877", - "885" - ], - "global-threshold":0.2 - }, - { - "model-name":"yolo-x", - "model-type":"detection", - "model-path":"../models/yolox_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.3, - "input-layers":[ - "images" - ], - "output-layers":[ - "Transpose_570" - ], - "output-tensors":[ - "output" - ], - "global-threshold":0.2 - }, - - { - "model-name":"DeepLabv3Plus-resnet++", - "model-type":"segmentation", - "model-path":"../models/DeepLabv3Plus_resnet101_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "Resize_284" - ], - 
"output-tensors":[ - "1089" - ], - "global-threshold":0.2 - }, - { - "model-name":"DeepLabv3-resnet101", - "model-type":"segmentation", - "model-path":"../models/deeplabv3_resnet101_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Resize_1" - ], - "output-tensors":[ - "1089" - ], - "global-threshold":0.2 - }, - { - "model-name":"DeepLabv3-resnet50", - "model-type":"segmentation", - "model-path":"../models/deeplabv3_resnet50_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Resize_1" - ], - "output-tensors":[ - "613" - ], - "global-threshold":0.2 - }, - { - "model-name":"FCN_resnet101", - "model-type":"segmentation", - "model-path":"../models/fcn_resnet101_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Resize_1" - ], - "output-tensors":[ - "1018" - ], - "global-threshold":0.2 - }, - { - "model-name":"FCN_resnet50", - "model-type":"segmentation", - "model-path":"../models/fcn_resnet50_quantized.dlc", - "runtime":"DSP", - "nms-threshold":0.4, - "conf-threshold":0.4, - "input-layers":[ - "input.1" - ], - "output-layers":[ - "/Resize_1" - ], - "output-tensors":[ - "542" - ], - "global-threshold":0.2 - } - ], - "solution-configs":[ - { - "solution-name":"AI-Solutions", - "model-name":"yolo-nas", - "input-config-name":"camera", - "Enable":1, - "output-type":"wayland" - } - ] -} diff --git a/ai-solutions/ubuntu/gstreamer-cli/inc/Configuration.h b/ai-solutions/ubuntu/gstreamer-cli/inc/Configuration.h deleted file mode 100644 index 6cce6ab6..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/inc/Configuration.h +++ /dev/null @@ -1,118 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef CONFIGURATION_H_ -#define CONFIGURATION_H_ - -#include -#include -#include -#include "Utils.h" - -using namespace cv; -using namespace std; - -const string input_configs = "input-configs"; -const string model_configs = "model-configs"; -const string solution_configs = "solution-configs"; - -// Input Configs; -const string pipeline_input_config = "input-config-name"; -const string stream_type = "stream-type"; -const string camera_url = "camera-url"; -const string skipframe = "SkipFrame"; - -// Model Configs -const string model_config_name = "model-name"; -const string model_type = "model-type"; -const string model_path = "model-path"; -const string runtime = "runtime"; -const string nms_threshold = "nms-threshold"; -const string conf_threshold = "conf-threshold"; -const string input_layers = "input-layers"; -const string output_layers = "output-layers"; -const string output_tensors = "output-tensors"; - -// Solution Configs -const string solution_name = "solution-name"; -const string model_name = "model-name"; -const string Enable = "Enable"; -const string solution_input_config = "input-config-name"; -const string output_type = "output-type"; - -class ObjectDetectionSnpeConfig { - public: - string model_name; - string model_type; - std::string model_path; - runtime_t runtime; - float nmsThresh; - float confThresh; - std::vector labels; - std::vector inputLayers; - std::vector outputLayers; - std::vector outputTensors; -}; - -class InputConfiguration{ - public: - int SkipFrame; - int StreamNumber=0; - string StreamType; - string Url; - string ConfigName; -}; - -class SolutionConfiguration { - public: - string solution_name; - string model_name; - string input_config_name; - bool Enable; - string output_type; - std::shared_ptr input_config; - std::shared_ptr model_config; -}; - -class DebugConfiguration -{ - public: - bool DumpData=false; - string Directory; -}; - -class Configuration -{ -public: - static Configuration &getInstance() - { - static Configuration instance; - return instance; - } - -private: - Configuration() {} -public: - Configuration(Configuration const &) = delete; - void operator=(Configuration const &) = delete; - - DebugConfiguration Debug; - ObjectDetectionSnpeConfig Config; - SolutionConfiguration Sol_Config; - std::unordered_map> inputconfigs; - std::unordered_map> modelsconfig; - std::unordered_map> solutionsconfig; - - void LoadConfiguration(string file); - int LoadInputConfig(Json::Value& input); - int LoadModelsConfig(Json::Value& models); - int LoadSolutionsConfig(Json::Value& solutions); -}; - -#endif diff --git a/ai-solutions/ubuntu/gstreamer-cli/inc/DecodeQueue.h b/ai-solutions/ubuntu/gstreamer-cli/inc/DecodeQueue.h deleted file mode 100644 index 8ffdb0fa..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/inc/DecodeQueue.h +++ /dev/null @@ -1,40 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
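DecodeQueue, declared just below, is a bounded producer/consumer queue (default capacity 64) with a timed Dequeue, a blocking Enqueue, and an Unlock call that wakes all waiters so the pipeline can shut down. A rough Python analogue of that behaviour, for illustration only:

```python
# Rough Python analogue of DecodeQueue: bounded queue with timed dequeue,
# blocking enqueue, and a stop/unlock that wakes all waiting threads.
import collections
import threading

class DecodeQueue:
    def __init__(self, max_size=64):
        self._items = collections.deque()
        self._max_size = max_size
        self._cond = threading.Condition()
        self._stopped = False

    def enqueue(self, item):
        with self._cond:
            while len(self._items) >= self._max_size and not self._stopped:
                self._cond.wait()
            if self._stopped:
                return False
            self._items.append(item)
            self._cond.notify_all()
            return True

    def dequeue(self, timeout_s):
        with self._cond:
            if not self._items and not self._stopped:
                self._cond.wait(timeout=timeout_s)
            if self._items:
                item = self._items.popleft()
                self._cond.notify_all()  # wake producers waiting on a full queue
                return item
            return None  # stopped or timed out

    def unlock(self):
        with self._cond:
            self._stopped = True
            self._cond.notify_all()
```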
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef DECODE_QUEUE_H -#define DECODE_QUEUE_H - -#include "Detection.h" -#include -#include -#include -#include - -static const int DEFAULT_MAX_QUEUE_SIZE = 64; - -class DecodeQueue -{ -public: - DecodeQueue(uint32_t maxSize = DEFAULT_MAX_QUEUE_SIZE) : max_size_(maxSize), is_stoped_(false) {} - ~DecodeQueue() {} - int Dequeue(shared_ptr& item, unsigned int timeOutMs); - int Enqueue(const shared_ptr& item, bool isWait); - void Unlock(); - std::list> GetRemainItems(); - int IsEmpty(); -private: - std::list> queue_; - std::mutex mutex_; - std::condition_variable empty_cond_; - std::condition_variable full_cond_; - uint32_t max_size_; - bool is_stoped_; -}; - -#endif \ No newline at end of file diff --git a/ai-solutions/ubuntu/gstreamer-cli/inc/Detection.h b/ai-solutions/ubuntu/gstreamer-cli/inc/Detection.h deleted file mode 100644 index de81ba1e..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/inc/Detection.h +++ /dev/null @@ -1,62 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef DETECTION_H -#define DETECTION_H - -#include -#include -#include -#include - -using namespace std; -using namespace cv; - -struct ObjectData { - // Bounding box information: top-left coordinate and width, height - cv::Rect bbox; - // Confidence of this bounding box - float confidence = -1.0f; - // The label of this Bounding box - int label = -1; - // Time cost of detecting this frame - size_t time_cost = 0; - uint32_t Width=512; - uint32_t Height=512; - cv::Mat *output=NULL; - -}; - -struct Detection -{ - cv::Rect bbox; - float score; - int label; -}; - -struct DetectionDetail -{ - vector Result; - string ModelName; -}; - -struct DetectionItem -{ - uint32_t Width; - uint32_t Height; - uint32_t FrameId; - size_t Size; - string StreamName; - int StreamId; - shared_ptr ImageBuffer; -// vector Results; - ObjectData Results; -}; - -#endif diff --git a/ai-solutions/ubuntu/gstreamer-cli/inc/DetectionSnpe.h b/ai-solutions/ubuntu/gstreamer-cli/inc/DetectionSnpe.h deleted file mode 100644 index 566f7048..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/inc/DetectionSnpe.h +++ /dev/null @@ -1,52 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
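The DETECTIONSnpe class declared below exposes computeIoU and doNMS; their implementation appears further down in DetectionSnpe.cpp: boxes are sorted by confidence and any box whose IoU with an already-kept box exceeds the NMS threshold is dropped, using a +1 convention on box widths and heights. A minimal Python sketch of that greedy NMS:

```python
# Greedy NMS as implemented in DetectionSnpe.cpp: sort by confidence and drop
# any box whose IoU with an already-kept box exceeds nms_thresh.
def iou(a, b):
    # a, b: (x, y, w, h); the original uses a +1 convention for widths/heights.
    x_overlap = max(0.0, min(a[0] + a[2], b[0] + b[2]) - max(a[0], b[0]) + 1.0)
    y_overlap = max(0.0, min(a[1] + a[3], b[1] + b[3]) - max(a[1], b[1]) + 1.0)
    inter = x_overlap * y_overlap
    union = (a[2] + 1.0) * (a[3] + 1.0) + (b[2] + 1.0) * (b[3] + 1.0) - inter
    return inter / union

def nms(detections, nms_thresh):
    # detections: list of dicts with "bbox" = (x, y, w, h) and "confidence".
    detections = sorted(detections, key=lambda d: d["confidence"], reverse=True)
    kept = []
    for det in detections:
        if all(iou(det["bbox"], k["bbox"]) <= nms_thresh for k in kept):
            kept.append(det)
    return kept
```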
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef __Detection_IMPL_H__ -#define __Detection_IMPL_H__ - -#include -#include -#include -#include - -#include "SNPERuntime.h" -#include "ModelInference.h" -#include "Configuration.h" - -namespace detectionsnpe -{ - class DETECTIONSnpe - { - public: - DETECTIONSnpe(); - ~DETECTIONSnpe(); - bool Initialize(const ObjectDetectionSnpeConfig& config); - bool DeInitialize(); - bool Detect(cv::Mat input,cv::Mat& output_image,string model_name); - bool SetScoreThresh(const float& conf_thresh, const float& nms_thresh); - bool IsInitialized() const; - - private: - bool m_isInit; - float m_nmsThresh; - float m_confThresh; - std::unique_ptr m_snperuntime; - std::vector m_inputLayers; - std::vector m_outputLayers; - std::vector m_outputTensors; - - bool PreProcessInput(const cv::Mat& frame,string model_name); - bool PostProcess( cv::Mat image,cv::Mat& output_image,string model_name); - float computeIoU(const cv::Rect& a, const cv::Rect& b); - std::vector doNMS(std::vector winList, const float& nms_thresh); - }; - -} // namespace detection - -#endif // __DETECTION_IMPL_H__ diff --git a/ai-solutions/ubuntu/gstreamer-cli/inc/LowlightSnpe.h b/ai-solutions/ubuntu/gstreamer-cli/inc/LowlightSnpe.h deleted file mode 100644 index e6ee6b75..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/inc/LowlightSnpe.h +++ /dev/null @@ -1,46 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef __LOWLIGHT_IMPL_H__ -#define __LOWLIGHT_IMPL_H__ - -#include -#include -#include -#include - -#include "SNPERuntime.h" -#include "ModelInference.h" -#include "Configuration.h" - -namespace lowlightsnpe -{ - class LOWLIGHTSnpe - { - public: - LOWLIGHTSnpe(); - ~LOWLIGHTSnpe(); - bool Initialize(const ObjectDetectionSnpeConfig& config); - bool DeInitialize(); - bool Detect(cv::Mat input,cv::Mat& output_image,string model_name); - bool IsInitialized() const; - - private: - bool m_isInit; - std::unique_ptr m_snperuntime; - std::vector m_inputLayers; - std::vector m_outputLayers; - std::vector m_outputTensors; - bool PreProcessInput(const cv::Mat& frame,string model_name); - bool PostProcess(cv::Mat& output_image,string model_name); - }; - -} // namespace lowlightsnpe - -#endif // __LOWLIGHT_IMPL_H__ diff --git a/ai-solutions/ubuntu/gstreamer-cli/inc/ModelInference.h b/ai-solutions/ubuntu/gstreamer-cli/inc/ModelInference.h deleted file mode 100644 index de94f8c6..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/inc/ModelInference.h +++ /dev/null @@ -1,35 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef MODEL_INFERENCE_H_ -#define MODEL_INFERENCE_H_ -#include "DecodeQueue.h" -#include -#include -#include -#include -#include -#include "Configuration.h" - -class ModelInference{ -public: - ModelInference(); - ModelInference(const string model_name); - int Initialization(const ObjectDetectionSnpeConfig& config); - bool IsInitialized(); - bool UnInitialization(); - ~ModelInference(); - int Inference(cv::Mat input,cv::Mat& output_image,string model_name); -private: - void *Impl = nullptr; - enum Models{DETECTION,SEGMENTATION}; - int Model; -}; - -#endif \ No newline at end of file diff --git a/ai-solutions/ubuntu/gstreamer-cli/inc/SNPERuntime.h b/ai-solutions/ubuntu/gstreamer-cli/inc/SNPERuntime.h deleted file mode 100644 index 854ae9bb..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/inc/SNPERuntime.h +++ /dev/null @@ -1,79 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef _SNPERUNTIME_H_ -#define _SNPERUNTIME_H_ - -#include -#include -#include -#include -#include - -#include "SNPE/SNPE.hpp" -#include "SNPE/SNPEFactory.hpp" -#include "SNPE/SNPEBuilder.hpp" -#include "DlSystem/DlEnums.hpp" -#include "DlSystem/DlError.hpp" -#include "DlSystem/ITensorFactory.hpp" -#include "DlSystem/IUserBufferFactory.hpp" -#include "DlSystem/TensorShape.hpp" -#include "DlContainer/IDlContainer.hpp" - -#include "Utils.h" - -namespace snperuntime { - -class SNPERuntime { -public: - SNPERuntime(); - - bool Initialize(const std::string& model_path, const runtime_t runtime); - bool Deinitialize(); - bool SetOutputLayers(std::vector& outputLayers); - - std::vector GetInputShape(const std::string& name); - std::vector GetOutputShape(const std::string& name); - - float* GetInputTensor(const std::string& name); - float* GetOutputTensor(const std::string& name); - - bool IsInit() { - return m_isInit; - } - - bool execute(); - -private: - bool m_isInit = false; - - std::unique_ptr m_container; - std::unique_ptr m_snpe; - zdl::DlSystem::Runtime_t m_runtime; - zdl::DlSystem::StringList m_outputLayers; - - std::map > m_inputShapes; - std::map > m_outputShapes; - - std::vector > m_inputUserBuffers; - std::vector > m_outputUserBuffers; - zdl::DlSystem::UserBufferMap m_inputUserBufferMap; - zdl::DlSystem::UserBufferMap m_outputUserBufferMap; - zdl::DlSystem::PerformanceProfile_t m_profile; - - void setTargetRuntime(const runtime_t runtime); - void setPerformanceProfile(const performance_t perfprofile); - - std::unordered_map> m_applicationInputBuffers; - std::unordered_map> m_applicationOutputBuffers; -}; - -} - -#endif // _SNPERUNTIME_H_ \ No newline at end of file diff --git a/ai-solutions/ubuntu/gstreamer-cli/inc/SegmentationSnpe.h b/ai-solutions/ubuntu/gstreamer-cli/inc/SegmentationSnpe.h deleted file mode 100644 index 022dd918..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/inc/SegmentationSnpe.h +++ /dev/null @@ -1,52 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef __Segmentation_IMPL_H__ -#define __Segmentation_IMPL_H__ - -#include -#include -#include -#include - -#include "SNPERuntime.h" -#include "ModelInference.h" -#include "Configuration.h" - -namespace segmentationsnpe -{ - - class SEGMENTATIONSnpe - { - public: - SEGMENTATIONSnpe(); - ~SEGMENTATIONSnpe(); - bool Initialize(const ObjectDetectionSnpeConfig& config); - bool DeInitialize(); - bool Detect(cv::Mat input,cv::Mat& output_image,string model_name); - bool SetScoreThresh(const float& conf_thresh, const float& nms_thresh); - bool IsInitialized() const; - - private: - bool m_isInit; - float m_nmsThresh; - float m_confThresh; - - std::unique_ptr m_snperuntime; - std::vector m_inputLayers; - std::vector m_outputLayers; - std::vector m_outputTensors; - - bool PreProcessInput(const cv::Mat& frame,string model_name); - bool PostProcess( cv::Mat image,cv::Mat& output_image,string model_name); - }; - -} // namespace segmentation - -#endif // __SEGMENTATION_IMPL_H__ diff --git a/ai-solutions/ubuntu/gstreamer-cli/inc/StreamDecode.h b/ai-solutions/ubuntu/gstreamer-cli/inc/StreamDecode.h deleted file mode 100644 index ff8c03c4..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/inc/StreamDecode.h +++ /dev/null @@ -1,91 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef INC_STREAM_DECODE_H -#define INC_STREAM_DECODE_H - -#include "DecodeQueue.h" -#include "Configuration.h" -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -using namespace std; -/* Structure to contain all our information, so we can pass it to callbacks */ -typedef struct _PipelineData -{ - shared_ptr pipeline; - shared_ptr source; - shared_ptr main_capsfilter;; - shared_ptr videoDepay; - shared_ptr videoParse; - shared_ptr h264dec; - shared_ptr transform; - shared_ptr sink; -} PipelineData; - -typedef struct _FrameProcessData -{ - uint32_t frameId; - int interval = 25; - shared_ptr blockQueue; - string streamName; - int StreamId; -} FrameProcessData; - - -class StreamDecode -{ -public: - StreamDecode(std::string streamtype, std::string rtspUrl); - ~StreamDecode(); - int Initialization(shared_ptr &queue); - void UnInitialization(); - void DecodeAndInference(); - void SetSkipFrame(int interval); - void SetStreamName(string name); - void SetStreamId(int uuid); - - static void OnPadAdd(GstElement *element, GstPad *pad, gpointer data); - static GstFlowReturn OnAppsinkNewSample(GstElement *appsink, gpointer user_data); - void Stop(); -protected: - static void UnRefElement(GstElement *elem); - -private: - PipelineData data_; - shared_ptr bus_ = nullptr; - bool terminate_ = FALSE; - std::string StreamType; - FrameProcessData *frameProcess_ = nullptr; - int gst_camera_pipeline_init(); -}; - -class CaptureController -{ - public: - void CreateCapture(shared_ptr &pipeline_config, shared_ptr &gDecodeQueue); - void EndOfStream(int streamId); - void StopAll(); - void InterruptClose(); - - private: - map> decoder; - vector threads; -}; - -#endif \ No newline at end of file diff --git 
a/ai-solutions/ubuntu/gstreamer-cli/inc/StreamEncode.h b/ai-solutions/ubuntu/gstreamer-cli/inc/StreamEncode.h deleted file mode 100644 index 3705ed11..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/inc/StreamEncode.h +++ /dev/null @@ -1,74 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef INC_STREAM_ENCODE_H -#define INC_STREAM_ENCODE_H - -#include "DecodeQueue.h" -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "Utils.h" -#include "Configuration.h" - -using namespace std; -/* Structure to contain all our information, so we can pass it to callbacks */ -typedef struct _EncodePipeline -{ - shared_ptr pipeline; - shared_ptr appsrc; - shared_ptr vidconv; - shared_ptr vtransform; - shared_ptr capsfilter; - shared_ptr videoscale; - shared_ptr x264enc; - shared_ptr h264parse; - shared_ptr qtmux; - shared_ptr waylandsink; - shared_ptr videoconvert; -} EncodePipeline; - -class StreamEncode{ -public: - StreamEncode()=default; - ~StreamEncode()=default; - int Initialization(string output_type); - void UnInitialization(); - void PushData(uint8_t *data, int len); - int Loop(); - void Stop(); -private: - EncodePipeline data; - shared_ptr bus=nullptr; - bool terminate=false; - string outputFile; - int gst_wayland_pipeline_init(string output_type); -}; - -class EncodeController -{ - public: - void CreateEncoder(std::shared_ptr sol_conf); - void EncodeFrame(int streamId, uint8_t *pushData, int len); - void EndOfStream(int streamId); - void Stop(); - void InterruptClose(); - private: - map> encoders; - vector threads; -}; -#endif \ No newline at end of file diff --git a/ai-solutions/ubuntu/gstreamer-cli/inc/SuperresolutionSnpe.h b/ai-solutions/ubuntu/gstreamer-cli/inc/SuperresolutionSnpe.h deleted file mode 100644 index e90e91d9..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/inc/SuperresolutionSnpe.h +++ /dev/null @@ -1,46 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef __SUPERRES_IMPL_H__ -#define __SUPERRES_IMPL_H__ - -#include -#include -#include -#include - -#include "SNPERuntime.h" -#include "ModelInference.h" -#include "Configuration.h" - -namespace superressnpe -{ - class SUPERRESSnpe - { - public: - SUPERRESSnpe(); - ~SUPERRESSnpe(); - bool Initialize(const ObjectDetectionSnpeConfig& config); - bool DeInitialize(); - bool Detect(cv::Mat input,cv::Mat& output_image,string model_name); - bool IsInitialized() const; - - private: - bool m_isInit; - std::unique_ptr m_snperuntime; - std::vector m_inputLayers; - std::vector m_outputLayers; - std::vector m_outputTensors; - - bool PreProcessInput(const cv::Mat& frame,string model_name); - bool PostProcess(cv::Mat& output_image,string model_name); - }; -} // namespace superressnpe - -#endif // __SUPERRES_IMPL_H__ diff --git a/ai-solutions/ubuntu/gstreamer-cli/inc/Utils.h b/ai-solutions/ubuntu/gstreamer-cli/inc/Utils.h deleted file mode 100644 index 427496c4..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/inc/Utils.h +++ /dev/null @@ -1,98 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#ifndef UTILS_H_ -#define UTILS_H_ - -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace std; -using namespace cv; - -using chrono::high_resolution_clock; -using chrono::duration_cast; -using chrono::duration; -using chrono::milliseconds; - -#define QS_SUCCESS 0 -#define QS_ERROR -1 - -#define PRINT(fmt, ...) { \ - printf(fmt, ##__VA_ARGS__); \ -} - -#define LOG(level, fmt, ...) { \ - PRINT("[%s] - %s: " fmt, #level, __func__, ##__VA_ARGS__); \ -} - -//#define DEBUG -#ifdef DEBUG - #define LOG_DEBUG(fmt, ...) LOG(DEBUG, fmt, ##__VA_ARGS__) -#else - #define LOG_DEBUG(fmt, ...) ((void)0) -#endif - -#define LOG_INFO(fmt, ...) { \ - LOG(INFO, fmt, ##__VA_ARGS__); \ -} - -#define LOG_WARN(fmt, ...) { \ - LOG(WARN, fmt, ##__VA_ARGS__); \ -} - -#define LOG_ERROR(fmt, ...) { \ - LOG(ERROR, fmt, ##__VA_ARGS__); \ -} - -#define IMAGE_CHAN_SIZE_F32(width, height) ((width) * (height)*4) -#define RGB_IMAGE_SIZE_F32(width, height) ((width) * (height)*3 * 4) - -// Inference hardware runtime. -typedef enum runtime { - CPU = 0, - DSP -}runtime_t; - -typedef enum PerformanceProfile { - DEFAULT = 0, - /// Run in a balanced mode. - BALANCED = 0, - /// Run in high performance mode - HIGH_PERFORMANCE = 1, - /// Run in a power sensitive mode, at the expense of performance. - POWER_SAVER = 2, - /// Use system settings. SNPE makes no calls to any performance related APIs. - SYSTEM_SETTINGS = 3, - /// Run in sustained high performance mode - SUSTAINED_HIGH_PERFORMANCE = 4, - /// Run in burst mode - BURST = 5, - /// Run in lower clock than POWER_SAVER, at the expense of performance. - LOW_POWER_SAVER = 6, - /// Run in higher clock and provides better performance than POWER_SAVER. 
- HIGH_POWER_SAVER = 7, - /// Run in lower balanced mode - LOW_BALANCED = 8, -}performance_t; - -template -void ClearVector(std::vector& vt) -{ - std::vector vtTemp; - vtTemp.swap(vt); -} - -#endif \ No newline at end of file diff --git a/ai-solutions/ubuntu/gstreamer-cli/models/README.md b/ai-solutions/ubuntu/gstreamer-cli/models/README.md deleted file mode 100644 index 22397b6f..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/models/README.md +++ /dev/null @@ -1 +0,0 @@ -Place dlc files in this folder diff --git a/ai-solutions/ubuntu/gstreamer-cli/src/CMakeLists.txt b/ai-solutions/ubuntu/gstreamer-cli/src/CMakeLists.txt deleted file mode 100644 index a0d4b817..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/src/CMakeLists.txt +++ /dev/null @@ -1,39 +0,0 @@ -cmake_minimum_required(VERSION 3.5.1) - - -# Compile options -add_compile_options(-std=c++11) - -set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "../out") -set(CMAKE_CXX_FLAGS_DEBUG "-fPIC -O0 -g -Wall") -set(CMAKE_CXX_FLAGS_RELEASE "-fPIC -O2 -Wall") - -message(STATUS "source file path" ${PROJECT_SRC_ROOT}) - -file(GLOB_RECURSE SRC_FILE - ../src/*.cpp -) - -set(SOURCE_FILE - ${SRC_FILE} -) - -add_executable(ai-solutions ${SRC_FILE}) -target_compile_options(ai-solutions PUBLIC -fPIC -O0 -g -Wall -Wnon-virtual-dtor) - -# Header path -include_directories( - "../inc" - "/usr/include/glib-2.0" - "/usr/lib/aarch64-linux-gnu/glib-2.0/include" - "/usr/include/gstreamer-1.0" - "/usr/local/include/opencv4" - ${OpenCV_INCLUDE_DIRS} - ${JSON_INCLUDE_DIRS} - ${JSONCPP_INCLUDE_DIRS} -) - -message(STATUS "JSON file path" ${JSON_INCLUDE_DIRS}) -message(STATUS "JSONCPP file path" ${JSONCPP_INCLUDE_DIRS}) - -target_link_libraries(ai-solutions PUBLIC pthread dl ${OpenCV_LIBS} ${GST_APP_LIBRARIES} ${JSON_LIBRARIES} jsoncpp SNPE jsoncpp) diff --git a/ai-solutions/ubuntu/gstreamer-cli/src/Configuration.cpp b/ai-solutions/ubuntu/gstreamer-cli/src/Configuration.cpp deleted file mode 100644 index 60e711dc..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/src/Configuration.cpp +++ /dev/null @@ -1,152 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
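Configuration.cpp, which follows, lowercases the "runtime" string from the config and maps it onto runtime_t, treating anything other than "dsp" as CPU. The same mapping in Python, for illustration:

```python
# "dsp" in any casing selects the DSP runtime; everything else falls back to CPU,
# mirroring device2runtime() in Configuration.cpp.
def device_to_runtime(device: str) -> str:
    return "DSP" if device.lower() == "dsp" else "CPU"
```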
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include "Configuration.h" -#include "Utils.h" -#include - -/** @brief To convert runtime from string to int - * @param device which contains runtime as a string - * @return int value corresponding to runtime -*/ - -static runtime_t device2runtime(std::string&& device) -{ - /** - * To convert all characters to lower case - */ - std::transform(device.begin(), device.end(), device.begin(), - [](unsigned char ch){ return tolower(ch); }); - - if (0 == device.compare("dsp")) - { - return DSP; - } - else - { - return CPU; - } -} - -/** @brief To parse Input config from config file - * @param input contains input config array -*/ -int Configuration::LoadInputConfig(Json::Value& input) -{ - if (input.isArray()) - { - int size = input.size(); - for (int i = 0; i < size; ++i) - { - std::shared_ptr inputconfig = std::shared_ptr(new InputConfiguration()); - inputconfig->ConfigName = input[i][pipeline_input_config].asString(); - inputconfig->StreamType = input[i][stream_type].asString(); - inputconfig->Url = input[i][camera_url].asString(); - inputconfig->SkipFrame = input[i][skipframe].asInt(); - inputconfigs[inputconfig->ConfigName] = inputconfig; - } - } - LOG_INFO("Input streams size=%u \n", input.size()); - return 0; -} - -/** @brief To parse model config - * @param models contains model config array - */ - -int Configuration::LoadModelsConfig(Json::Value& models) -{ - std::string line; - if (models.isArray()) - { - int size = models.size(); - for (int i = 0; i < size; ++i) - { - std::shared_ptr modelconfig = - std::shared_ptr(new ObjectDetectionSnpeConfig()); - modelconfig->model_name = models[i][model_config_name].asString(); - modelconfig->model_type = models[i][model_type].asString(); - modelconfig->model_path = models[i][model_path].asString(); - modelconfig->runtime = device2runtime(models[i][runtime].asString()); - modelconfig->nmsThresh = models[i][nms_threshold].asFloat(); - modelconfig->confThresh = models[i][conf_threshold].asFloat(); - - /** - * To access input layer names from config - */ - if (models[i]["input-layers"].isArray()) { - int num = models[i]["input-layers"].size(); - for (int j= 0; j < num; j++) { - modelconfig->inputLayers.push_back(models[i]["input-layers"][j].asString()); - } - } - /** - * To access output layer names from config - */ - if (models[i][output_layers].isArray()) { - int num = models[i]["output-layers"].size(); - for (int j = 0; j < num; j++) { - modelconfig->outputLayers.push_back(models[i]["output-layers"][j].asString()); - } - } - /** - * To access output tensor names from config - */ - if (models[i][output_tensors].isArray()) { - int num = models[i]["output-tensors"].size(); - for (int j = 0; j < num; j++) { - modelconfig->outputTensors.push_back(models[i]["output-tensors"][j].asString()); - } - } - - modelsconfig[modelconfig->model_name] = modelconfig; - } - } - - LOG_INFO("modelsconfig size = %lu \n", modelsconfig.size()); - return 0; -} - -/** @brief To parse solution config - * @param solutions contains solution array - * -*/ - -int Configuration::LoadSolutionsConfig(Json::Value& solutions) { - if (solutions.isArray()) { - int size = solutions.size(); - for (int i = 0; i < size; ++i) { - std::shared_ptr solutionconfig = std::shared_ptr(new SolutionConfiguration()); - solutionconfig->solution_name = solutions[i][solution_name].asString(); - solutionconfig->model_name = 
solutions[i][model_name].asString(); - solutionconfig->Enable = solutions[i][Enable].asBool(); - solutionconfig->input_config_name = solutions[i][solution_input_config].asString(); - solutionconfig->output_type = solutions[i][output_type].asString(); - solutionsconfig[i] = solutionconfig; - } - } - LOG_DEBUG("Solutions size %lu", solutionsconfig.size() ); - return 0; -} - - -/** @brief To parse config file - * @param configFilePath contains json file passed as an argument -*/ -void Configuration::LoadConfiguration(string configFilePath) -{ - Json::Reader reader; - Json::Value root; - std::ifstream in(configFilePath, std::ios::binary); - reader.parse(in, root); - - LoadInputConfig(root[input_configs]); - LoadModelsConfig(root[model_configs]); - LoadSolutionsConfig(root[solution_configs]); -} \ No newline at end of file diff --git a/ai-solutions/ubuntu/gstreamer-cli/src/DecodeQueue.cpp b/ai-solutions/ubuntu/gstreamer-cli/src/DecodeQueue.cpp deleted file mode 100644 index c6876372..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/src/DecodeQueue.cpp +++ /dev/null @@ -1,118 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include "DecodeQueue.h" - -/** @brief To access frames from source - * @param item to store the frame - * @param timeOutMs to wait for frame till timeout - * @return 0 if success -*/ - -int DecodeQueue::Dequeue(shared_ptr &item, unsigned int timeOutMs) -{ - std::unique_lock lock(mutex_); - auto realTime = std::chrono::milliseconds(timeOutMs); - - while (queue_.empty() && !is_stoped_) - { - empty_cond_.wait_for(lock, realTime); - } - /** - * To check if pipeline is stopped - */ - if (is_stoped_) - { - return 1; - } - /** - * To check if queue is emtpy - */ - else if (queue_.empty()) - { - return 2; - } - else - { - item = queue_.front(); - queue_.pop_front(); - } - - full_cond_.notify_one(); - - return 0; -} - -/** @brief To enqueue the frames to display or save - * @param item to push into the queue - * @param isWait to wait for frame till timeout -*/ - -int DecodeQueue::Enqueue(const shared_ptr &item, bool isWait) -{ - std::unique_lock lock(mutex_); - while (queue_.size() >= max_size_ && isWait && !is_stoped_) - { - full_cond_.wait(lock); - } - /** - * To check if pipeline is stopped - */ - if (is_stoped_) - { - return 1; - } - /** - * To check if queue_ size is greater than max size - */ - else if (queue_.size() >= max_size_) - { - return 3; - } - queue_.push_back(item); - empty_cond_.notify_one(); - return 0; -} - -/** @brief To stop the pipeline -*/ - -void DecodeQueue::Unlock() -{ - { - std::unique_lock lock(mutex_); - is_stoped_ = true; - } - - full_cond_.notify_all(); - empty_cond_.notify_all(); -} - -/** @brief To inference the remaining items -*/ -std::list> DecodeQueue::GetRemainItems() -{ - std::unique_lock lock(mutex_); - /** - * To check if pipeline is stopped - */ - if (!is_stoped_) - { - return std::list>(); - } - - return queue_; -} - -/** @brief To check if queue is empty -*/ -int DecodeQueue::IsEmpty() -{ - return queue_.empty(); -} \ No newline at end of file diff --git a/ai-solutions/ubuntu/gstreamer-cli/src/DetectionSnpe.cpp b/ai-solutions/ubuntu/gstreamer-cli/src/DetectionSnpe.cpp deleted file mode 100644 index 63ff581f..00000000 --- 
a/ai-solutions/ubuntu/gstreamer-cli/src/DetectionSnpe.cpp +++ /dev/null @@ -1,544 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -#include -#include -#include "Configuration.h" -#include "DetectionSnpe.h" - -namespace detectionsnpe -{ - - /** @brief Constructor - */ - DETECTIONSnpe::DETECTIONSnpe() : m_isInit(false),m_snperuntime(nullptr) - { - - } - - /** @brief Destructor - */ - DETECTIONSnpe::~DETECTIONSnpe() - { - DeInitialize(); - } - - /** @brief To read model config and set output layers - * @param config model config parameters - * @return true if success;false otherwise - */ - bool DETECTIONSnpe::Initialize(const ObjectDetectionSnpeConfig& config) - { - m_snperuntime = std::move(std::unique_ptr(new snperuntime::SNPERuntime())); - - m_inputLayers = config.inputLayers; - m_outputLayers = config.outputLayers; - m_outputTensors = config.outputTensors; - m_nmsThresh = config.nmsThresh; - m_confThresh = config.confThresh; - - /** - * To set output layer from model config - */ - m_snperuntime->SetOutputLayers(m_outputLayers); - /** - * To initialize snperuntime - */ - if (!m_snperuntime->Initialize(config.model_path, config.runtime)) - { - LOG_ERROR("Failed to Initialize snpe instance.\n"); - return false; - } - - m_isInit = true; - return true; - } - - /** @brief To deallocate buffers and reset - */ - bool DETECTIONSnpe::DeInitialize() - { - if (m_isInit) - { - m_snperuntime->Deinitialize(); - m_snperuntime.reset(nullptr); - } - m_isInit = false; - return true; - } - - bool DETECTIONSnpe::SetScoreThresh(const float& conf_thresh, const float& nms_thresh = 0.5) - { - this->m_nmsThresh = nms_thresh; - this->m_confThresh = conf_thresh; - return true; - } - - bool DETECTIONSnpe::IsInitialized() const - { - return m_isInit; - } - - /** @brief To preprocess input image - * @param input_image Input image for inference - * @return true if succuess; false otherwise - */ - bool DETECTIONSnpe::PreProcessInput(const cv::Mat& input_image,string model_name) - { - if (input_image.empty()) { - LOG_ERROR("Invalid image!\n"); - return false; - } - - auto inputShape = m_snperuntime->GetInputShape(m_inputLayers[0]); - size_t model_h = inputShape[1]; - size_t model_w = inputShape[2]; - - if (m_snperuntime->GetInputTensor(m_inputLayers[0]) == nullptr) - { - LOG_ERROR("Empty input tensor\n"); - return false; - } - - cv::Mat image = cv::Mat(model_h,model_w, CV_32FC3, Scalar(0.)); - cv::resize(input_image,image,cv::Size(model_h,model_w)); - cv::Mat input(model_h, model_w, CV_32FC3, m_snperuntime->GetInputTensor(m_inputLayers[0])); - - if(model_name.compare("ssd-mobilenet-v2") == 0 ) - { - cv::cvtColor(image, image, cv::COLOR_BGR2RGB); - image.convertTo(image, CV_32S); - subtract(image,Scalar(123.0, 117.0, 104.0),image); - image.convertTo(input, CV_32FC3, 1.0); - } - else if(model_name.compare("yolo-nas") == 0) - { - image.convertTo(input, CV_32FC3, 1/255.0); - } - else if(model_name.compare("yolo-x") == 0) - { - image.convertTo(input, CV_32FC3, 1.0); - } - - return true; - } - - /** @brief To preprocess,execute and postprocess - * @param input_image Input image for inference - * @param output_image Inference output image - * @param model_name To identify model for specific 
post-processing - * @return true if success; false otherwise - */ - bool DETECTIONSnpe::Detect(cv::Mat image,cv::Mat& output_image,string model_name) - { - /** - * Preprocessing image - */ - if(PreProcessInput(image, model_name) != true) - { - LOG_ERROR("PreProcess failed\n"); - return false; - } - /** - * Inferencing model on target - */ - if (!m_snperuntime->execute()) { - LOG_ERROR("SNPERuntime execute failed."); - return false; - } - /** - * Postprocessing to extract bounding boxes - */ - if(PostProcess(image,output_image,model_name) != true) - { - LOG_ERROR("PostProcess failed\n"); - return false; - } - return true; - } - - float DETECTIONSnpe::computeIoU(const cv::Rect& a, const cv::Rect& b) - { - float xOverlap = std::max( - 0., - std::min(a.x + a.width, b.x + b.width) - std::max(a.x, b.x) + 1.); - float yOverlap = std::max( - 0., - std::min(a.y + a.height, b.y + b.height) - std::max(a.y, b.y) + 1.); - float intersection = xOverlap * yOverlap; - float unio = - (a.width + 1.) * (a.height + 1.) + - (b.width + 1.) * (b.height + 1.) - intersection; - return intersection / unio; - } - - std::vector DETECTIONSnpe::doNMS(std::vector winList, const float& nms_thresh) - { - if (winList.empty()) { - return winList; - } - - std::sort(winList.begin(), winList.end(), [] (const ObjectData& left, const ObjectData& right) { - if (left.confidence > right.confidence) { - return true; - } else { - return false; - } - }); - - std::vector flag(winList.size(), false); - for (unsigned int i = 0; i < winList.size(); i++) { - if (flag[i]) { - continue; - } - - for (unsigned int j = i + 1; j < winList.size(); j++) { - if (computeIoU(winList[i].bbox, winList[j].bbox) > nms_thresh) { - flag[j] = true; - } - } - } - - std::vector ret; - for (unsigned int i = 0; i < winList.size(); i++) { - if (!flag[i]) - ret.push_back(winList[i]); - } - return ret; - } - - /** @brief Object Detection postprocess - * @param output_image Image with bounding boxes - * @param model_name To identify model for specific post-processing - */ - bool DETECTIONSnpe::PostProcess( cv::Mat image,cv::Mat& output_image,string model_name) - { - int width = image.cols, height = image.rows; - cv::resize(image,output_image,cv::Size(width,height)); - - if(model_name.compare("ssd-mobilenet-v2") == 0) - { - vectorclasses = { - "background","aeroplane","bicycle","bird","boat", - "bottle","bus","car","cat","chair","cow", - "diningtable","dog","horse","motorbike","person", - "pottedplant","sheep","sofa","train","tvmonitor", - }; - - auto outputShape_score = m_snperuntime->GetOutputShape(m_outputTensors[0]); - int elements_score = outputShape_score[1]; - int channels_score = outputShape_score[2]; - - auto outputShape_box = m_snperuntime->GetOutputShape(m_outputTensors[1]); - float *score_confidence = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - float *box_coordinates = m_snperuntime->GetOutputTensor(m_outputTensors[1]); - - if( (score_confidence == nullptr) || (box_coordinates == nullptr) ) - { - return false; - } - for(size_t class_index = 1; class_index winList; - for(int row=0; row m_confThresh && (class_pred==class_index) ) - { - ObjectData rect; - rect.bbox.x = box_coordinates[row*4 ] * width; - rect.bbox.y = box_coordinates[row*4+ 1] * height; - rect.bbox.width = box_coordinates[row*4 + 2] * width; - rect.bbox.height = box_coordinates[row*4 + 3] * height; - rect.confidence = value; - rect.label = class_pred; - winList.push_back(rect); - } - } - } - winList = doNMS(winList, m_nmsThresh); - for(size_t i =0;i classes = { - "person", 
"bicycle", "car", "motorcycle", "airplane", "bus", - "train", "truck", "boat", "traffic", "fire", "stop", "parking", - "bench", "bird", "cat", "dog", "horse", "sheep", "cow", - "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", - "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", - "sports", "kite", "baseball", "baseball", "skateboard", "surfboard", - "tennis", "bottle", "wine", "cup", "fork", "knife","spoon", - "bowl", "banana", "apple", "sandwich", "orange", "broccoli", - "carrot", "hot", "pizza", "donut", "cake", "chair", "couch", - "potted", "bed", "dining", "toilet", "tv", "laptop", "mouse", - "remote", "keyboard", "cell", "microwave", "oven", "toaster", - "sink", "refrigerator", "book", "clock", "vase", "scissors", - "teddy", "hair", "toothbrush" - }; - - float *class_scores = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - auto outputShape_scores = m_snperuntime->GetOutputShape(m_outputTensors[0]); - float *bboxes = m_snperuntime->GetOutputTensor(m_outputTensors[1]); - auto outputShape_bboxes = m_snperuntime->GetOutputShape(m_outputTensors[1]); - - if( (class_scores == nullptr) || (bboxes == nullptr) ) - { - return false; - } - float ratio1 = width/320.0; - float ratio2 = height/320.0; - - int out_coordinates = outputShape_scores[1]; - int out_scores = outputShape_scores[2]; - - std::vector winList; - for(int i =0;i= m_confThresh) - { - float x1 = bboxes[i*4 ]*ratio1; - float y1 = bboxes[i*4 + 1]*ratio2; - float x2 = bboxes[i*4 + 2]*ratio1; - float y2 = bboxes[i*4 + 3]*ratio2; - ObjectData rect; - rect.bbox.x = x1 ; - rect.bbox.y = y1 ; - rect.bbox.width = x2 - x1; - rect.bbox.height = y2 - y1; - rect.confidence = class_scores[out_scores*i + j]; - rect.label = j; - winList.push_back(rect); - } - } - } - winList = doNMS(winList,m_nmsThresh); - for(size_t i =0;i classes = { - "person", "bicycle", "car", "motorcycle", "airplane", "bus", - "train", "truck", "boat", "traffic", "fire", "stop", "parking", - "bench", "bird", "cat", "dog", "horse", "sheep", "cow", - "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", - "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", - "sports", "kite", "baseball", "baseball", "skateboard", "surfboard", - "tennis", "bottle", "wine", "cup", "fork", "knife","spoon", - "bowl", "banana", "apple", "sandwich", "orange", "broccoli", - "carrot", "hot", "pizza", "donut", "cake", "chair", "couch", - "potted", "bed", "dining", "toilet", "tv", "laptop", "mouse", - "remote", "keyboard", "cell", "microwave", "oven", "toaster", - "sink", "refrigerator", "book", "clock", "vase", "scissors", - "teddy", "hair", "toothbrush" - }; - - float *scores = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - auto outputShape = m_snperuntime->GetOutputShape(m_outputTensors[0]); - - if(scores == nullptr) - { - return false; - } - int model_h = outputShape[1]; - int model_w = outputShape[2]; - float output[model_h][model_w]; - - for(int i=0;i grid; - static vector expanded_stride; - static int sum=0; - if(flag == false) - { - const int strides[3] = {8, 16, 32}; - int hsizes[3] = {80, 40, 20}; - int wsizes[3] = {80, 40, 20}; - - vector> grids, expanded_strides; - - for(int i=0;i<3;i++) - { - vector grid; - vector expanded_stride; - for(int j=0; j> boxes; - vector> scores_vec; - for(int i=0;i box; - for(int j=0;j<4;j++) - { - box.push_back(output[i][j]); - } - boxes.push_back(box); - } - - for(int i=0;i score; - float val = output[i][4]; - for(int j=5;j<85;j++) - { - score.push_back(output[i][j] * val); - } - 
scores_vec.push_back(score); - } - - std::vector winList; - for(int i=0;i=m_confThresh) - { - for(int j=0;j<4;j++) - { - int x1 = boxes[i][0]; - int y1 = boxes[i][1]; - int x2 = boxes[i][2]; - int y2 = boxes[i][3]; - - int x = (int)(x1 - x2/2); - int y = (int)(y1 - y2/2); - int w = (int)(x1 + x2/2); - int h = (int)(y1 + y2/2); - - ObjectData rect; - float ratio1 = width/640.0; - float ratio2 = height/640.0; - rect.bbox.x = x * ratio1; - rect.bbox.y = y * ratio2; - rect.bbox.width = w *ratio1; - rect.bbox.height = h *ratio2; - rect.confidence = maxScore; - rect.label = maxClassIndex.y; - - winList.push_back(rect); - } - } - } - - winList = doNMS(winList, m_nmsThresh); - for(size_t i =0;i(Impl); - } - else if(Model == SEGMENTATION) - { - delete static_cast(Impl); - } - Impl = nullptr; - } -} - -/** @brief For model inference - * @param item contains image buffer and results object to store results - * @return true if success -*/ -int ModelInference::Inference(cv::Mat input,cv::Mat& output_image,string model_name) -{ - int ret=0; - if (nullptr != Impl && IsInitialized()) - { - if(Model == DETECTION) - { - ret = static_cast(Impl)->Detect(input, output_image,model_name); - } - else if(Model == SEGMENTATION) - { - ret = static_cast(Impl)->Detect(input,output_image, model_name); - } - } - return ret; -} - -/** @brief To intialize SNPE - * @param contains SNPE configuration - * @return true if success -*/ -int ModelInference::Initialization(const ObjectDetectionSnpeConfig& config) -{ - int ret=0; - if (IsInitialized()) { - if(Model == DETECTION) - { - ret = static_cast(Impl)->DeInitialize() && static_cast(Impl)->Initialize(config); - } - else if(Model == SEGMENTATION) - { - ret = static_cast(Impl)->DeInitialize() && static_cast(Impl)->Initialize(config); - } - } - else - { - if(Model == DETECTION) - { - ret = static_cast(Impl)->Initialize(config); - } - else if(Model == SEGMENTATION) - { - ret = static_cast(Impl)->Initialize(config); - } - } - return ret; -} - -/** @brief To uninitialize SNPE - * @return true if success -*/ -bool ModelInference::UnInitialization() -{ - bool ret=false; - if (nullptr != Impl && IsInitialized()) - { - if(Model == DETECTION) - { - ret = static_cast(Impl)->DeInitialize(); - } - else if(Model == SEGMENTATION) - { - ret = static_cast(Impl)->DeInitialize(); - } - } - else - { - LOG_ERROR("ObjectDetection: deinit failed!\n"); - ret = false; - } - return ret; -} - -/** @brief To check if SNPE is initialized - * @return true if already inititalized -*/ -bool ModelInference::IsInitialized() -{ - bool ret=false; - if(Model == DETECTION) - { - ret = static_cast(Impl)->IsInitialized(); - } - else if(Model == SEGMENTATION) - { - ret = static_cast(Impl)->IsInitialized(); - } - return ret; -} - diff --git a/ai-solutions/ubuntu/gstreamer-cli/src/SNPERuntime.cpp b/ai-solutions/ubuntu/gstreamer-cli/src/SNPERuntime.cpp deleted file mode 100644 index febc4e0f..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/src/SNPERuntime.cpp +++ /dev/null @@ -1,426 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
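SNPERuntime.cpp below creates SNPE user buffers and computes byte strides for tightly packed float tensors; its comment gives the worked example of a 2x4x3 float tensor packed into 96 bytes having strides (48, 12, 4). A small Python sketch of that stride calculation:

```python
# Byte strides of a tightly packed float32 tensor, innermost dimension last,
# matching the computation in SNPERuntime's CreateUserBuffer.
def tight_strides(shape, element_size=4):
    strides = [element_size] * len(shape)
    for i in range(len(shape) - 1, 0, -1):
        strides[i - 1] = strides[i] * shape[i]
    return strides

assert tight_strides([2, 4, 3]) == [48, 12, 4]  # worked example from the source comment
```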
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include "SNPERuntime.h" - -namespace snperuntime{ - - /** @brief SNPE constructor - */ - SNPERuntime::SNPERuntime() - { - static zdl::DlSystem::Version_t version = zdl::SNPE::SNPEFactory::getLibraryVersion(); - LOG_INFO("Using SNPE: '%s' \n", version.asString().c_str()); - } - - /** @brief To calculate buffer size for memory allocation - * @return buffer size - */ - static size_t calcSizeFromDims(const zdl::DlSystem::Dimension* dims, size_t rank, size_t elementSize) - { - if (rank == 0) return 0; - size_t size = elementSize; - while (rank--) { - size *= *dims; - dims++; - } - return size; - } - - /** @brief To create userbuffer - */ - void CreateUserBuffer(zdl::DlSystem::UserBufferMap& userBufferMap, - std::unordered_map>& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - const zdl::DlSystem::TensorShape& bufferShape, - const char* name) - { - size_t bufferElementSize = sizeof(float); - - /** - * To calculate stride based on buffer strides - * Note: Strides = Number of bytes to advance to the next element in each dimension. - * For example, if a float tensor of dimension 2x4x3 is tightly packed in a buffer of 96 bytes, then the strides would be (48,12,4) - */ - std::vector strides(bufferShape.rank()); - strides[strides.size() - 1] = bufferElementSize; - size_t stride = strides[strides.size() - 1]; - for (size_t i = bufferShape.rank() - 1; i > 0; i--) - { - stride *= bufferShape[i]; - strides[i - 1] = stride; - } - - size_t bufSize = calcSizeFromDims(bufferShape.getDimensions(), bufferShape.rank(), bufferElementSize); - - /** - * To set the buffer encoding type - */ - zdl::DlSystem::UserBufferEncodingFloat userBufferEncodingFloat; - /** - * To create user-backed storage to load input data onto it - */ - applicationBuffers.emplace(name, std::vector(bufSize / bufferElementSize)); - /** - * To create SNPE user buffer from the user-backed buffer - */ - zdl::DlSystem::IUserBufferFactory& ubFactory = zdl::SNPE::SNPEFactory::getUserBufferFactory(); - snpeUserBackedBuffers.push_back(ubFactory.createUserBuffer((void*)applicationBuffers.at(name).data(), - bufSize, - strides, - &userBufferEncodingFloat)); - /** - * To add the user-backed buffer to the inputMap, which is later on fed to the network for execution - */ - if (snpeUserBackedBuffers.back() == nullptr) - { - std::cerr << "Error while creating user buffer." << std::endl; - } - userBufferMap.add(name, snpeUserBackedBuffers.back().get()); - } - - /** @brief To set SNPERuntime - * @param runtime contains SNPERuntime value - */ - void SNPERuntime::setTargetRuntime(const runtime_t runtime) - { - switch (runtime) { - case DSP: - m_runtime = zdl::DlSystem::Runtime_t::DSP; - break; - default: - m_runtime = zdl::DlSystem::Runtime_t::CPU; - break; - } - - if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(m_runtime)) { - LOG_ERROR("Selected runtime not present. 
Falling back to CPU.\n"); - m_runtime = zdl::DlSystem::Runtime_t::CPU; - } - } - - /** @brief To set performance profile - * @param perfprofile contains performance value - */ - void SNPERuntime::setPerformanceProfile(const performance_t perfprofile) - { - switch (perfprofile) { - case BALANCED: - m_profile = zdl::DlSystem::PerformanceProfile_t::BALANCED; - break; - case HIGH_PERFORMANCE: - m_profile = zdl::DlSystem::PerformanceProfile_t::HIGH_PERFORMANCE; - break; - case POWER_SAVER: - m_profile = zdl::DlSystem::PerformanceProfile_t::POWER_SAVER; - break; - case SUSTAINED_HIGH_PERFORMANCE: - m_profile = zdl::DlSystem::PerformanceProfile_t::SUSTAINED_HIGH_PERFORMANCE; - break; - case BURST: - m_profile = zdl::DlSystem::PerformanceProfile_t::BURST; - break; - case LOW_POWER_SAVER: - m_profile = zdl::DlSystem::PerformanceProfile_t::LOW_POWER_SAVER; - break; - case HIGH_POWER_SAVER: - m_profile = zdl::DlSystem::PerformanceProfile_t::HIGH_POWER_SAVER; - break; - case LOW_BALANCED: - m_profile = zdl::DlSystem::PerformanceProfile_t::LOW_BALANCED; - break; - case SYSTEM_SETTINGS: - m_profile = zdl::DlSystem::PerformanceProfile_t::SYSTEM_SETTINGS; - break; - default: - m_profile = zdl::DlSystem::PerformanceProfile_t::BALANCED; - break; - } - LOG_DEBUG("Choose performance: %d, Set performance: %d \n", perfprofile, (int)m_profile); - } - - /** @brief To initialize SNPERuntime - * @param dlc_path contains dlc path from the config file - * @param runtime SNPERuntime value - * @return true if success; false otherwise - */ - bool SNPERuntime::Initialize(const std::string& dlc_path, const runtime_t runtime) - { - setTargetRuntime(runtime); - setPerformanceProfile(BURST); - /** - * To read dlc from dlc_path - */ - m_container = zdl::DlContainer::IDlContainer::open(dlc_path); - /** - * To create snpeBuilder from m_container based on runtime,performance profile - */ - std::vector runtimeStrVector; - switch (runtime) - { - case CPU: - runtimeStrVector.push_back("cpu_float32"); - runtimeStrVector.push_back("dsp_fixed8_tf"); - LOG_INFO("Runtime = CPU \n"); - break; - - case DSP: - runtimeStrVector.push_back("dsp_fixed8_tf"); - runtimeStrVector.push_back("cpu_float32"); - LOG_INFO("Runtime = DSP \n"); - break; - - } - //std::vector runtimeStrVector = {"dsp_fixed8_tf","gpu_float16","cpu_float32"}; - zdl::DlSystem::RuntimeList runtimeList; - - runtimeList.clear(); - for(auto& runtimeStr : runtimeStrVector) - { - zdl::DlSystem::Runtime_t runtime = zdl::DlSystem::RuntimeList::stringToRuntime(runtimeStr.c_str()); - if(runtime != zdl::DlSystem::Runtime_t::UNSET) - { - bool ret = runtimeList.add(runtime); - if(ret == false) - { - std::cerr <getInputTensorNames(); - if (!inputNamesOpt) throw std::runtime_error("Error obtaining input tensor names\n"); - const zdl::DlSystem::StringList& inputNames = *inputNamesOpt; - - /** - * To create SNPE user buffers for each application storage buffer - */ - for (const char* name : inputNames) - { - /** - * To get attributes of buffer by name - */ - auto bufferAttributesOpt = m_snpe->getInputOutputBufferAttributes(name); - if (!bufferAttributesOpt) - { - LOG_ERROR("Error obtaining attributes for input tensor: %s\n", name); - return false; - } - - const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); - std::vector tensorShape; - for (size_t j = 0; j < bufferShape.rank(); j++) - { - tensorShape.push_back(bufferShape[j]); - } - m_inputShapes.emplace(name, tensorShape); - - CreateUserBuffer(m_inputUserBufferMap, m_applicationInputBuffers, m_inputUserBuffers, 
bufferShape, name); - } - - /** - * To get output tensor names of the network that need to be populated - */ - const auto& outputNamesOpt = m_snpe->getOutputTensorNames(); - if (!outputNamesOpt) throw std::runtime_error("Error obtaining output tensor names\n"); - const zdl::DlSystem::StringList& outputNames = *outputNamesOpt; - - /** - * To create SNPE user buffers for each application storage buffer - */ - for (const char* name : outputNames) - { - // get attributes of buffer by name - auto bufferAttributesOpt = m_snpe->getInputOutputBufferAttributes(name); - if (!bufferAttributesOpt) - { - LOG_ERROR("Error obtaining attributes for input tensor: %s\n", name); - return false; - } - - const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); - std::vector tensorShape; - for (size_t j = 0; j < bufferShape.rank(); j++) { - tensorShape.push_back(bufferShape[j]); - } - m_outputShapes.emplace(name, tensorShape); - - CreateUserBuffer(m_outputUserBufferMap, m_applicationOutputBuffers, m_outputUserBuffers, bufferShape, name); - } - - m_isInit = true; - - return true; - } - - /** @brief To deinitialize SNPERuntime - */ - bool SNPERuntime::Deinitialize() - { - if (nullptr != m_snpe) - { - m_snpe.reset(nullptr); - } - - for (auto [k, v] : m_applicationInputBuffers) ClearVector(v); - for (auto [k, v] : m_applicationOutputBuffers) ClearVector(v); - return true; - } - - /** @brief To store output layers for each model - * @param outputlayers contains output layers defined in the config file - */ - bool SNPERuntime::SetOutputLayers(std::vector& outputLayers) - { - for (size_t i = 0; i < outputLayers.size(); i ++) - { - m_outputLayers.append(outputLayers[i].c_str()); - } - - return true; - } - - /** @brief To get input shape for each model - * @param name contains name of input layer - * @return shape of input layer if success; empty otherwise - */ - std::vector SNPERuntime::GetInputShape(const std::string& name) - { - /** - * To check if runtime is initialized and layer name is a part of input - */ - if (IsInit()) { - if (m_inputShapes.find(name) != m_inputShapes.end()) - { - return m_inputShapes.at(name); - } - LOG_ERROR("Can't find any input layer named %s\n", name.c_str()); - return {}; - } else { - LOG_ERROR("GetInputShape Failed: SNPE Init Failed !!!\n"); - return {}; - } - } - - /** @brief To get output shape for each model - * @param name contains name of output layers - * @return shape of output layer if success; empty otherwise - */ - std::vector SNPERuntime::GetOutputShape(const std::string& name) - { - /** - * To check if runtime is initialized and layer name is a part of output - */ - if (IsInit()) - { - if (m_outputShapes.find(name) != m_outputShapes.end()) - { - return m_outputShapes.at(name); - } - LOG_ERROR("Can't find any ouput layer named %s\n", name.c_str()); - return {}; - } - else - { - LOG_ERROR("GetOutputShape Failed: SNPE Init Failed !!!\n"); - return {}; - } - } - - - /** @brief To get input tensor for each model - * @param name contains name of input layer - * @return shape of input tensor if success; NULL otherwise - */ - float* SNPERuntime::GetInputTensor(const std::string& name) - { - /** - * To check if runtime is initialized and layer name is a part of input - */ - if (IsInit()) - { - if (m_applicationInputBuffers.find(name) != m_applicationInputBuffers.end()) - { - return m_applicationInputBuffers.at(name).data(); - } - LOG_ERROR("Can't find any input tensor named '%s' \n", name.c_str()); - return nullptr; - } - else - { - LOG_ERROR("GetInputTensor 
Failed: SNPE Init Failed !!!\n"); - return nullptr; - } - } - - /** @brief To get output tensor for each model - * @param name contains name of output layer - * @return shape of output tensor if success; NULL otherwise - */ - - float* SNPERuntime::GetOutputTensor(const std::string& name) - { - /** - * To check if runtime is initialized and layer name is a part of output - */ - if (IsInit()) - { - if (m_applicationOutputBuffers.find(name) != m_applicationOutputBuffers.end()) - { - return m_applicationOutputBuffers.at(name).data(); - } - LOG_ERROR("Can't find any output tensor named '%s' \n", name.c_str()); - return nullptr; - } - else - { - LOG_ERROR("GetOutputTensor Failed: SNPE Init Failed !!!"); - return nullptr; - } - } - - /** @brief To execute inference on target - * @return QS_SUCCESS if success; QS_FAIL otherwise - */ - bool SNPERuntime::execute() - { - if (!m_snpe->execute(m_inputUserBufferMap, m_outputUserBufferMap)) - { - LOG_ERROR("SNPE Task execute failed: %s\n", zdl::DlSystem::getLastErrorString()); - return false; - } - - return true; - } - -} // namespace snperuntime \ No newline at end of file diff --git a/ai-solutions/ubuntu/gstreamer-cli/src/SegmentationSnpe.cpp b/ai-solutions/ubuntu/gstreamer-cli/src/SegmentationSnpe.cpp deleted file mode 100644 index 72073883..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/src/SegmentationSnpe.cpp +++ /dev/null @@ -1,482 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include -#include -#include -#include "Configuration.h" -#include "SegmentationSnpe.h" - -namespace segmentationsnpe -{ - - /** @brief Constructor - */ - SEGMENTATIONSnpe::SEGMENTATIONSnpe() : m_isInit(false),m_snperuntime(nullptr) - { - - } - - /** @brief Destructor - */ - SEGMENTATIONSnpe::~SEGMENTATIONSnpe() - { - DeInitialize(); - } - - /** @brief To read model config and set output layers - * @param config model config parameters - * @return true if success;false otherwise - */ - bool SEGMENTATIONSnpe::Initialize(const ObjectDetectionSnpeConfig& config) - { - m_snperuntime = std::move(std::unique_ptr(new snperuntime::SNPERuntime())); - - m_inputLayers = config.inputLayers; - m_outputLayers = config.outputLayers; - m_outputTensors = config.outputTensors; - m_nmsThresh = config.nmsThresh; - m_confThresh = config.confThresh; - - /** - * To set output layer from model config - */ - m_snperuntime->SetOutputLayers(m_outputLayers); - /** - * To initialize snperuntime - */ - if (!m_snperuntime->Initialize(config.model_path, config.runtime)) { - LOG_ERROR("Failed to Initialize snpe instance.\n"); - return false; - } - - m_isInit = true; - return true; - } - - /** @brief To deallocate buffers - */ - bool SEGMENTATIONSnpe::DeInitialize() - { - if (m_isInit) { - m_snperuntime->Deinitialize(); - m_snperuntime.reset(nullptr); - } - - m_isInit = false; - return true; - } - - bool SEGMENTATIONSnpe::SetScoreThresh(const float& conf_thresh, const float& nms_thresh = 0.5) - { - this->m_nmsThresh = nms_thresh; - this->m_confThresh = conf_thresh; - return true; - } - - bool SEGMENTATIONSnpe::IsInitialized() const - { - return m_isInit; - } - - /** @brief To preprocess input image - * @param input_image Input image for inference - * @return true if succuess; false 
otherwise - */ - bool SEGMENTATIONSnpe::PreProcessInput(const cv::Mat& input_image,string model_name) - { - if (input_image.empty()) { - LOG_ERROR("Invalid image!\n"); - return false; - } - - auto inputShape = m_snperuntime->GetInputShape(m_inputLayers[0]); - int model_h = inputShape[1]; - int model_w = inputShape[2]; - - if (m_snperuntime->GetInputTensor(m_inputLayers[0]) == nullptr) - { - LOG_ERROR("Empty input tensor\n"); - return false; - } - - cv::Mat image = cv::Mat(model_h,model_w, CV_32FC3, Scalar(0.)); - cv::resize(input_image,image,cv::Size(model_h,model_w)); - cv::Mat input(model_h, model_w, CV_32FC3, m_snperuntime->GetInputTensor(m_inputLayers[0])); - - if(model_name.compare("DeepLabv3Plus-resnet++") == 0 || model_name.compare("DeepLabv3-resnet101") == 0 || model_name.compare("DeepLabv3-resnet50") == 0 || model_name.compare("FCN_resnet101") == 0 || model_name.compare("FCN_resnet50") == 0) - { - cv::resize(image,image,cv::Size(model_w,model_h)); - image.convertTo(input,CV_32FC3,1.0); - const float mean_vals[3] = {0.485, 0.456, 0.406}; - const float norm_vals[3] = {0.229, 0.224, 0.225}; - for (int i = 0; i < input.rows; i++) - { - float* pdata = (float*)(input.data + i * input.step); - for (int j = 0; j < input.cols; j++) - { - float x = pdata[2], y=pdata[1], z = pdata[0]; - pdata[0] = (x / 255.0 - mean_vals[0]) / norm_vals[0]; - pdata[1] = (y / 255.0 - mean_vals[1]) / norm_vals[1]; - pdata[2] = (z / 255.0 - mean_vals[2]) / norm_vals[2]; - pdata += 3; - } - } - } - return true; - } - - /** @brief To preprocess,execute and postprocess - * @param input_image Input image for inference - * @param output_image Inference output image - * @param model_name To identify model for specific post-processing - * @return true if success; false otherwise - */ - bool SEGMENTATIONSnpe::Detect(cv::Mat image,cv::Mat& output_image,string model_name) - { - /** - * Preprocessing image - */ - if(PreProcessInput(image, model_name) != true) - { - LOG_ERROR("PreProcess failed\n"); - return false; - } - /** - * Inferencing model on target - */ - if (!m_snperuntime->execute()) { - LOG_ERROR("SNPERuntime execute failed."); - return false; - } - /** - * Postprocessing - */ - if(PostProcess(image,output_image,model_name) != true) - { - LOG_ERROR("PostProcess failed\n"); - return false; - } - return true; - } - - /** @brief postprocess to overlay segmentation - * @param output_image Overlayed image - * @param model_name To identify model for specific post-processing - */ - bool SEGMENTATIONSnpe::PostProcess( cv::Mat image,cv::Mat& output_image,string model_name) - { - auto outputShape = m_snperuntime->GetOutputShape(m_outputTensors[0]); - float *predOutput = m_snperuntime->GetOutputTensor(m_outputTensors[0]); - - if( predOutput == nullptr) - { - return false; - } - - int height = outputShape[1]; - int width = outputShape[2]; - int channels = outputShape[3]; - - cv::Mat temp = cv::Mat(height,width, CV_8UC3); - vector app_vect; - - float ***app = new float**[height]; - for (int i = 0; i < height; ++i) - { - app[i] = new float*[width]; - for (int j = 0; j < width; ++j) - app[i][j] = new float[channels]; - } - - for(int i = 0;i app_t_vec; - - for(int i = 0;i < channels;i++) - { - for (int j = 0; j < width; j++) - { - for (int k = 0; k < height; k++) - { - float x = app[j][k][i]; - app_t_vec.push_back(x); - } - } - } - - float ***app_t=NULL; - - app_t = new float**[channels]; - for (int i = 0; i < channels; ++i) - { - app_t[i] = new float*[width]; - for (int j = 0; j < width; ++j) - app_t[i][j] = new float[height]; 
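
PreProcessInput above scales pixels to [0,1], swaps BGR to RGB, then applies the ImageNet mean/standard-deviation normalization before writing into the SNPE input tensor. The sketch below isolates that per-pixel transform; it assumes an OpenCV BGR frame and a caller-provided float buffer, and is not the deleted function itself.

```cpp
// Sketch of the normalization used by the segmentation pre-processing above:
// resize, BGR->RGB, scale to [0,1], subtract the ImageNet mean and divide by
// the standard deviation, writing interleaved HWC floats into dst.
#include <opencv2/opencv.hpp>

void NormalizeForSegmentation(const cv::Mat& bgr, float* dst, int modelW, int modelH)
{
    static const float kMean[3] = {0.485f, 0.456f, 0.406f};
    static const float kStd[3]  = {0.229f, 0.224f, 0.225f};

    cv::Mat resized, rgb, f32;
    cv::resize(bgr, resized, cv::Size(modelW, modelH));
    cv::cvtColor(resized, rgb, cv::COLOR_BGR2RGB);
    rgb.convertTo(f32, CV_32FC3, 1.0 / 255.0);   // [0,255] -> [0,1]

    for (int i = 0; i < f32.rows; ++i) {
        const cv::Vec3f* row = f32.ptr<cv::Vec3f>(i);
        for (int j = 0; j < f32.cols; ++j)
            for (int c = 0; c < 3; ++c)
                *dst++ = (row[j][c] - kMean[c]) / kStd[c];  // HWC layout
    }
}
```
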
- } - - for(int i =0;i> colors_res = { - { 0, 0, 0},{128, 0, 0},{ 0, 128, 0},{128, 128, 0},{ 0, 0, 128}, - {128, 0, 128},{ 0, 128, 128},{128, 128, 128},{ 64, 0, 0},{192, 0, 0}, - { 64, 128, 0},{192, 128, 0},{ 64, 0, 128},{192, 0, 128},{ 64, 128, 128}, - {192, 128, 128},{ 0, 64, 0},{128, 64, 0},{ 0, 192, 0},{128, 192, 0}, - { 0, 64, 128},{128, 64, 128},{ 0, 192, 128},{128, 192, 128},{ 64, 64, 0}, - {192, 64, 0},{ 64, 192, 0},{192, 192, 0},{ 64, 64, 128},{192, 64, 128}, - { 64, 192, 128},{192, 192, 128},{ 0, 0, 64},{128, 0, 64},{ 0, 128, 64}, - {128, 128, 64},{ 0, 0, 192},{128, 0, 192},{ 0, 128, 192},{128, 128, 192}, - { 64, 0, 64},{192, 0, 64},{ 64, 128, 64},{192, 128, 64},{ 64, 0, 192}, - {192, 0, 192},{ 64, 128, 192},{192, 128, 192},{ 0, 64, 64},{128, 64, 64}, - { 0, 192, 64},{128, 192, 64},{ 0, 64, 192},{128, 64, 192},{ 0, 192, 192}, - {128, 192, 192},{ 64, 64, 64},{192, 64, 64},{ 64, 192, 64},{192, 192, 64}, - { 64, 64, 192},{192, 64, 192},{ 64, 192, 192},{192, 192, 192},{ 32, 0, 0}, - {160, 0, 0},{ 32, 128, 0},{160, 128, 0},{ 32, 0, 128},{160, 0, 128}, - { 32, 128, 128},{160, 128, 128},{ 96, 0, 0},{224, 0, 0},{ 96, 128, 0}, - {224, 128, 0},{ 96, 0, 128},{224, 0, 128},{ 96, 128, 128},{224, 128, 128}, - { 32, 64, 0},{160, 64, 0},{ 32, 192, 0},{160, 192, 0},{ 32, 64, 128}, - {160, 64, 128},{ 32, 192, 128},{160, 192, 128},{ 96, 64, 0},{224, 64, 0}, - { 96, 192, 0},{224, 192, 0},{ 96, 64, 128},{224, 64, 128},{ 96, 192, 128}, - {224, 192, 128},{ 32, 0, 64},{160, 0, 64},{ 32, 128, 64},{160, 128, 64}, - { 32, 0, 192},{160, 0, 192},{ 32, 128, 192},{160, 128, 192},{ 96, 0, 64}, - {224, 0, 64},{ 96, 128, 64},{224, 128, 64},{ 96, 0, 192},{224, 0, 192}, - { 96, 128, 192},{224, 128, 192},{ 32, 64, 64},{160, 64, 64},{ 32, 192, 64}, - {160, 192, 64},{ 32, 64, 192},{160, 64, 192},{ 32, 192, 192},{160, 192, 192}, - { 96, 64, 64},{224, 64, 64},{ 96, 192, 64},{224, 192, 64},{ 96, 64, 192}, - {224, 64, 192},{ 96, 192, 192},{224, 192, 192},{ 0, 32, 0},{128, 32, 0}, - { 0, 160, 0},{128, 160, 0},{ 0, 32, 128},{128, 32, 128},{ 0, 160, 128}, - {128, 160, 128},{ 64, 32, 0},{192, 32, 0},{ 64, 160, 0},{192, 160, 0}, - { 64, 32, 128},{192, 32, 128},{ 64, 160, 128},{192, 160, 128},{ 0, 96, 0}, - {128, 96, 0},{ 0, 224, 0},{128, 224, 0},{ 0, 96, 128},{128, 96, 128}, - { 0, 224, 128},{128, 224, 128},{ 64, 96, 0},{192, 96, 0},{ 64, 224, 0}, - {192, 224, 0},{ 64, 96, 128},{192, 96, 128},{ 64, 224, 128},{192, 224, 128}, - { 0, 32, 64},{128, 32, 64},{ 0, 160, 64},{128, 160, 64},{ 0, 32, 192}, - {128, 32, 192},{ 0, 160, 192},{128, 160, 192},{ 64, 32, 64},{192, 32, 64}, - { 64, 160, 64},{192, 160, 64},{ 64, 32, 192},{192, 32, 192},{ 64, 160, 192}, - {192, 160, 192},{ 0, 96, 64},{128, 96, 64},{ 0, 224, 64},{128, 224, 64}, - { 0, 96, 192},{128, 96, 192},{ 0, 224, 192},{128, 224, 192},{ 64, 96, 64}, - {192, 96, 64},{ 64, 224, 64},{192, 224, 64},{ 64, 96, 192},{192, 96, 192}, - { 64, 224, 192},{192, 224, 192},{ 32, 32, 0},{160, 32, 0},{ 32, 160, 0}, - {160, 160, 0},{ 32, 32, 128},{160, 32, 128},{ 32, 160, 128},{160, 160, 128}, - { 96, 32, 0},{224, 32, 0},{ 96, 160, 0},{224, 160, 0},{ 96, 32, 128}, - {224, 32, 128},{ 96, 160, 128},{224, 160, 128},{ 32, 96, 0},{160, 96, 0}, - { 32, 224, 0},{160, 224, 0},{ 32, 96, 128},{160, 96, 128},{ 32, 224, 128}, - {160, 224, 128},{ 96, 96, 0},{224, 96, 0},{ 96, 224, 0},{224, 224, 0}, - { 96, 96, 128},{224, 96, 128},{ 96, 224, 128},{224, 224, 128},{ 32, 32, 64}, - {160, 32, 64},{ 32, 160, 64},{160, 160, 64},{ 32, 32, 192},{160, 32, 192}, - { 32, 160, 192},{160, 160, 192},{ 96, 32, 64},{224, 32, 
64},{ 96, 160, 64}, - {224, 160, 64},{ 96, 32, 192},{224, 32, 192},{ 96, 160, 192},{224, 160, 192}, - { 32, 96, 64},{160, 96, 64},{ 32, 224, 64},{160, 224, 64},{ 32, 96, 192}, - {160, 96, 192},{ 32, 224, 192},{160, 224, 192},{ 96, 96, 64},{224, 96, 64}, - { 96, 224, 64},{224, 224, 64},{ 96, 96, 192},{224, 96, 192},{ 96, 224, 192}, - {224, 224, 192} - }; - - int **app_t_max=NULL; - - app_t_max = new int*[width]; - for (int j = 0; j < width; ++j) - { - app_t_max[j] = new int[height]; - } - - vector max_values; - for(int i=0;i max) - { - max = temp; - app_t_max[i][j] = k; - } - } - max_values.push_back(max); - } - } - - vector max_vec; - - for(int i = 0; i< height;i++) - { - for(int j=0;j> color; - color = colors_res; - - for (int i = 0; i < temp.rows; i++) - { - char* pdata = (char*)(temp.data + i * temp.step); - for (int j = 0; j < temp.cols; j++) - { - int id = app_t_max[i][j]; - pdata[0] = color[id][2]; - pdata[1] = color[id][1]; - pdata[2] = color[id][0]; - pdata += 3; - } - } - - for (int j = 0; j < width; ++j) - { - delete [] app_t_max[j]; - } - delete [] app_t_max; - app_t_max = NULL; - - } - else if(model_name.compare("DeepLabv3-resnet101") == 0 || model_name.compare("DeepLabv3-resnet50") == 0 || model_name.compare("FCN_resnet101") == 0 || model_name.compare("FCN_resnet50") == 0) - { - - vector> label_map = { - {0, 0, 0}, // background - {128, 0, 0}, // aeroplane - {0, 128, 0}, // bicycle - {128, 128, 0}, // bird - {0, 0, 128}, // boat - {128, 0, 128}, // bottle - {0, 128, 128}, // bus - {128, 128, 128}, // car - {64, 0, 0}, // cat - {192, 0, 0}, // chair - {64, 128, 0}, // cow - {192, 128, 0}, // dining table - {64, 0, 128}, // dog - {192, 0, 128}, // horse - {64, 128, 128}, // motorbike - {192, 128, 128}, // person - {0, 64, 0}, // potted plant - {128, 64, 0}, // sheep - {0, 192, 0}, // sofa - {128, 192, 0}, // train - {0, 64, 128} // tv/monitor - }; - - int **app_t_max=NULL; - - app_t_max = new int*[width]; - for (int j = 0; j < width; j++) - { - app_t_max[j] = new int[height]; - } - - vector max_values; - for(int i=0; i max) - { - max = temp; - app_t_max[i][j] = k; - } - } - max_values.push_back(max); - - } - } - - vector max_vec; - - for(int i = 0; i< height;i++) - { - for(int j=0;j(user_data); - GstSample *sample = NULL; - GstBuffer *buffer = NULL; - GstMapInfo map; - const GstStructure *info = NULL; - GstCaps *caps = NULL; - GstFlowReturn ret = GST_FLOW_OK; - int sample_width = 0; - int sample_height = 0; - - g_signal_emit_by_name(appsink, "pull-sample", &sample, &ret); - if (ret != GST_FLOW_OK) - { - LOG_ERROR("can't pull GstSample."); - return ret; - } - - if (sample) - { - buffer = gst_sample_get_buffer(sample); - if (buffer == NULL) - { - LOG_ERROR("get buffer is null"); - goto exit; - } - - gst_buffer_map(buffer, &map, GST_MAP_READ); - - caps = gst_sample_get_caps(sample); - if (caps == NULL) - { - LOG_ERROR("get caps is null"); - goto exit; - } - - info = gst_caps_get_structure(caps, 0); - if (info == NULL) - { - LOG_ERROR("get info is null"); - goto exit; - } - - gst_structure_get_int(info, "width", &sample_width); - gst_structure_get_int(info, "height", &sample_height); - - - if (map.data == NULL) - { - LOG_ERROR("appsink buffer data empty"); - return GST_FLOW_OK; - } - - frameProcess->frameId += 1; - LOG_DEBUG("Frame ID=%d \n", frameProcess->frameId); - if (frameProcess->frameId % frameProcess->interval == 0) - { - shared_ptr detail(new DetectionItem()); - detail->Size = (uint32_t)map.size; - detail->Width = sample_width; - detail->Height = sample_height; - 
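
The segmentation post-processing above reduces the height x width x channels score map to a per-pixel class index by taking the argmax over the channel axis, then paints each pixel with that class's palette colour. A compact sketch of the same idea follows, working directly on the HWC buffer instead of the intermediate transposed arrays; the function name and palette handling are assumptions.

```cpp
// Sketch of argmax-over-channels segmentation colouring (assumed names):
// scores is an HWC buffer of class scores, palette maps class id -> colour.
#include <opencv2/opencv.hpp>
#include <vector>

cv::Mat ColorizeSegmentation(const float* scores, int height, int width, int channels,
                             const std::vector<cv::Vec3b>& palette)
{
    cv::Mat mask(height, width, CV_8UC3, cv::Scalar(0, 0, 0));
    if (palette.empty()) return mask;

    for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x) {
            const float* px = scores + (static_cast<size_t>(y) * width + x) * channels;
            int best = 0;
            for (int c = 1; c < channels; ++c)   // argmax over class scores
                if (px[c] > px[best]) best = c;
            mask.at<cv::Vec3b>(y, x) = palette[best % palette.size()];
        }
    }
    return mask;
}
```
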
detail->FrameId = frameProcess->frameId; - detail->StreamName = frameProcess->streamName; - detail->StreamId = frameProcess->StreamId; - - uint8_t *imgBuf = new uint8_t[map.size]; - memcpy(static_cast(imgBuf), map.data, map.size); - detail->ImageBuffer.reset((uint8_t *)imgBuf, [](uint8_t *p) - { delete[] (p); }); - - int ret = frameProcess->blockQueue->Enqueue(detail,true); - if (ret != 0) - { - LOG_ERROR("Enqueue Fail = %d \n", ret); - } - } - } - -exit: - if (buffer) - { - gst_buffer_unmap(buffer, &map); - } - if (sample) - { - gst_sample_unref(sample); - } - return GST_FLOW_OK; -} - -void StreamDecode::OnPadAdd(GstElement *element, GstPad *pad, gpointer data) -{ - // Link two Element with named pad - GstPad *sink_pad = gst_element_get_static_pad(GST_ELEMENT(data), "sink"); - if (gst_pad_is_linked(sink_pad)) - { - LOG_INFO("rtspsrc and depay are already linked. Ignoring\n"); - return; - } - gst_element_link_pads(element, gst_pad_get_name(pad), GST_ELEMENT(data), "sink"); -} - -StreamDecode::StreamDecode(std::string streamtype, std::string url) -{ - this->StreamType = streamtype; - frameProcess_ = new FrameProcessData(); - frameProcess_->frameId = 1; - this->frameProcess_->streamName = url; -} - -StreamDecode::~StreamDecode() -{ - UnInitialization(); - if (frameProcess_ != nullptr) - { - delete frameProcess_; - frameProcess_ = nullptr; - } -} - -void StreamDecode::UnInitialization() -{ - terminate_ = true; -} -void StreamDecode::DecodeAndInference() -{ - GstStateChangeReturn ret; - shared_ptr msg = nullptr; - /* Start playing */ - ret = gst_element_set_state(data_.pipeline.get(), GST_STATE_PLAYING); - if (ret == GST_STATE_CHANGE_FAILURE) - { - LOG_ERROR("Unable to set the pipeline to the playing state.\n"); - return; - } - - /* Listen to the bus */ - bus_.reset(gst_element_get_bus(data_.pipeline.get()), [](GstBus *obj) - { gst_object_unref(obj); }); - - GstMessageType msgType; - do - { - msgType = static_cast(GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS); - msg.reset(gst_bus_timed_pop_filtered(bus_.get(), GST_CLOCK_TIME_NONE, msgType), [](GstMessage *m) - { gst_message_unref(m); }); - /* Parse message */ - if (msg != NULL) - { - GError *err; - gchar *debug_info; - - switch (GST_MESSAGE_TYPE(msg.get())) - { - case GST_MESSAGE_ERROR: - gst_message_parse_error(msg.get(), &err, &debug_info); - LOG_ERROR("Error received from element = %s \t %s\n", GST_OBJECT_NAME(msg.get()->src), err->message); - g_clear_error(&err); - g_free(debug_info); - terminate_ = true; - break; - case GST_MESSAGE_EOS: - LOG_INFO("Stream = %s \t End-Of-Stream reached. 
total Frame = %d \n ", frameProcess_->streamName.c_str(), frameProcess_->frameId); - terminate_ = true; - break; - case GST_MESSAGE_STATE_CHANGED: - if (GST_MESSAGE_SRC(msg.get()) == GST_OBJECT(data_.pipeline.get())) - { - GstState old_state, new_state, pending_state; - gst_message_parse_state_changed(msg.get(), &old_state, &new_state, &pending_state); - } - break; - default: - LOG_ERROR("Unexpected message received.\n"); - break; - } - } - } while (!terminate_); - - gst_element_set_state(data_.pipeline.get(), GST_STATE_NULL); -} - -void StreamDecode::UnRefElement(GstElement *elem) -{ - // LOG_DEBUG("Pipeline parent manage this object instead of unreffing the object directly: %s\n", elem->object.name); -} - -int StreamDecode::gst_camera_pipeline_init() -{ - GstCaps *filtercaps; - /* Initialize GStreamer */ - gst_init(nullptr, nullptr); - /* Create the empty pipeline */ - data_.pipeline.reset(gst_pipeline_new("decode-pipeline"), [](GstElement *elem) { - gst_element_set_state(elem, GST_STATE_NULL); - gst_object_unref(elem); }); - - data_.source.reset(gst_element_factory_make("qtiqmmfsrc", "source"), UnRefElement); - data_.main_capsfilter.reset(gst_element_factory_make ("capsfilter", "main_capsfilter"), UnRefElement); - data_.transform.reset(gst_element_factory_make("qtivtransform", "transform"), UnRefElement); - data_.sink.reset(gst_element_factory_make("appsink", "sink"), UnRefElement); - - if (!data_.pipeline.get() || !data_.source.get() || !data_.main_capsfilter.get() || !data_.transform.get() || !data_.sink.get()) - { - LOG_ERROR("Not all elements could be created."); - return QS_ERROR; - } - - filtercaps = gst_caps_new_simple ("video/x-raw", - "format", G_TYPE_STRING, "NV12", - "width", G_TYPE_INT, 1280, - "height", G_TYPE_INT, 720, - "framerate", GST_TYPE_FRACTION, 30, 1, - NULL); - - gst_caps_set_features (filtercaps, 0, - gst_caps_features_new ("memory:GBM", NULL)); - g_object_set (data_.main_capsfilter.get(), "caps", filtercaps, NULL); - gst_caps_unref (filtercaps); - - gst_bin_add_many(GST_BIN(data_.pipeline.get()), data_.source.get(), data_.main_capsfilter.get(), - data_.transform.get(), data_.sink.get(), NULL); - if (!gst_element_link_many(data_.source.get(), data_.main_capsfilter.get(), data_.transform.get(), data_.sink.get(), NULL)) - { - LOG_ERROR("Elements could not be linked.\n"); - gst_bin_remove_many (GST_BIN(data_.pipeline.get()), data_.source.get(), data_.main_capsfilter.get(), - data_.transform.get(), data_.sink.get(), NULL); - return QS_ERROR; - } - shared_ptr caps(gst_caps_from_string("video/x-raw,format=BGR"), [](GstCaps *caps) - { gst_caps_unref(caps); }); - g_object_set(data_.sink.get(), "caps", caps.get(), NULL); - - /* Configure appsink */ - g_object_set(data_.sink.get(), "emit-signals", TRUE, NULL); - g_signal_connect(data_.sink.get(), "new-sample", G_CALLBACK(StreamDecode::OnAppsinkNewSample), frameProcess_); - - /* Connect to the pad-added signal */ - g_signal_connect(data_.source.get(), "pad-added", G_CALLBACK(StreamDecode::OnPadAdd), data_.transform.get()); - - return QS_SUCCESS; - -} - -int StreamDecode::Initialization(shared_ptr &queue) -{ - frameProcess_->blockQueue = queue; - - if(0 == StreamType.compare("camera")) { - return gst_camera_pipeline_init(); - } - else { - LOG_ERROR("Stream Type does not configured"); - return QS_ERROR; - } -} - -void StreamDecode::SetSkipFrame(int interval) -{ - if (interval < 1) - { - return; - } - frameProcess_->interval = interval; -} - -void StreamDecode::SetStreamName(string name) -{ - LOG_INFO("Set stream name =%s \n", 
name.c_str()); - frameProcess_->streamName = name; -} - -void StreamDecode::SetStreamId(int uuid) -{ - frameProcess_->StreamId = uuid; -} - -void StreamDecode::Stop() -{ - terminate_ = true; - gboolean res = gst_element_send_event(data_.pipeline.get(), gst_event_new_eos()); - if (!res) - { - LOG_ERROR("Error occurred! EOS signal cannot be sent!\n\r"); - } -} - -static void CaptureThreadFunc(shared_ptr decodePtr) -{ - decodePtr->DecodeAndInference(); -} - -void CaptureController::CreateCapture(shared_ptr &input_config, shared_ptr &queue) -{ - shared_ptr decodePtr = make_shared(input_config->StreamType, input_config->Url); - - decodePtr->SetStreamId(input_config->StreamNumber); - - decodePtr->Initialization(queue); - decodePtr->SetStreamName("stream_" + to_string(input_config->StreamNumber)); - - decodePtr->SetSkipFrame(input_config->SkipFrame); - - std::thread decodeThread = std::thread(CaptureThreadFunc, decodePtr); - threads.emplace_back(move(decodeThread)); - - decoder.insert(pair>(input_config->StreamNumber, decodePtr)); -} - -void CaptureController::InterruptClose() -{ - map>::reverse_iterator iter; - - for (iter = decoder.rbegin(); iter != decoder.rend(); iter++) - { - iter->second->Stop(); - } -} - -void CaptureController::StopAll() -{ - map>::reverse_iterator iter; - - for (size_t i = 0; i < threads.size(); i++) - { - threads[i].join(); - } - - for (iter = decoder.rbegin(); iter != decoder.rend(); iter++) - { - iter->second->Stop(); - } -} diff --git a/ai-solutions/ubuntu/gstreamer-cli/src/StreamEncode.cpp b/ai-solutions/ubuntu/gstreamer-cli/src/StreamEncode.cpp deleted file mode 100644 index aa375bb8..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/src/StreamEncode.cpp +++ /dev/null @@ -1,248 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include "StreamEncode.h" - -int StreamEncode::gst_wayland_pipeline_init(string output_type) -{ - gst_init(nullptr, nullptr); - data.pipeline.reset(gst_pipeline_new("pipeline"), [](GstElement *elem) - { - gst_element_set_state(elem, GST_STATE_NULL); - gst_object_unref(elem); - }); - - auto DefaultUnRefGstElement = [](GstElement *elem) { - // Per GStreamer design pipeline parent manage - // GstElement instead of unreffing the object directly - }; - - data.appsrc.reset(gst_element_factory_make("appsrc", "appsrc"), DefaultUnRefGstElement); - data.vidconv.reset(gst_element_factory_make("videoconvert", "vidconv"), DefaultUnRefGstElement); - data.videoscale.reset(gst_element_factory_make("videoscale", "videoscale"), DefaultUnRefGstElement); - data.waylandsink.reset(gst_element_factory_make("waylandsink", "waylandsink"), DefaultUnRefGstElement); - - if (!data.pipeline.get() || !data.appsrc.get() || !data.vidconv.get() || !data.videoscale.get() || !data.waylandsink.get()) - { - LOG_ERROR("[not all element created,(%s)(%s)(%s)(%s)(%s)]\n", - !data.pipeline.get() ? "ng" : "ok", - !data.appsrc.get() ? "ng" : "ok", - !data.vidconv.get() ? "ng" : "ok", - !data.videoscale.get() ? "ng" : "ok", - !data.waylandsink.get() ? 
"ng" : "ok"); - return QS_ERROR; - } - - gst_bin_add_many(GST_BIN(data.pipeline.get()), - data.appsrc.get(), - data.vidconv.get(), - data.videoscale.get(), - data.waylandsink.get(), - NULL); - - GstCaps *caps = gst_caps_from_string("video/x-raw, framerate=30/1,width=1280, height=720,format=BGR"); - g_object_set(data.appsrc.get(), "caps", caps, NULL); - gst_caps_unref(caps); - - g_object_set(G_OBJECT(data.waylandsink.get()), "async", true, NULL); - g_object_set(G_OBJECT(data.waylandsink.get()), "sync", false, NULL); - g_object_set (G_OBJECT (data.waylandsink.get()), "fullscreen", true, NULL); - - - gst_element_sync_state_with_parent(data.waylandsink.get()); - - - if (!gst_element_link(data.appsrc.get(), data.vidconv.get())) - { - LOG_ERROR("Link Fail %s %s \n", GST_ELEMENT_NAME(data.appsrc.get()), GST_ELEMENT_NAME(data.vidconv.get())); - return QS_ERROR; - } - - if (!gst_element_link(data.vidconv.get(), data.videoscale.get())) - { - LOG_ERROR("Link Fail %s %s \n", GST_ELEMENT_NAME(data.vidconv.get()), GST_ELEMENT_NAME(data.videoscale.get())); - return QS_ERROR; - } - if (!gst_element_link(data.videoscale.get(), data.waylandsink.get())) - { - LOG_ERROR("Link Fail %s %s \n", GST_ELEMENT_NAME(data.videoconvert.get()), GST_ELEMENT_NAME(data.waylandsink.get())); - return QS_ERROR; - } - - return QS_SUCCESS; -} - - -int StreamEncode::Initialization(string output_type) -{ - if(0 == output_type.compare("wayland")) - { - return gst_wayland_pipeline_init(output_type); - } - else - { - LOG_ERROR("Stream Type does not configured"); - return QS_ERROR; - } - - return QS_SUCCESS; - -} - -int StreamEncode::Loop() -{ - /* Start playing */ - GstStateChangeReturn ret = gst_element_set_state(data.pipeline.get(), GST_STATE_PLAYING); - if (ret == GST_STATE_CHANGE_FAILURE) - { - LOG_ERROR("Unable to set the pipeline to the playing state.\n"); - return QS_ERROR; - } - - /* Listen to the bus */ - bus.reset(gst_element_get_bus(data.pipeline.get()), [](GstBus *obj) - { gst_object_unref(obj); }); - - GstMessageType msgType; - gchar *debug_info; - GError *err; - GstMessage *msg = nullptr; - do - { - msgType = static_cast(GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS); - msg = gst_bus_timed_pop_filtered(bus.get(), GST_CLOCK_TIME_NONE, msgType); - - /* Parse message */ - if (msg != NULL) - { - switch (GST_MESSAGE_TYPE(msg)) - { - case GST_MESSAGE_ERROR: - gst_message_parse_error(msg, &err, &debug_info); - LOG_ERROR("Error received from element %s: %s\n", GST_OBJECT_NAME(msg->src), err->message); - g_clear_error(&err); - g_free(debug_info); - terminate = TRUE; - break; - case GST_MESSAGE_EOS: - LOG_INFO("End-Of-Stream reached. 
Encoder\n"); - terminate = TRUE; - break; - case GST_MESSAGE_STATE_CHANGED: - /* We are only interested in state-changed messages from the pipeline */ - if (GST_MESSAGE_SRC(msg) == GST_OBJECT(data.pipeline.get())) - { - GstState old_state, new_state, pending_state; - gst_message_parse_state_changed(msg, &old_state, &new_state, &pending_state); - } - break; - default: - /* We should not reach here */ - LOG_ERROR("Unexpected message received.\n"); - break; - } - gst_message_unref(msg); - } - } while (!terminate); - /* Free resources */ - gst_element_set_state(data.pipeline.get(), GST_STATE_NULL); - return QS_SUCCESS; -} - -void StreamEncode::UnInitialization() -{ - LOG_DEBUG("UnInitialization \n"); -} - -void StreamEncode::PushData(uint8_t *pushData, int len) -{ - GstBuffer *buffer = gst_buffer_new_and_alloc(len); - gst_buffer_fill(buffer, 0, pushData, len); - static GstClockTime timestamp = 0; - GST_BUFFER_PTS(buffer) = timestamp; - GST_BUFFER_DURATION(buffer) = gst_util_uint64_scale_int(1, GST_SECOND, 30); - - timestamp += GST_BUFFER_DURATION(buffer); - GstFlowReturn ret = GST_FLOW_OK; - - g_signal_emit_by_name(GST_APP_SRC(data.appsrc.get()), "push-buffer", buffer, &ret); - gst_buffer_unref(buffer); - - if ((ret != GST_FLOW_OK)) - { - LOG_ERROR("Error with gst_app_src_push_buffer for view_pipeline, return = %d \n", ret); - } -} - -void StreamEncode::Stop() -{ - terminate = TRUE; - gst_app_src_end_of_stream(GST_APP_SRC(data.appsrc.get())); // send eos -} - -static void EncodeThreadFunc(shared_ptr encodePtr) -{ - int ret; - ret = encodePtr->Loop(); - if(ret == QS_ERROR) - LOG_ERROR("Failed to run the gstreamer pipeline\n"); - -} - -void EncodeController::CreateEncoder(std::shared_ptr sol_conf) -{ - int streamId = sol_conf->input_config->StreamNumber; - string outputType = sol_conf->output_type; - shared_ptr encodePtr = make_shared(); - encodePtr->Initialization(outputType); - encoders.insert(pair>(streamId, encodePtr)); - - std::thread encodeThread = std::thread(EncodeThreadFunc, encodePtr); - threads.emplace_back(move(encodeThread)); -} - -void EncodeController::EncodeFrame(int streamId, uint8_t *pushData, int len) -{ - encoders[streamId]->PushData(pushData, len); -} - -void EncodeController::EndOfStream(int streamId) -{ - encoders[streamId]->Stop(); -} - -void EncodeController::InterruptClose() -{ - map>::reverse_iterator iter; - - for (iter = encoders.rbegin(); iter != encoders.rend(); iter++) - { - iter->second->Stop(); - } - - for (size_t i = 0; i < threads.size(); i++) - { - threads[i].join(); - } -} - -void EncodeController::Stop() -{ - map>::reverse_iterator iter; - - for (iter = encoders.rbegin(); iter != encoders.rend(); iter++) - { - iter->second->Stop(); - } - - for (size_t i = 0; i < threads.size(); i++) - { - threads[i].join(); - } -} diff --git a/ai-solutions/ubuntu/gstreamer-cli/src/main.cpp b/ai-solutions/ubuntu/gstreamer-cli/src/main.cpp deleted file mode 100644 index ef3fe5d1..00000000 --- a/ai-solutions/ubuntu/gstreamer-cli/src/main.cpp +++ /dev/null @@ -1,325 +0,0 @@ -// -*- mode: cpp -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -#include "ModelInference.h" -#include "Configuration.h" -#include "StreamDecode.h" -#include "StreamEncode.h" -#include "DecodeQueue.h" -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace std; - - -/** - * To decode frames from gstreamer -*/ -shared_ptr gDecodeQueue; - -/** - * To check for gstreamer exit -*/ -bool gExit = false; - -/** - * To encode frames for preview/file -*/ -shared_ptr encoderCtrl; - -/** - * To create object for frame capture -*/ -shared_ptr captureCtrl; - - - -/** @brief To intialize and configure the runtime based on the solution - * @param sol_conf contains information about the solution -*/ -void Inference_Image(void *sol_conf, string inputimage, string outputimage) -{ - LOG_DEBUG("InferenceThread \n"); - - SolutionConfiguration *solution_config = (SolutionConfiguration *)sol_conf; - /** - * TO initialize layers and buffers based on model type - */ - shared_ptr shInference; - shInference = std::make_shared(solution_config->model_config->model_type); - - shInference->Initialization(*solution_config->model_config.get()); - - /** - * start inferencing on the image buffer - */ - auto start1 = chrono::steady_clock::now(); - cv::Mat input = cv::imread(inputimage, cv::IMREAD_COLOR); - if(input.empty()) - { - LOG_ERROR("Invalid image!\n"); - return; - } - //cv::cvtColor(input, input, cv::COLOR_BGR2RGB); - LOG_ERROR("model name = %s\n",solution_config->model_name.c_str()); - cv::Mat output_image; - if(shInference->Inference(input,output_image,solution_config->model_name) == true) - { - auto end1 = chrono::steady_clock::now(); - auto costTime1 = chrono::duration_cast(end1 - start1).count(); - LOG_INFO("Elapsed inference time in milliseconds: %ld ms\n",costTime1); - cv::imwrite(outputimage,output_image); - } - else - { - LOG_ERROR("Model Inference failed\n"); - } - shInference->UnInitialization(); -} - -void Inference_Camera(void *sol_conf) -{ - LOG_DEBUG("InferenceThread \n"); - - SolutionConfiguration *solution_config = (SolutionConfiguration *)sol_conf; - /** - * TO initialize layers and buffers based on model type - */ - shared_ptr shInference; - shInference = std::make_shared(solution_config->model_config->model_type); - - shInference->Initialization(*solution_config->model_config.get()); - - int ret = 0; - auto start = chrono::steady_clock::now(); - uint32_t frames = 0; - /** - * Run the loop until stream ends or interrupt from user - */ - do - { - shared_ptr item; - /** - * To retrieve gstreamer buffer from queue - */ - ret = gDecodeQueue->Dequeue(item, 300); - /** - * Check if Dequeue is successful - */ - if (ret == 0) - { - frames += 1; - auto start1 = chrono::steady_clock::now(); - /** - * start inferencing on the image buffer - */ - cv::Mat image(cv::Size(item->Width, item->Height), CV_8UC3, item->ImageBuffer.get(), cv::Mat::AUTO_STEP); - cv::Mat output_image; - shInference->Inference(image,output_image,solution_config->model_name); - auto end1 = chrono::steady_clock::now(); - auto costTime1 = chrono::duration_cast(end1 - start1).count(); - LOG_INFO("Elapsed inference time in milliseconds: %ld ms\n",costTime1); - - cv::resize(output_image,output_image,Size(1280,720)); - int size = output_image.total() * output_image.elemSize(); - /** - * To display on monitor - */ - encoderCtrl->EncodeFrame(item->StreamId, output_image.data, size); - } - /** - * If there are no items 
in the queue - */ - else - { - if (ret != 1) - { - LOG_ERROR("Error ret= %d\n", ret); - } - continue; - } - - } while (!gExit); - /** - * To infer on the remaining pending items if exited before completion - */ - auto remains = gDecodeQueue->GetRemainItems(); - LOG_INFO("Remain Items= %lu\n", remains.size()); - for (auto item : remains) - { - frames += 1; - cv::Mat image(cv::Size(item->Width, item->Height), CV_8UC3, item->ImageBuffer.get(), cv::Mat::AUTO_STEP); - cv::Mat output_image; - shInference->Inference(image,output_image,solution_config->model_name); - } - /** - * To deallocate the bufferes and runtime - */ - shInference->UnInitialization(); - - auto end = chrono::steady_clock::now(); - auto costTime = chrono::duration_cast(end - start).count(); - - LOG_INFO("Elapsed time in milliseconds: %ld ms \t Received Frames: %d \t Through rate: %ld \n", - costTime, frames, (frames * 1000)/costTime); -} - -/** @brief Execution starts from here - * @param argc for total argument count - * @param argv arguments to be passed -*/ - -int main(int argc, char **argv) -{ - /** - * To store config file name passed in argument - */ - const char* inputFile=NULL; - string inputimage,outputimage; - int opt = 0; - /** - * Check if 'h' or 'c' passed in argument - */ - while ((opt = getopt(argc, argv, ":hc:i:o:")) != EOF) - { - switch (opt) - { - case 'h': std::cout - << "\nDESCRIPTION:\n" - << "------------\n" - << "Example application demonstrating how to run the use case\n" - << "using the SNPE C++ API.\n" - << "REQUIRED ARGUMENTS:\n" - << "-------------------\n" - << " -c Path to the config json file.\n" - << "Example: \nImage as Input: ./out/ai-solutions -c ../data/config.json -i image_path -o Output_path\n" - << "Camera stream as input ./out/ai-solutions -c ../data/config.json\n"; - break; - case 'c': - inputFile = optarg; - LOG_INFO("Path to config file = %s \n", inputFile); - break; - case 'i': - inputimage = optarg; - LOG_INFO(" input image = %s \n",inputimage.c_str()); - break; - case 'o': - outputimage = optarg; - LOG_INFO(" output image = %s \n",outputimage.c_str()); - break; - - default: - LOG_INFO("Invalid parameter specified. 
Please run sample with the -h flag to see required arguments\n"); - exit(0); - }; - } - /** - * To parse input,model and solution config from inputFile - */ - Configuration::getInstance().LoadConfiguration(inputFile); - - /** - * To access enabled soultion model - */ - vector selected_model; - /** - * To access enabled solution configuration - */ - vector solutions_config; - /** - * To intialize each enabled solution - */ - - bool camera = false; - for (auto i : Configuration::getInstance().solutionsconfig) { - /** - * To access solution configuration - */ - std::shared_ptr config = i.second; - /** - * To check if solution is enabled - */ - if (config->Enable == true) { - /** - * To access the input configuration - */ - config->input_config = Configuration::getInstance().inputconfigs[config->input_config_name]; - if (config->input_config == NULL) { - LOG_ERROR("NULL Input configuration for selected solution name = %s \n", config->solution_name.c_str()); - exit(1); - } - config->input_config->StreamNumber = i.first; - /** - * To access the model configuration - */ - config->model_config = Configuration::getInstance().modelsconfig[config->model_name]; - if (config->model_config == NULL) { - LOG_ERROR("NULL Model configuration for selected solution name = %s \n", config->solution_name.c_str()); - exit(1); - } - /** - * To store the enabled solution configuration - */ - solutions_config.emplace_back(*config); - /** - * Append the selected models - */ - selected_model.push_back(config->model_name); - - if(config->input_config_name.compare("camera") == 0) - { - camera = true; - const int MAX_QUEUE_DEPTH = 1; - gDecodeQueue = make_shared(MAX_QUEUE_DEPTH); - encoderCtrl = make_shared(); - captureCtrl = make_shared(); - /** - * Intialize gstreamer pipeline to capture - */ - captureCtrl->CreateCapture(config->input_config, gDecodeQueue); - /** - * Intialze encoder to display or save frame - */ - encoderCtrl->CreateEncoder(config); - } - } - } - /** - * Check if any solution is enabled - */ - if (selected_model.size() == 0) { - LOG_ERROR("Solution not enabled, Enable the desired solution in config.json file\n"); - exit(1); - } - if(camera == true) - { - Inference_Camera((void *)(&solutions_config[0]) ); - gDecodeQueue->Unlock(); - captureCtrl->StopAll(); - captureCtrl->StopAll(); - } - else - { - if(inputimage.empty() || outputimage.empty()) - { - LOG_ERROR("Example: ./out/ai-solutions -c ../data/config.json -i image_path -o output_path\n"); - return 0; - } - Inference_Image((void *)(&solutions_config[0]), inputimage, outputimage ); - } - - - return 0; -} diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/.gitignore b/ai-solutions/windows/angular-app-nlp/Electron app UI/.gitignore deleted file mode 100644 index 08b25532..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/.gitignore +++ /dev/null @@ -1 +0,0 @@ -node_modules diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/README.md b/ai-solutions/windows/angular-app-nlp/Electron app UI/README.md deleted file mode 100644 index 2c839538..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# npm install ---> installs node_modules -# npm run build ----> creates dist -# npm run dist ----> Generates exe diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/angular.json b/ai-solutions/windows/angular-app-nlp/Electron app UI/angular.json deleted file mode 100644 index 7138e98c..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app 
UI/angular.json +++ /dev/null @@ -1,108 +0,0 @@ -{ - "$schema": "./node_modules/@angular/cli/lib/config/schema.json", - "version": 1, - "newProjectRoot": "projects", - "projects": { - "testApp": { - "projectType": "application", - "schematics": { - "@schematics/angular:application": { - "strict": true - } - }, - "root": "", - "sourceRoot": "src", - "prefix": "app", - "architect": { - "build": { - "builder": "@angular-devkit/build-angular:browser", - "options": { - "outputPath": "dist", - "index": "src/index.html", - "main": "src/main.ts", - "polyfills": "src/polyfills.ts", - "tsConfig": "tsconfig.app.json", - "assets": [ - "src/favicon.ico", - "src/assets" - ], - "styles": [ - "./node_modules/@angular/material/prebuilt-themes/indigo-pink.css", - "src/styles.css" - ], - "scripts": [] - }, - "configurations": { - "production": { - "budgets": [ - { - "type": "initial", - "maximumWarning": "500kb", - "maximumError": "1mb" - }, - { - "type": "anyComponentStyle", - "maximumWarning": "2kb", - "maximumError": "4kb" - } - ], - "fileReplacements": [ - { - "replace": "src/environments/environment.ts", - "with": "src/environments/environment.prod.ts" - } - ], - "outputHashing": "all" - }, - "development": { - "buildOptimizer": false, - "optimization": false, - "vendorChunk": true, - "extractLicenses": false, - "sourceMap": true, - "namedChunks": true - } - }, - "defaultConfiguration": "production" - }, - "serve": { - "builder": "@angular-devkit/build-angular:dev-server", - "configurations": { - "production": { - "browserTarget": "testApp:build:production" - }, - "development": { - "browserTarget": "testApp:build:development" - } - }, - "defaultConfiguration": "development" - }, - "extract-i18n": { - "builder": "@angular-devkit/build-angular:extract-i18n", - "options": { - "browserTarget": "testApp:build" - } - }, - "test": { - "builder": "@angular-devkit/build-angular:karma", - "options": { - "main": "src/test.ts", - "polyfills": "src/polyfills.ts", - "tsConfig": "tsconfig.spec.json", - "karmaConfig": "karma.conf.js", - "assets": [ - "src/favicon.ico", - "src/assets" - ], - "styles": [ - "./node_modules/@angular/material/prebuilt-themes/indigo-pink.css", - "src/styles.css" - ], - "scripts": [] - } - } - } - } - }, - "defaultProject": "testApp" -} diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/karma.conf.js b/ai-solutions/windows/angular-app-nlp/Electron app UI/karma.conf.js deleted file mode 100644 index d5dc9e5d..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/karma.conf.js +++ /dev/null @@ -1,53 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
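
For reference, the camera path in main.cpp above (Inference_Camera) runs a dequeue, infer, resize, encode loop and reports throughput as frames * 1000 / elapsed_ms. The condensed sketch below captures that control flow; the queue, inference, and encoder types are stand-ins for the deleted DecodeQueue, ModelInference, and EncodeController classes, and the Dequeue signature is assumed.

```cpp
// Condensed sketch of the camera inference loop from main.cpp above
// (types and signatures are stand-ins, not the original classes).
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <opencv2/opencv.hpp>

template <typename Queue, typename Inference, typename Encoder>
void RunCameraLoop(Queue& queue, Inference& infer, Encoder& encoder, const bool& exitFlag)
{
    using clock = std::chrono::steady_clock;
    const auto start = clock::now();
    uint32_t frames = 0;

    while (!exitFlag) {
        auto item = queue.Dequeue(/*timeout_ms=*/300);   // assumed: returns null on timeout
        if (!item) continue;

        ++frames;
        cv::Mat frame(cv::Size(item->Width, item->Height), CV_8UC3,
                      item->ImageBuffer.get(), cv::Mat::AUTO_STEP);
        cv::Mat out;
        infer.Inference(frame, out, /*model_name=*/"");
        cv::resize(out, out, cv::Size(1280, 720));
        encoder.EncodeFrame(item->StreamId, out.data, out.total() * out.elemSize());
    }

    const auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(clock::now() - start).count();
    if (ms > 0)
        std::printf("frames=%u, elapsed=%lld ms, throughput=%lld fps\n",
                    frames, static_cast<long long>(ms),
                    static_cast<long long>(frames) * 1000 / ms);
}
```
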
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// Karma configuration file, see link for more information -// https://karma-runner.github.io/1.0/config/configuration-file.html - -module.exports = function (config) { - config.set({ - basePath: '', - frameworks: ['jasmine', '@angular-devkit/build-angular'], - plugins: [ - require('karma-jasmine'), - require('karma-chrome-launcher'), - require('karma-jasmine-html-reporter'), - require('karma-coverage'), - require('@angular-devkit/build-angular/plugins/karma') - ], - client: { - jasmine: { - // you can add configuration options for Jasmine here - // the possible options are listed at https://jasmine.github.io/api/edge/Configuration.html - // for example, you can disable the random execution with `random: false` - // or set a specific seed with `seed: 4321` - }, - clearContext: false // leave Jasmine Spec Runner output visible in browser - }, - jasmineHtmlReporter: { - suppressAll: true // removes the duplicated traces - }, - coverageReporter: { - dir: require('path').join(__dirname, './coverage/test-app'), - subdir: '.', - reporters: [ - { type: 'html' }, - { type: 'text-summary' } - ] - }, - reporters: ['progress', 'kjhtml'], - port: 9876, - colors: true, - logLevel: config.LOG_INFO, - autoWatch: true, - browsers: ['Chrome'], - singleRun: false, - restartOnFileChange: true - }); -}; diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/main.js b/ai-solutions/windows/angular-app-nlp/Electron app UI/main.js deleted file mode 100644 index 895798b0..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/main.js +++ /dev/null @@ -1,63 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// Modules to control application life and create native browser window -const {app, BrowserWindow} = require('electron') -const path = require('path') -const server = require("./server") -require('dotenv').config(); - -const PORT = process.env.PORT || 3000; - -function createWindow () { - // Create the browser window. - const mainWindow = new BrowserWindow({ - minWidth: 1000, - minHeight: 600, - title:"QNN - QA", - show: false, - webPreferences: { - nodeIntegration: true - } - }) - - // and load the index.html of the app. - mainWindow.loadURL('http://localhost:'+PORT); - mainWindow.once('ready-to-show',() =>{ - mainWindow.show(); - mainWindow.maximize(); - } - ) - - // Open the DevTools. - //mainWindow.webContents.openDevTools() -} - -// This method will be called when Electron has finished -// initialization and is ready to create browser windows. -// Some APIs can only be used after this event occurs. -app.whenReady().then(() => { - createWindow() - - app.on('activate', function () { - // On macOS it's common to re-create a window in the app when the - // dock icon is clicked and there are no other windows open. - if (BrowserWindow.getAllWindows().length === 0) createWindow() - }) -}) - -// Quit when all windows are closed, except on macOS. There, it's common -// for applications and their menu bar to stay active until the user quits -// explicitly with Cmd + Q. 
-app.on('window-all-closed', function () { - if (process.platform !== 'darwin') app.quit() -}) - -// In this file you can include the rest of your app's specific main process -// code. You can also put them in separate files and require them here. diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/package-lock.json b/ai-solutions/windows/angular-app-nlp/Electron app UI/package-lock.json deleted file mode 100644 index d3cdd9b7..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/package-lock.json +++ /dev/null @@ -1,15879 +0,0 @@ -{ - "name": "QA_App", - "version": "1.0.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "QA_App", - "version": "1.0.0", - "dependencies": { - "@angular/animations": "~13.2.0", - "@angular/cdk": "^12.2.7", - "@angular/common": "~13.2.0", - "@angular/compiler": "~13.2.0", - "@angular/core": "~13.2.0", - "@angular/forms": "~13.2.0", - "@angular/material": "^12.2.8", - "@angular/platform-browser": "~13.2.0", - "@angular/platform-browser-dynamic": "~13.2.0", - "@angular/router": "~13.2.0", - "body-parser": "^1.19.0", - "dotenv": "^10.0.0", - "express": "^4.17.1", - "path": "^0.12.7", - "request": "^2.88.2", - "rxjs": "~7.5.0", - "tslib": "^2.3.0", - "zone.js": "~0.11.4" - }, - "devDependencies": { - "@angular-devkit/build-angular": "~13.2.4", - "@angular/cli": "~13.2.4", - "@angular/compiler-cli": "~13.2.0", - "@types/jasmine": "~3.10.0", - "@types/node": "^12.11.1", - "electron": "^17.0.1", - "electron-builder": "^22.11.7", - "electron-packager": "^15.4.0", - "jasmine-core": "~4.0.0", - "karma": "~6.3.0", - "karma-chrome-launcher": "~3.1.0", - "karma-coverage": "~2.1.0", - "karma-jasmine": "~4.0.0", - "karma-jasmine-html-reporter": "~1.7.0", - "nodemon": "^2.0.13", - "typescript": "~4.5.2" - } - }, - "node_modules/@ampproject/remapping": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-1.1.1.tgz", - "integrity": "sha512-YVAcA4DKLOj296CF5SrQ8cYiMRiUGc2sqFpLxsDGWE34suHqhGP/5yMsDHKsrh8hs8I5TiRVXNwKPWQpX3iGjw==", - "dev": true, - "dependencies": { - "@jridgewell/resolve-uri": "^3.0.3", - "sourcemap-codec": "1.4.8" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@angular-devkit/architect": { - "version": "0.1302.6", - "resolved": "https://registry.npmjs.org/@angular-devkit/architect/-/architect-0.1302.6.tgz", - "integrity": "sha512-NztzorUMfwJeRaT7SY00Y8WSqc2lQYuF11yNoyEm7Dae3V7VZ28rW2Z9RwibP27rYQL0RjSMaz2wKITHX2vOAw==", - "dev": true, - "dependencies": { - "@angular-devkit/core": "13.2.6", - "rxjs": "6.6.7" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0", - "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", - "yarn": ">= 1.13.0" - } - }, - "node_modules/@angular-devkit/architect/node_modules/rxjs": { - "version": "6.6.7", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.7.tgz", - "integrity": "sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ==", - "dev": true, - "dependencies": { - "tslib": "^1.9.0" - }, - "engines": { - "npm": ">=2.0.0" - } - }, - "node_modules/@angular-devkit/architect/node_modules/tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", - "dev": true - }, - "node_modules/@angular-devkit/build-angular": { - "version": "13.2.6", - "resolved": 
"https://registry.npmjs.org/@angular-devkit/build-angular/-/build-angular-13.2.6.tgz", - "integrity": "sha512-Y2ojy6xbZ0kwScppcutLHBP8eW0qNOjburTISSBU/L5l/9FOeZ1E7yAreKuVu/qibZiLbSJfAhk+SLwhRHFSSQ==", - "dev": true, - "dependencies": { - "@ampproject/remapping": "1.1.1", - "@angular-devkit/architect": "0.1302.6", - "@angular-devkit/build-webpack": "0.1302.6", - "@angular-devkit/core": "13.2.6", - "@babel/core": "7.16.12", - "@babel/generator": "7.16.8", - "@babel/helper-annotate-as-pure": "7.16.7", - "@babel/plugin-proposal-async-generator-functions": "7.16.8", - "@babel/plugin-transform-async-to-generator": "7.16.8", - "@babel/plugin-transform-runtime": "7.16.10", - "@babel/preset-env": "7.16.11", - "@babel/runtime": "7.16.7", - "@babel/template": "7.16.7", - "@discoveryjs/json-ext": "0.5.6", - "@ngtools/webpack": "13.2.6", - "ansi-colors": "4.1.1", - "babel-loader": "8.2.3", - "babel-plugin-istanbul": "6.1.1", - "browserslist": "^4.9.1", - "cacache": "15.3.0", - "circular-dependency-plugin": "5.2.2", - "copy-webpack-plugin": "10.2.1", - "core-js": "3.20.3", - "critters": "0.0.16", - "css-loader": "6.5.1", - "esbuild-wasm": "0.14.22", - "glob": "7.2.0", - "https-proxy-agent": "5.0.0", - "inquirer": "8.2.0", - "jsonc-parser": "3.0.0", - "karma-source-map-support": "1.4.0", - "less": "4.1.2", - "less-loader": "10.2.0", - "license-webpack-plugin": "4.0.2", - "loader-utils": "3.2.0", - "mini-css-extract-plugin": "2.5.3", - "minimatch": "3.0.4", - "open": "8.4.0", - "ora": "5.4.1", - "parse5-html-rewriting-stream": "6.0.1", - "piscina": "3.2.0", - "postcss": "8.4.5", - "postcss-import": "14.0.2", - "postcss-loader": "6.2.1", - "postcss-preset-env": "7.2.3", - "regenerator-runtime": "0.13.9", - "resolve-url-loader": "5.0.0", - "rxjs": "6.6.7", - "sass": "1.49.0", - "sass-loader": "12.4.0", - "semver": "7.3.5", - "source-map-loader": "3.0.1", - "source-map-support": "0.5.21", - "stylus": "0.56.0", - "stylus-loader": "6.2.0", - "terser": "5.11.0", - "text-table": "0.2.0", - "tree-kill": "1.2.2", - "tslib": "2.3.1", - "webpack": "5.67.0", - "webpack-dev-middleware": "5.3.0", - "webpack-dev-server": "4.7.3", - "webpack-merge": "5.8.0", - "webpack-subresource-integrity": "5.1.0" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0", - "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", - "yarn": ">= 1.13.0" - }, - "optionalDependencies": { - "esbuild": "0.14.22" - }, - "peerDependencies": { - "@angular/compiler-cli": "^13.0.0", - "@angular/localize": "^13.0.0", - "@angular/service-worker": "^13.0.0", - "karma": "^6.3.0", - "ng-packagr": "^13.0.0", - "protractor": "^7.0.0", - "tailwindcss": "^2.0.0 || ^3.0.0", - "typescript": ">=4.4.3 <4.6" - }, - "peerDependenciesMeta": { - "@angular/localize": { - "optional": true - }, - "@angular/service-worker": { - "optional": true - }, - "karma": { - "optional": true - }, - "ng-packagr": { - "optional": true - }, - "protractor": { - "optional": true - }, - "tailwindcss": { - "optional": true - } - } - }, - "node_modules/@angular-devkit/build-angular/node_modules/esbuild": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.14.22.tgz", - "integrity": "sha512-CjFCFGgYtbFOPrwZNJf7wsuzesx8kqwAffOlbYcFDLFuUtP8xloK1GH+Ai13Qr0RZQf9tE7LMTHJ2iVGJ1SKZA==", - "dev": true, - "hasInstallScript": true, - "optional": true, - "bin": { - "esbuild": "bin/esbuild" - }, - "engines": { - "node": ">=12" - }, - "optionalDependencies": { - "esbuild-android-arm64": "0.14.22", - "esbuild-darwin-64": "0.14.22", - "esbuild-darwin-arm64": "0.14.22", - 
"esbuild-freebsd-64": "0.14.22", - "esbuild-freebsd-arm64": "0.14.22", - "esbuild-linux-32": "0.14.22", - "esbuild-linux-64": "0.14.22", - "esbuild-linux-arm": "0.14.22", - "esbuild-linux-arm64": "0.14.22", - "esbuild-linux-mips64le": "0.14.22", - "esbuild-linux-ppc64le": "0.14.22", - "esbuild-linux-riscv64": "0.14.22", - "esbuild-linux-s390x": "0.14.22", - "esbuild-netbsd-64": "0.14.22", - "esbuild-openbsd-64": "0.14.22", - "esbuild-sunos-64": "0.14.22", - "esbuild-windows-32": "0.14.22", - "esbuild-windows-64": "0.14.22", - "esbuild-windows-arm64": "0.14.22" - } - }, - "node_modules/@angular-devkit/build-angular/node_modules/rxjs": { - "version": "6.6.7", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.7.tgz", - "integrity": "sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ==", - "dev": true, - "dependencies": { - "tslib": "^1.9.0" - }, - "engines": { - "npm": ">=2.0.0" - } - }, - "node_modules/@angular-devkit/build-angular/node_modules/rxjs/node_modules/tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", - "dev": true - }, - "node_modules/@angular-devkit/build-webpack": { - "version": "0.1302.6", - "resolved": "https://registry.npmjs.org/@angular-devkit/build-webpack/-/build-webpack-0.1302.6.tgz", - "integrity": "sha512-TYEh2n9tPe932rEIgdiSpojOqtDppW2jzb/empVqCkLF7WUZsXKvTanttZC34L6R2VD6SAGWhb6JDg75ghUVYA==", - "dev": true, - "dependencies": { - "@angular-devkit/architect": "0.1302.6", - "rxjs": "6.6.7" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0", - "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", - "yarn": ">= 1.13.0" - }, - "peerDependencies": { - "webpack": "^5.30.0", - "webpack-dev-server": "^4.0.0" - } - }, - "node_modules/@angular-devkit/build-webpack/node_modules/rxjs": { - "version": "6.6.7", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.7.tgz", - "integrity": "sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ==", - "dev": true, - "dependencies": { - "tslib": "^1.9.0" - }, - "engines": { - "npm": ">=2.0.0" - } - }, - "node_modules/@angular-devkit/build-webpack/node_modules/tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", - "dev": true - }, - "node_modules/@angular-devkit/core": { - "version": "13.2.6", - "resolved": "https://registry.npmjs.org/@angular-devkit/core/-/core-13.2.6.tgz", - "integrity": "sha512-8h2mWdBTN/dYwZuzKMg2IODlOWMdbJcpQG4XVrkk9ejCPP+3aX5Aa3glCe/voN6eBNiRfs8YDM0jxmpN2aWVtg==", - "dev": true, - "dependencies": { - "ajv": "8.9.0", - "ajv-formats": "2.1.1", - "fast-json-stable-stringify": "2.1.0", - "magic-string": "0.25.7", - "rxjs": "6.6.7", - "source-map": "0.7.3" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0", - "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", - "yarn": ">= 1.13.0" - }, - "peerDependencies": { - "chokidar": "^3.5.2" - }, - "peerDependenciesMeta": { - "chokidar": { - "optional": true - } - } - }, - "node_modules/@angular-devkit/core/node_modules/ajv": { - "version": "8.9.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.9.0.tgz", - "integrity": "sha512-qOKJyNj/h+OWx7s5DePL6Zu1KeM9jPZhwBqs+7DzP6bGOvqzVCSf0xueYmVuaC/oQ/VtS2zLMLHdQFbkka+XDQ==", - "dev": true, - "dependencies": { - 
"fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/@angular-devkit/core/node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", - "dev": true - }, - "node_modules/@angular-devkit/core/node_modules/rxjs": { - "version": "6.6.7", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.7.tgz", - "integrity": "sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ==", - "dev": true, - "dependencies": { - "tslib": "^1.9.0" - }, - "engines": { - "npm": ">=2.0.0" - } - }, - "node_modules/@angular-devkit/core/node_modules/tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", - "dev": true - }, - "node_modules/@angular-devkit/schematics": { - "version": "13.2.6", - "resolved": "https://registry.npmjs.org/@angular-devkit/schematics/-/schematics-13.2.6.tgz", - "integrity": "sha512-mPgSqdnZRuPSMeUA+T+mwVCrq2yhXpcYm1/Rjbhy09CyHs4wSrFv21WHCrE6shlvXpcmwr0n+I0DIeagAPmjUA==", - "dev": true, - "dependencies": { - "@angular-devkit/core": "13.2.6", - "jsonc-parser": "3.0.0", - "magic-string": "0.25.7", - "ora": "5.4.1", - "rxjs": "6.6.7" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0", - "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", - "yarn": ">= 1.13.0" - } - }, - "node_modules/@angular-devkit/schematics/node_modules/rxjs": { - "version": "6.6.7", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.7.tgz", - "integrity": "sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ==", - "dev": true, - "dependencies": { - "tslib": "^1.9.0" - }, - "engines": { - "npm": ">=2.0.0" - } - }, - "node_modules/@angular-devkit/schematics/node_modules/tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", - "dev": true - }, - "node_modules/@angular/animations": { - "version": "13.2.7", - "resolved": "https://registry.npmjs.org/@angular/animations/-/animations-13.2.7.tgz", - "integrity": "sha512-FthGqRPQ1AOcOx/NIW65xeFYkQZJ7PpXcX59Kt+qkoUzngAQEY+UUpOteG52tmL0iZSVwOCjtxRFi9w4heVgEg==", - "dependencies": { - "tslib": "^2.3.0" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0" - }, - "peerDependencies": { - "@angular/core": "13.2.7" - } - }, - "node_modules/@angular/cdk": { - "version": "12.2.13", - "resolved": "https://registry.npmjs.org/@angular/cdk/-/cdk-12.2.13.tgz", - "integrity": "sha512-zSKRhECyFqhingIeyRInIyTvYErt4gWo+x5DQr0b7YLUbU8DZSwWnG4w76Ke2s4U8T7ry1jpJBHoX/e8YBpGMg==", - "dependencies": { - "tslib": "^2.2.0" - }, - "optionalDependencies": { - "parse5": "^5.0.0" - }, - "peerDependencies": { - "@angular/common": "^12.0.0 || ^13.0.0-0", - "@angular/core": "^12.0.0 || ^13.0.0-0", - "rxjs": "^6.5.3 || ^7.0.0" - } - }, - "node_modules/@angular/cli": { - "version": "13.2.6", - "resolved": "https://registry.npmjs.org/@angular/cli/-/cli-13.2.6.tgz", - "integrity": 
"sha512-xIjEaQI5sWemXXc7GXLm4u9UL5sjtrQL/y1PJvvk/Jsa8+kIT+MutOfZfC7zcdAh9fqHd8mokH3guFV8BJdFxA==", - "dev": true, - "hasInstallScript": true, - "dependencies": { - "@angular-devkit/architect": "0.1302.6", - "@angular-devkit/core": "13.2.6", - "@angular-devkit/schematics": "13.2.6", - "@schematics/angular": "13.2.6", - "@yarnpkg/lockfile": "1.1.0", - "ansi-colors": "4.1.1", - "debug": "4.3.3", - "ini": "2.0.0", - "inquirer": "8.2.0", - "jsonc-parser": "3.0.0", - "npm-package-arg": "8.1.5", - "npm-pick-manifest": "6.1.1", - "open": "8.4.0", - "ora": "5.4.1", - "pacote": "12.0.3", - "resolve": "1.22.0", - "semver": "7.3.5", - "symbol-observable": "4.0.0", - "uuid": "8.3.2" - }, - "bin": { - "ng": "bin/ng.js" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0", - "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", - "yarn": ">= 1.13.0" - } - }, - "node_modules/@angular/cli/node_modules/debug": { - "version": "4.3.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.3.tgz", - "integrity": "sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/@angular/cli/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/@angular/cli/node_modules/uuid": { - "version": "8.3.2", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", - "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", - "dev": true, - "bin": { - "uuid": "dist/bin/uuid" - } - }, - "node_modules/@angular/common": { - "version": "13.2.7", - "resolved": "https://registry.npmjs.org/@angular/common/-/common-13.2.7.tgz", - "integrity": "sha512-gSkv9aMz5q2ynIqSwgp5HEVVwlmpMYGVZFNZnEnezGY96Hza0eXlb/AYdqO7S3VQVvx+FXpvXP/eq/SsCw7rFA==", - "dependencies": { - "tslib": "^2.3.0" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0" - }, - "peerDependencies": { - "@angular/core": "13.2.7", - "rxjs": "^6.5.3 || ^7.4.0" - } - }, - "node_modules/@angular/compiler": { - "version": "13.2.7", - "resolved": "https://registry.npmjs.org/@angular/compiler/-/compiler-13.2.7.tgz", - "integrity": "sha512-auRMo+k+xCQmIBkZ5UgkAAmhbpcoOUWQrJN2PQnPl88DPquui3tXC4R6RANpWCu59oT8m29FQMviHcN3ZTFl6Q==", - "dependencies": { - "tslib": "^2.3.0" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0" - } - }, - "node_modules/@angular/compiler-cli": { - "version": "13.2.7", - "resolved": "https://registry.npmjs.org/@angular/compiler-cli/-/compiler-cli-13.2.7.tgz", - "integrity": "sha512-EZFWHyC2PO3ECEgX/WTMaTEvH4isvtw8E/l+48YHvINeHoxPGF7Or8qEeu/lvGlXMzBd89QH1ohutfz93vNz+g==", - "dev": true, - "dependencies": { - "@babel/core": "^7.17.2", - "chokidar": "^3.0.0", - "convert-source-map": "^1.5.1", - "dependency-graph": "^0.11.0", - "magic-string": "^0.26.0", - "reflect-metadata": "^0.1.2", - "semver": "^7.0.0", - "sourcemap-codec": "^1.4.8", - "tslib": "^2.3.0", - "yargs": "^17.2.1" - }, - "bin": { - "ng-xi18n": "bundles/src/bin/ng_xi18n.js", - "ngc": "bundles/src/bin/ngc.js", - "ngcc": "bundles/ngcc/main-ngcc.js" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0" - }, - "peerDependencies": { - "@angular/compiler": "13.2.7", - "typescript": 
">=4.4.2 <4.6" - } - }, - "node_modules/@angular/compiler-cli/node_modules/@ampproject/remapping": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.1.2.tgz", - "integrity": "sha512-hoyByceqwKirw7w3Z7gnIIZC3Wx3J484Y3L/cMpXFbr7d9ZQj2mODrirNzcJa+SM3UlpWXYvKV4RlRpFXlWgXg==", - "dev": true, - "dependencies": { - "@jridgewell/trace-mapping": "^0.3.0" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@angular/compiler-cli/node_modules/@babel/core": { - "version": "7.17.8", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.17.8.tgz", - "integrity": "sha512-OdQDV/7cRBtJHLSOBqqbYNkOcydOgnX59TZx4puf41fzcVtN3e/4yqY8lMQsK+5X2lJtAdmA+6OHqsj1hBJ4IQ==", - "dev": true, - "dependencies": { - "@ampproject/remapping": "^2.1.0", - "@babel/code-frame": "^7.16.7", - "@babel/generator": "^7.17.7", - "@babel/helper-compilation-targets": "^7.17.7", - "@babel/helper-module-transforms": "^7.17.7", - "@babel/helpers": "^7.17.8", - "@babel/parser": "^7.17.8", - "@babel/template": "^7.16.7", - "@babel/traverse": "^7.17.3", - "@babel/types": "^7.17.0", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.1.2", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/babel" - } - }, - "node_modules/@angular/compiler-cli/node_modules/@babel/core/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@angular/compiler-cli/node_modules/@babel/generator": { - "version": "7.17.7", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.17.7.tgz", - "integrity": "sha512-oLcVCTeIFadUoArDTwpluncplrYBmTCCZZgXCbgNGvOBBiSDDK3eWO4b/+eOTli5tKv1lg+a5/NAXg+nTcei1w==", - "dev": true, - "dependencies": { - "@babel/types": "^7.17.0", - "jsesc": "^2.5.1", - "source-map": "^0.5.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@angular/compiler-cli/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/@angular/compiler-cli/node_modules/magic-string": { - "version": "0.26.1", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.26.1.tgz", - "integrity": "sha512-ndThHmvgtieXe8J/VGPjG+Apu7v7ItcD5mhEIvOscWjPF/ccOiLxHaSuCAS2G+3x4GKsAbT8u7zdyamupui8Tg==", - "dev": true, - "dependencies": { - "sourcemap-codec": "^1.4.8" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@angular/compiler-cli/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/@angular/compiler-cli/node_modules/source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "dev": true, - "engines": 
{ - "node": ">=0.10.0" - } - }, - "node_modules/@angular/core": { - "version": "13.2.7", - "resolved": "https://registry.npmjs.org/@angular/core/-/core-13.2.7.tgz", - "integrity": "sha512-6J6C2ymIy+cZudock25BLEZFckhqFfXOSZw5YBIoAJuntV4hckLNfRTn/dxn5qmWF/Vw60NKdhV367YYejz6Gg==", - "dependencies": { - "tslib": "^2.3.0" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0" - }, - "peerDependencies": { - "rxjs": "^6.5.3 || ^7.4.0", - "zone.js": "~0.11.4" - } - }, - "node_modules/@angular/forms": { - "version": "13.2.7", - "resolved": "https://registry.npmjs.org/@angular/forms/-/forms-13.2.7.tgz", - "integrity": "sha512-Did5ShmHTu52cljmNtMxUBUGEYHJ/FV5ZpKhbI7sd/VSFhXp9KWYUbfma0m7+CUJMGmpt6bmDaN0G2WS8Es1LQ==", - "dependencies": { - "tslib": "^2.3.0" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0" - }, - "peerDependencies": { - "@angular/common": "13.2.7", - "@angular/core": "13.2.7", - "@angular/platform-browser": "13.2.7", - "rxjs": "^6.5.3 || ^7.4.0" - } - }, - "node_modules/@angular/material": { - "version": "12.2.13", - "resolved": "https://registry.npmjs.org/@angular/material/-/material-12.2.13.tgz", - "integrity": "sha512-6g2GyN4qp2D+DqY2AwrQuPB3cd9gybvQVXvNRbTPXEulHr+LgGei00ySdFHFp6RvdGSMZ4i3LM1Fq3VkFxhCfQ==", - "dependencies": { - "tslib": "^2.2.0" - }, - "peerDependencies": { - "@angular/animations": "^12.0.0 || ^13.0.0-0", - "@angular/cdk": "12.2.13", - "@angular/common": "^12.0.0 || ^13.0.0-0", - "@angular/core": "^12.0.0 || ^13.0.0-0", - "@angular/forms": "^12.0.0 || ^13.0.0-0", - "rxjs": "^6.5.3 || ^7.0.0" - } - }, - "node_modules/@angular/platform-browser": { - "version": "13.2.7", - "resolved": "https://registry.npmjs.org/@angular/platform-browser/-/platform-browser-13.2.7.tgz", - "integrity": "sha512-3rpeS2n+mfey9FqJg/NQKPiyHC47vgldWXmuz5FmOCHrOY54AaFfoiwQcdxzh6Lxx/CUVm0TlOS8S/xI9iEqXw==", - "dependencies": { - "tslib": "^2.3.0" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0" - }, - "peerDependencies": { - "@angular/animations": "13.2.7", - "@angular/common": "13.2.7", - "@angular/core": "13.2.7" - }, - "peerDependenciesMeta": { - "@angular/animations": { - "optional": true - } - } - }, - "node_modules/@angular/platform-browser-dynamic": { - "version": "13.2.7", - "resolved": "https://registry.npmjs.org/@angular/platform-browser-dynamic/-/platform-browser-dynamic-13.2.7.tgz", - "integrity": "sha512-3tKiUohQ8wl4hp1zYLKvMJ7GVYpg2K5dRrihtUKkJk8xUv3iuTUI0wbNCrUDZkrWc0GMhnQNXwE22gd+hKjfKg==", - "dependencies": { - "tslib": "^2.3.0" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0" - }, - "peerDependencies": { - "@angular/common": "13.2.7", - "@angular/compiler": "13.2.7", - "@angular/core": "13.2.7", - "@angular/platform-browser": "13.2.7" - } - }, - "node_modules/@angular/router": { - "version": "13.2.7", - "resolved": "https://registry.npmjs.org/@angular/router/-/router-13.2.7.tgz", - "integrity": "sha512-VzEFKyUE8CR23IbmAjmcSFY6pa4NsjaaTqT4mDYhzFeYc7R0s58Ow9d4Fy+0sWX6rzys01rcVNCg+ifJAnwYZA==", - "dependencies": { - "tslib": "^2.3.0" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0" - }, - "peerDependencies": { - "@angular/common": "13.2.7", - "@angular/core": "13.2.7", - "@angular/platform-browser": "13.2.7", - "rxjs": "^6.5.3 || ^7.4.0" - } - }, - "node_modules/@assemblyscript/loader": { - "version": "0.10.1", - "resolved": "https://registry.npmjs.org/@assemblyscript/loader/-/loader-0.10.1.tgz", - "integrity": 
"sha512-H71nDOOL8Y7kWRLqf6Sums+01Q5msqBW2KhDUTemh1tvY04eSkSXrK0uj/4mmY0Xr16/3zyZmsrxN7CKuRbNRg==", - "dev": true - }, - "node_modules/@babel/code-frame": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.16.7.tgz", - "integrity": "sha512-iAXqUn8IIeBTNd72xsFlgaXHkMBMt6y4HJp1tIaK465CWLT/fG1aqB7ykr95gHHmlBdGbFeWWfyB4NJJ0nmeIg==", - "dev": true, - "dependencies": { - "@babel/highlight": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/compat-data": { - "version": "7.17.7", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.17.7.tgz", - "integrity": "sha512-p8pdE6j0a29TNGebNm7NzYZWB3xVZJBZ7XGs42uAKzQo8VQ3F0By/cQCtUEABwIqw5zo6WA4NbmxsfzADzMKnQ==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core": { - "version": "7.16.12", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.16.12.tgz", - "integrity": "sha512-dK5PtG1uiN2ikk++5OzSYsitZKny4wOCD0nrO4TqnW4BVBTQ2NGS3NgilvT/TEyxTST7LNyWV/T4tXDoD3fOgg==", - "dev": true, - "dependencies": { - "@babel/code-frame": "^7.16.7", - "@babel/generator": "^7.16.8", - "@babel/helper-compilation-targets": "^7.16.7", - "@babel/helper-module-transforms": "^7.16.7", - "@babel/helpers": "^7.16.7", - "@babel/parser": "^7.16.12", - "@babel/template": "^7.16.7", - "@babel/traverse": "^7.16.10", - "@babel/types": "^7.16.8", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.1.2", - "semver": "^6.3.0", - "source-map": "^0.5.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/babel" - } - }, - "node_modules/@babel/core/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/@babel/core/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/@babel/core/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/core/node_modules/source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/@babel/generator": { - "version": "7.16.8", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.16.8.tgz", - "integrity": "sha512-1ojZwE9+lOXzcWdWmO6TbUzDfqLD39CmEhN8+2cX9XkDo5yW1OpgfejfliysR2AWLpMamTiOiAp/mtroaymhpw==", - "dev": true, - "dependencies": { - "@babel/types": "^7.16.8", - "jsesc": "^2.5.1", - "source-map": "^0.5.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/generator/node_modules/source-map": { - "version": "0.5.7", - "resolved": 
"https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/@babel/helper-annotate-as-pure": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.16.7.tgz", - "integrity": "sha512-s6t2w/IPQVTAET1HitoowRGXooX8mCgtuP5195wD/QJPV6wYjpujCGF7JuMODVX2ZAJOf1GT6DT9MHEZvLOFSw==", - "dev": true, - "dependencies": { - "@babel/types": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.16.7.tgz", - "integrity": "sha512-C6FdbRaxYjwVu/geKW4ZeQ0Q31AftgRcdSnZ5/jsH6BzCJbtvXvhpfkbkThYSuutZA7nCXpPR6AD9zd1dprMkA==", - "dev": true, - "dependencies": { - "@babel/helper-explode-assignable-expression": "^7.16.7", - "@babel/types": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-compilation-targets": { - "version": "7.17.7", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.17.7.tgz", - "integrity": "sha512-UFzlz2jjd8kroj0hmCFV5zr+tQPi1dpC2cRsDV/3IEW8bJfCPrPpmcSN6ZS8RqIq4LXcmpipCQFPddyFA5Yc7w==", - "dev": true, - "dependencies": { - "@babel/compat-data": "^7.17.7", - "@babel/helper-validator-option": "^7.16.7", - "browserslist": "^4.17.5", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-compilation-targets/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/helper-create-class-features-plugin": { - "version": "7.17.6", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.17.6.tgz", - "integrity": "sha512-SogLLSxXm2OkBbSsHZMM4tUi8fUzjs63AT/d0YQIzr6GSd8Hxsbk2KYDX0k0DweAzGMj/YWeiCsorIdtdcW8Eg==", - "dev": true, - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.16.7", - "@babel/helper-environment-visitor": "^7.16.7", - "@babel/helper-function-name": "^7.16.7", - "@babel/helper-member-expression-to-functions": "^7.16.7", - "@babel/helper-optimise-call-expression": "^7.16.7", - "@babel/helper-replace-supers": "^7.16.7", - "@babel/helper-split-export-declaration": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-create-regexp-features-plugin": { - "version": "7.17.0", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.17.0.tgz", - "integrity": "sha512-awO2So99wG6KnlE+TPs6rn83gCz5WlEePJDTnLEqbchMVrBeAujURVphRdigsk094VhvZehFoNOihSlcBjwsXA==", - "dev": true, - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.16.7", - "regexpu-core": "^5.0.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-define-polyfill-provider": { - "version": "0.3.1", - "resolved": 
"https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.1.tgz", - "integrity": "sha512-J9hGMpJQmtWmj46B3kBHmL38UhJGhYX7eqkcq+2gsstyYt341HmPeWspihX43yVRA0mS+8GGk2Gckc7bY/HCmA==", - "dev": true, - "dependencies": { - "@babel/helper-compilation-targets": "^7.13.0", - "@babel/helper-module-imports": "^7.12.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/traverse": "^7.13.0", - "debug": "^4.1.1", - "lodash.debounce": "^4.0.8", - "resolve": "^1.14.2", - "semver": "^6.1.2" - }, - "peerDependencies": { - "@babel/core": "^7.4.0-0" - } - }, - "node_modules/@babel/helper-define-polyfill-provider/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/@babel/helper-define-polyfill-provider/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/@babel/helper-define-polyfill-provider/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/helper-environment-visitor": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.16.7.tgz", - "integrity": "sha512-SLLb0AAn6PkUeAfKJCCOl9e1R53pQlGAfc4y4XuMRZfqeMYLE0dM1LMhqbGAlGQY0lfw5/ohoYWAe9V1yibRag==", - "dev": true, - "dependencies": { - "@babel/types": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-explode-assignable-expression": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.16.7.tgz", - "integrity": "sha512-KyUenhWMC8VrxzkGP0Jizjo4/Zx+1nNZhgocs+gLzyZyB8SHidhoq9KK/8Ato4anhwsivfkBLftky7gvzbZMtQ==", - "dev": true, - "dependencies": { - "@babel/types": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-function-name": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.16.7.tgz", - "integrity": "sha512-QfDfEnIUyyBSR3HtrtGECuZ6DAyCkYFp7GHl75vFtTnn6pjKeK0T1DB5lLkFvBea8MdaiUABx3osbgLyInoejA==", - "dev": true, - "dependencies": { - "@babel/helper-get-function-arity": "^7.16.7", - "@babel/template": "^7.16.7", - "@babel/types": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-get-function-arity": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.16.7.tgz", - "integrity": "sha512-flc+RLSOBXzNzVhcLu6ujeHUrD6tANAOU5ojrRx/as+tbzf8+stUCj7+IfRRoAbEZqj/ahXEMsjhOhgeZsrnTw==", - "dev": true, - "dependencies": { - "@babel/types": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-hoist-variables": { - "version": "7.16.7", - "resolved": 
"https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.16.7.tgz", - "integrity": "sha512-m04d/0Op34H5v7pbZw6pSKP7weA6lsMvfiIAMeIvkY/R4xQtBSMFEigu9QTZ2qB/9l22vsxtM8a+Q8CzD255fg==", - "dev": true, - "dependencies": { - "@babel/types": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-member-expression-to-functions": { - "version": "7.17.7", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.17.7.tgz", - "integrity": "sha512-thxXgnQ8qQ11W2wVUObIqDL4p148VMxkt5T/qpN5k2fboRyzFGFmKsTGViquyM5QHKUy48OZoca8kw4ajaDPyw==", - "dev": true, - "dependencies": { - "@babel/types": "^7.17.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-module-imports": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.16.7.tgz", - "integrity": "sha512-LVtS6TqjJHFc+nYeITRo6VLXve70xmq7wPhWTqDJusJEgGmkAACWwMiTNrvfoQo6hEhFwAIixNkvB0jPXDL8Wg==", - "dev": true, - "dependencies": { - "@babel/types": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-module-transforms": { - "version": "7.17.7", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.17.7.tgz", - "integrity": "sha512-VmZD99F3gNTYB7fJRDTi+u6l/zxY0BE6OIxPSU7a50s6ZUQkHwSDmV92FfM+oCG0pZRVojGYhkR8I0OGeCVREw==", - "dev": true, - "dependencies": { - "@babel/helper-environment-visitor": "^7.16.7", - "@babel/helper-module-imports": "^7.16.7", - "@babel/helper-simple-access": "^7.17.7", - "@babel/helper-split-export-declaration": "^7.16.7", - "@babel/helper-validator-identifier": "^7.16.7", - "@babel/template": "^7.16.7", - "@babel/traverse": "^7.17.3", - "@babel/types": "^7.17.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-optimise-call-expression": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.16.7.tgz", - "integrity": "sha512-EtgBhg7rd/JcnpZFXpBy0ze1YRfdm7BnBX4uKMBd3ixa3RGAE002JZB66FJyNH7g0F38U05pXmA5P8cBh7z+1w==", - "dev": true, - "dependencies": { - "@babel/types": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-plugin-utils": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.16.7.tgz", - "integrity": "sha512-Qg3Nk7ZxpgMrsox6HreY1ZNKdBq7K72tDSliA6dCl5f007jR4ne8iD5UzuNnCJH2xBf2BEEVGr+/OL6Gdp7RxA==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-remap-async-to-generator": { - "version": "7.16.8", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.16.8.tgz", - "integrity": "sha512-fm0gH7Flb8H51LqJHy3HJ3wnE1+qtYR2A99K06ahwrawLdOFsCEWjZOrYricXJHoPSudNKxrMBUPEIPxiIIvBw==", - "dev": true, - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.16.7", - "@babel/helper-wrap-function": "^7.16.8", - "@babel/types": "^7.16.8" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-replace-supers": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.16.7.tgz", - "integrity": "sha512-y9vsWilTNaVnVh6xiJfABzsNpgDPKev9HnAgz6Gb1p6UUwf9NepdlsV7VXGCftJM+jqD5f7JIEubcpLjZj5dBw==", - "dev": true, - "dependencies": { - 
"@babel/helper-environment-visitor": "^7.16.7", - "@babel/helper-member-expression-to-functions": "^7.16.7", - "@babel/helper-optimise-call-expression": "^7.16.7", - "@babel/traverse": "^7.16.7", - "@babel/types": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-simple-access": { - "version": "7.17.7", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.17.7.tgz", - "integrity": "sha512-txyMCGroZ96i+Pxr3Je3lzEJjqwaRC9buMUgtomcrLe5Nd0+fk1h0LLA+ixUF5OW7AhHuQ7Es1WcQJZmZsz2XA==", - "dev": true, - "dependencies": { - "@babel/types": "^7.17.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.16.0", - "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.16.0.tgz", - "integrity": "sha512-+il1gTy0oHwUsBQZyJvukbB4vPMdcYBrFHa0Uc4AizLxbq6BOYC51Rv4tWocX9BLBDLZ4kc6qUFpQ6HRgL+3zw==", - "dev": true, - "dependencies": { - "@babel/types": "^7.16.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-split-export-declaration": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.16.7.tgz", - "integrity": "sha512-xbWoy/PFoxSWazIToT9Sif+jJTlrMcndIsaOKvTA6u7QEo7ilkRZpjew18/W3c7nm8fXdUDXh02VXTbZ0pGDNw==", - "dev": true, - "dependencies": { - "@babel/types": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-validator-identifier": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.16.7.tgz", - "integrity": "sha512-hsEnFemeiW4D08A5gUAZxLBTXpZ39P+a+DGDsHw1yxqyQ/jzFEnxf5uTEGp+3bzAbNOxU1paTgYS4ECU/IgfDw==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-validator-option": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.16.7.tgz", - "integrity": "sha512-TRtenOuRUVo9oIQGPC5G9DgK4743cdxvtOw0weQNpZXaS16SCBi5MNjZF8vba3ETURjZpTbVn7Vvcf2eAwFozQ==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-wrap-function": { - "version": "7.16.8", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.16.8.tgz", - "integrity": "sha512-8RpyRVIAW1RcDDGTA+GpPAwV22wXCfKOoM9bet6TLkGIFTkRQSkH1nMQ5Yet4MpoXe1ZwHPVtNasc2w0uZMqnw==", - "dev": true, - "dependencies": { - "@babel/helper-function-name": "^7.16.7", - "@babel/template": "^7.16.7", - "@babel/traverse": "^7.16.8", - "@babel/types": "^7.16.8" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helpers": { - "version": "7.17.8", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.17.8.tgz", - "integrity": "sha512-QcL86FGxpfSJwGtAvv4iG93UL6bmqBdmoVY0CMCU2g+oD2ezQse3PT5Pa+jiD6LJndBQi0EDlpzOWNlLuhz5gw==", - "dev": true, - "dependencies": { - "@babel/template": "^7.16.7", - "@babel/traverse": "^7.17.3", - "@babel/types": "^7.17.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/highlight": { - "version": "7.16.10", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.16.10.tgz", - "integrity": "sha512-5FnTQLSLswEj6IkgVw5KusNUUFY9ZGqe/TRFnP/BKYHYgfh7tc+C7mwiy95/yNP7Dh9x580Vv8r7u7ZfTBFxdw==", - "dev": true, - "dependencies": { - 
"@babel/helper-validator-identifier": "^7.16.7", - "chalk": "^2.0.0", - "js-tokens": "^4.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/parser": { - "version": "7.17.8", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.17.8.tgz", - "integrity": "sha512-BoHhDJrJXqcg+ZL16Xv39H9n+AqJ4pcDrQBGZN+wHxIysrLZ3/ECwCBUch/1zUNhnsXULcONU3Ei5Hmkfk6kiQ==", - "dev": true, - "bin": { - "parser": "bin/babel-parser.js" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.16.7.tgz", - "integrity": "sha512-anv/DObl7waiGEnC24O9zqL0pSuI9hljihqiDuFHC8d7/bjr/4RLGPWuc8rYOff/QPzbEPSkzG8wGG9aDuhHRg==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.16.7.tgz", - "integrity": "sha512-di8vUHRdf+4aJ7ltXhaDbPoszdkh59AQtJM5soLsuHpQJdFQZOA4uGj0V2u/CZ8bJ/u8ULDL5yq6FO/bCXnKHw==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/helper-skip-transparent-expression-wrappers": "^7.16.0", - "@babel/plugin-proposal-optional-chaining": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.13.0" - } - }, - "node_modules/@babel/plugin-proposal-async-generator-functions": { - "version": "7.16.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.16.8.tgz", - "integrity": "sha512-71YHIvMuiuqWJQkebWJtdhQTfd4Q4mF76q2IX37uZPkG9+olBxsX+rH1vkhFto4UeJZ9dPY2s+mDvhDm1u2BGQ==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-async-generator-functions instead.", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/helper-remap-async-to-generator": "^7.16.8", - "@babel/plugin-syntax-async-generators": "^7.8.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-class-properties": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.16.7.tgz", - "integrity": "sha512-IobU0Xme31ewjYOShSIqd/ZGM/r/cuOz2z0MDbNrhF5FW+ZVgi0f2lyeoj9KFPDOAqsYxmLWZte1WOwlvY9aww==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. 
Please use @babel/plugin-transform-class-properties instead.", - "dev": true, - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-class-static-block": { - "version": "7.17.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.17.6.tgz", - "integrity": "sha512-X/tididvL2zbs7jZCeeRJ8167U/+Ac135AM6jCAx6gYXDUviZV5Ku9UDvWS2NCuWlFjIRXklYhwo6HhAC7ETnA==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-class-static-block instead.", - "dev": true, - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.17.6", - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/plugin-syntax-class-static-block": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.12.0" - } - }, - "node_modules/@babel/plugin-proposal-dynamic-import": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.16.7.tgz", - "integrity": "sha512-I8SW9Ho3/8DRSdmDdH3gORdyUuYnk1m4cMxUAdu5oy4n3OfN8flDEH+d60iG7dUfi0KkYwSvoalHzzdRzpWHTg==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-dynamic-import instead.", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/plugin-syntax-dynamic-import": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-export-namespace-from": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.16.7.tgz", - "integrity": "sha512-ZxdtqDXLRGBL64ocZcs7ovt71L3jhC1RGSyR996svrCi3PYqHNkb3SwPJCs8RIzD86s+WPpt2S73+EHCGO+NUA==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-export-namespace-from instead.", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-json-strings": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.16.7.tgz", - "integrity": "sha512-lNZ3EEggsGY78JavgbHsK9u5P3pQaW7k4axlgFLYkMd7UBsiNahCITShLjNQschPyjtO6dADrL24757IdhBrsQ==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. 
Please use @babel/plugin-transform-json-strings instead.", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/plugin-syntax-json-strings": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-logical-assignment-operators": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.16.7.tgz", - "integrity": "sha512-K3XzyZJGQCr00+EtYtrDjmwX7o7PLK6U9bi1nCwkQioRFVUv6dJoxbQjtWVtP+bCPy82bONBKG8NPyQ4+i6yjg==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-logical-assignment-operators instead.", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-nullish-coalescing-operator": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.16.7.tgz", - "integrity": "sha512-aUOrYU3EVtjf62jQrCj63pYZ7k6vns2h/DQvHPWGmsJRYzWXZ6/AsfgpiRy6XiuIDADhJzP2Q9MwSMKauBQ+UQ==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-nullish-coalescing-operator instead.", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-numeric-separator": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.16.7.tgz", - "integrity": "sha512-vQgPMknOIgiuVqbokToyXbkY/OmmjAzr/0lhSIbG/KmnzXPGwW/AdhdKpi+O4X/VkWiWjnkKOBiqJrTaC98VKw==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-numeric-separator instead.", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/plugin-syntax-numeric-separator": "^7.10.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-object-rest-spread": { - "version": "7.17.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.17.3.tgz", - "integrity": "sha512-yuL5iQA/TbZn+RGAfxQXfi7CNLmKi1f8zInn4IgobuCWcAb7i+zj4TYzQ9l8cEzVyJ89PDGuqxK1xZpUDISesw==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. 
Please use @babel/plugin-transform-object-rest-spread instead.", - "dev": true, - "dependencies": { - "@babel/compat-data": "^7.17.0", - "@babel/helper-compilation-targets": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-optional-catch-binding": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.16.7.tgz", - "integrity": "sha512-eMOH/L4OvWSZAE1VkHbr1vckLG1WUcHGJSLqqQwl2GaUqG6QjddvrOaTUMNYiv77H5IKPMZ9U9P7EaHwvAShfA==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-optional-catch-binding instead.", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-optional-chaining": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.16.7.tgz", - "integrity": "sha512-eC3xy+ZrUcBtP7x+sq62Q/HYd674pPTb/77XZMb5wbDPGWIdUbSr4Agr052+zaUPSb+gGRnjxXfKFvx5iMJ+DA==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-optional-chaining instead.", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/helper-skip-transparent-expression-wrappers": "^7.16.0", - "@babel/plugin-syntax-optional-chaining": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-private-methods": { - "version": "7.16.11", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.16.11.tgz", - "integrity": "sha512-F/2uAkPlXDr8+BHpZvo19w3hLFKge+k75XUprE6jaqKxjGkSYcK+4c+bup5PdW/7W/Rpjwql7FTVEDW+fRAQsw==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-private-methods instead.", - "dev": true, - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.16.10", - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-private-property-in-object": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.16.7.tgz", - "integrity": "sha512-rMQkjcOFbm+ufe3bTZLyOfsOUOxyvLXZJCTARhJr+8UMSoZmqTe1K1BgkFcrW37rAchWg57yI69ORxiWvUINuQ==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. 
Please use @babel/plugin-transform-private-property-in-object instead.", - "dev": true, - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.16.7", - "@babel/helper-create-class-features-plugin": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-unicode-property-regex": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.16.7.tgz", - "integrity": "sha512-QRK0YI/40VLhNVGIjRNAAQkEHws0cswSdFFjpFyt943YmJIU1da9uW63Iu6NFV6CxTZW5eTDCrwZUstBWgp/Rg==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-unicode-property-regex instead.", - "dev": true, - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-async-generators": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", - "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-class-properties": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", - "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-class-static-block": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", - "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-dynamic-import": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", - "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-export-namespace-from": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", - "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.3" - }, - "peerDependencies": { - "@babel/core": 
"^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-json-strings": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", - "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-logical-assignment-operators": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", - "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", - "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-numeric-separator": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", - "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-object-rest-spread": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", - "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-optional-catch-binding": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", - "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-optional-chaining": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", - "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-private-property-in-object": { - "version": "7.14.5", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", - "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-top-level-await": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", - "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-arrow-functions": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.16.7.tgz", - "integrity": "sha512-9ffkFFMbvzTvv+7dTp/66xvZAWASuPD5Tl9LK3Z9vhOmANo6j94rik+5YMBt4CwHVMWLWpMsriIc2zsa3WW3xQ==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-async-to-generator": { - "version": "7.16.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.16.8.tgz", - "integrity": "sha512-MtmUmTJQHCnyJVrScNzNlofQJ3dLFuobYn3mwOTKHnSCMtbNsqvF71GQmJfFjdrXSsAA7iysFmYWw4bXZ20hOg==", - "dev": true, - "dependencies": { - "@babel/helper-module-imports": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/helper-remap-async-to-generator": "^7.16.8" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-block-scoped-functions": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.16.7.tgz", - "integrity": "sha512-JUuzlzmF40Z9cXyytcbZEZKckgrQzChbQJw/5PuEHYeqzCsvebDx0K0jWnIIVcmmDOAVctCgnYs0pMcrYj2zJg==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-block-scoping": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.16.7.tgz", - "integrity": "sha512-ObZev2nxVAYA4bhyusELdo9hb3H+A56bxH3FZMbEImZFiEDYVHXQSJ1hQKFlDnlt8G9bBrCZ5ZpURZUrV4G5qQ==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-classes": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.16.7.tgz", - "integrity": "sha512-WY7og38SFAGYRe64BrjKf8OrE6ulEHtr5jEYaZMwox9KebgqPi67Zqz8K53EKk1fFEJgm96r32rkKZ3qA2nCWQ==", - "dev": true, - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.16.7", - "@babel/helper-environment-visitor": "^7.16.7", - 
"@babel/helper-function-name": "^7.16.7", - "@babel/helper-optimise-call-expression": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/helper-replace-supers": "^7.16.7", - "@babel/helper-split-export-declaration": "^7.16.7", - "globals": "^11.1.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-computed-properties": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.16.7.tgz", - "integrity": "sha512-gN72G9bcmenVILj//sv1zLNaPyYcOzUho2lIJBMh/iakJ9ygCo/hEF9cpGb61SCMEDxbbyBoVQxrt+bWKu5KGw==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-destructuring": { - "version": "7.17.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.17.7.tgz", - "integrity": "sha512-XVh0r5yq9sLR4vZ6eVZe8FKfIcSgaTBxVBRSYokRj2qksf6QerYnTxz9/GTuKTH/n/HwLP7t6gtlybHetJ/6hQ==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-dotall-regex": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.16.7.tgz", - "integrity": "sha512-Lyttaao2SjZF6Pf4vk1dVKv8YypMpomAbygW+mU5cYP3S5cWTfCJjG8xV6CFdzGFlfWK81IjL9viiTvpb6G7gQ==", - "dev": true, - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-duplicate-keys": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.16.7.tgz", - "integrity": "sha512-03DvpbRfvWIXyK0/6QiR1KMTWeT6OcQ7tbhjrXyFS02kjuX/mu5Bvnh5SDSWHxyawit2g5aWhKwI86EE7GUnTw==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-exponentiation-operator": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.16.7.tgz", - "integrity": "sha512-8UYLSlyLgRixQvlYH3J2ekXFHDFLQutdy7FfFAMm3CPZ6q9wHCwnUyiXpQCe3gVVnQlHc5nsuiEVziteRNTXEA==", - "dev": true, - "dependencies": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-for-of": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.16.7.tgz", - "integrity": "sha512-/QZm9W92Ptpw7sjI9Nx1mbcsWz33+l8kuMIQnDwgQBG5s3fAfQvkRjQ7NqXhtNcKOnPkdICmUHyCaWW06HCsqg==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - 
"node_modules/@babel/plugin-transform-function-name": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.16.7.tgz", - "integrity": "sha512-SU/C68YVwTRxqWj5kgsbKINakGag0KTgq9f2iZEXdStoAbOzLHEBRYzImmA6yFo8YZhJVflvXmIHUO7GWHmxxA==", - "dev": true, - "dependencies": { - "@babel/helper-compilation-targets": "^7.16.7", - "@babel/helper-function-name": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-literals": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.16.7.tgz", - "integrity": "sha512-6tH8RTpTWI0s2sV6uq3e/C9wPo4PTqqZps4uF0kzQ9/xPLFQtipynvmT1g/dOfEJ+0EQsHhkQ/zyRId8J2b8zQ==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-member-expression-literals": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.16.7.tgz", - "integrity": "sha512-mBruRMbktKQwbxaJof32LT9KLy2f3gH+27a5XSuXo6h7R3vqltl0PgZ80C8ZMKw98Bf8bqt6BEVi3svOh2PzMw==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-amd": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.16.7.tgz", - "integrity": "sha512-KaaEtgBL7FKYwjJ/teH63oAmE3lP34N3kshz8mm4VMAw7U3PxjVwwUmxEFksbgsNUaO3wId9R2AVQYSEGRa2+g==", - "dev": true, - "dependencies": { - "@babel/helper-module-transforms": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7", - "babel-plugin-dynamic-import-node": "^2.3.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-commonjs": { - "version": "7.17.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.17.7.tgz", - "integrity": "sha512-ITPmR2V7MqioMJyrxUo2onHNC3e+MvfFiFIR0RP21d3PtlVb6sfzoxNKiphSZUOM9hEIdzCcZe83ieX3yoqjUA==", - "dev": true, - "dependencies": { - "@babel/helper-module-transforms": "^7.17.7", - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/helper-simple-access": "^7.17.7", - "babel-plugin-dynamic-import-node": "^2.3.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-systemjs": { - "version": "7.17.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.17.8.tgz", - "integrity": "sha512-39reIkMTUVagzgA5x88zDYXPCMT6lcaRKs1+S9K6NKBPErbgO/w/kP8GlNQTC87b412ZTlmNgr3k2JrWgHH+Bw==", - "dev": true, - "dependencies": { - "@babel/helper-hoist-variables": "^7.16.7", - "@babel/helper-module-transforms": "^7.17.7", - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/helper-validator-identifier": "^7.16.7", - "babel-plugin-dynamic-import-node": "^2.3.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - 
"@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-umd": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.16.7.tgz", - "integrity": "sha512-EMh7uolsC8O4xhudF2F6wedbSHm1HHZ0C6aJ7K67zcDNidMzVcxWdGr+htW9n21klm+bOn+Rx4CBsAntZd3rEQ==", - "dev": true, - "dependencies": { - "@babel/helper-module-transforms": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.16.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.16.8.tgz", - "integrity": "sha512-j3Jw+n5PvpmhRR+mrgIh04puSANCk/T/UA3m3P1MjJkhlK906+ApHhDIqBQDdOgL/r1UYpz4GNclTXxyZrYGSw==", - "dev": true, - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-transform-new-target": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.16.7.tgz", - "integrity": "sha512-xiLDzWNMfKoGOpc6t3U+etCE2yRnn3SM09BXqWPIZOBpL2gvVrBWUKnsJx0K/ADi5F5YC5f8APFfWrz25TdlGg==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-object-super": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.16.7.tgz", - "integrity": "sha512-14J1feiQVWaGvRxj2WjyMuXS2jsBkgB3MdSN5HuC2G5nRspa5RK9COcs82Pwy5BuGcjb+fYaUj94mYcOj7rCvw==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/helper-replace-supers": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-parameters": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.16.7.tgz", - "integrity": "sha512-AT3MufQ7zZEhU2hwOA11axBnExW0Lszu4RL/tAlUJBuNoRak+wehQW8h6KcXOcgjY42fHtDxswuMhMjFEuv/aw==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-property-literals": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.16.7.tgz", - "integrity": "sha512-z4FGr9NMGdoIl1RqavCqGG+ZuYjfZ/hkCIeuH6Do7tXmSm0ls11nYVSJqFEUOSJbDab5wC6lRE/w6YjVcr6Hqw==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-regenerator": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.16.7.tgz", - "integrity": "sha512-mF7jOgGYCkSJagJ6XCujSQg+6xC1M77/03K2oBmVJWoFGNUtnVJO4WHKJk3dnPC8HCcj4xBQP1Egm8DWh3Pb3Q==", - "dev": true, - "dependencies": { - 
"regenerator-transform": "^0.14.2" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-reserved-words": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.16.7.tgz", - "integrity": "sha512-KQzzDnZ9hWQBjwi5lpY5v9shmm6IVG0U9pB18zvMu2i4H90xpT4gmqwPYsn8rObiadYe2M0gmgsiOIF5A/2rtg==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-runtime": { - "version": "7.16.10", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.16.10.tgz", - "integrity": "sha512-9nwTiqETv2G7xI4RvXHNfpGdr8pAA+Q/YtN3yLK7OoK7n9OibVm/xymJ838a9A6E/IciOLPj82lZk0fW6O4O7w==", - "dev": true, - "dependencies": { - "@babel/helper-module-imports": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7", - "babel-plugin-polyfill-corejs2": "^0.3.0", - "babel-plugin-polyfill-corejs3": "^0.5.0", - "babel-plugin-polyfill-regenerator": "^0.3.0", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-runtime/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/plugin-transform-shorthand-properties": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.16.7.tgz", - "integrity": "sha512-hah2+FEnoRoATdIb05IOXf+4GzXYTq75TVhIn1PewihbpyrNWUt2JbudKQOETWw6QpLe+AIUpJ5MVLYTQbeeUg==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-spread": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.16.7.tgz", - "integrity": "sha512-+pjJpgAngb53L0iaA5gU/1MLXJIfXcYepLgXB3esVRf4fqmj8f2cxM3/FKaHsZms08hFQJkFccEWuIpm429TXg==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/helper-skip-transparent-expression-wrappers": "^7.16.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-sticky-regex": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.16.7.tgz", - "integrity": "sha512-NJa0Bd/87QV5NZZzTuZG5BPJjLYadeSZ9fO6oOUoL4iQx+9EEuw/eEM92SrsT19Yc2jgB1u1hsjqDtH02c3Drw==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-template-literals": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.16.7.tgz", - "integrity": 
"sha512-VwbkDDUeenlIjmfNeDX/V0aWrQH2QiVyJtwymVQSzItFDTpxfyJh3EVaQiS0rIN/CqbLGr0VcGmuwyTdZtdIsA==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-typeof-symbol": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.16.7.tgz", - "integrity": "sha512-p2rOixCKRJzpg9JB4gjnG4gjWkWa89ZoYUnl9snJ1cWIcTH/hvxZqfO+WjG6T8DRBpctEol5jw1O5rA8gkCokQ==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-escapes": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.16.7.tgz", - "integrity": "sha512-TAV5IGahIz3yZ9/Hfv35TV2xEm+kaBDaZQCn2S/hG9/CZ0DktxJv9eKfPc7yYCvOYR4JGx1h8C+jcSOvgaaI/Q==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-regex": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.16.7.tgz", - "integrity": "sha512-oC5tYYKw56HO75KZVLQ+R/Nl3Hro9kf8iG0hXoaHP7tjAyCpvqBiSNe6vGrZni1Z6MggmUOC6A7VP7AVmw225Q==", - "dev": true, - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-env": { - "version": "7.16.11", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.16.11.tgz", - "integrity": "sha512-qcmWG8R7ZW6WBRPZK//y+E3Cli151B20W1Rv7ln27vuPaXU/8TKms6jFdiJtF7UDTxcrb7mZd88tAeK9LjdT8g==", - "dev": true, - "dependencies": { - "@babel/compat-data": "^7.16.8", - "@babel/helper-compilation-targets": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/helper-validator-option": "^7.16.7", - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.16.7", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.16.7", - "@babel/plugin-proposal-async-generator-functions": "^7.16.8", - "@babel/plugin-proposal-class-properties": "^7.16.7", - "@babel/plugin-proposal-class-static-block": "^7.16.7", - "@babel/plugin-proposal-dynamic-import": "^7.16.7", - "@babel/plugin-proposal-export-namespace-from": "^7.16.7", - "@babel/plugin-proposal-json-strings": "^7.16.7", - "@babel/plugin-proposal-logical-assignment-operators": "^7.16.7", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.7", - "@babel/plugin-proposal-numeric-separator": "^7.16.7", - "@babel/plugin-proposal-object-rest-spread": "^7.16.7", - "@babel/plugin-proposal-optional-catch-binding": "^7.16.7", - "@babel/plugin-proposal-optional-chaining": "^7.16.7", - "@babel/plugin-proposal-private-methods": "^7.16.11", - "@babel/plugin-proposal-private-property-in-object": "^7.16.7", - "@babel/plugin-proposal-unicode-property-regex": "^7.16.7", - "@babel/plugin-syntax-async-generators": "^7.8.4", - "@babel/plugin-syntax-class-properties": "^7.12.13", - 
"@babel/plugin-syntax-class-static-block": "^7.14.5", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3", - "@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.10.4", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", - "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5", - "@babel/plugin-syntax-top-level-await": "^7.14.5", - "@babel/plugin-transform-arrow-functions": "^7.16.7", - "@babel/plugin-transform-async-to-generator": "^7.16.8", - "@babel/plugin-transform-block-scoped-functions": "^7.16.7", - "@babel/plugin-transform-block-scoping": "^7.16.7", - "@babel/plugin-transform-classes": "^7.16.7", - "@babel/plugin-transform-computed-properties": "^7.16.7", - "@babel/plugin-transform-destructuring": "^7.16.7", - "@babel/plugin-transform-dotall-regex": "^7.16.7", - "@babel/plugin-transform-duplicate-keys": "^7.16.7", - "@babel/plugin-transform-exponentiation-operator": "^7.16.7", - "@babel/plugin-transform-for-of": "^7.16.7", - "@babel/plugin-transform-function-name": "^7.16.7", - "@babel/plugin-transform-literals": "^7.16.7", - "@babel/plugin-transform-member-expression-literals": "^7.16.7", - "@babel/plugin-transform-modules-amd": "^7.16.7", - "@babel/plugin-transform-modules-commonjs": "^7.16.8", - "@babel/plugin-transform-modules-systemjs": "^7.16.7", - "@babel/plugin-transform-modules-umd": "^7.16.7", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.16.8", - "@babel/plugin-transform-new-target": "^7.16.7", - "@babel/plugin-transform-object-super": "^7.16.7", - "@babel/plugin-transform-parameters": "^7.16.7", - "@babel/plugin-transform-property-literals": "^7.16.7", - "@babel/plugin-transform-regenerator": "^7.16.7", - "@babel/plugin-transform-reserved-words": "^7.16.7", - "@babel/plugin-transform-shorthand-properties": "^7.16.7", - "@babel/plugin-transform-spread": "^7.16.7", - "@babel/plugin-transform-sticky-regex": "^7.16.7", - "@babel/plugin-transform-template-literals": "^7.16.7", - "@babel/plugin-transform-typeof-symbol": "^7.16.7", - "@babel/plugin-transform-unicode-escapes": "^7.16.7", - "@babel/plugin-transform-unicode-regex": "^7.16.7", - "@babel/preset-modules": "^0.1.5", - "@babel/types": "^7.16.8", - "babel-plugin-polyfill-corejs2": "^0.3.0", - "babel-plugin-polyfill-corejs3": "^0.5.0", - "babel-plugin-polyfill-regenerator": "^0.3.0", - "core-js-compat": "^3.20.2", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-env/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/preset-modules": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.5.tgz", - "integrity": "sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.0.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", 
- "@babel/plugin-transform-dotall-regex": "^7.4.4", - "@babel/types": "^7.4.4", - "esutils": "^2.0.2" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/runtime": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.16.7.tgz", - "integrity": "sha512-9E9FJowqAsytyOY6LG+1KuueckRL+aQW+mKvXRXnuFGyRAyepJPmEo9vgMfXUA6O9u3IeEdv9MAkppFcaQwogQ==", - "dev": true, - "dependencies": { - "regenerator-runtime": "^0.13.4" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/template": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.16.7.tgz", - "integrity": "sha512-I8j/x8kHUrbYRTUxXrrMbfCa7jxkE7tZre39x3kjr9hvI82cK1FfqLygotcWN5kdPGWcLdWMHpSBavse5tWw3w==", - "dev": true, - "dependencies": { - "@babel/code-frame": "^7.16.7", - "@babel/parser": "^7.16.7", - "@babel/types": "^7.16.7" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/traverse": { - "version": "7.17.3", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.17.3.tgz", - "integrity": "sha512-5irClVky7TxRWIRtxlh2WPUUOLhcPN06AGgaQSB8AEwuyEBgJVuJ5imdHm5zxk8w0QS5T+tDfnDxAlhWjpb7cw==", - "dev": true, - "dependencies": { - "@babel/code-frame": "^7.16.7", - "@babel/generator": "^7.17.3", - "@babel/helper-environment-visitor": "^7.16.7", - "@babel/helper-function-name": "^7.16.7", - "@babel/helper-hoist-variables": "^7.16.7", - "@babel/helper-split-export-declaration": "^7.16.7", - "@babel/parser": "^7.17.3", - "@babel/types": "^7.17.0", - "debug": "^4.1.0", - "globals": "^11.1.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/traverse/node_modules/@babel/generator": { - "version": "7.17.7", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.17.7.tgz", - "integrity": "sha512-oLcVCTeIFadUoArDTwpluncplrYBmTCCZZgXCbgNGvOBBiSDDK3eWO4b/+eOTli5tKv1lg+a5/NAXg+nTcei1w==", - "dev": true, - "dependencies": { - "@babel/types": "^7.17.0", - "jsesc": "^2.5.1", - "source-map": "^0.5.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/traverse/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/@babel/traverse/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/@babel/traverse/node_modules/source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/@babel/types": { - "version": "7.17.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.17.0.tgz", - "integrity": "sha512-TmKSNO4D5rzhL5bjWFcVHHLETzfQ/AmbKpKPOSjlP0WoHZ6L911fgoOKY4Alp/emzG4cHJdyN49zpgkbXFEHHw==", - "dev": true, - "dependencies": { - "@babel/helper-validator-identifier": "^7.16.7", - "to-fast-properties": "^2.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@colors/colors": { - 
"version": "1.5.0", - "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", - "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", - "dev": true, - "engines": { - "node": ">=0.1.90" - } - }, - "node_modules/@csstools/postcss-progressive-custom-properties": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-progressive-custom-properties/-/postcss-progressive-custom-properties-1.3.0.tgz", - "integrity": "sha512-ASA9W1aIy5ygskZYuWams4BzafD12ULvSypmaLJT2jvQ8G0M3I8PRQhC0h7mG0Z3LI05+agZjqSR9+K9yaQQjA==", - "dev": true, - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.3" - } - }, - "node_modules/@develar/schema-utils": { - "version": "2.6.5", - "resolved": "https://registry.npmjs.org/@develar/schema-utils/-/schema-utils-2.6.5.tgz", - "integrity": "sha512-0cp4PsWQ/9avqTVMCtZ+GirikIA36ikvjtHweU4/j8yLtgObI0+JUPhYFScgwlteveGB1rt3Cm8UhN04XayDig==", - "dev": true, - "dependencies": { - "ajv": "^6.12.0", - "ajv-keywords": "^3.4.1" - }, - "engines": { - "node": ">= 8.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/@discoveryjs/json-ext": { - "version": "0.5.6", - "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.6.tgz", - "integrity": "sha512-ws57AidsDvREKrZKYffXddNkyaF14iHNHm8VQnZH6t99E8gczjNN0GpvcGny0imC80yQ0tHz1xVUKk/KFQSUyA==", - "dev": true, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/@electron/get": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/@electron/get/-/get-1.14.1.tgz", - "integrity": "sha512-BrZYyL/6m0ZXz/lDxy/nlVhQz+WF+iPS6qXolEU8atw7h6v1aYkjwJZ63m+bJMBTxDE66X+r2tPS4a/8C82sZw==", - "dev": true, - "dependencies": { - "debug": "^4.1.1", - "env-paths": "^2.2.0", - "fs-extra": "^8.1.0", - "got": "^9.6.0", - "progress": "^2.0.3", - "semver": "^6.2.0", - "sumchecker": "^3.0.1" - }, - "engines": { - "node": ">=8.6" - }, - "optionalDependencies": { - "global-agent": "^3.0.0", - "global-tunnel-ng": "^2.7.1" - } - }, - "node_modules/@electron/get/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/@electron/get/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/@electron/get/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@electron/universal": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@electron/universal/-/universal-1.0.5.tgz", - "integrity": "sha512-zX9O6+jr2NMyAdSkwEUlyltiI4/EBLu2Ls/VD3pUQdi3cAYeYfdQnT2AJJ38HE4QxLccbU13LSpccw1IWlkyag==", - "dev": true, - "dependencies": { - "@malept/cross-spawn-promise": 
"^1.1.0", - "asar": "^3.0.3", - "debug": "^4.3.1", - "dir-compare": "^2.4.0", - "fs-extra": "^9.0.1" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/@electron/universal/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/@electron/universal/node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "dev": true, - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@electron/universal/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@electron/universal/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/@electron/universal/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@gar/promisify": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@gar/promisify/-/promisify-1.1.3.tgz", - "integrity": "sha512-k2Ty1JcVojjJFwrg/ThKi2ujJ7XNLYaFGNB/bWT9wGR+oSMJHMa5w+CUq6p/pVrKeNNgA7pCqEcjSnHVoqJQFw==", - "dev": true - }, - "node_modules/@istanbuljs/load-nyc-config": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", - "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", - "dev": true, - "dependencies": { - "camelcase": "^5.3.1", - "find-up": "^4.1.0", - "get-package-type": "^0.1.0", - "js-yaml": "^3.13.1", - "resolve-from": "^5.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/@istanbuljs/schema": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", - "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/@jridgewell/resolve-uri": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.0.5.tgz", - "integrity": "sha512-VPeQ7+wH0itvQxnG+lIzWgkysKIr3L9sslimFW55rHMdGu/qCQ5z5h9zq4gI8uBtqkpHhsF4Z/OwExufUCThew==", - "dev": true, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.11", - 
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.11.tgz", - "integrity": "sha512-Fg32GrJo61m+VqYSdRSjRXMjQ06j8YIYfcTqndLYVAaHmroZHLJZCydsWBOTDqXS2v+mjxohBWEMfg97GXmYQg==", - "dev": true - }, - "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.4", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.4.tgz", - "integrity": "sha512-vFv9ttIedivx0ux3QSjhgtCVjPZd5l46ZOMDSCwnH1yUO2e964gO8LZGyv2QkqcgR6TnBU1v+1IFqmeoG+0UJQ==", - "dev": true, - "dependencies": { - "@jridgewell/resolve-uri": "^3.0.3", - "@jridgewell/sourcemap-codec": "^1.4.10" - } - }, - "node_modules/@malept/cross-spawn-promise": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@malept/cross-spawn-promise/-/cross-spawn-promise-1.1.1.tgz", - "integrity": "sha512-RTBGWL5FWQcg9orDOCcp4LvItNzUPcyEU9bwaeJX0rJ1IQxzucC48Y0/sQLp/g6t99IQgAlGIaesJS+gTn7tVQ==", - "dev": true, - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/malept" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/subscription/pkg/npm-.malept-cross-spawn-promise?utm_medium=referral&utm_source=npm_fund" - } - ], - "dependencies": { - "cross-spawn": "^7.0.1" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/@malept/flatpak-bundler": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/@malept/flatpak-bundler/-/flatpak-bundler-0.4.0.tgz", - "integrity": "sha512-9QOtNffcOF/c1seMCDnjckb3R9WHcG34tky+FHpNKKCW0wc/scYLwMtO+ptyGUfMW0/b/n4qRiALlaFHc9Oj7Q==", - "dev": true, - "dependencies": { - "debug": "^4.1.1", - "fs-extra": "^9.0.0", - "lodash": "^4.17.15", - "tmp-promise": "^3.0.2" - }, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@malept/flatpak-bundler/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/@malept/flatpak-bundler/node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "dev": true, - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@malept/flatpak-bundler/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@malept/flatpak-bundler/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/@malept/flatpak-bundler/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": 
"sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@ngtools/webpack": { - "version": "13.2.6", - "resolved": "https://registry.npmjs.org/@ngtools/webpack/-/webpack-13.2.6.tgz", - "integrity": "sha512-N8SvRV91+/57TcAfbghc0k0tKCukw/7KqbDaLPAQTGFekJ4xMGT3elMzOyBXTH3Hvp5HL8/hiBt2tG04qiMf+w==", - "dev": true, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0", - "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", - "yarn": ">= 1.13.0" - }, - "peerDependencies": { - "@angular/compiler-cli": "^13.0.0", - "typescript": ">=4.4.3 <4.6", - "webpack": "^5.30.0" - } - }, - "node_modules/@nodelib/fs.scandir": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", - "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", - "dev": true, - "dependencies": { - "@nodelib/fs.stat": "2.0.5", - "run-parallel": "^1.1.9" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.stat": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", - "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", - "dev": true, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.walk": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", - "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", - "dev": true, - "dependencies": { - "@nodelib/fs.scandir": "2.1.5", - "fastq": "^1.6.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@npmcli/fs": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-1.1.1.tgz", - "integrity": "sha512-8KG5RD0GVP4ydEzRn/I4BNDuxDtqVbOdm8675T49OIG/NGhaK0pjPX7ZcDlvKYbA+ulvVK3ztfcF4uBdOxuJbQ==", - "dev": true, - "dependencies": { - "@gar/promisify": "^1.0.1", - "semver": "^7.3.5" - } - }, - "node_modules/@npmcli/git": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@npmcli/git/-/git-2.1.0.tgz", - "integrity": "sha512-/hBFX/QG1b+N7PZBFs0bi+evgRZcK9nWBxQKZkGoXUT5hJSwl5c4d7y8/hm+NQZRPhQ67RzFaj5UM9YeyKoryw==", - "dev": true, - "dependencies": { - "@npmcli/promise-spawn": "^1.3.2", - "lru-cache": "^6.0.0", - "mkdirp": "^1.0.4", - "npm-pick-manifest": "^6.1.1", - "promise-inflight": "^1.0.1", - "promise-retry": "^2.0.1", - "semver": "^7.3.5", - "which": "^2.0.2" - } - }, - "node_modules/@npmcli/installed-package-contents": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/@npmcli/installed-package-contents/-/installed-package-contents-1.0.7.tgz", - "integrity": "sha512-9rufe0wnJusCQoLpV9ZPKIVP55itrM5BxOXs10DmdbRfgWtHy1LDyskbwRnBghuB0PrF7pNPOqREVtpz4HqzKw==", - "dev": true, - "dependencies": { - "npm-bundled": "^1.1.1", - "npm-normalize-package-bin": "^1.0.1" - }, - "bin": { - "installed-package-contents": "index.js" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/@npmcli/move-file": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@npmcli/move-file/-/move-file-1.1.2.tgz", - "integrity": "sha512-1SUf/Cg2GzGDyaf15aR9St9TWlb+XvbZXWpDx8YKs7MLzMH/BCeopv+y9vzrzgkfykCGuWOlSu3mZhj2+FQcrg==", - "deprecated": "This functionality has been moved to @npmcli/fs", - "dev": true, - "dependencies": { - "mkdirp": "^1.0.4", - "rimraf": "^3.0.2" - }, - 
"engines": { - "node": ">=10" - } - }, - "node_modules/@npmcli/node-gyp": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@npmcli/node-gyp/-/node-gyp-1.0.3.tgz", - "integrity": "sha512-fnkhw+fmX65kiLqk6E3BFLXNC26rUhK90zVwe2yncPliVT/Qos3xjhTLE59Df8KnPlcwIERXKVlU1bXoUQ+liA==", - "dev": true - }, - "node_modules/@npmcli/promise-spawn": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/@npmcli/promise-spawn/-/promise-spawn-1.3.2.tgz", - "integrity": "sha512-QyAGYo/Fbj4MXeGdJcFzZ+FkDkomfRBrPM+9QYJSg+PxgAUL+LU3FneQk37rKR2/zjqkCV1BLHccX98wRXG3Sg==", - "dev": true, - "dependencies": { - "infer-owner": "^1.0.4" - } - }, - "node_modules/@npmcli/run-script": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@npmcli/run-script/-/run-script-2.0.0.tgz", - "integrity": "sha512-fSan/Pu11xS/TdaTpTB0MRn9guwGU8dye+x56mEVgBEd/QsybBbYcAL0phPXi8SGWFEChkQd6M9qL4y6VOpFig==", - "dev": true, - "dependencies": { - "@npmcli/node-gyp": "^1.0.2", - "@npmcli/promise-spawn": "^1.3.2", - "node-gyp": "^8.2.0", - "read-package-json-fast": "^2.0.1" - } - }, - "node_modules/@schematics/angular": { - "version": "13.2.6", - "resolved": "https://registry.npmjs.org/@schematics/angular/-/angular-13.2.6.tgz", - "integrity": "sha512-8NzHMX9+FSgaB0lJYxlTJv9OcBuolwZJqo9M/yX3RPSqSHghA33jWwgVbV551hBJOpbVEePerG1DQkIC99DXKA==", - "dev": true, - "dependencies": { - "@angular-devkit/core": "13.2.6", - "@angular-devkit/schematics": "13.2.6", - "jsonc-parser": "3.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.15.0 || >=16.10.0", - "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", - "yarn": ">= 1.13.0" - } - }, - "node_modules/@sindresorhus/is": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz", - "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/@socket.io/base64-arraybuffer": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@socket.io/base64-arraybuffer/-/base64-arraybuffer-1.0.2.tgz", - "integrity": "sha512-dOlCBKnDw4iShaIsH/bxujKTM18+2TOAsYz+KSc11Am38H4q5Xw8Bbz97ZYdrVNM+um3p7w86Bvvmcn9q+5+eQ==", - "dev": true, - "engines": { - "node": ">= 0.6.0" - } - }, - "node_modules/@szmarczak/http-timer": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz", - "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==", - "dev": true, - "dependencies": { - "defer-to-connect": "^1.0.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/@tootallnate/once": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-1.1.2.tgz", - "integrity": "sha512-RbzJvlNzmRq5c3O09UipeuXno4tA1FE6ikOjxZK0tuxVv3412l64l5t1W5pj4+rJq9vpkm/kwiR07aZXnsKPxw==", - "dev": true, - "engines": { - "node": ">= 6" - } - }, - "node_modules/@types/body-parser": { - "version": "1.19.2", - "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz", - "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==", - "dev": true, - "dependencies": { - "@types/connect": "*", - "@types/node": "*" - } - }, - "node_modules/@types/bonjour": { - "version": "3.5.10", - "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.10.tgz", - "integrity": 
"sha512-p7ienRMiS41Nu2/igbJxxLDWrSZ0WxM8UQgCeO9KhoVF7cOVFkrKsiDr1EsJIla8vV3oEEjGcz11jc5yimhzZw==", - "dev": true, - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/component-emitter": { - "version": "1.2.11", - "resolved": "https://registry.npmjs.org/@types/component-emitter/-/component-emitter-1.2.11.tgz", - "integrity": "sha512-SRXjM+tfsSlA9VuG8hGO2nft2p8zjXCK1VcC6N4NXbBbYbSia9kzCChYQajIjzIqOOOuh5Ock6MmV2oux4jDZQ==", - "dev": true - }, - "node_modules/@types/connect": { - "version": "3.4.35", - "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz", - "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==", - "dev": true, - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/connect-history-api-fallback": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.3.5.tgz", - "integrity": "sha512-h8QJa8xSb1WD4fpKBDcATDNGXghFj6/3GRWG6dhmRcu0RX1Ubasur2Uvx5aeEwlf0MwblEC2bMzzMQntxnw/Cw==", - "dev": true, - "dependencies": { - "@types/express-serve-static-core": "*", - "@types/node": "*" - } - }, - "node_modules/@types/cookie": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.4.1.tgz", - "integrity": "sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q==", - "dev": true - }, - "node_modules/@types/cors": { - "version": "2.8.12", - "resolved": "https://registry.npmjs.org/@types/cors/-/cors-2.8.12.tgz", - "integrity": "sha512-vt+kDhq/M2ayberEtJcIN/hxXy1Pk+59g2FV/ZQceeaTyCtCucjL2Q7FXlFjtWn4n15KCr1NE2lNNFhp0lEThw==", - "dev": true - }, - "node_modules/@types/debug": { - "version": "4.1.7", - "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.7.tgz", - "integrity": "sha512-9AonUzyTjXXhEOa0DnqpzZi6VHlqKMswga9EXjpXnnqxwLtdvPPtlO8evrI5D9S6asFRCQ6v+wpiUKbw+vKqyg==", - "dev": true, - "dependencies": { - "@types/ms": "*" - } - }, - "node_modules/@types/eslint": { - "version": "8.4.1", - "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.4.1.tgz", - "integrity": "sha512-GE44+DNEyxxh2Kc6ro/VkIj+9ma0pO0bwv9+uHSyBrikYOHr8zYcdPvnBOp1aw8s+CjRvuSx7CyWqRrNFQ59mA==", - "dev": true, - "dependencies": { - "@types/estree": "*", - "@types/json-schema": "*" - } - }, - "node_modules/@types/eslint-scope": { - "version": "3.7.3", - "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.3.tgz", - "integrity": "sha512-PB3ldyrcnAicT35TWPs5IcwKD8S333HMaa2VVv4+wdvebJkjWuW/xESoB8IwRcog8HYVYamb1g/R31Qv5Bx03g==", - "dev": true, - "dependencies": { - "@types/eslint": "*", - "@types/estree": "*" - } - }, - "node_modules/@types/estree": { - "version": "0.0.50", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-0.0.50.tgz", - "integrity": "sha512-C6N5s2ZFtuZRj54k2/zyRhNDjJwwcViAM3Nbm8zjBpbqAdZ00mr0CFxvSKeO8Y/e03WVFLpQMdHYVfUd6SB+Hw==", - "dev": true - }, - "node_modules/@types/express": { - "version": "4.17.13", - "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.13.tgz", - "integrity": "sha512-6bSZTPaTIACxn48l50SR+axgrqm6qXFIxrdAKaG6PaJk3+zuUr35hBlgT7vOmJcum+OEaIBLtHV/qloEAFITeA==", - "dev": true, - "dependencies": { - "@types/body-parser": "*", - "@types/express-serve-static-core": "^4.17.18", - "@types/qs": "*", - "@types/serve-static": "*" - } - }, - "node_modules/@types/express-serve-static-core": { - "version": "4.17.28", - "resolved": 
"https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.28.tgz", - "integrity": "sha512-P1BJAEAW3E2DJUlkgq4tOL3RyMunoWXqbSCygWo5ZIWTjUgN1YnaXWW4VWl/oc8vs/XoYibEGBKP0uZyF4AHig==", - "dev": true, - "dependencies": { - "@types/node": "*", - "@types/qs": "*", - "@types/range-parser": "*" - } - }, - "node_modules/@types/fs-extra": { - "version": "9.0.13", - "resolved": "https://registry.npmjs.org/@types/fs-extra/-/fs-extra-9.0.13.tgz", - "integrity": "sha512-nEnwB++1u5lVDM2UI4c1+5R+FYaKfaAzS4OococimjVm3nQw3TuzH5UNsocrcTBbhnerblyHj4A49qXbIiZdpA==", - "dev": true, - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/glob": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.2.0.tgz", - "integrity": "sha512-ZUxbzKl0IfJILTS6t7ip5fQQM/J3TJYubDm3nMbgubNNYS62eXeUpoLUC8/7fJNiFYHTrGPQn7hspDUzIHX3UA==", - "dev": true, - "optional": true, - "dependencies": { - "@types/minimatch": "*", - "@types/node": "*" - } - }, - "node_modules/@types/http-proxy": { - "version": "1.17.8", - "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.8.tgz", - "integrity": "sha512-5kPLG5BKpWYkw/LVOGWpiq3nEVqxiN32rTgI53Sk12/xHFQ2rG3ehI9IO+O3W2QoKeyB92dJkoka8SUm6BX1pA==", - "dev": true, - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/jasmine": { - "version": "3.10.4", - "resolved": "https://registry.npmjs.org/@types/jasmine/-/jasmine-3.10.4.tgz", - "integrity": "sha512-7UAoOBYJpaZKmpJ8esMUIMl6pSdReliPQjviOOikWdQ7eVr4Hq8YbpeXyfXFKflZv8ymDFhYdTjVabbP2s4K4Q==", - "dev": true - }, - "node_modules/@types/json-schema": { - "version": "7.0.10", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.10.tgz", - "integrity": "sha512-BLO9bBq59vW3fxCpD4o0N4U+DXsvwvIcl+jofw0frQo/GrBFC+/jRZj1E7kgp6dvTyNmA4y6JCV5Id/r3mNP5A==", - "dev": true - }, - "node_modules/@types/mime": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz", - "integrity": "sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw==", - "dev": true - }, - "node_modules/@types/minimatch": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.5.tgz", - "integrity": "sha512-Klz949h02Gz2uZCMGwDUSDS1YBlTdDDgbWHi+81l29tQALUtvz4rAYi5uoVhE5Lagoq6DeqAUlbrHvW/mXDgdQ==", - "dev": true, - "optional": true - }, - "node_modules/@types/ms": { - "version": "0.7.31", - "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.31.tgz", - "integrity": "sha512-iiUgKzV9AuaEkZqkOLDIvlQiL6ltuZd9tGcW3gwpnX8JbuiuhFlEGmmFXEXkN50Cvq7Os88IY2v0dkDqXYWVgA==", - "dev": true - }, - "node_modules/@types/node": { - "version": "12.20.47", - "resolved": "https://registry.npmjs.org/@types/node/-/node-12.20.47.tgz", - "integrity": "sha512-BzcaRsnFuznzOItW1WpQrDHM7plAa7GIDMZ6b5pnMbkqEtM/6WCOhvZar39oeMQP79gwvFUWjjptE7/KGcNqFg==", - "dev": true - }, - "node_modules/@types/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==", - "dev": true - }, - "node_modules/@types/plist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/plist/-/plist-3.0.2.tgz", - "integrity": "sha512-ULqvZNGMv0zRFvqn8/4LSPtnmN4MfhlPNtJCTpKuIIxGVGZ2rYWzFXrvEBoh9CVyqSE7D6YFRJ1hydLHI6kbWw==", - "dev": true, - "optional": true, - "dependencies": { - 
"@types/node": "*", - "xmlbuilder": ">=11.0.1" - } - }, - "node_modules/@types/plist/node_modules/xmlbuilder": { - "version": "15.1.1", - "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-15.1.1.tgz", - "integrity": "sha512-yMqGBqtXyeN1e3TGYvgNgDVZ3j84W4cwkOXQswghol6APgZWaff9lnbvN7MHYJOiXsvGPXtjTYJEiC9J2wv9Eg==", - "dev": true, - "optional": true, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/@types/qs": { - "version": "6.9.7", - "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz", - "integrity": "sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw==", - "dev": true - }, - "node_modules/@types/range-parser": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz", - "integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==", - "dev": true - }, - "node_modules/@types/retry": { - "version": "0.12.1", - "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.1.tgz", - "integrity": "sha512-xoDlM2S4ortawSWORYqsdU+2rxdh4LRW9ytc3zmT37RIKQh6IHyKwwtKhKis9ah8ol07DCkZxPt8BBvPjC6v4g==", - "dev": true - }, - "node_modules/@types/serve-index": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.1.tgz", - "integrity": "sha512-d/Hs3nWDxNL2xAczmOVZNj92YZCS6RGxfBPjKzuu/XirCgXdpKEb88dYNbrYGint6IVWLNP+yonwVAuRC0T2Dg==", - "dev": true, - "dependencies": { - "@types/express": "*" - } - }, - "node_modules/@types/serve-static": { - "version": "1.13.10", - "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.13.10.tgz", - "integrity": "sha512-nCkHGI4w7ZgAdNkrEu0bv+4xNV/XDqW+DydknebMOQwkpDGx8G+HTlj7R7ABI8i8nKxVw0wtKPi1D+lPOkh4YQ==", - "dev": true, - "dependencies": { - "@types/mime": "^1", - "@types/node": "*" - } - }, - "node_modules/@types/sockjs": { - "version": "0.3.33", - "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.33.tgz", - "integrity": "sha512-f0KEEe05NvUnat+boPTZ0dgaLZ4SfSouXUgv5noUiefG2ajgKjmETo9ZJyuqsl7dfl2aHlLJUiki6B4ZYldiiw==", - "dev": true, - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/verror": { - "version": "1.10.5", - "resolved": "https://registry.npmjs.org/@types/verror/-/verror-1.10.5.tgz", - "integrity": "sha512-9UjMCHK5GPgQRoNbqdLIAvAy0EInuiqbW0PBMtVP6B5B2HQJlvoJHM+KodPZMEjOa5VkSc+5LH7xy+cUzQdmHw==", - "dev": true, - "optional": true - }, - "node_modules/@types/ws": { - "version": "8.5.3", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.3.tgz", - "integrity": "sha512-6YOoWjruKj1uLf3INHH7D3qTXwFfEsg1kf3c0uDdSBJwfa/llkwIjrAGV7j7mVgGNbzTQ3HiHKKDXl6bJPD97w==", - "dev": true, - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/yargs": { - "version": "17.0.10", - "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.10.tgz", - "integrity": "sha512-gmEaFwpj/7f/ROdtIlci1R1VYU1J4j95m8T+Tj3iBgiBFKg1foE/PSl93bBd5T9LDXNPo8UlNN6W0qwD8O5OaA==", - "dev": true, - "dependencies": { - "@types/yargs-parser": "*" - } - }, - "node_modules/@types/yargs-parser": { - "version": "21.0.0", - "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.0.tgz", - "integrity": "sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA==", - "dev": true - }, - "node_modules/@types/yauzl": { - "version": "2.9.2", - "resolved": "https://registry.npmjs.org/@types/yauzl/-/yauzl-2.9.2.tgz", - "integrity": 
"sha512-8uALY5LTvSuHgloDVUvWP3pIauILm+8/0pDMokuDYIoNsOkSwd5AiHBTSEJjKTDcZr5z8UpgOWZkxBF4iJftoA==", - "dev": true, - "optional": true, - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@webassemblyjs/ast": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.1.tgz", - "integrity": "sha512-ukBh14qFLjxTQNTXocdyksN5QdM28S1CxHt2rdskFyL+xFV7VremuBLVbmCePj+URalXBENx/9Lm7lnhihtCSw==", - "dev": true, - "dependencies": { - "@webassemblyjs/helper-numbers": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1" - } - }, - "node_modules/@webassemblyjs/floating-point-hex-parser": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.1.tgz", - "integrity": "sha512-iGRfyc5Bq+NnNuX8b5hwBrRjzf0ocrJPI6GWFodBFzmFnyvrQ83SHKhmilCU/8Jv67i4GJZBMhEzltxzcNagtQ==", - "dev": true - }, - "node_modules/@webassemblyjs/helper-api-error": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.1.tgz", - "integrity": "sha512-RlhS8CBCXfRUR/cwo2ho9bkheSXG0+NwooXcc3PAILALf2QLdFyj7KGsKRbVc95hZnhnERon4kW/D3SZpp6Tcg==", - "dev": true - }, - "node_modules/@webassemblyjs/helper-buffer": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.1.tgz", - "integrity": "sha512-gwikF65aDNeeXa8JxXa2BAk+REjSyhrNC9ZwdT0f8jc4dQQeDQ7G4m0f2QCLPJiMTTO6wfDmRmj/pW0PsUvIcA==", - "dev": true - }, - "node_modules/@webassemblyjs/helper-numbers": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.1.tgz", - "integrity": "sha512-vDkbxiB8zfnPdNK9Rajcey5C0w+QJugEglN0of+kmO8l7lDb77AnlKYQF7aarZuCrv+l0UvqL+68gSDr3k9LPQ==", - "dev": true, - "dependencies": { - "@webassemblyjs/floating-point-hex-parser": "1.11.1", - "@webassemblyjs/helper-api-error": "1.11.1", - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@webassemblyjs/helper-wasm-bytecode": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.1.tgz", - "integrity": "sha512-PvpoOGiJwXeTrSf/qfudJhwlvDQxFgelbMqtq52WWiXC6Xgg1IREdngmPN3bs4RoO83PnL/nFrxucXj1+BX62Q==", - "dev": true - }, - "node_modules/@webassemblyjs/helper-wasm-section": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.1.tgz", - "integrity": "sha512-10P9No29rYX1j7F3EVPX3JvGPQPae+AomuSTPiF9eBQeChHI6iqjMIwR9JmOJXwpnn/oVGDk7I5IlskuMwU/pg==", - "dev": true, - "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-buffer": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/wasm-gen": "1.11.1" - } - }, - "node_modules/@webassemblyjs/ieee754": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.1.tgz", - "integrity": "sha512-hJ87QIPtAMKbFq6CGTkZYJivEwZDbQUgYd3qKSadTNOhVY7p+gfP6Sr0lLRVTaG1JjFj+r3YchoqRYxNH3M0GQ==", - "dev": true, - "dependencies": { - "@xtuc/ieee754": "^1.2.0" - } - }, - "node_modules/@webassemblyjs/leb128": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.1.tgz", - "integrity": "sha512-BJ2P0hNZ0u+Th1YZXJpzW6miwqQUGcIHT1G/sf72gLVD9DZ5AdYTqPNbHZh6K1M5VmKvFXwGSWZADz+qBWxeRw==", - "dev": true, - "dependencies": { - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@webassemblyjs/utf8": { - 
"version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.1.tgz", - "integrity": "sha512-9kqcxAEdMhiwQkHpkNiorZzqpGrodQQ2IGrHHxCy+Ozng0ofyMA0lTqiLkVs1uzTRejX+/O0EOT7KxqVPuXosQ==", - "dev": true - }, - "node_modules/@webassemblyjs/wasm-edit": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.1.tgz", - "integrity": "sha512-g+RsupUC1aTHfR8CDgnsVRVZFJqdkFHpsHMfJuWQzWU3tvnLC07UqHICfP+4XyL2tnr1amvl1Sdp06TnYCmVkA==", - "dev": true, - "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-buffer": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/helper-wasm-section": "1.11.1", - "@webassemblyjs/wasm-gen": "1.11.1", - "@webassemblyjs/wasm-opt": "1.11.1", - "@webassemblyjs/wasm-parser": "1.11.1", - "@webassemblyjs/wast-printer": "1.11.1" - } - }, - "node_modules/@webassemblyjs/wasm-gen": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.1.tgz", - "integrity": "sha512-F7QqKXwwNlMmsulj6+O7r4mmtAlCWfO/0HdgOxSklZfQcDu0TpLiD1mRt/zF25Bk59FIjEuGAIyn5ei4yMfLhA==", - "dev": true, - "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/ieee754": "1.11.1", - "@webassemblyjs/leb128": "1.11.1", - "@webassemblyjs/utf8": "1.11.1" - } - }, - "node_modules/@webassemblyjs/wasm-opt": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.1.tgz", - "integrity": "sha512-VqnkNqnZlU5EB64pp1l7hdm3hmQw7Vgqa0KF/KCNO9sIpI6Fk6brDEiX+iCOYrvMuBWDws0NkTOxYEb85XQHHw==", - "dev": true, - "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-buffer": "1.11.1", - "@webassemblyjs/wasm-gen": "1.11.1", - "@webassemblyjs/wasm-parser": "1.11.1" - } - }, - "node_modules/@webassemblyjs/wasm-parser": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.1.tgz", - "integrity": "sha512-rrBujw+dJu32gYB7/Lup6UhdkPx9S9SnobZzRVL7VcBH9Bt9bCBLEuX/YXOOtBsOZ4NQrRykKhffRWHvigQvOA==", - "dev": true, - "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-api-error": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/ieee754": "1.11.1", - "@webassemblyjs/leb128": "1.11.1", - "@webassemblyjs/utf8": "1.11.1" - } - }, - "node_modules/@webassemblyjs/wast-printer": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.1.tgz", - "integrity": "sha512-IQboUWM4eKzWW+N/jij2sRatKMh99QEelo3Eb2q0qXkvPRISAj8Qxtmw5itwqK+TTkBuUIE45AxYPToqPtL5gg==", - "dev": true, - "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@xtuc/ieee754": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", - "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", - "dev": true - }, - "node_modules/@xtuc/long": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", - "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", - "dev": true - }, - "node_modules/@yarnpkg/lockfile": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@yarnpkg/lockfile/-/lockfile-1.1.0.tgz", - "integrity": 
"sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ==", - "dev": true - }, - "node_modules/7zip-bin": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/7zip-bin/-/7zip-bin-5.1.1.tgz", - "integrity": "sha512-sAP4LldeWNz0lNzmTird3uWfFDWWTeg6V/MsmyyLR9X1idwKBWIgt/ZvinqQldJm3LecKEs1emkbquO6PCiLVQ==", - "dev": true - }, - "node_modules/abab": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/abab/-/abab-2.0.5.tgz", - "integrity": "sha512-9IK9EadsbHo6jLWIpxpR6pL0sazTXV6+SQv25ZB+F7Bj9mJNaOc4nCRabwd5M/JwmUa8idz6Eci6eKfJryPs6Q==", - "dev": true - }, - "node_modules/abbrev": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", - "dev": true - }, - "node_modules/accepts": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", - "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", - "dependencies": { - "mime-types": "~2.1.34", - "negotiator": "0.6.3" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/acorn": { - "version": "8.7.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.7.0.tgz", - "integrity": "sha512-V/LGr1APy+PXIwKebEWrkZPwoeoF+w1jiOBUmuxuiUIaOHtob8Qc9BTrYo7VuI5fR8tqsy+buA2WFooR5olqvQ==", - "dev": true, - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/acorn-import-assertions": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.8.0.tgz", - "integrity": "sha512-m7VZ3jwz4eK6A4Vtt8Ew1/mNbP24u0FhdyfA7fSvnJR6LMdfOYnmuIrrJAgrYfYJ10F/otaHTtrtrtmHdMNzEw==", - "dev": true, - "peerDependencies": { - "acorn": "^8" - } - }, - "node_modules/adjust-sourcemap-loader": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/adjust-sourcemap-loader/-/adjust-sourcemap-loader-4.0.0.tgz", - "integrity": "sha512-OXwN5b9pCUXNQHJpwwD2qP40byEmSgzj8B4ydSN0uMNYWiFmJ6x6KwUllMmfk8Rwu/HJDFR7U8ubsWBoN0Xp0A==", - "dev": true, - "dependencies": { - "loader-utils": "^2.0.0", - "regex-parser": "^2.2.11" - }, - "engines": { - "node": ">=8.9" - } - }, - "node_modules/adjust-sourcemap-loader/node_modules/loader-utils": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.2.tgz", - "integrity": "sha512-TM57VeHptv569d/GKh6TAYdzKblwDNiumOdkFnejjD0XwTH87K90w3O7AiJRqdQoXygvi1VQTJTLGhJl7WqA7A==", - "dev": true, - "dependencies": { - "big.js": "^5.2.2", - "emojis-list": "^3.0.0", - "json5": "^2.1.2" - }, - "engines": { - "node": ">=8.9.0" - } - }, - "node_modules/agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", - "dev": true, - "dependencies": { - "debug": "4" - }, - "engines": { - "node": ">= 6.0.0" - } - }, - "node_modules/agent-base/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - 
"node_modules/agent-base/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/agentkeepalive": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.2.1.tgz", - "integrity": "sha512-Zn4cw2NEqd+9fiSVWMscnjyQ1a8Yfoc5oBajLeo5w+YBHgDUcEBY2hS4YpTz6iN5f/2zQiktcuM6tS8x1p9dpA==", - "dev": true, - "dependencies": { - "debug": "^4.1.0", - "depd": "^1.1.2", - "humanize-ms": "^1.2.1" - }, - "engines": { - "node": ">= 8.0.0" - } - }, - "node_modules/agentkeepalive/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/agentkeepalive/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/aggregate-error": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", - "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", - "dev": true, - "dependencies": { - "clean-stack": "^2.0.0", - "indent-string": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/ajv-formats": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", - "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", - "dev": true, - "dependencies": { - "ajv": "^8.0.0" - }, - "peerDependencies": { - "ajv": "^8.0.0" - }, - "peerDependenciesMeta": { - "ajv": { - "optional": true - } - } - }, - "node_modules/ajv-formats/node_modules/ajv": { - "version": "8.10.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.10.0.tgz", - "integrity": "sha512-bzqAEZOjkrUMl2afH8dknrq5KEk2SrwdBROR+vH1EKVQTqaUbJVPdc/gEdggTMM0Se+s+Ja4ju4TlNcStKl2Hw==", - "dev": true, - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/ajv-formats/node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", - "dev": true - }, - "node_modules/ajv-keywords": { - "version": "3.5.2", - 
"resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "dev": true, - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/ansi-align": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", - "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", - "dev": true, - "dependencies": { - "string-width": "^4.1.0" - } - }, - "node_modules/ansi-colors": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", - "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/ansi-escapes": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", - "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", - "dev": true, - "dependencies": { - "type-fest": "^0.21.3" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ansi-html-community": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", - "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", - "dev": true, - "engines": [ - "node >= 0.8.0" - ], - "bin": { - "ansi-html": "bin/ansi-html" - } - }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/anymatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", - "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", - "dev": true, - "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/app-builder-bin": { - "version": "3.7.1", - "resolved": "https://registry.npmjs.org/app-builder-bin/-/app-builder-bin-3.7.1.tgz", - "integrity": "sha512-ql93vEUq6WsstGXD+SBLSIQw6SNnhbDEM0swzgugytMxLp3rT24Ag/jcC80ZHxiPRTdew1niuR7P3/FCrDqIjw==", - "dev": true - }, - "node_modules/app-builder-lib": { - "version": "22.14.13", - "resolved": "https://registry.npmjs.org/app-builder-lib/-/app-builder-lib-22.14.13.tgz", - "integrity": "sha512-SufmrtxU+D0Tn948fjEwAOlCN9757UXLkzzTWXMwZKR/5hisvgqeeBepWfphMIE6OkDGz0fbzEhL1P2Pty4XMg==", - "dev": true, - "dependencies": { - "@develar/schema-utils": "~2.6.5", - "@electron/universal": "1.0.5", - "@malept/flatpak-bundler": "^0.4.0", - "7zip-bin": "~5.1.1", - "async-exit-hook": "^2.0.1", - "bluebird-lst": "^1.0.9", - "builder-util": "22.14.13", - "builder-util-runtime": "8.9.2", - 
"chromium-pickle-js": "^0.2.0", - "debug": "^4.3.2", - "ejs": "^3.1.6", - "electron-osx-sign": "^0.5.0", - "electron-publish": "22.14.13", - "form-data": "^4.0.0", - "fs-extra": "^10.0.0", - "hosted-git-info": "^4.0.2", - "is-ci": "^3.0.0", - "isbinaryfile": "^4.0.8", - "js-yaml": "^4.1.0", - "lazy-val": "^1.0.5", - "minimatch": "^3.0.4", - "read-config-file": "6.2.0", - "sanitize-filename": "^1.6.3", - "semver": "^7.3.5", - "temp-file": "^3.4.0" - }, - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/app-builder-lib/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, - "node_modules/app-builder-lib/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/app-builder-lib/node_modules/form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", - "dev": true, - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/app-builder-lib/node_modules/fs-extra": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.1.tgz", - "integrity": "sha512-NbdoVMZso2Lsrn/QwLXOy6rm0ufY2zEOKCDzJR/0kBsb0E6qed0P3iYK+Ath3BfvXEeu4JhEtXLgILx5psUfag==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/app-builder-lib/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/app-builder-lib/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/app-builder-lib/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/app-builder-lib/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/aproba": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/aproba/-/aproba-2.0.0.tgz", - "integrity": "sha512-lYe4Gx7QT+MKGbDsA+Z+he/Wtef0BiwDOlK/XkBrdfsh9J/jPPXbX0tE9x9cl27Tmu5gg3QUbUrQYa/y+KOHPQ==", - "dev": true - }, - "node_modules/are-we-there-yet": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-3.0.0.tgz", - "integrity": "sha512-0GWpv50YSOcLXaN6/FAKY3vfRbllXWV2xvfA/oKJF8pzFhWXPV+yjhJXDBbjscDYowv7Yw1A3uigpzn5iEGTyw==", - "dev": true, - "dependencies": { - "delegates": "^1.0.0", - "readable-stream": "^3.6.0" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16" - } - }, - "node_modules/argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "dev": true, - "dependencies": { - "sprintf-js": "~1.0.2" - } - }, - "node_modules/array-flatten": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=" - }, - "node_modules/array-union": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-3.0.1.tgz", - "integrity": "sha512-1OvF9IbWwaeiM9VhzYXVQacMibxpXOMYVNIvMtKRyX9SImBXpKcFr8XvFDeEslCyuH/t6KRt7HEO94AlP8Iatw==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/asar": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/asar/-/asar-3.1.0.tgz", - "integrity": "sha512-vyxPxP5arcAqN4F/ebHd/HhwnAiZtwhglvdmc7BR2f0ywbVNTOpSeyhLDbGXtE/y58hv1oC75TaNIXutnsOZsQ==", - "deprecated": "Please use @electron/asar moving forward. 
There is no API change, just a package name change", - "dev": true, - "dependencies": { - "chromium-pickle-js": "^0.2.0", - "commander": "^5.0.0", - "glob": "^7.1.6", - "minimatch": "^3.0.4" - }, - "bin": { - "asar": "bin/asar.js" - }, - "engines": { - "node": ">=10.12.0" - }, - "optionalDependencies": { - "@types/glob": "^7.1.1" - } - }, - "node_modules/asar/node_modules/commander": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", - "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", - "dev": true, - "engines": { - "node": ">= 6" - } - }, - "node_modules/asn1": { - "version": "0.2.6", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", - "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", - "dependencies": { - "safer-buffer": "~2.1.0" - } - }, - "node_modules/assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/astral-regex": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", - "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==", - "dev": true, - "optional": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/async": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz", - "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==", - "dev": true, - "dependencies": { - "lodash": "^4.17.14" - } - }, - "node_modules/async-exit-hook": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/async-exit-hook/-/async-exit-hook-2.0.1.tgz", - "integrity": "sha512-NW2cX8m1Q7KPA7a5M2ULQeZ2wR5qI5PAbw5L0UOMxdioVk9PMZ0h1TmyZEkPYrCvYjDlFICusOu1dlEKAAeXBw==", - "dev": true, - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" - }, - "node_modules/at-least-node": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", - "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", - "dev": true, - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/atob": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz", - "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==", - "dev": true, - "bin": { - "atob": "bin/atob.js" - }, - "engines": { - "node": ">= 4.5.0" - } - }, - "node_modules/author-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/author-regex/-/author-regex-1.0.0.tgz", - "integrity": "sha1-0IiFvmubv5Q5/gh8dihyRfCoFFA=", - "dev": true, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/autoprefixer": { - "version": "10.4.4", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.4.tgz", - "integrity": "sha512-Tm8JxsB286VweiZ5F0anmbyGiNI3v3wGv3mz9W+cxEDYB/6jbnj6GM9H9mK3wIL8ftgl+C07Lcwb8PG5PCCPzA==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - 
"type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/autoprefixer" - } - ], - "dependencies": { - "browserslist": "^4.20.2", - "caniuse-lite": "^1.0.30001317", - "fraction.js": "^4.2.0", - "normalize-range": "^0.1.2", - "picocolors": "^1.0.0", - "postcss-value-parser": "^4.2.0" - }, - "bin": { - "autoprefixer": "bin/autoprefixer" - }, - "engines": { - "node": "^10 || ^12 || >=14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/aws-sign2": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=", - "engines": { - "node": "*" - } - }, - "node_modules/aws4": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", - "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==" - }, - "node_modules/babel-loader": { - "version": "8.2.3", - "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.2.3.tgz", - "integrity": "sha512-n4Zeta8NC3QAsuyiizu0GkmRcQ6clkV9WFUnUf1iXP//IeSKbWjofW3UHyZVwlOB4y039YQKefawyTn64Zwbuw==", - "dev": true, - "dependencies": { - "find-cache-dir": "^3.3.1", - "loader-utils": "^1.4.0", - "make-dir": "^3.1.0", - "schema-utils": "^2.6.5" - }, - "engines": { - "node": ">= 8.9" - }, - "peerDependencies": { - "@babel/core": "^7.0.0", - "webpack": ">=2" - } - }, - "node_modules/babel-loader/node_modules/json5": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", - "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", - "dev": true, - "dependencies": { - "minimist": "^1.2.0" - }, - "bin": { - "json5": "lib/cli.js" - } - }, - "node_modules/babel-loader/node_modules/loader-utils": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-1.4.0.tgz", - "integrity": "sha512-qH0WSMBtn/oHuwjy/NucEgbx5dbxxnxup9s4PVXJUDHZBQY+s0NWA9rJf53RBnQZxfch7euUui7hpoAPvALZdA==", - "dev": true, - "dependencies": { - "big.js": "^5.2.2", - "emojis-list": "^3.0.0", - "json5": "^1.0.1" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", - "dev": true, - "dependencies": { - "object.assign": "^4.1.0" - } - }, - "node_modules/babel-plugin-istanbul": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", - "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.0.0", - "@istanbuljs/load-nyc-config": "^1.0.0", - "@istanbuljs/schema": "^0.1.2", - "istanbul-lib-instrument": "^5.0.4", - "test-exclude": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/babel-plugin-polyfill-corejs2": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.1.tgz", - "integrity": "sha512-v7/T6EQcNfVLfcN2X8Lulb7DjprieyLWJK/zOWH5DUYcAgex9sP3h25Q+DLsX9TloXe3y1O8l2q2Jv9q8UVB9w==", - "dev": true, - "dependencies": { - "@babel/compat-data": "^7.13.11", - 
"@babel/helper-define-polyfill-provider": "^0.3.1", - "semver": "^6.1.1" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/babel-plugin-polyfill-corejs3": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.5.2.tgz", - "integrity": "sha512-G3uJih0XWiID451fpeFaYGVuxHEjzKTHtc9uGFEjR6hHrvNzeS/PX+LLLcetJcytsB5m4j+K3o/EpXJNb/5IEQ==", - "dev": true, - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.3.1", - "core-js-compat": "^3.21.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/babel-plugin-polyfill-regenerator": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.3.1.tgz", - "integrity": "sha512-Y2B06tvgHYt1x0yz17jGkGeeMr5FeKUu+ASJ+N6nB5lQ8Dapfg42i0OVrf8PNGJ3zKL4A23snMi1IRwrqqND7A==", - "dev": true, - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.3.1" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/base64id": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/base64id/-/base64id-2.0.0.tgz", - "integrity": "sha512-lGe34o6EHj9y3Kts9R4ZYs/Gr+6N7MCaMlIFA3F1R2O5/m7K06AxfSeO5530PEERE6/WyEg3lsuyw4GHlPZHog==", - "dev": true, - "engines": { - "node": "^4.5.0 || >= 5.9" - } - }, - "node_modules/batch": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", - "integrity": "sha1-3DQxT05nkxgJP8dgJyUl+UvyXBY=", - "dev": true - }, - "node_modules/bcrypt-pbkdf": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", - "dependencies": { - "tweetnacl": "^0.14.3" - } - }, - "node_modules/big.js": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", - "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - 
"node_modules/bl": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", - "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", - "dev": true, - "dependencies": { - "buffer": "^5.5.0", - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - } - }, - "node_modules/bluebird": { - "version": "3.7.2", - "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", - "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==", - "dev": true - }, - "node_modules/bluebird-lst": { - "version": "1.0.9", - "resolved": "https://registry.npmjs.org/bluebird-lst/-/bluebird-lst-1.0.9.tgz", - "integrity": "sha512-7B1Rtx82hjnSD4PGLAjVWeYH3tHAcVUmChh85a3lltKQm6FresXh9ErQo6oAv6CqxttczC3/kEg8SY5NluPuUw==", - "dev": true, - "dependencies": { - "bluebird": "^3.5.5" - } - }, - "node_modules/body-parser": { - "version": "1.19.2", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.2.tgz", - "integrity": "sha512-SAAwOxgoCKMGs9uUAUFHygfLAyaniaoun6I8mFY9pRAJL9+Kec34aU+oIjDhTycub1jozEfEwx1W1IuOYxVSFw==", - "dependencies": { - "bytes": "3.1.2", - "content-type": "~1.0.4", - "debug": "2.6.9", - "depd": "~1.1.2", - "http-errors": "1.8.1", - "iconv-lite": "0.4.24", - "on-finished": "~2.3.0", - "qs": "6.9.7", - "raw-body": "2.4.3", - "type-is": "~1.6.18" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/bonjour": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/bonjour/-/bonjour-3.5.0.tgz", - "integrity": "sha1-jokKGD2O6aI5OzhExpGkK897yfU=", - "dev": true, - "dependencies": { - "array-flatten": "^2.1.0", - "deep-equal": "^1.0.1", - "dns-equal": "^1.0.0", - "dns-txt": "^2.0.2", - "multicast-dns": "^6.0.1", - "multicast-dns-service-types": "^1.1.0" - } - }, - "node_modules/bonjour/node_modules/array-flatten": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", - "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==", - "dev": true - }, - "node_modules/boolbase": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24=", - "dev": true - }, - "node_modules/boolean": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/boolean/-/boolean-3.2.0.tgz", - "integrity": "sha512-d0II/GO9uf9lfUHH2BQsjxzRJZBdsjgsBiW4BvhWk/3qoKwQFjIDVN19PfX8F2D/r9PCMTtLWjYVCFrpeYUzsw==", - "dev": true, - "optional": true - }, - "node_modules/boxen": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-5.1.2.tgz", - "integrity": "sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ==", - "dev": true, - "dependencies": { - "ansi-align": "^3.0.0", - "camelcase": "^6.2.0", - "chalk": "^4.1.0", - "cli-boxes": "^2.2.1", - "string-width": "^4.2.2", - "type-fest": "^0.20.2", - "widest-line": "^3.1.0", - "wrap-ansi": "^7.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/boxen/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { 
- "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/boxen/node_modules/camelcase": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", - "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/boxen/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/boxen/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/boxen/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/boxen/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "dev": true, - "dependencies": { - "fill-range": "^7.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/browserslist": { - "version": "4.20.2", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.20.2.tgz", - "integrity": 
"sha512-CQOBCqp/9pDvDbx3xfMi+86pr4KXIf2FDkTTdeuYw8OxS9t898LA1Khq57gtufFILXpfgsSx5woNgsBgvGjpsA==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - } - ], - "dependencies": { - "caniuse-lite": "^1.0.30001317", - "electron-to-chromium": "^1.4.84", - "escalade": "^3.1.1", - "node-releases": "^2.0.2", - "picocolors": "^1.0.0" - }, - "bin": { - "browserslist": "cli.js" - }, - "engines": { - "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" - } - }, - "node_modules/buffer": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", - "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.1.13" - } - }, - "node_modules/buffer-alloc": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/buffer-alloc/-/buffer-alloc-1.2.0.tgz", - "integrity": "sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==", - "dev": true, - "dependencies": { - "buffer-alloc-unsafe": "^1.1.0", - "buffer-fill": "^1.0.0" - } - }, - "node_modules/buffer-alloc-unsafe": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", - "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==", - "dev": true - }, - "node_modules/buffer-crc32": { - "version": "0.2.13", - "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", - "integrity": "sha1-DTM+PwDqxQqhRUq9MO+MKl2ackI=", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/buffer-equal": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/buffer-equal/-/buffer-equal-1.0.0.tgz", - "integrity": "sha1-WWFrSYME1Var1GaWayLu2j7KX74=", - "dev": true, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/buffer-fill": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz", - "integrity": "sha1-+PeLdniYiO858gXNY39o5wISKyw=", - "dev": true - }, - "node_modules/buffer-from": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", - "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", - "dev": true - }, - "node_modules/buffer-indexof": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/buffer-indexof/-/buffer-indexof-1.1.1.tgz", - "integrity": "sha512-4/rOEg86jivtPTeOUUT61jJO1Ya1TrR/OkqCSZDyq84WJh3LuuiphBYJN+fm5xufIk4XAFcEwte/8WzC8If/1g==", - "dev": true - }, - "node_modules/builder-util": { - "version": "22.14.13", - "resolved": "https://registry.npmjs.org/builder-util/-/builder-util-22.14.13.tgz", - "integrity": "sha512-oePC/qrrUuerhmH5iaCJzPRAKlSBylrhzuAJmRQClTyWnZUv6jbaHh+VoHMbEiE661wrj2S2aV7/bQh12cj1OA==", - "dev": true, - "dependencies": { - "@types/debug": "^4.1.6", - "@types/fs-extra": "^9.0.11", - "7zip-bin": "~5.1.1", - "app-builder-bin": "3.7.1", - "bluebird-lst": "^1.0.9", - "builder-util-runtime": "8.9.2", - "chalk": 
"^4.1.1", - "cross-spawn": "^7.0.3", - "debug": "^4.3.2", - "fs-extra": "^10.0.0", - "http-proxy-agent": "^5.0.0", - "https-proxy-agent": "^5.0.0", - "is-ci": "^3.0.0", - "js-yaml": "^4.1.0", - "source-map-support": "^0.5.19", - "stat-mode": "^1.0.0", - "temp-file": "^3.4.0" - } - }, - "node_modules/builder-util-runtime": { - "version": "8.9.2", - "resolved": "https://registry.npmjs.org/builder-util-runtime/-/builder-util-runtime-8.9.2.tgz", - "integrity": "sha512-rhuKm5vh7E0aAmT6i8aoSfEjxzdYEFX7zDApK+eNgOhjofnWb74d9SRJv0H/8nsgOkos0TZ4zxW0P8J4N7xQ2A==", - "dev": true, - "dependencies": { - "debug": "^4.3.2", - "sax": "^1.2.4" - }, - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/builder-util-runtime/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/builder-util-runtime/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/builder-util/node_modules/@tootallnate/once": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", - "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", - "dev": true, - "engines": { - "node": ">= 10" - } - }, - "node_modules/builder-util/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/builder-util/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, - "node_modules/builder-util/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/builder-util/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/builder-util/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/builder-util/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/builder-util/node_modules/fs-extra": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.1.tgz", - "integrity": "sha512-NbdoVMZso2Lsrn/QwLXOy6rm0ufY2zEOKCDzJR/0kBsb0E6qed0P3iYK+Ath3BfvXEeu4JhEtXLgILx5psUfag==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/builder-util/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/builder-util/node_modules/http-proxy-agent": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", - "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", - "dev": true, - "dependencies": { - "@tootallnate/once": "2", - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/builder-util/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/builder-util/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/builder-util/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/builder-util/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/builder-util/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/builtins": { - "version": "1.0.3", - "resolved": 
"https://registry.npmjs.org/builtins/-/builtins-1.0.3.tgz", - "integrity": "sha1-y5T662HIaWRR2zZTThQi+U8K7og=", - "dev": true - }, - "node_modules/bytes": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", - "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/cacache": { - "version": "15.3.0", - "resolved": "https://registry.npmjs.org/cacache/-/cacache-15.3.0.tgz", - "integrity": "sha512-VVdYzXEn+cnbXpFgWs5hTT7OScegHVmLhJIR8Ufqk3iFD6A6j5iSX1KuBTfNEv4tdJWE2PzA6IVFtcLC7fN9wQ==", - "dev": true, - "dependencies": { - "@npmcli/fs": "^1.0.0", - "@npmcli/move-file": "^1.0.1", - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "glob": "^7.1.4", - "infer-owner": "^1.0.4", - "lru-cache": "^6.0.0", - "minipass": "^3.1.1", - "minipass-collect": "^1.0.2", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.2", - "mkdirp": "^1.0.3", - "p-map": "^4.0.0", - "promise-inflight": "^1.0.1", - "rimraf": "^3.0.2", - "ssri": "^8.0.1", - "tar": "^6.0.2", - "unique-filename": "^1.1.1" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/cacheable-request": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz", - "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==", - "dev": true, - "dependencies": { - "clone-response": "^1.0.2", - "get-stream": "^5.1.0", - "http-cache-semantics": "^4.0.0", - "keyv": "^3.0.0", - "lowercase-keys": "^2.0.0", - "normalize-url": "^4.1.0", - "responselike": "^1.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cacheable-request/node_modules/get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "dev": true, - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cacheable-request/node_modules/lowercase-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", - "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "dev": true, - "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - 
"node_modules/caniuse-lite": { - "version": "1.0.30001319", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001319.tgz", - "integrity": "sha512-xjlIAFHucBRSMUo1kb5D4LYgcN1M45qdKP++lhqowDpwJwGkpIRTt5qQqnhxjj1vHcI7nrJxWhCC1ATrCEBTcw==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/caniuse-lite" - } - ] - }, - "node_modules/caseless": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" - }, - "node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dev": true, - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/chardet": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", - "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", - "dev": true - }, - "node_modules/chokidar": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", - "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", - "dev": true, - "funding": [ - { - "type": "individual", - "url": "https://paulmillr.com/funding/" - } - ], - "dependencies": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" - }, - "engines": { - "node": ">= 8.10.0" - }, - "optionalDependencies": { - "fsevents": "~2.3.2" - } - }, - "node_modules/chokidar/node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/chownr": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", - "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/chrome-trace-event": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", - "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==", - "dev": true, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/chromium-pickle-js": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/chromium-pickle-js/-/chromium-pickle-js-0.2.0.tgz", - "integrity": "sha1-BKEGZywYsIWrd02YPfo+oTjyIgU=", - "dev": true - }, - "node_modules/ci-info": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.3.0.tgz", - "integrity": "sha512-riT/3vI5YpVH6/qomlDnJow6TBee2PBKSEpx3O32EGPYbWGIRsIlGRms3Sm74wYE1JMo8RnO04Hb12+v1J5ICw==", - "dev": true - }, - "node_modules/circular-dependency-plugin": { - "version": "5.2.2", - "resolved": 
"https://registry.npmjs.org/circular-dependency-plugin/-/circular-dependency-plugin-5.2.2.tgz", - "integrity": "sha512-g38K9Cm5WRwlaH6g03B9OEz/0qRizI+2I7n+Gz+L5DxXJAPAiWQvwlYNm1V1jkdpUv95bOe/ASm2vfi/G560jQ==", - "dev": true, - "engines": { - "node": ">=6.0.0" - }, - "peerDependencies": { - "webpack": ">=4.0.1" - } - }, - "node_modules/clean-stack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", - "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/cli-boxes": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", - "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==", - "dev": true, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cli-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", - "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", - "dev": true, - "dependencies": { - "restore-cursor": "^3.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cli-spinners": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.6.1.tgz", - "integrity": "sha512-x/5fWmGMnbKQAaNwN+UZlV79qBLM9JFnJuJ03gIi5whrob0xV0ofNVHy9DhwGdsMJQc2OKv0oGmLzvaqvAVv+g==", - "dev": true, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cli-truncate": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-2.1.0.tgz", - "integrity": "sha512-n8fOixwDD6b/ObinzTrp1ZKFzbgvKZvuz/TvejnLn1aQfC6r52XEx85FmuC+3HI+JM7coBRXUvNqEU2PHVrHpg==", - "dev": true, - "optional": true, - "dependencies": { - "slice-ansi": "^3.0.0", - "string-width": "^4.2.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cli-width": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-3.0.0.tgz", - "integrity": "sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==", - "dev": true, - "engines": { - "node": ">= 10" - } - }, - "node_modules/cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", - "dev": true, - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" - } - }, - "node_modules/clone": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", - "integrity": "sha1-2jCcwmPfFZlMaIypAheco8fNfH4=", - "dev": true, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/clone-deep": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", - "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", - "dev": true, - "dependencies": { - "is-plain-object": "^2.0.4", - "kind-of": "^6.0.2", - "shallow-clone": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/clone-response": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz", - "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=", - "dev": true, - "dependencies": { - "mimic-response": "^1.0.0" - } - }, - "node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", - "dev": true - }, - "node_modules/color-support": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-support/-/color-support-1.1.3.tgz", - "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", - "dev": true, - "bin": { - "color-support": "bin.js" - } - }, - "node_modules/colorette": { - "version": "2.0.16", - "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.16.tgz", - "integrity": "sha512-hUewv7oMjCp+wkBv5Rm0v87eJhq4woh5rSR+42YSQJKecCqgIqNkZ6lAlQms/BwHPJA5NKMRlpxPRv0n8HQW6g==", - "dev": true - }, - "node_modules/colors": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/colors/-/colors-1.0.3.tgz", - "integrity": "sha1-BDP0TYCWgP3rYO0mDxsMJi6CpAs=", - "dev": true, - "engines": { - "node": ">=0.1.90" - } - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/commander": { - "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", - "dev": true - }, - "node_modules/commondir": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", - "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=", - "dev": true - }, - "node_modules/compare-version": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/compare-version/-/compare-version-0.1.2.tgz", - "integrity": "sha1-AWLsLZNR9d3VmpICy6k1NmpyUIA=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/component-emitter": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz", - "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==", - "dev": true - }, - "node_modules/compressible": { - "version": "2.0.18", - "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", - "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", - "dev": true, - "dependencies": { - "mime-db": ">= 1.43.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/compression": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", - "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", - "dev": true, - "dependencies": { - "accepts": 
"~1.3.5", - "bytes": "3.0.0", - "compressible": "~2.0.16", - "debug": "2.6.9", - "on-headers": "~1.0.2", - "safe-buffer": "5.1.2", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/compression/node_modules/bytes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", - "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=", - "dev": true, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/compression/node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true - }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", - "dev": true - }, - "node_modules/concat-stream": { - "version": "1.6.2", - "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", - "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", - "dev": true, - "engines": [ - "node >= 0.8" - ], - "dependencies": { - "buffer-from": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^2.2.2", - "typedarray": "^0.0.6" - } - }, - "node_modules/concat-stream/node_modules/readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "dev": true, - "dependencies": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "node_modules/concat-stream/node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true - }, - "node_modules/concat-stream/node_modules/string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "dev": true, - "dependencies": { - "safe-buffer": "~5.1.0" - } - }, - "node_modules/config-chain": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", - "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", - "dev": true, - "optional": true, - "dependencies": { - "ini": "^1.3.4", - "proto-list": "~1.2.1" - } - }, - "node_modules/config-chain/node_modules/ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", - "dev": true, - "optional": true - }, - "node_modules/configstore": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz", - "integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==", - "dev": true, - "dependencies": { - "dot-prop": "^5.2.0", - "graceful-fs": "^4.1.2", - 
"make-dir": "^3.0.0", - "unique-string": "^2.0.0", - "write-file-atomic": "^3.0.0", - "xdg-basedir": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/connect": { - "version": "3.7.0", - "resolved": "https://registry.npmjs.org/connect/-/connect-3.7.0.tgz", - "integrity": "sha512-ZqRXc+tZukToSNmh5C2iWMSoV3X1YUcPbqEM4DkEG5tNQXrQUZCNVGGv3IuicnkMtPfGf3Xtp8WCXs295iQ1pQ==", - "dev": true, - "dependencies": { - "debug": "2.6.9", - "finalhandler": "1.1.2", - "parseurl": "~1.3.3", - "utils-merge": "1.0.1" - }, - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/connect-history-api-fallback": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-1.6.0.tgz", - "integrity": "sha512-e54B99q/OUoH64zYYRf3HBP5z24G38h5D3qXu23JGRoigpX5Ss4r9ZnDk3g0Z8uQC2x2lPaJ+UlWBc1ZWBWdLg==", - "dev": true, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/console-control-strings": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", - "integrity": "sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4=", - "dev": true - }, - "node_modules/content-disposition": { - "version": "0.5.4", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", - "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", - "dependencies": { - "safe-buffer": "5.2.1" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/content-type": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/convert-source-map": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz", - "integrity": "sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==", - "dev": true, - "dependencies": { - "safe-buffer": "~5.1.1" - } - }, - "node_modules/convert-source-map/node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true - }, - "node_modules/cookie": { - "version": "0.4.2", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.2.tgz", - "integrity": "sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/cookie-signature": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=" - }, - "node_modules/copy-anything": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/copy-anything/-/copy-anything-2.0.6.tgz", - "integrity": "sha512-1j20GZTsvKNkc4BY3NpMOM8tt///wY3FpIzozTOFO2ffuZcV61nojHXVKIy3WM+7ADCy5FVhdZYHYDdgTU0yJw==", - "dev": true, - "dependencies": { - "is-what": "^3.14.1" - }, - "funding": { - "url": "https://github.com/sponsors/mesqueeb" - } - }, - "node_modules/copy-webpack-plugin": { - "version": "10.2.1", - "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-10.2.1.tgz", - "integrity": 
"sha512-nr81NhCAIpAWXGCK5thrKmfCQ6GDY0L5RN0U+BnIn/7Us55+UCex5ANNsNKmIVtDRnk0Ecf+/kzp9SUVrrBMLg==", - "dev": true, - "dependencies": { - "fast-glob": "^3.2.7", - "glob-parent": "^6.0.1", - "globby": "^12.0.2", - "normalize-path": "^3.0.0", - "schema-utils": "^4.0.0", - "serialize-javascript": "^6.0.0" - }, - "engines": { - "node": ">= 12.20.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.1.0" - } - }, - "node_modules/copy-webpack-plugin/node_modules/ajv": { - "version": "8.10.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.10.0.tgz", - "integrity": "sha512-bzqAEZOjkrUMl2afH8dknrq5KEk2SrwdBROR+vH1EKVQTqaUbJVPdc/gEdggTMM0Se+s+Ja4ju4TlNcStKl2Hw==", - "dev": true, - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/copy-webpack-plugin/node_modules/ajv-keywords": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", - "dev": true, - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/copy-webpack-plugin/node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", - "dev": true - }, - "node_modules/copy-webpack-plugin/node_modules/schema-utils": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", - "dev": true, - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/core-js": { - "version": "3.20.3", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.20.3.tgz", - "integrity": "sha512-vVl8j8ph6tRS3B8qir40H7yw7voy17xL0piAjlbBUsH7WIfzoedL/ZOr1OV9FyZQLWXsayOJyV4tnRyXR85/ag==", - "deprecated": "core-js@<3.23.3 is no longer maintained and not recommended for usage due to the number of issues. Because of the V8 engine whims, feature detection in old core-js versions could cause a slowdown up to 100x even if nothing is polyfilled. Some versions have web compatibility issues. 
Please, upgrade your dependencies to the actual version of core-js.", - "dev": true, - "hasInstallScript": true, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/core-js" - } - }, - "node_modules/core-js-compat": { - "version": "3.21.1", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.21.1.tgz", - "integrity": "sha512-gbgX5AUvMb8gwxC7FLVWYT7Kkgu/y7+h/h1X43yJkNqhlK2fuYyQimqvKGNZFAY6CKii/GFKJ2cp/1/42TN36g==", - "dev": true, - "dependencies": { - "browserslist": "^4.19.1", - "semver": "7.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/core-js" - } - }, - "node_modules/core-js-compat/node_modules/semver": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.0.0.tgz", - "integrity": "sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/core-util-is": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" - }, - "node_modules/cors": { - "version": "2.8.5", - "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz", - "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==", - "dev": true, - "dependencies": { - "object-assign": "^4", - "vary": "^1" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/cosmiconfig": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.0.1.tgz", - "integrity": "sha512-a1YWNUV2HwGimB7dU2s1wUMurNKjpx60HxBB6xUM8Re+2s1g1IIfJvFR0/iCF+XHdE0GMTKTuLR32UQff4TEyQ==", - "dev": true, - "dependencies": { - "@types/parse-json": "^4.0.0", - "import-fresh": "^3.2.1", - "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.10.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/crc": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/crc/-/crc-3.8.0.tgz", - "integrity": "sha512-iX3mfgcTMIq3ZKLIsVFAbv7+Mc10kxabAGQb8HvjA1o3T1PIYprbakQ65d3I+2HGHt6nSKkM9PYjgoJO2KcFBQ==", - "dev": true, - "optional": true, - "dependencies": { - "buffer": "^5.1.0" - } - }, - "node_modules/critters": { - "version": "0.0.16", - "resolved": "https://registry.npmjs.org/critters/-/critters-0.0.16.tgz", - "integrity": "sha512-JwjgmO6i3y6RWtLYmXwO5jMd+maZt8Tnfu7VVISmEWyQqfLpB8soBswf8/2bu6SBXxtKA68Al3c+qIG1ApT68A==", - "dev": true, - "dependencies": { - "chalk": "^4.1.0", - "css-select": "^4.2.0", - "parse5": "^6.0.1", - "parse5-htmlparser2-tree-adapter": "^6.0.1", - "postcss": "^8.3.7", - "pretty-bytes": "^5.3.0" - } - }, - "node_modules/critters/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/critters/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - 
"engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/critters/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/critters/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/critters/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/critters/node_modules/parse5": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", - "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==", - "dev": true - }, - "node_modules/critters/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dev": true, - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/cross-spawn-windows-exe": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/cross-spawn-windows-exe/-/cross-spawn-windows-exe-1.2.0.tgz", - "integrity": "sha512-mkLtJJcYbDCxEG7Js6eUnUNndWjyUZwJ3H7bErmmtOYU/Zb99DyUkpamuIZE0b3bhmJyZ7D90uS6f+CGxRRjOw==", - "dev": true, - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/malept" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/subscription/pkg/npm-cross-spawn-windows-exe?utm_medium=referral&utm_source=npm_fund" - } - ], - "dependencies": { - "@malept/cross-spawn-promise": "^1.1.0", - "is-wsl": "^2.2.0", - "which": "^2.0.2" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/crypto-random-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz", - "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/css": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/css/-/css-3.0.0.tgz", - "integrity": "sha512-DG9pFfwOrzc+hawpmqX/dHYHJG+Bsdb0klhyi1sDneOgGOXy9wQIC8hzyVp1e4NRYDBdxcylvywPkkXCHAzTyQ==", - "dev": true, - "dependencies": { - "inherits": "^2.0.4", - "source-map": "^0.6.1", - "source-map-resolve": "^0.6.0" - } - }, - 
"node_modules/css-blank-pseudo": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/css-blank-pseudo/-/css-blank-pseudo-3.0.3.tgz", - "integrity": "sha512-VS90XWtsHGqoM0t4KpH053c4ehxZ2E6HtGI7x68YFV0pTo/QmkV/YFA+NnlvK8guxZVNWGQhVNJGC39Q8XF4OQ==", - "dev": true, - "dependencies": { - "postcss-selector-parser": "^6.0.9" - }, - "bin": { - "css-blank-pseudo": "dist/cli.cjs" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/css-has-pseudo": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/css-has-pseudo/-/css-has-pseudo-3.0.4.tgz", - "integrity": "sha512-Vse0xpR1K9MNlp2j5w1pgWIJtm1a8qS0JwS9goFYcImjlHEmywP9VUF05aGBXzGpDJF86QXk4L0ypBmwPhGArw==", - "dev": true, - "dependencies": { - "postcss-selector-parser": "^6.0.9" - }, - "bin": { - "css-has-pseudo": "dist/cli.cjs" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/css-loader": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.5.1.tgz", - "integrity": "sha512-gEy2w9AnJNnD9Kuo4XAP9VflW/ujKoS9c/syO+uWMlm5igc7LysKzPXaDoR2vroROkSwsTS2tGr1yGGEbZOYZQ==", - "dev": true, - "dependencies": { - "icss-utils": "^5.1.0", - "postcss": "^8.2.15", - "postcss-modules-extract-imports": "^3.0.0", - "postcss-modules-local-by-default": "^4.0.0", - "postcss-modules-scope": "^3.0.0", - "postcss-modules-values": "^4.0.0", - "postcss-value-parser": "^4.1.0", - "semver": "^7.3.5" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - } - }, - "node_modules/css-prefers-color-scheme": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/css-prefers-color-scheme/-/css-prefers-color-scheme-6.0.3.tgz", - "integrity": "sha512-4BqMbZksRkJQx2zAjrokiGMd07RqOa2IxIrrN10lyBe9xhn9DEvjUK79J6jkeiv9D9hQFXKb6g1jwU62jziJZA==", - "dev": true, - "bin": { - "css-prefers-color-scheme": "dist/cli.cjs" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/css-select": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.2.1.tgz", - "integrity": "sha512-/aUslKhzkTNCQUB2qTX84lVmfia9NyjP3WpDGtj/WxhwBzWBYUV3DgUpurHTme8UTPcPlAD1DJ+b0nN/t50zDQ==", - "dev": true, - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^5.1.0", - "domhandler": "^4.3.0", - "domutils": "^2.8.0", - "nth-check": "^2.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/css-what": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-5.1.0.tgz", - "integrity": "sha512-arSMRWIIFY0hV8pIxZMEfmMI47Wj3R/aWpZDDxWYCPEiOMv6tfOrnpDtgxBYPEQD4V0Y/958+1TdC3iWTFcUPw==", - "dev": true, - "engines": { - "node": ">= 6" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/css/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cssdb": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/cssdb/-/cssdb-5.1.0.tgz", - "integrity": 
"sha512-/vqjXhv1x9eGkE/zO6o8ZOI7dgdZbLVLUGyVRbPgk6YipXbW87YzUCcO+Jrmi5bwJlAH6oD+MNeZyRgXea1GZw==", - "dev": true - }, - "node_modules/cssesc": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", - "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", - "dev": true, - "bin": { - "cssesc": "bin/cssesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/custom-event": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/custom-event/-/custom-event-1.0.1.tgz", - "integrity": "sha1-XQKkaFCt8bSjF5RqOSj8y1v9BCU=", - "dev": true - }, - "node_modules/dashdash": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", - "dependencies": { - "assert-plus": "^1.0.0" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/date-format": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/date-format/-/date-format-4.0.6.tgz", - "integrity": "sha512-B9vvg5rHuQ8cbUXE/RMWMyX2YA5TecT3jKF5fLtGNlzPlU7zblSPmAm2OImDbWL+LDOQ6pUm+4LOFz+ywS41Zw==", - "dev": true, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/decode-uri-component": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz", - "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=", - "dev": true, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/decompress-response": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", - "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=", - "dev": true, - "dependencies": { - "mimic-response": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/deep-equal": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-1.1.1.tgz", - "integrity": "sha512-yd9c5AdiqVcR+JjcwUQb9DkhJc8ngNr0MahEBGvDiJw8puWab2yZlh+nkasOnZP+EGTAP6rRp2JzJhJZzvNF8g==", - "dev": true, - "dependencies": { - "is-arguments": "^1.0.4", - "is-date-object": "^1.0.1", - "is-regex": "^1.0.4", - "object-is": "^1.0.1", - "object-keys": "^1.1.1", - "regexp.prototype.flags": "^1.2.0" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", - "dev": true, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/default-gateway": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", - "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", - "dev": true, - "dependencies": { - "execa": "^5.0.0" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/defaults": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.3.tgz", - "integrity": "sha1-xlYFHpgX2f8I7YgUd/P+QBnz730=", - "dev": true, - "dependencies": { - "clone": "^1.0.2" - } - }, - 
"node_modules/defer-to-connect": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz", - "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==", - "dev": true - }, - "node_modules/define-lazy-prop": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", - "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/define-properties": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", - "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", - "dev": true, - "dependencies": { - "object-keys": "^1.0.12" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/del": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/del/-/del-6.0.0.tgz", - "integrity": "sha512-1shh9DQ23L16oXSZKB2JxpL7iMy2E0S9d517ptA1P8iw0alkPtQcrKH7ru31rYtKwF499HkTu+DRzq3TCKDFRQ==", - "dev": true, - "dependencies": { - "globby": "^11.0.1", - "graceful-fs": "^4.2.4", - "is-glob": "^4.0.1", - "is-path-cwd": "^2.2.0", - "is-path-inside": "^3.0.2", - "p-map": "^4.0.0", - "rimraf": "^3.0.2", - "slash": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/del/node_modules/array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/del/node_modules/globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", - "dev": true, - "dependencies": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/del/node_modules/slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/delegates": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", - "integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=", - "dev": true - }, - "node_modules/depd": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", - "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/dependency-graph": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/dependency-graph/-/dependency-graph-0.11.0.tgz", - "integrity": 
"sha512-JeMq7fEshyepOWDfcfHK06N3MhyPhz++vtqWhMT5O9A3K42rdsEDpfdVqjaqaAhsw6a+ZqeDvQVtD0hFHQWrzg==", - "dev": true, - "engines": { - "node": ">= 0.6.0" - } - }, - "node_modules/destroy": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz", - "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=" - }, - "node_modules/detect-node": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", - "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==", - "dev": true - }, - "node_modules/di": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/di/-/di-0.0.1.tgz", - "integrity": "sha1-gGZJMmzqp8qjMG112YXqJ0i6kTw=", - "dev": true - }, - "node_modules/dir-compare": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/dir-compare/-/dir-compare-2.4.0.tgz", - "integrity": "sha512-l9hmu8x/rjVC9Z2zmGzkhOEowZvW7pmYws5CWHutg8u1JgvsKWMx7Q/UODeu4djLZ4FgW5besw5yvMQnBHzuCA==", - "dev": true, - "dependencies": { - "buffer-equal": "1.0.0", - "colors": "1.0.3", - "commander": "2.9.0", - "minimatch": "3.0.4" - }, - "bin": { - "dircompare": "src/cli/dircompare.js" - } - }, - "node_modules/dir-compare/node_modules/commander": { - "version": "2.9.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.9.0.tgz", - "integrity": "sha1-nJkJQXbhIkDLItbFFGCYQA/g99Q=", - "dev": true, - "dependencies": { - "graceful-readlink": ">= 1.0.0" - }, - "engines": { - "node": ">= 0.6.x" - } - }, - "node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dev": true, - "dependencies": { - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/dmg-builder": { - "version": "22.14.13", - "resolved": "https://registry.npmjs.org/dmg-builder/-/dmg-builder-22.14.13.tgz", - "integrity": "sha512-xNOugB6AbIRETeU2uID15sUfjdZZcKdxK8xkFnwIggsM00PJ12JxpLNPTjcRoUnfwj3WrPjilrO64vRMwNItQg==", - "dev": true, - "dependencies": { - "app-builder-lib": "22.14.13", - "builder-util": "22.14.13", - "builder-util-runtime": "8.9.2", - "fs-extra": "^10.0.0", - "iconv-lite": "^0.6.2", - "js-yaml": "^4.1.0" - }, - "optionalDependencies": { - "dmg-license": "^1.0.9" - } - }, - "node_modules/dmg-builder/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, - "node_modules/dmg-builder/node_modules/fs-extra": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.1.tgz", - "integrity": "sha512-NbdoVMZso2Lsrn/QwLXOy6rm0ufY2zEOKCDzJR/0kBsb0E6qed0P3iYK+Ath3BfvXEeu4JhEtXLgILx5psUfag==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/dmg-builder/node_modules/iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "dev": true, - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/dmg-builder/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/dmg-builder/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/dmg-builder/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/dmg-license": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/dmg-license/-/dmg-license-1.0.11.tgz", - "integrity": "sha512-ZdzmqwKmECOWJpqefloC5OJy1+WZBBse5+MR88z9g9Zn4VY+WYUkAyojmhzJckH5YbbZGcYIuGAkY5/Ys5OM2Q==", - "dev": true, - "optional": true, - "os": [ - "darwin" - ], - "dependencies": { - "@types/plist": "^3.0.1", - "@types/verror": "^1.10.3", - "ajv": "^6.10.0", - "crc": "^3.8.0", - "iconv-corefoundation": "^1.1.7", - "plist": "^3.0.4", - "smart-buffer": "^4.0.2", - "verror": "^1.10.0" - }, - "bin": { - "dmg-license": "bin/dmg-license.js" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/dns-equal": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", - "integrity": "sha1-s55/HabrCnW6nBcySzR1PEfgZU0=", - "dev": true - }, - "node_modules/dns-packet": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-1.3.4.tgz", - "integrity": "sha512-BQ6F4vycLXBvdrJZ6S3gZewt6rcrks9KBgM9vrhW+knGRqc8uEdT7fuCwloc7nny5xNoMJ17HGH0R/6fpo8ECA==", - "dev": true, - "dependencies": { - "ip": "^1.1.0", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/dns-txt": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/dns-txt/-/dns-txt-2.0.2.tgz", - "integrity": "sha1-uR2Ab10nGI5Ks+fRB9iBocxGQrY=", - "dev": true, - "dependencies": { - "buffer-indexof": "^1.0.0" - } - }, - "node_modules/dom-serialize": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/dom-serialize/-/dom-serialize-2.2.1.tgz", - "integrity": "sha1-ViromZ9Evl6jB29UGdzVnrQ6yVs=", - "dev": true, - "dependencies": { - "custom-event": "~1.0.0", - "ent": "~2.2.0", - "extend": "^3.0.0", - "void-elements": "^2.0.0" - } - }, - "node_modules/dom-serializer": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.3.2.tgz", - "integrity": "sha512-5c54Bk5Dw4qAxNOI1pFEizPSjVsx5+bpJKmL2kPn8JhBUq2q09tTCa3mjijun2NfK78NMouDYNMBkOrPZiS+ig==", - "dev": true, - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.2.0", - "entities": "^2.0.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, - "node_modules/domelementtype": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", - "integrity": 
"sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ] - }, - "node_modules/domhandler": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", - "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", - "dev": true, - "dependencies": { - "domelementtype": "^2.2.0" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "node_modules/domutils": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", - "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", - "dev": true, - "dependencies": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.2.0" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - } - }, - "node_modules/dot-prop": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz", - "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==", - "dev": true, - "dependencies": { - "is-obj": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/dotenv": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-10.0.0.tgz", - "integrity": "sha512-rlBi9d8jpv9Sf1klPjNfFAuWDjKLwTIJJ/VxtoTwIR6hnZxcEOQCZg2oIL3MWBYw5GpUDKOEnND7LXTbIpQ03Q==", - "engines": { - "node": ">=10" - } - }, - "node_modules/dotenv-expand": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/dotenv-expand/-/dotenv-expand-5.1.0.tgz", - "integrity": "sha512-YXQl1DSa4/PQyRfgrv6aoNjhasp/p4qs9FjJ4q4cQk+8m4r6k4ZSiEyytKG8f8W9gi8WsQtIObNmKd+tMzNTmA==", - "dev": true - }, - "node_modules/duplexer3": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz", - "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI=", - "dev": true - }, - "node_modules/ecc-jsbn": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", - "dependencies": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, - "node_modules/ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" - }, - "node_modules/ejs": { - "version": "3.1.6", - "resolved": "https://registry.npmjs.org/ejs/-/ejs-3.1.6.tgz", - "integrity": "sha512-9lt9Zse4hPucPkoP7FHDF0LQAlGyF9JVpnClFLFH3aSSbxmyoqINRpp/9wePWJTUl4KOQwRL72Iw3InHPDkoGw==", - "dev": true, - "dependencies": { - "jake": "^10.6.1" - }, - "bin": { - "ejs": "bin/cli.js" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/electron": { - "version": "17.1.2", - "resolved": "https://registry.npmjs.org/electron/-/electron-17.1.2.tgz", - "integrity": "sha512-hqKQaUIRWX5Y2eAD8FZINWD/e5TKdpkbBYbkcZmJS4Bd1PKQsaDVc9h5xoA8zZQkPymE9rss+swjRpAFurOPGQ==", - "dev": true, - "hasInstallScript": true, - "dependencies": { - "@electron/get": "^1.13.0", - "@types/node": "^14.6.2", - "extract-zip": "^1.0.3" - }, - "bin": { - "electron": "cli.js" - }, - "engines": { - "node": ">= 8.6" - } - }, - "node_modules/electron-builder": { - "version": "22.14.13", - "resolved": 
"https://registry.npmjs.org/electron-builder/-/electron-builder-22.14.13.tgz", - "integrity": "sha512-3fgLxqF2TXVKiUPeg74O4V3l0l3j7ERLazo8sUbRkApw0+4iVAf2BJkHsHMaXiigsgCoEzK/F4/rB5rne/VAnw==", - "dev": true, - "dependencies": { - "@types/yargs": "^17.0.1", - "app-builder-lib": "22.14.13", - "builder-util": "22.14.13", - "builder-util-runtime": "8.9.2", - "chalk": "^4.1.1", - "dmg-builder": "22.14.13", - "fs-extra": "^10.0.0", - "is-ci": "^3.0.0", - "lazy-val": "^1.0.5", - "read-config-file": "6.2.0", - "update-notifier": "^5.1.0", - "yargs": "^17.0.1" - }, - "bin": { - "electron-builder": "cli.js", - "install-app-deps": "install-app-deps.js" - }, - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/electron-builder/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/electron-builder/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/electron-builder/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/electron-builder/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/electron-builder/node_modules/fs-extra": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.1.tgz", - "integrity": "sha512-NbdoVMZso2Lsrn/QwLXOy6rm0ufY2zEOKCDzJR/0kBsb0E6qed0P3iYK+Ath3BfvXEeu4JhEtXLgILx5psUfag==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/electron-builder/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/electron-builder/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/electron-builder/node_modules/supports-color": { - "version": 
"7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/electron-builder/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/electron-notarize": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/electron-notarize/-/electron-notarize-1.1.1.tgz", - "integrity": "sha512-kufsnqh86CTX89AYNG3NCPoboqnku/+32RxeJ2+7A4Rbm4bbOx0Nc7XTy3/gAlBfpj9xPAxHfhZLOHgfi6cJVw==", - "deprecated": "Please use @electron/notarize moving forward. There is no API change, just a package name change", - "dev": true, - "dependencies": { - "debug": "^4.1.1", - "fs-extra": "^9.0.1" - }, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/electron-notarize/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/electron-notarize/node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "dev": true, - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/electron-notarize/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/electron-notarize/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/electron-notarize/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/electron-osx-sign": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/electron-osx-sign/-/electron-osx-sign-0.5.0.tgz", - "integrity": "sha512-icoRLHzFz/qxzDh/N4Pi2z4yVHurlsCAYQvsCSG7fCedJ4UJXBS6PoQyGH71IfcqKupcKeK7HX/NkyfG+v6vlQ==", - "deprecated": "Please use @electron/osx-sign moving forward. 
Be aware the API is slightly different", - "dev": true, - "dependencies": { - "bluebird": "^3.5.0", - "compare-version": "^0.1.2", - "debug": "^2.6.8", - "isbinaryfile": "^3.0.2", - "minimist": "^1.2.0", - "plist": "^3.0.1" - }, - "bin": { - "electron-osx-flat": "bin/electron-osx-flat.js", - "electron-osx-sign": "bin/electron-osx-sign.js" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/electron-osx-sign/node_modules/isbinaryfile": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-3.0.3.tgz", - "integrity": "sha512-8cJBL5tTd2OS0dM4jz07wQd5g0dCCqIhUxPIGtZfa5L6hWlvV5MHTITy/DBAsF+Oe2LS1X3krBUhNwaGUWpWxw==", - "dev": true, - "dependencies": { - "buffer-alloc": "^1.2.0" - }, - "engines": { - "node": ">=0.6.0" - } - }, - "node_modules/electron-packager": { - "version": "15.4.0", - "resolved": "https://registry.npmjs.org/electron-packager/-/electron-packager-15.4.0.tgz", - "integrity": "sha512-JrrLcBP15KGrPj0cZ/ALKGmaQ4gJkn3mocf0E3bRKdR3kxKWYcDRpCvdhksYDXw/r3I6tMEcZ7XzyApWFXdVpw==", - "dev": true, - "dependencies": { - "@electron/get": "^1.6.0", - "asar": "^3.1.0", - "cross-spawn-windows-exe": "^1.2.0", - "debug": "^4.0.1", - "electron-notarize": "^1.1.1", - "electron-osx-sign": "^0.5.0", - "extract-zip": "^2.0.0", - "filenamify": "^4.1.0", - "fs-extra": "^9.0.0", - "galactus": "^0.2.1", - "get-package-info": "^1.0.0", - "junk": "^3.1.0", - "parse-author": "^2.0.0", - "plist": "^3.0.0", - "rcedit": "^3.0.1", - "resolve": "^1.1.6", - "semver": "^7.1.3", - "yargs-parser": "^20.0.0" - }, - "bin": { - "electron-packager": "bin/electron-packager.js" - }, - "engines": { - "node": ">= 10.12.0" - }, - "funding": { - "url": "https://github.com/electron/electron-packager?sponsor=1" - } - }, - "node_modules/electron-packager/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/electron-packager/node_modules/extract-zip": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-2.0.1.tgz", - "integrity": "sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==", - "dev": true, - "dependencies": { - "debug": "^4.1.1", - "get-stream": "^5.1.0", - "yauzl": "^2.10.0" - }, - "bin": { - "extract-zip": "cli.js" - }, - "engines": { - "node": ">= 10.17.0" - }, - "optionalDependencies": { - "@types/yauzl": "^2.9.1" - } - }, - "node_modules/electron-packager/node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "dev": true, - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/electron-packager/node_modules/get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "dev": true, - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": 
">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/electron-packager/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/electron-packager/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/electron-packager/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/electron-packager/node_modules/yargs-parser": { - "version": "20.2.9", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", - "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/electron-publish": { - "version": "22.14.13", - "resolved": "https://registry.npmjs.org/electron-publish/-/electron-publish-22.14.13.tgz", - "integrity": "sha512-0oP3QiNj3e8ewOaEpEJV/o6Zrmy2VarVvZ/bH7kyO/S/aJf9x8vQsKVWpsdmSiZ5DJEHgarFIXrnO0ZQf0P9iQ==", - "dev": true, - "dependencies": { - "@types/fs-extra": "^9.0.11", - "builder-util": "22.14.13", - "builder-util-runtime": "8.9.2", - "chalk": "^4.1.1", - "fs-extra": "^10.0.0", - "lazy-val": "^1.0.5", - "mime": "^2.5.2" - } - }, - "node_modules/electron-publish/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/electron-publish/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/electron-publish/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/electron-publish/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true 
- }, - "node_modules/electron-publish/node_modules/fs-extra": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.1.tgz", - "integrity": "sha512-NbdoVMZso2Lsrn/QwLXOy6rm0ufY2zEOKCDzJR/0kBsb0E6qed0P3iYK+Ath3BfvXEeu4JhEtXLgILx5psUfag==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/electron-publish/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/electron-publish/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/electron-publish/node_modules/mime": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-2.6.0.tgz", - "integrity": "sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg==", - "dev": true, - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/electron-publish/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/electron-publish/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/electron-to-chromium": { - "version": "1.4.89", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.89.tgz", - "integrity": "sha512-z1Axg0Fu54fse8wN4fd+GAINdU5mJmLtcl6bqIcYyzNVGONcfHAeeJi88KYMQVKalhXlYuVPzKkFIU5VD0raUw==", - "dev": true - }, - "node_modules/electron/node_modules/@types/node": { - "version": "14.18.12", - "resolved": "https://registry.npmjs.org/@types/node/-/node-14.18.12.tgz", - "integrity": "sha512-q4jlIR71hUpWTnGhXWcakgkZeHa3CCjcQcnuzU8M891BAWA2jHiziiWEPEkdS5pFsz7H9HJiy8BrK7tBRNrY7A==", - "dev": true - }, - "node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, - "node_modules/emojis-list": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", - "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", - "dev": true, - "engines": { - "node": ">= 4" - } - }, - "node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": 
"sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/encoding": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.13.tgz", - "integrity": "sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==", - "dev": true, - "optional": true, - "dependencies": { - "iconv-lite": "^0.6.2" - } - }, - "node_modules/encoding/node_modules/iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "dev": true, - "optional": true, - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "dev": true, - "dependencies": { - "once": "^1.4.0" - } - }, - "node_modules/engine.io": { - "version": "6.1.3", - "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.1.3.tgz", - "integrity": "sha512-rqs60YwkvWTLLnfazqgZqLa/aKo+9cueVfEi/dZ8PyGyaf8TLOxj++4QMIgeG3Gn0AhrWiFXvghsoY9L9h25GA==", - "dev": true, - "dependencies": { - "@types/cookie": "^0.4.1", - "@types/cors": "^2.8.12", - "@types/node": ">=10.0.0", - "accepts": "~1.3.4", - "base64id": "2.0.0", - "cookie": "~0.4.1", - "cors": "~2.8.5", - "debug": "~4.3.1", - "engine.io-parser": "~5.0.3", - "ws": "~8.2.3" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/engine.io-parser": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.0.3.tgz", - "integrity": "sha512-BtQxwF27XUNnSafQLvDi0dQ8s3i6VgzSoQMJacpIcGNrlUdfHSKbgm3jmjCVvQluGzqwujQMPAoMai3oYSTurg==", - "dev": true, - "dependencies": { - "@socket.io/base64-arraybuffer": "~1.0.2" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/engine.io/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/engine.io/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/engine.io/node_modules/ws": { - "version": "8.2.3", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.2.3.tgz", - "integrity": "sha512-wBuoj1BDpC6ZQ1B7DWQBYVLphPWkm8i9Y0/3YdHjHKHiohOJ1ws+3OccDWtH+PoC9DZD5WOTrJvNbWvjS6JWaA==", - "dev": true, - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": "^5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - }, - "node_modules/enhanced-resolve": { - "version": "5.9.2", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.9.2.tgz", - "integrity": 
"sha512-GIm3fQfwLJ8YZx2smuHpBKkXC1yOk+OBEmKckVyL0i/ea8mqDEykK3ld5dgH1QYPNyT/lIllxV2LULnxCHaHkA==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.4", - "tapable": "^2.2.0" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/ent": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/ent/-/ent-2.2.0.tgz", - "integrity": "sha1-6WQhkyWiHQX0RGai9obtbOX13R0=", - "dev": true - }, - "node_modules/entities": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", - "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", - "dev": true, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/env-paths": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", - "integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/err-code": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/err-code/-/err-code-2.0.3.tgz", - "integrity": "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==", - "dev": true - }, - "node_modules/errno": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/errno/-/errno-0.1.8.tgz", - "integrity": "sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A==", - "dev": true, - "optional": true, - "dependencies": { - "prr": "~1.0.1" - }, - "bin": { - "errno": "cli.js" - } - }, - "node_modules/error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", - "dev": true, - "dependencies": { - "is-arrayish": "^0.2.1" - } - }, - "node_modules/es-module-lexer": { - "version": "0.9.3", - "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-0.9.3.tgz", - "integrity": "sha512-1HQ2M2sPtxwnvOvT1ZClHyQDiggdNjURWpY2we6aMKCQiUVxTmVs2UYPLIrD84sS+kMdUwfBSylbJPwNnBrnHQ==", - "dev": true - }, - "node_modules/es6-error": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/es6-error/-/es6-error-4.1.1.tgz", - "integrity": "sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg==", - "dev": true, - "optional": true - }, - "node_modules/esbuild-android-arm64": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-android-arm64/-/esbuild-android-arm64-0.14.22.tgz", - "integrity": "sha512-k1Uu4uC4UOFgrnTj2zuj75EswFSEBK+H6lT70/DdS4mTAOfs2ECv2I9ZYvr3w0WL0T4YItzJdK7fPNxcPw6YmQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-darwin-64": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-darwin-64/-/esbuild-darwin-64-0.14.22.tgz", - "integrity": "sha512-d8Ceuo6Vw6HM3fW218FB6jTY6O3r2WNcTAU0SGsBkXZ3k8SDoRLd3Nrc//EqzdgYnzDNMNtrWegK2Qsss4THhw==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-darwin-arm64": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-darwin-arm64/-/esbuild-darwin-arm64-0.14.22.tgz", - "integrity": 
"sha512-YAt9Tj3SkIUkswuzHxkaNlT9+sg0xvzDvE75LlBo4DI++ogSgSmKNR6B4eUhU5EUUepVXcXdRIdqMq9ppeRqfw==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-freebsd-64": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-freebsd-64/-/esbuild-freebsd-64-0.14.22.tgz", - "integrity": "sha512-ek1HUv7fkXMy87Qm2G4IRohN+Qux4IcnrDBPZGXNN33KAL0pEJJzdTv0hB/42+DCYWylSrSKxk3KUXfqXOoH4A==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-freebsd-arm64": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-freebsd-arm64/-/esbuild-freebsd-arm64-0.14.22.tgz", - "integrity": "sha512-zPh9SzjRvr9FwsouNYTqgqFlsMIW07O8mNXulGeQx6O5ApgGUBZBgtzSlBQXkHi18WjrosYfsvp5nzOKiWzkjQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-linux-32": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-linux-32/-/esbuild-linux-32-0.14.22.tgz", - "integrity": "sha512-SnpveoE4nzjb9t2hqCIzzTWBM0RzcCINDMBB67H6OXIuDa4KqFqaIgmTchNA9pJKOVLVIKd5FYxNiJStli21qg==", - "cpu": [ - "ia32" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-linux-64": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-linux-64/-/esbuild-linux-64-0.14.22.tgz", - "integrity": "sha512-Zcl9Wg7gKhOWWNqAjygyqzB+fJa19glgl2JG7GtuxHyL1uEnWlpSMytTLMqtfbmRykIHdab797IOZeKwk5g0zg==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-linux-arm": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-linux-arm/-/esbuild-linux-arm-0.14.22.tgz", - "integrity": "sha512-soPDdbpt/C0XvOOK45p4EFt8HbH5g+0uHs5nUKjHVExfgR7du734kEkXR/mE5zmjrlymk5AA79I0VIvj90WZ4g==", - "cpu": [ - "arm" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-linux-arm64": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-linux-arm64/-/esbuild-linux-arm64-0.14.22.tgz", - "integrity": "sha512-8q/FRBJtV5IHnQChO3LHh/Jf7KLrxJ/RCTGdBvlVZhBde+dk3/qS9fFsUy+rs3dEi49aAsyVitTwlKw1SUFm+A==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-linux-mips64le": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-linux-mips64le/-/esbuild-linux-mips64le-0.14.22.tgz", - "integrity": "sha512-SiNDfuRXhGh1JQLLA9JPprBgPVFOsGuQ0yDfSPTNxztmVJd8W2mX++c4FfLpAwxuJe183mLuKf7qKCHQs5ZnBQ==", - "cpu": [ - "mips64el" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-linux-ppc64le": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-linux-ppc64le/-/esbuild-linux-ppc64le-0.14.22.tgz", - "integrity": "sha512-6t/GI9I+3o1EFm2AyN9+TsjdgWCpg2nwniEhjm2qJWtJyJ5VzTXGUU3alCO3evopu8G0hN2Bu1Jhz2YmZD0kng==", - "cpu": [ - "ppc64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-linux-riscv64": { - "version": "0.14.22", - "resolved": 
"https://registry.npmjs.org/esbuild-linux-riscv64/-/esbuild-linux-riscv64-0.14.22.tgz", - "integrity": "sha512-AyJHipZKe88sc+tp5layovquw5cvz45QXw5SaDgAq2M911wLHiCvDtf/07oDx8eweCyzYzG5Y39Ih568amMTCQ==", - "cpu": [ - "riscv64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-linux-s390x": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-linux-s390x/-/esbuild-linux-s390x-0.14.22.tgz", - "integrity": "sha512-Sz1NjZewTIXSblQDZWEFZYjOK6p8tV6hrshYdXZ0NHTjWE+lwxpOpWeElUGtEmiPcMT71FiuA9ODplqzzSxkzw==", - "cpu": [ - "s390x" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-netbsd-64": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-netbsd-64/-/esbuild-netbsd-64-0.14.22.tgz", - "integrity": "sha512-TBbCtx+k32xydImsHxvFgsOCuFqCTGIxhzRNbgSL1Z2CKhzxwT92kQMhxort9N/fZM2CkRCPPs5wzQSamtzEHA==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-openbsd-64": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-openbsd-64/-/esbuild-openbsd-64-0.14.22.tgz", - "integrity": "sha512-vK912As725haT313ANZZZN+0EysEEQXWC/+YE4rQvOQzLuxAQc2tjbzlAFREx3C8+uMuZj/q7E5gyVB7TzpcTA==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-sunos-64": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-sunos-64/-/esbuild-sunos-64-0.14.22.tgz", - "integrity": "sha512-/mbJdXTW7MTcsPhtfDsDyPEOju9EOABvCjeUU2OJ7fWpX/Em/H3WYDa86tzLUbcVg++BScQDzqV/7RYw5XNY0g==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "sunos" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-wasm": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-wasm/-/esbuild-wasm-0.14.22.tgz", - "integrity": "sha512-FOSAM29GN1fWusw0oLMv6JYhoheDIh5+atC72TkJKfIUMID6yISlicoQSd9gsNSFsNBvABvtE2jR4JB1j4FkFw==", - "dev": true, - "bin": { - "esbuild": "bin/esbuild" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-windows-32": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-windows-32/-/esbuild-windows-32-0.14.22.tgz", - "integrity": "sha512-1vRIkuvPTjeSVK3diVrnMLSbkuE36jxA+8zGLUOrT4bb7E/JZvDRhvtbWXWaveUc/7LbhaNFhHNvfPuSw2QOQg==", - "cpu": [ - "ia32" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-windows-64": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-windows-64/-/esbuild-windows-64-0.14.22.tgz", - "integrity": "sha512-AxjIDcOmx17vr31C5hp20HIwz1MymtMjKqX4qL6whPj0dT9lwxPexmLj6G1CpR3vFhui6m75EnBEe4QL82SYqw==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/esbuild-windows-arm64": { - "version": "0.14.22", - "resolved": "https://registry.npmjs.org/esbuild-windows-arm64/-/esbuild-windows-arm64-0.14.22.tgz", - "integrity": "sha512-5wvQ+39tHmRhNpu2Fx04l7QfeK3mQ9tKzDqqGR8n/4WUxsFxnVLfDRBGirIfk4AfWlxk60kqirlODPoT5LqMUg==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/escalade": { - "version": "3.1.1", - "resolved": 
"https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/escape-goat": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz", - "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/escape-html": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" - }, - "node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "dev": true, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/eslint-scope": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", - "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", - "dev": true, - "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "dev": true, - "bin": { - "esparse": "bin/esparse.js", - "esvalidate": "bin/esvalidate.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", - "dev": true, - "dependencies": { - "estraverse": "^5.2.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/esrecurse/node_modules/estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", - "dev": true, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "dev": true, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/etag": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/eventemitter-asyncresource": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/eventemitter-asyncresource/-/eventemitter-asyncresource-1.0.0.tgz", - "integrity": "sha512-39F7TBIV0G7gTelxwbEqnwhp90eqCPON1k0NwNfwhgKn4Co4ybUbj2pECcXT0B3ztRKZ7Pw1JujUUgmQJHcVAQ==", - "dev": true - }, - 
"node_modules/eventemitter3": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", - "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", - "dev": true - }, - "node_modules/events": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", - "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", - "dev": true, - "engines": { - "node": ">=0.8.x" - } - }, - "node_modules/execa": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", - "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", - "dev": true, - "dependencies": { - "cross-spawn": "^7.0.3", - "get-stream": "^6.0.0", - "human-signals": "^2.1.0", - "is-stream": "^2.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^4.0.1", - "onetime": "^5.1.2", - "signal-exit": "^3.0.3", - "strip-final-newline": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sindresorhus/execa?sponsor=1" - } - }, - "node_modules/express": { - "version": "4.17.3", - "resolved": "https://registry.npmjs.org/express/-/express-4.17.3.tgz", - "integrity": "sha512-yuSQpz5I+Ch7gFrPCk4/c+dIBKlQUxtgwqzph132bsT6qhuzss6I8cLJQz7B3rFblzd6wtcI0ZbGltH/C4LjUg==", - "dependencies": { - "accepts": "~1.3.8", - "array-flatten": "1.1.1", - "body-parser": "1.19.2", - "content-disposition": "0.5.4", - "content-type": "~1.0.4", - "cookie": "0.4.2", - "cookie-signature": "1.0.6", - "debug": "2.6.9", - "depd": "~1.1.2", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "finalhandler": "~1.1.2", - "fresh": "0.5.2", - "merge-descriptors": "1.0.1", - "methods": "~1.1.2", - "on-finished": "~2.3.0", - "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", - "proxy-addr": "~2.0.7", - "qs": "6.9.7", - "range-parser": "~1.2.1", - "safe-buffer": "5.2.1", - "send": "0.17.2", - "serve-static": "1.14.2", - "setprototypeof": "1.2.0", - "statuses": "~1.5.0", - "type-is": "~1.6.18", - "utils-merge": "1.0.1", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" - }, - "node_modules/external-editor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", - "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", - "dev": true, - "dependencies": { - "chardet": "^0.7.0", - "iconv-lite": "^0.4.24", - "tmp": "^0.0.33" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/extract-zip": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-1.7.0.tgz", - "integrity": "sha512-xoh5G1W/PB0/27lXgMQyIhP5DSY/LhoCsOyZgb+6iMmRtCwVBo55uKaMoEYrDCKQhWvqEip5ZPKAc6eFNyf/MA==", - "dev": true, - "dependencies": { - "concat-stream": "^1.6.2", - "debug": "^2.6.9", - "mkdirp": "^0.5.4", - "yauzl": "^2.10.0" - }, - "bin": { - "extract-zip": "cli.js" - } - }, - "node_modules/extract-zip/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": 
"sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dev": true, - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/extsprintf": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=", - "engines": [ - "node >=0.6.0" - ] - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" - }, - "node_modules/fast-glob": { - "version": "3.2.11", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.11.tgz", - "integrity": "sha512-xrO3+1bxSo3ZVHAnqzyuewYT6aMFHRAd4Kcs92MAonjwQZLsK9d0SF1IyQ3k5PoirxTW0Oe/RqFgMQ6TcNE5Ew==", - "dev": true, - "dependencies": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.4" - }, - "engines": { - "node": ">=8.6.0" - } - }, - "node_modules/fast-glob/node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" - }, - "node_modules/fastq": { - "version": "1.13.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.13.0.tgz", - "integrity": "sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==", - "dev": true, - "dependencies": { - "reusify": "^1.0.4" - } - }, - "node_modules/faye-websocket": { - "version": "0.11.4", - "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", - "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", - "dev": true, - "dependencies": { - "websocket-driver": ">=0.5.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/fd-slicer": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", - "integrity": "sha1-JcfInLH5B3+IkbvmHY85Dq4lbx4=", - "dev": true, - "dependencies": { - "pend": "~1.2.0" - } - }, - "node_modules/figures": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", - "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", - "dev": true, - "dependencies": { - "escape-string-regexp": "^1.0.5" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/filelist": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/filelist/-/filelist-1.0.2.tgz", - "integrity": "sha512-z7O0IS8Plc39rTCq6i6iHxk43duYOn8uFJiWSewIq0Bww1RNybVHSCjahmcC87ZqAm4OTvFzlzeGu3XAzG1ctQ==", - "dev": true, - "dependencies": { - "minimatch": "^3.0.4" - } - }, - "node_modules/filename-reserved-regex": { - "version": "2.0.0", - 
"resolved": "https://registry.npmjs.org/filename-reserved-regex/-/filename-reserved-regex-2.0.0.tgz", - "integrity": "sha1-q/c9+rc10EVECr/qLZHzieu/oik=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/filenamify": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/filenamify/-/filenamify-4.3.0.tgz", - "integrity": "sha512-hcFKyUG57yWGAzu1CMt/dPzYZuv+jAJUT85bL8mrXvNe6hWj6yEHEc4EdcgiA6Z3oi1/9wXJdZPXF2dZNgwgOg==", - "dev": true, - "dependencies": { - "filename-reserved-regex": "^2.0.0", - "strip-outer": "^1.0.1", - "trim-repeated": "^1.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "dev": true, - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/finalhandler": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz", - "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==", - "dependencies": { - "debug": "2.6.9", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "on-finished": "~2.3.0", - "parseurl": "~1.3.3", - "statuses": "~1.5.0", - "unpipe": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/find-cache-dir": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz", - "integrity": "sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==", - "dev": true, - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^3.0.2", - "pkg-dir": "^4.1.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/avajs/find-cache-dir?sponsor=1" - } - }, - "node_modules/find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "dev": true, - "dependencies": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/flatted": { - "version": "3.2.5", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.5.tgz", - "integrity": "sha512-WIWGi2L3DyTUvUrwRKgGi9TwxQMUEqPOPQBVi71R96jZXJdFskXEmf54BoZaS1kknGODoIGASGEzBUYdyMCBJg==", - "dev": true - }, - "node_modules/flora-colossus": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/flora-colossus/-/flora-colossus-1.0.1.tgz", - "integrity": "sha512-d+9na7t9FyH8gBJoNDSi28mE4NgQVGGvxQ4aHtFRetjyh5SXjuus+V5EZaxFmFdXVemSOrx0lsgEl/ZMjnOWJA==", - "dev": true, - "dependencies": { - "debug": "^4.1.1", - "fs-extra": "^7.0.0" - }, - "engines": { - "node": ">= 6.0.0" - } - }, - "node_modules/flora-colossus/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/flora-colossus/node_modules/fs-extra": { - "version": "7.0.1", - 
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz", - "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.1.2", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" - }, - "engines": { - "node": ">=6 <7 || >=8" - } - }, - "node_modules/flora-colossus/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/follow-redirects": { - "version": "1.14.9", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.9.tgz", - "integrity": "sha512-MQDfihBQYMcyy5dhRDJUHcw7lb2Pv/TuE6xP1vyraLukNDHKbDxDNaOE3NbCAdKQApno+GPRyo1YAp89yCjK4w==", - "dev": true, - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], - "engines": { - "node": ">=4.0" - }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, - "node_modules/forever-agent": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=", - "engines": { - "node": "*" - } - }, - "node_modules/form-data": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", - "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 0.12" - } - }, - "node_modules/forwarded": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", - "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fraction.js": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.2.0.tgz", - "integrity": "sha512-MhLuK+2gUcnZe8ZHlaaINnQLl0xRIGRfcGk2yl8xoQAfHrSsL3rYu6FCmBdkdbhc9EPlwyGHewaRsvwRMJtAlA==", - "dev": true, - "engines": { - "node": "*" - }, - "funding": { - "type": "patreon", - "url": "https://www.patreon.com/infusion" - } - }, - "node_modules/fresh": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fs-extra": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz", - "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" - }, - "engines": { - "node": ">=6 <7 || >=8" - } - }, - "node_modules/fs-minipass": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", - "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", - "dev": true, - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/fs-monkey": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.3.tgz", - "integrity": 
"sha512-cybjIfiiE+pTWicSCLFHSrXZ6EilF30oh91FDP9S2B051prEa7QWfrVTQm10/dDpswBDXZugPa1Ogu8Yh+HV0Q==", - "dev": true - }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", - "dev": true - }, - "node_modules/fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", - "dev": true, - "hasInstallScript": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, - "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", - "dev": true - }, - "node_modules/galactus": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/galactus/-/galactus-0.2.1.tgz", - "integrity": "sha1-y+0tIKQMH1Z5o1kI4rlBVzPnjbk=", - "dev": true, - "dependencies": { - "debug": "^3.1.0", - "flora-colossus": "^1.0.0", - "fs-extra": "^4.0.0" - } - }, - "node_modules/galactus/node_modules/debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dev": true, - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/galactus/node_modules/fs-extra": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-4.0.3.tgz", - "integrity": "sha512-q6rbdDd1o2mAnQreO7YADIxf/Whx4AHBiRf6d+/cVT8h44ss+lHgxf1FemcqDnQt9X3ct4McHr+JMGlYSsK7Cg==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.1.2", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" - } - }, - "node_modules/galactus/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true - }, - "node_modules/gauge": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/gauge/-/gauge-4.0.3.tgz", - "integrity": "sha512-ICw1DhAwMtb22rYFwEHgJcx1JCwJGv3x6G0OQUq56Nge+H4Q8JEwr8iveS0XFlsUNSI67F5ffMGK25bK4Pmskw==", - "dev": true, - "dependencies": { - "aproba": "^1.0.3 || ^2.0.0", - "color-support": "^1.1.3", - "console-control-strings": "^1.1.0", - "has-unicode": "^2.0.1", - "signal-exit": "^3.0.7", - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1", - "wide-align": "^1.1.5" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16" - } - }, - "node_modules/gensync": { - "version": "1.0.0-beta.2", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", - "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "dev": true, - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, - "node_modules/get-intrinsic": { - "version": "1.1.1", - "resolved": 
"https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.1.tgz", - "integrity": "sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q==", - "dev": true, - "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-package-info": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/get-package-info/-/get-package-info-1.0.0.tgz", - "integrity": "sha1-ZDJ5ZWPigRPNlHTbvQAFKYWkmZw=", - "dev": true, - "dependencies": { - "bluebird": "^3.1.1", - "debug": "^2.2.0", - "lodash.get": "^4.0.0", - "read-pkg-up": "^2.0.0" - }, - "engines": { - "node": ">= 4.0" - } - }, - "node_modules/get-package-type": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", - "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", - "dev": true, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/get-stream": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", - "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/getpass": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", - "dependencies": { - "assert-plus": "^1.0.0" - } - }, - "node_modules/glob": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", - "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", - "dev": true, - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "dev": true, - "dependencies": { - "is-glob": "^4.0.3" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/glob-to-regexp": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", - "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", - "dev": true - }, - "node_modules/global-agent": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/global-agent/-/global-agent-3.0.0.tgz", - "integrity": "sha512-PT6XReJ+D07JvGoxQMkT6qji/jVNfX/h364XHZOWeRzy64sSFr+xJ5OX7LI3b4MPQzdL4H8Y8M0xzPpsVMwA8Q==", - "dev": true, - "optional": true, - "dependencies": { - "boolean": "^3.0.1", - "es6-error": "^4.1.1", - "matcher": "^3.0.0", - "roarr": "^2.15.3", - "semver": "^7.3.2", - "serialize-error": "^7.0.1" - }, - "engines": { - "node": ">=10.0" - } - }, - "node_modules/global-dirs": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.0.tgz", - "integrity": 
"sha512-v8ho2DS5RiCjftj1nD9NmnfaOzTdud7RRnVd9kFNOjqZbISlx5DQ+OrTkywgd0dIt7oFCvKetZSHoHcP3sDdiA==", - "dev": true, - "dependencies": { - "ini": "2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/global-tunnel-ng": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/global-tunnel-ng/-/global-tunnel-ng-2.7.1.tgz", - "integrity": "sha512-4s+DyciWBV0eK148wqXxcmVAbFVPqtc3sEtUE/GTQfuU80rySLcMhUmHKSHI7/LDj8q0gDYI1lIhRRB7ieRAqg==", - "dev": true, - "optional": true, - "dependencies": { - "encodeurl": "^1.0.2", - "lodash": "^4.17.10", - "npm-conf": "^1.1.3", - "tunnel": "^0.0.6" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/globalthis": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.2.tgz", - "integrity": "sha512-ZQnSFO1la8P7auIOQECnm0sSuoMeaSq0EEdXMBFF2QJO4uNcwbyhSgG3MruWNbFTqCLmxVwGOl7LZ9kASvHdeQ==", - "dev": true, - "optional": true, - "dependencies": { - "define-properties": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/globby": { - "version": "12.2.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-12.2.0.tgz", - "integrity": "sha512-wiSuFQLZ+urS9x2gGPl1H5drc5twabmm4m2gTR27XDFyjUHJUNsS8o/2aKyIF6IoBaR630atdher0XJ5g6OMmA==", - "dev": true, - "dependencies": { - "array-union": "^3.0.1", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.7", - "ignore": "^5.1.9", - "merge2": "^1.4.1", - "slash": "^4.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/got": { - "version": "9.6.0", - "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", - "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==", - "dev": true, - "dependencies": { - "@sindresorhus/is": "^0.14.0", - "@szmarczak/http-timer": "^1.1.2", - "cacheable-request": "^6.0.0", - "decompress-response": "^3.3.0", - "duplexer3": "^0.1.4", - "get-stream": "^4.1.0", - "lowercase-keys": "^1.0.1", - "mimic-response": "^1.0.1", - "p-cancelable": "^1.0.0", - "to-readable-stream": "^1.0.0", - "url-parse-lax": "^3.0.0" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/got/node_modules/get-stream": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", - "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", - "dev": true, - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.9", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.9.tgz", - "integrity": "sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ==", - "dev": true - }, - "node_modules/graceful-readlink": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/graceful-readlink/-/graceful-readlink-1.0.1.tgz", - "integrity": "sha1-TK+tdrxi8C+gObL5Tpo906ORpyU=", - "dev": true - }, - "node_modules/handle-thing": { 
- "version": "2.0.1", - "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", - "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==", - "dev": true - }, - "node_modules/har-schema": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=", - "engines": { - "node": ">=4" - } - }, - "node_modules/har-validator": { - "version": "5.1.5", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", - "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", - "deprecated": "this library is no longer supported", - "dependencies": { - "ajv": "^6.12.3", - "har-schema": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dev": true, - "dependencies": { - "function-bind": "^1.1.1" - }, - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", - "dev": true, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-tostringtag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", - "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", - "dev": true, - "dependencies": { - "has-symbols": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-unicode": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", - "integrity": "sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk=", - "dev": true - }, - "node_modules/has-yarn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz", - "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/hdr-histogram-js": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/hdr-histogram-js/-/hdr-histogram-js-2.0.3.tgz", - "integrity": "sha512-Hkn78wwzWHNCp2uarhzQ2SGFLU3JY8SBDDd3TAABK4fc30wm+MuPOrg5QVFVfkKOQd6Bfz3ukJEI+q9sXEkK1g==", - "dev": true, - "dependencies": { - "@assemblyscript/loader": "^0.10.1", - "base64-js": "^1.2.0", - "pako": "^1.0.3" - } - }, - "node_modules/hdr-histogram-percentiles-obj": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/hdr-histogram-percentiles-obj/-/hdr-histogram-percentiles-obj-3.0.0.tgz", - "integrity": "sha512-7kIufnBqdsBGcSZLPJwqHT3yhk1QTsSlFsVD3kx5ixH/AlgBs9yM1q6DPhXZ8f8gtdqgh7N7/5btRLpQsS2gHw==", - "dev": true - }, - "node_modules/hosted-git-info": { - "version": "4.1.0", - "resolved": 
"https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.1.0.tgz", - "integrity": "sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==", - "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/hpack.js": { - "version": "2.1.6", - "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", - "integrity": "sha1-h3dMCUnlE/QuhFdbPEVoH63ioLI=", - "dev": true, - "dependencies": { - "inherits": "^2.0.1", - "obuf": "^1.0.0", - "readable-stream": "^2.0.1", - "wbuf": "^1.1.0" - } - }, - "node_modules/hpack.js/node_modules/readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "dev": true, - "dependencies": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "node_modules/hpack.js/node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true - }, - "node_modules/hpack.js/node_modules/string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "dev": true, - "dependencies": { - "safe-buffer": "~5.1.0" - } - }, - "node_modules/html-entities": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.3.2.tgz", - "integrity": "sha512-c3Ab/url5ksaT0WyleslpBEthOzWhrjQbg75y7XUsfSzi3Dgzt0l8w5e7DylRn15MTlMMD58dTfzddNS2kcAjQ==", - "dev": true - }, - "node_modules/html-escaper": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", - "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", - "dev": true - }, - "node_modules/http-cache-semantics": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", - "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==", - "dev": true - }, - "node_modules/http-deceiver": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", - "integrity": "sha1-+nFolEq5pRnTN8sL7HKE3D5yPYc=", - "dev": true - }, - "node_modules/http-errors": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.8.1.tgz", - "integrity": "sha512-Kpk9Sm7NmI+RHhnj6OIWDI1d6fIoFAtFt9RLaTMRlg/8w49juAStsrBgp0Dp4OdxdVbRIeKhtCUvoi/RuAhO4g==", - "dependencies": { - "depd": "~1.1.2", - "inherits": "2.0.4", - "setprototypeof": "1.2.0", - "statuses": ">= 1.5.0 < 2", - "toidentifier": "1.0.1" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/http-parser-js": { - "version": "0.5.6", - "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.6.tgz", - "integrity": "sha512-vDlkRPDJn93swjcjqMSaGSPABbIarsr1TLAui/gLDXzV5VsJNdXNzMYDyNBLQkjWQCJ1uizu8T2oDMhmGt0PRA==", - "dev": true - }, - 
"node_modules/http-proxy": { - "version": "1.18.1", - "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", - "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", - "dev": true, - "dependencies": { - "eventemitter3": "^4.0.0", - "follow-redirects": "^1.0.0", - "requires-port": "^1.0.0" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/http-proxy-agent": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz", - "integrity": "sha512-k0zdNgqWTGA6aeIRVpvfVob4fL52dTfaehylg0Y4UvSySvOq/Y+BOyPrgpUrA7HylqvU8vIZGsRuXmspskV0Tg==", - "dev": true, - "dependencies": { - "@tootallnate/once": "1", - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/http-proxy-agent/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/http-proxy-agent/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/http-proxy-middleware": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.4.tgz", - "integrity": "sha512-m/4FxX17SUvz4lJ5WPXOHDUuCwIqXLfLHs1s0uZ3oYjhoXlx9csYxaOa0ElDEJ+h8Q4iJ1s+lTMbiCa4EXIJqg==", - "dev": true, - "dependencies": { - "@types/http-proxy": "^1.17.8", - "http-proxy": "^1.18.1", - "is-glob": "^4.0.1", - "is-plain-obj": "^3.0.0", - "micromatch": "^4.0.2" - }, - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "@types/express": "^4.17.13" - }, - "peerDependenciesMeta": { - "@types/express": { - "optional": true - } - } - }, - "node_modules/http-signature": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", - "dependencies": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" - }, - "engines": { - "node": ">=0.8", - "npm": ">=1.3.7" - } - }, - "node_modules/https-proxy-agent": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.0.tgz", - "integrity": "sha512-EkYm5BcKUGiduxzSt3Eppko+PiNWNEpa4ySk9vTC6wDsQJW9rHSa+UhGNJoRYp7bz6Ht1eaRIa6QaJqO5rCFbA==", - "dev": true, - "dependencies": { - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/https-proxy-agent/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/https-proxy-agent/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": 
"sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/human-signals": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", - "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", - "dev": true, - "engines": { - "node": ">=10.17.0" - } - }, - "node_modules/humanize-ms": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", - "integrity": "sha1-xG4xWaKT9riW2ikxbYtv6Lt5u+0=", - "dev": true, - "dependencies": { - "ms": "^2.0.0" - } - }, - "node_modules/iconv-corefoundation": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/iconv-corefoundation/-/iconv-corefoundation-1.1.7.tgz", - "integrity": "sha512-T10qvkw0zz4wnm560lOEg0PovVqUXuOFhhHAkixw8/sycy7TJt7v/RrkEKEQnAw2viPSJu6iAkErxnzR0g8PpQ==", - "dev": true, - "optional": true, - "os": [ - "darwin" - ], - "dependencies": { - "cli-truncate": "^2.1.0", - "node-addon-api": "^1.6.3" - }, - "engines": { - "node": "^8.11.2 || >=10" - } - }, - "node_modules/iconv-corefoundation/node_modules/node-addon-api": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-1.7.2.tgz", - "integrity": "sha512-ibPK3iA+vaY1eEjESkQkM0BbCqFOaZMiXRTtdB0u7b4djtY6JnsjvPdUHVMg6xQt3B8fpTTWHI9A+ADjM9frzg==", - "dev": true, - "optional": true - }, - "node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/icss-utils": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", - "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", - "dev": true, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/ignore": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.0.tgz", - "integrity": "sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ==", - "dev": true, - "engines": { - "node": ">= 4" - } - }, - "node_modules/ignore-by-default": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/ignore-by-default/-/ignore-by-default-1.0.1.tgz", - "integrity": "sha1-SMptcvbGo68Aqa1K5odr44ieKwk=", - "dev": true - }, - "node_modules/ignore-walk": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/ignore-walk/-/ignore-walk-4.0.1.tgz", - "integrity": "sha512-rzDQLaW4jQbh2YrOFlJdCtX8qgJTehFRYiUB2r1osqTeDzV/3+Jh8fz1oAPzUThf3iku8Ds4IDqawI5d8mUiQw==", - "dev": true, - "dependencies": { - "minimatch": "^3.0.4" - }, - "engines": { - "node": ">=10" - } - }, 
- "node_modules/image-size": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/image-size/-/image-size-0.5.5.tgz", - "integrity": "sha1-Cd/Uq50g4p6xw+gLiZA3jfnjy5w=", - "dev": true, - "optional": true, - "bin": { - "image-size": "bin/image-size.js" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/immutable": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/immutable/-/immutable-4.0.0.tgz", - "integrity": "sha512-zIE9hX70qew5qTUjSS7wi1iwj/l7+m54KWU247nhM3v806UdGj1yDndXj+IOYxxtW9zyLI+xqFNZjTuDaLUqFw==", - "dev": true - }, - "node_modules/import-fresh": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", - "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", - "dev": true, - "dependencies": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/import-fresh/node_modules/resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/import-lazy": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz", - "integrity": "sha1-BWmOPUXIjo1+nZLLBYTnfwlvPkM=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", - "dev": true, - "engines": { - "node": ">=0.8.19" - } - }, - "node_modules/indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/infer-owner": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz", - "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==", - "dev": true - }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "dev": true, - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "node_modules/ini": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", - "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/inquirer": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.2.0.tgz", - "integrity": "sha512-0crLweprevJ02tTuA6ThpoAERAGyVILC4sS74uib58Xf/zSr1/ZWtmm7D5CI+bSQEaA04f0K7idaHpQbSWgiVQ==", - "dev": true, - "dependencies": { - "ansi-escapes": "^4.2.1", - "chalk": "^4.1.1", - "cli-cursor": 
"^3.1.0", - "cli-width": "^3.0.0", - "external-editor": "^3.0.3", - "figures": "^3.0.0", - "lodash": "^4.17.21", - "mute-stream": "0.0.8", - "ora": "^5.4.1", - "run-async": "^2.4.0", - "rxjs": "^7.2.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0", - "through": "^2.3.6" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/inquirer/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/inquirer/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/inquirer/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/inquirer/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/inquirer/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/inquirer/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ip": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz", - "integrity": "sha1-vd7XARQpCCjAoDnnLvJfWq7ENUo=", - "dev": true - }, - "node_modules/ipaddr.js": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", - "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/is-arguments": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz", - "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - 
"node_modules/is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", - "dev": true - }, - "node_modules/is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "dev": true, - "dependencies": { - "binary-extensions": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-ci": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-3.0.1.tgz", - "integrity": "sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==", - "dev": true, - "dependencies": { - "ci-info": "^3.2.0" - }, - "bin": { - "is-ci": "bin.js" - } - }, - "node_modules/is-core-module": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.8.1.tgz", - "integrity": "sha512-SdNCUs284hr40hFTFP6l0IfZ/RSrMXF3qgoRHd3/79unUTvrFO/JoXwkGm+5J/Oe3E/b5GsnG330uUNgRpu1PA==", - "dev": true, - "dependencies": { - "has": "^1.0.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-date-object": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz", - "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==", - "dev": true, - "dependencies": { - "has-tostringtag": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-docker": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", - "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", - "dev": true, - "bin": { - "is-docker": "cli.js" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-glob": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - "dev": true, - "dependencies": { - "is-extglob": "^2.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-installed-globally": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", - "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", - "dev": true, - "dependencies": { - "global-dirs": "^3.0.0", - "is-path-inside": "^3.0.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - 
"node_modules/is-interactive": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", - "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-lambda": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-lambda/-/is-lambda-1.0.1.tgz", - "integrity": "sha1-PZh3iZ5qU+/AFgUEzeFfgubwYdU=", - "dev": true - }, - "node_modules/is-npm": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-5.0.0.tgz", - "integrity": "sha512-WW/rQLOazUq+ST/bCAVBp/2oMERWLsR7OrKyt052dNDk4DHcDE0/7QSXITlmi+VBcV13DfIbysG3tZJm5RfdBA==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true, - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/is-obj": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", - "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-path-cwd": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", - "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-plain-obj": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", - "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-plain-object": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", - "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", - "dev": true, - "dependencies": { - "isobject": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-regex": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", - "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-stream": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", - "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", - "dev": true, - "engines": { - "node": ">=8" - 
}, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" - }, - "node_modules/is-unicode-supported": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", - "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-what": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/is-what/-/is-what-3.14.1.tgz", - "integrity": "sha512-sNxgpk9793nzSs7bA6JQJGeIuRBQhAaNGG77kzYQgMkrID+lS6SlK07K5LaptscDlSaIgH+GPFzf+d75FVxozA==", - "dev": true - }, - "node_modules/is-wsl": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", - "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", - "dev": true, - "dependencies": { - "is-docker": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-yarn-global": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz", - "integrity": "sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw==", - "dev": true - }, - "node_modules/isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", - "dev": true - }, - "node_modules/isbinaryfile": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-4.0.8.tgz", - "integrity": "sha512-53h6XFniq77YdW+spoRrebh0mnmTxRPTlcuIArO57lmMdq4uBKFKaeTjnb92oYWrSn/LVL+LT+Hap2tFQj8V+w==", - "dev": true, - "engines": { - "node": ">= 8.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/gjtorikian/" - } - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", - "dev": true - }, - "node_modules/isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/isstream": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" - }, - "node_modules/istanbul-lib-coverage": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz", - "integrity": "sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/istanbul-lib-instrument": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.1.0.tgz", - "integrity": "sha512-czwUz525rkOFDJxfKK6mYfIs9zBKILyrZQxjz3ABhjQXhbhFsSbo1HW/BFcsDnfJYJWA6thRR5/TUY2qs5W99Q==", - "dev": true, - "dependencies": { - "@babel/core": "^7.12.3", - "@babel/parser": "^7.14.7", - "@istanbuljs/schema": "^0.1.2", - "istanbul-lib-coverage": "^3.2.0", - "semver": "^6.3.0" - }, - "engines": { - 
"node": ">=8" - } - }, - "node_modules/istanbul-lib-instrument/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/istanbul-lib-report": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", - "integrity": "sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw==", - "dev": true, - "dependencies": { - "istanbul-lib-coverage": "^3.0.0", - "make-dir": "^3.0.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/istanbul-lib-report/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/istanbul-lib-report/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/istanbul-lib-source-maps": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", - "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", - "dev": true, - "dependencies": { - "debug": "^4.1.1", - "istanbul-lib-coverage": "^3.0.0", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/istanbul-lib-source-maps/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/istanbul-lib-source-maps/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/istanbul-lib-source-maps/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/istanbul-reports": { - "version": "3.1.4", - "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.4.tgz", - "integrity": "sha512-r1/DshN4KSE7xWEknZLLLLDn5CJybV3nw01VTkp6D5jzLuELlcbudfj/eSQFvrKsJuTVCGnePO7ho82Nw9zzfw==", - "dev": true, - "dependencies": { - "html-escaper": "^2.0.0", - "istanbul-lib-report": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/jake": { - "version": "10.8.4", - "resolved": "https://registry.npmjs.org/jake/-/jake-10.8.4.tgz", 
- "integrity": "sha512-MtWeTkl1qGsWUtbl/Jsca/8xSoK3x0UmS82sNbjqxxG/de/M/3b1DntdjHgPMC50enlTNwXOCRqPXLLt5cCfZA==", - "dev": true, - "dependencies": { - "async": "0.9.x", - "chalk": "^4.0.2", - "filelist": "^1.0.1", - "minimatch": "^3.0.4" - }, - "bin": { - "jake": "bin/cli.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/jake/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/jake/node_modules/async": { - "version": "0.9.2", - "resolved": "https://registry.npmjs.org/async/-/async-0.9.2.tgz", - "integrity": "sha1-rqdNXmHB+JlhO/ZL2mbUx48v0X0=", - "dev": true - }, - "node_modules/jake/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/jake/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/jake/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/jake/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/jake/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/jasmine-core": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/jasmine-core/-/jasmine-core-4.0.1.tgz", - "integrity": "sha512-w+JDABxQCkxbGGxg+a2hUVZyqUS2JKngvIyLGu/xiw2ZwgsoSB0iiecLQsQORSeaKQ6iGrCyWG86RfNDuoA7Lg==", - "dev": true - }, - "node_modules/jest-worker": { - "version": "27.5.1", - "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", - "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", - "dev": true, - "dependencies": { - "@types/node": "*", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": ">= 10.13.0" - } - }, - "node_modules/jest-worker/node_modules/has-flag": { - "version": "4.0.0", - 
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/jest-worker/node_modules/supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, - "node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "dev": true - }, - "node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dev": true, - "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/jsbn": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=" - }, - "node_modules/jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", - "dev": true, - "bin": { - "jsesc": "bin/jsesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/json-buffer": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", - "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg=", - "dev": true - }, - "node_modules/json-parse-better-errors": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", - "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", - "dev": true - }, - "node_modules/json-parse-even-better-errors": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", - "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", - "dev": true - }, - "node_modules/json-schema": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", - "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" - }, - "node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - "node_modules/json-stringify-safe": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" - }, - "node_modules/json5": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.1.tgz", - "integrity": 
"sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA==", - "dev": true, - "bin": { - "json5": "lib/cli.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/jsonc-parser": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.0.0.tgz", - "integrity": "sha512-fQzRfAbIBnR0IQvftw9FJveWiHp72Fg20giDrHz6TdfB12UH/uue0D3hm57UB5KgAVuniLMCaS8P1IMj9NR7cA==", - "dev": true - }, - "node_modules/jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", - "dev": true, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/jsonparse": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz", - "integrity": "sha1-P02uSpH6wxX3EGL4UhzCOfE2YoA=", - "dev": true, - "engines": [ - "node >= 0.2.0" - ] - }, - "node_modules/jsprim": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.2.tgz", - "integrity": "sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw==", - "dependencies": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.4.0", - "verror": "1.10.0" - }, - "engines": { - "node": ">=0.6.0" - } - }, - "node_modules/junk": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/junk/-/junk-3.1.0.tgz", - "integrity": "sha512-pBxcB3LFc8QVgdggvZWyeys+hnrNWg4OcZIU/1X59k5jQdLBlCsYGRQaz234SqoRLTCgMH00fY0xRJH+F9METQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/karma": { - "version": "6.3.17", - "resolved": "https://registry.npmjs.org/karma/-/karma-6.3.17.tgz", - "integrity": "sha512-2TfjHwrRExC8yHoWlPBULyaLwAFmXmxQrcuFImt/JsAsSZu1uOWTZ1ZsWjqQtWpHLiatJOHL5jFjXSJIgCd01g==", - "dev": true, - "dependencies": { - "@colors/colors": "1.5.0", - "body-parser": "^1.19.0", - "braces": "^3.0.2", - "chokidar": "^3.5.1", - "connect": "^3.7.0", - "di": "^0.0.1", - "dom-serialize": "^2.2.1", - "glob": "^7.1.7", - "graceful-fs": "^4.2.6", - "http-proxy": "^1.18.1", - "isbinaryfile": "^4.0.8", - "lodash": "^4.17.21", - "log4js": "^6.4.1", - "mime": "^2.5.2", - "minimatch": "^3.0.4", - "mkdirp": "^0.5.5", - "qjobs": "^1.2.0", - "range-parser": "^1.2.1", - "rimraf": "^3.0.2", - "socket.io": "^4.2.0", - "source-map": "^0.6.1", - "tmp": "^0.2.1", - "ua-parser-js": "^0.7.30", - "yargs": "^16.1.1" - }, - "bin": { - "karma": "bin/karma" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/karma-chrome-launcher": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/karma-chrome-launcher/-/karma-chrome-launcher-3.1.1.tgz", - "integrity": "sha512-hsIglcq1vtboGPAN+DGCISCFOxW+ZVnIqhDQcCMqqCp+4dmJ0Qpq5QAjkbA0X2L9Mi6OBkHi2Srrbmm7pUKkzQ==", - "dev": true, - "dependencies": { - "which": "^1.2.1" - } - }, - "node_modules/karma-chrome-launcher/node_modules/which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "dev": true, - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "which": "bin/which" - } - }, - "node_modules/karma-coverage": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/karma-coverage/-/karma-coverage-2.1.1.tgz", - "integrity": "sha512-oxeOSBVK/jdZsiX03LhHQkO4eISSQb5GbHi6Nsw3Mw7G4u6yUgacBAftnO7q+emPBLMsrNbz1pGIrj+Jb3z17A==", - "dev": true, - 
"dependencies": { - "istanbul-lib-coverage": "^3.2.0", - "istanbul-lib-instrument": "^4.0.3", - "istanbul-lib-report": "^3.0.0", - "istanbul-lib-source-maps": "^4.0.1", - "istanbul-reports": "^3.0.5", - "minimatch": "^3.0.4" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/karma-coverage/node_modules/istanbul-lib-instrument": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-4.0.3.tgz", - "integrity": "sha512-BXgQl9kf4WTCPCCpmFGoJkz/+uhvm7h7PFKUYxh7qarQd3ER33vHG//qaE8eN25l07YqZPpHXU9I09l/RD5aGQ==", - "dev": true, - "dependencies": { - "@babel/core": "^7.7.5", - "@istanbuljs/schema": "^0.1.2", - "istanbul-lib-coverage": "^3.0.0", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/karma-coverage/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/karma-jasmine": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/karma-jasmine/-/karma-jasmine-4.0.1.tgz", - "integrity": "sha512-h8XDAhTiZjJKzfkoO1laMH+zfNlra+dEQHUAjpn5JV1zCPtOIVWGQjLBrqhnzQa/hrU2XrZwSyBa6XjEBzfXzw==", - "dev": true, - "dependencies": { - "jasmine-core": "^3.6.0" - }, - "engines": { - "node": ">= 10" - }, - "peerDependencies": { - "karma": "*" - } - }, - "node_modules/karma-jasmine-html-reporter": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/karma-jasmine-html-reporter/-/karma-jasmine-html-reporter-1.7.0.tgz", - "integrity": "sha512-pzum1TL7j90DTE86eFt48/s12hqwQuiD+e5aXx2Dc9wDEn2LfGq6RoAxEZZjFiN0RDSCOnosEKRZWxbQ+iMpQQ==", - "dev": true, - "peerDependencies": { - "jasmine-core": ">=3.8", - "karma": ">=0.9", - "karma-jasmine": ">=1.1" - } - }, - "node_modules/karma-jasmine/node_modules/jasmine-core": { - "version": "3.99.1", - "resolved": "https://registry.npmjs.org/jasmine-core/-/jasmine-core-3.99.1.tgz", - "integrity": "sha512-Hu1dmuoGcZ7AfyynN3LsfruwMbxMALMka+YtZeGoLuDEySVmVAPaonkNoBRIw/ectu8b9tVQCJNgp4a4knp+tg==", - "dev": true - }, - "node_modules/karma-source-map-support": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/karma-source-map-support/-/karma-source-map-support-1.4.0.tgz", - "integrity": "sha512-RsBECncGO17KAoJCYXjv+ckIz+Ii9NCi+9enk+rq6XC81ezYkb4/RHE6CTXdA7IOJqoF3wcaLfVG0CPmE5ca6A==", - "dev": true, - "dependencies": { - "source-map-support": "^0.5.5" - } - }, - "node_modules/karma/node_modules/mime": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-2.6.0.tgz", - "integrity": "sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg==", - "dev": true, - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/karma/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dev": true, - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/karma/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": 
"sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/karma/node_modules/tmp": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz", - "integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==", - "dev": true, - "dependencies": { - "rimraf": "^3.0.0" - }, - "engines": { - "node": ">=8.17.0" - } - }, - "node_modules/karma/node_modules/yargs": { - "version": "16.2.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", - "dev": true, - "dependencies": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/karma/node_modules/yargs-parser": { - "version": "20.2.9", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", - "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/keyv": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz", - "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==", - "dev": true, - "dependencies": { - "json-buffer": "3.0.0" - } - }, - "node_modules/kind-of": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/klona": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/klona/-/klona-2.0.5.tgz", - "integrity": "sha512-pJiBpiXMbt7dkzXe8Ghj/u4FfXOOa98fPW+bihOJ4SjnoijweJrNThJfd3ifXpXhREjpoF2mZVH1GfS9LV3kHQ==", - "dev": true, - "engines": { - "node": ">= 8" - } - }, - "node_modules/latest-version": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz", - "integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==", - "dev": true, - "dependencies": { - "package-json": "^6.3.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/lazy-val": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/lazy-val/-/lazy-val-1.0.5.tgz", - "integrity": "sha512-0/BnGCCfyUMkBpeDgWihanIAF9JmZhHBgUhEqzvf+adhNGLoP6TaiI5oF8oyb3I45P+PcnrqihSf01M0l0G5+Q==", - "dev": true - }, - "node_modules/less": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/less/-/less-4.1.2.tgz", - "integrity": "sha512-EoQp/Et7OSOVu0aJknJOtlXZsnr8XE8KwuzTHOLeVSEx8pVWUICc8Q0VYRHgzyjX78nMEyC/oztWFbgyhtNfDA==", - "dev": true, - "dependencies": { - "copy-anything": "^2.0.1", - "parse-node-version": "^1.0.1", - "tslib": "^2.3.0" - }, - "bin": { - "lessc": "bin/lessc" - }, - "engines": { - "node": ">=6" - }, - "optionalDependencies": { - "errno": "^0.1.1", - "graceful-fs": "^4.1.2", - "image-size": "~0.5.0", - "make-dir": "^2.1.0", - "mime": "^1.4.1", - "needle": "^2.5.2", - "source-map": "~0.6.0" - } - }, - "node_modules/less-loader": { - "version": "10.2.0", - "resolved": 
"https://registry.npmjs.org/less-loader/-/less-loader-10.2.0.tgz", - "integrity": "sha512-AV5KHWvCezW27GT90WATaDnfXBv99llDbtaj4bshq6DvAihMdNjaPDcUMa6EXKLRF+P2opFenJp89BXg91XLYg==", - "dev": true, - "dependencies": { - "klona": "^2.0.4" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "less": "^3.5.0 || ^4.0.0", - "webpack": "^5.0.0" - } - }, - "node_modules/less/node_modules/make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dev": true, - "optional": true, - "dependencies": { - "pify": "^4.0.1", - "semver": "^5.6.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/less/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "dev": true, - "optional": true, - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/less/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "optional": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/license-webpack-plugin": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/license-webpack-plugin/-/license-webpack-plugin-4.0.2.tgz", - "integrity": "sha512-771TFWFD70G1wLTC4oU2Cw4qvtmNrIw+wRvBtn+okgHl7slJVi7zfNcdmqDL72BojM30VNJ2UHylr1o77U37Jw==", - "dev": true, - "dependencies": { - "webpack-sources": "^3.0.0" - }, - "peerDependenciesMeta": { - "webpack": { - "optional": true - }, - "webpack-sources": { - "optional": true - } - } - }, - "node_modules/lines-and-columns": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", - "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", - "dev": true - }, - "node_modules/load-json-file": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-2.0.0.tgz", - "integrity": "sha1-eUfkIUmvgNaWy/eXvKq8/h/inKg=", - "dev": true, - "dependencies": { - "graceful-fs": "^4.1.2", - "parse-json": "^2.2.0", - "pify": "^2.0.0", - "strip-bom": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/load-json-file/node_modules/parse-json": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", - "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", - "dev": true, - "dependencies": { - "error-ex": "^1.2.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/load-json-file/node_modules/pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/loader-runner": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.2.0.tgz", - "integrity": "sha512-92+huvxMvYlMzMt0iIOukcwYBFpkYJdpl2xsZ7LrlayO7E8SOv+JJUEK17B/dJIHAOLMfh2dZZ/Y18WgmGtYNw==", - "dev": true, - "engines": { - "node": ">=6.11.5" - } - }, - 
"node_modules/loader-utils": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.0.tgz", - "integrity": "sha512-HVl9ZqccQihZ7JM85dco1MvO9G+ONvxoGa9rkhzFsneGLKSUg1gJf9bWzhRhcvm2qChhWpebQhP44qxjKIUCaQ==", - "dev": true, - "engines": { - "node": ">= 12.13.0" - } - }, - "node_modules/locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "dev": true, - "dependencies": { - "p-locate": "^4.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true - }, - "node_modules/lodash.debounce": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", - "integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168=", - "dev": true - }, - "node_modules/lodash.get": { - "version": "4.4.2", - "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz", - "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk=", - "dev": true - }, - "node_modules/log-symbols": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", - "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", - "dev": true, - "dependencies": { - "chalk": "^4.1.0", - "is-unicode-supported": "^0.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/log-symbols/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/log-symbols/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/log-symbols/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/log-symbols/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/log-symbols/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - 
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/log-symbols/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/log4js": { - "version": "6.4.4", - "resolved": "https://registry.npmjs.org/log4js/-/log4js-6.4.4.tgz", - "integrity": "sha512-ncaWPsuw9Vl1CKA406hVnJLGQKy1OHx6buk8J4rE2lVW+NW5Y82G5/DIloO7NkqLOUtNPEANaWC1kZYVjXssPw==", - "dev": true, - "dependencies": { - "date-format": "^4.0.6", - "debug": "^4.3.4", - "flatted": "^3.2.5", - "rfdc": "^1.3.0", - "streamroller": "^3.0.6" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/log4js/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/log4js/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/lowercase-keys": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", - "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/magic-string": { - "version": "0.25.7", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.25.7.tgz", - "integrity": "sha512-4CrMT5DOHTDk4HYDlzmwu4FVCcIYI8gauveasrdCu2IKIFOJ3f0v/8MDGJCDL9oD2ppz/Av1b0Nj345H9M+XIA==", - "dev": true, - "dependencies": { - "sourcemap-codec": "^1.4.4" - } - }, - "node_modules/make-dir": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", - "dev": true, - "dependencies": { - "semver": "^6.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/make-dir/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/make-fetch-happen": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-9.1.0.tgz", - 
"integrity": "sha512-+zopwDy7DNknmwPQplem5lAZX/eCOzSvSNNcSKm5eVwTkOBzoktEfXsa9L23J/GIRhxRsaxzkPEhrJEpE2F4Gg==", - "dev": true, - "dependencies": { - "agentkeepalive": "^4.1.3", - "cacache": "^15.2.0", - "http-cache-semantics": "^4.1.0", - "http-proxy-agent": "^4.0.1", - "https-proxy-agent": "^5.0.0", - "is-lambda": "^1.0.1", - "lru-cache": "^6.0.0", - "minipass": "^3.1.3", - "minipass-collect": "^1.0.2", - "minipass-fetch": "^1.3.2", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.2", - "promise-retry": "^2.0.1", - "socks-proxy-agent": "^6.0.0", - "ssri": "^8.0.0" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/matcher": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/matcher/-/matcher-3.0.0.tgz", - "integrity": "sha512-OkeDaAZ/bQCxeFAozM55PKcKU0yJMPGifLwV4Qgjitu+5MoAfSQN4lsLJeXZ1b8w0x+/Emda6MZgXS1jvsapng==", - "dev": true, - "optional": true, - "dependencies": { - "escape-string-regexp": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/matcher/node_modules/escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", - "dev": true, - "optional": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/memfs": { - "version": "3.4.1", - "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.4.1.tgz", - "integrity": "sha512-1c9VPVvW5P7I85c35zAdEr1TD5+F11IToIHIlrVIcflfnzPkJa0ZoYEoEdYDP8KgPFoSZ/opDrUsAoZWym3mtw==", - "dev": true, - "dependencies": { - "fs-monkey": "1.0.3" - }, - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=" - }, - "node_modules/merge-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", - "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", - "dev": true - }, - "node_modules/merge2": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "dev": true, - "engines": { - "node": ">= 8" - } - }, - "node_modules/methods": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/micromatch": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.4.tgz", - "integrity": "sha512-pRmzw/XUcwXGpD9aI9q/0XOwLNygjETJ8y0ao0wdqprrzDa4YnxLcz7fQRZr8voh8V10kGhABbNcHVk5wHgWwg==", - "dev": true, - "dependencies": { - "braces": "^3.0.1", - "picomatch": "^2.2.3" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/mime": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": 
"sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/mimic-response": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", - "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/mini-css-extract-plugin": { - "version": "2.5.3", - "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.5.3.tgz", - "integrity": "sha512-YseMB8cs8U/KCaAGQoqYmfUuhhGW0a9p9XvWXrxVOkE3/IiISTLw4ALNt7JR5B2eYauFM+PQGSbXMDmVbR7Tfw==", - "dev": true, - "dependencies": { - "schema-utils": "^4.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - } - }, - "node_modules/mini-css-extract-plugin/node_modules/ajv": { - "version": "8.10.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.10.0.tgz", - "integrity": "sha512-bzqAEZOjkrUMl2afH8dknrq5KEk2SrwdBROR+vH1EKVQTqaUbJVPdc/gEdggTMM0Se+s+Ja4ju4TlNcStKl2Hw==", - "dev": true, - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/mini-css-extract-plugin/node_modules/ajv-keywords": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", - "dev": true, - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/mini-css-extract-plugin/node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", - "dev": true - }, - "node_modules/mini-css-extract-plugin/node_modules/schema-utils": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", - "dev": true, - "dependencies": { - "@types/json-schema": 
"^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/minimalistic-assert": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", - "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==", - "dev": true - }, - "node_modules/minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/minimist": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", - "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==", - "dev": true - }, - "node_modules/minipass": { - "version": "3.1.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.6.tgz", - "integrity": "sha512-rty5kpw9/z8SX9dmxblFA6edItUmwJgMeYDZRrwlIVN27i8gysGbznJwUggw2V/FVqFSDdWy040ZPS811DYAqQ==", - "dev": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/minipass-collect": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-1.0.2.tgz", - "integrity": "sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==", - "dev": true, - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/minipass-fetch": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-1.4.1.tgz", - "integrity": "sha512-CGH1eblLq26Y15+Azk7ey4xh0J/XfJfrCox5LDJiKqI2Q2iwOLOKrlmIaODiSQS8d18jalF6y2K2ePUm0CmShw==", - "dev": true, - "dependencies": { - "minipass": "^3.1.0", - "minipass-sized": "^1.0.3", - "minizlib": "^2.0.0" - }, - "engines": { - "node": ">=8" - }, - "optionalDependencies": { - "encoding": "^0.1.12" - } - }, - "node_modules/minipass-flush": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/minipass-flush/-/minipass-flush-1.0.5.tgz", - "integrity": "sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw==", - "dev": true, - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/minipass-json-stream": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minipass-json-stream/-/minipass-json-stream-1.0.1.tgz", - "integrity": "sha512-ODqY18UZt/I8k+b7rl2AENgbWE8IDYam+undIJONvigAz8KR5GWblsFTEfQs0WODsjbSXWlm+JHEv8Gr6Tfdbg==", - "dev": true, - "dependencies": { - "jsonparse": "^1.3.1", - "minipass": "^3.0.0" - } - }, - "node_modules/minipass-pipeline": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz", - "integrity": "sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A==", - "dev": true, - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/minipass-sized": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/minipass-sized/-/minipass-sized-1.0.3.tgz", - 
"integrity": "sha512-MbkQQ2CTiBMlA2Dm/5cY+9SWFEN8pzzOXi6rlM5Xxq0Yqbda5ZQy9sU75a673FE9ZK0Zsbr6Y5iP6u9nktfg2g==", - "dev": true, - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/minizlib": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", - "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", - "dev": true, - "dependencies": { - "minipass": "^3.0.0", - "yallist": "^4.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", - "dev": true, - "bin": { - "mkdirp": "bin/cmd.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, - "node_modules/multicast-dns": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-6.2.3.tgz", - "integrity": "sha512-ji6J5enbMyGRHIAkAOu3WdV8nggqviKCEKtXcOqfphZZtQrmHKycfynJ2V7eVPUA4NhJ6V7Wf4TmGbTwKE9B6g==", - "dev": true, - "dependencies": { - "dns-packet": "^1.3.1", - "thunky": "^1.0.2" - }, - "bin": { - "multicast-dns": "cli.js" - } - }, - "node_modules/multicast-dns-service-types": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/multicast-dns-service-types/-/multicast-dns-service-types-1.1.0.tgz", - "integrity": "sha1-iZ8R2WhuXgXLkbNdXw5jt3PPyQE=", - "dev": true - }, - "node_modules/mute-stream": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz", - "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==", - "dev": true - }, - "node_modules/nanoid": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.1.tgz", - "integrity": "sha512-n6Vs/3KGyxPQd6uO0eH4Bv0ojGSUvuLlIHtC3Y0kEO23YRge8H9x1GCzLn28YX0H66pMkxuaeESFq4tKISKwdw==", - "dev": true, - "bin": { - "nanoid": "bin/nanoid.cjs" - }, - "engines": { - "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" - } - }, - "node_modules/needle": { - "version": "2.9.1", - "resolved": "https://registry.npmjs.org/needle/-/needle-2.9.1.tgz", - "integrity": "sha512-6R9fqJ5Zcmf+uYaFgdIHmLwNldn5HbK8L5ybn7Uz+ylX/rnOsSp1AHcvQSrCaFN+qNM1wpymHqD7mVasEOlHGQ==", - "dev": true, - "optional": true, - "dependencies": { - "debug": "^3.2.6", - "iconv-lite": "^0.4.4", - "sax": "^1.2.4" - }, - "bin": { - "needle": "bin/needle" - }, - "engines": { - "node": ">= 4.4.x" - } - }, - "node_modules/needle/node_modules/debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dev": true, - "optional": true, - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/needle/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true, - "optional": true - }, - "node_modules/negotiator": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", - "integrity": 
"sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/neo-async": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", - "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", - "dev": true - }, - "node_modules/nice-napi": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/nice-napi/-/nice-napi-1.0.2.tgz", - "integrity": "sha512-px/KnJAJZf5RuBGcfD+Sp2pAKq0ytz8j+1NehvgIGFkvtvFrDM3T8E4x/JJODXK9WZow8RRGrbA9QQ3hs+pDhA==", - "dev": true, - "hasInstallScript": true, - "optional": true, - "os": [ - "!win32" - ], - "dependencies": { - "node-addon-api": "^3.0.0", - "node-gyp-build": "^4.2.2" - } - }, - "node_modules/node-addon-api": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-3.2.1.tgz", - "integrity": "sha512-mmcei9JghVNDYydghQmeDX8KoAm0FAiYyIcUt/N4nhyAipB17pllZQDOJD2fotxABnt4Mdz+dKTO7eftLg4d0A==", - "dev": true, - "optional": true - }, - "node_modules/node-forge": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.0.tgz", - "integrity": "sha512-08ARB91bUi6zNKzVmaj3QO7cr397uiDT2nJ63cHjyNtCTWIgvS47j3eT0WfzUwS9+6Z5YshRaoasFkXCKrIYbA==", - "dev": true, - "engines": { - "node": ">= 6.13.0" - } - }, - "node_modules/node-gyp": { - "version": "8.4.1", - "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-8.4.1.tgz", - "integrity": "sha512-olTJRgUtAb/hOXG0E93wZDs5YiJlgbXxTwQAFHyNlRsXQnYzUaF2aGgujZbw+hR8aF4ZG/rST57bWMWD16jr9w==", - "dev": true, - "dependencies": { - "env-paths": "^2.2.0", - "glob": "^7.1.4", - "graceful-fs": "^4.2.6", - "make-fetch-happen": "^9.1.0", - "nopt": "^5.0.0", - "npmlog": "^6.0.0", - "rimraf": "^3.0.2", - "semver": "^7.3.5", - "tar": "^6.1.2", - "which": "^2.0.2" - }, - "bin": { - "node-gyp": "bin/node-gyp.js" - }, - "engines": { - "node": ">= 10.12.0" - } - }, - "node_modules/node-gyp-build": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.3.0.tgz", - "integrity": "sha512-iWjXZvmboq0ja1pUGULQBexmxq8CV4xBhX7VDOTbL7ZR4FOowwY/VOtRxBN/yKxmdGoIp4j5ysNT4u3S2pDQ3Q==", - "dev": true, - "optional": true, - "bin": { - "node-gyp-build": "bin.js", - "node-gyp-build-optional": "optional.js", - "node-gyp-build-test": "build-test.js" - } - }, - "node_modules/node-releases": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.2.tgz", - "integrity": "sha512-XxYDdcQ6eKqp/YjI+tb2C5WM2LgjnZrfYg4vgQt49EK268b6gYCHsBLrK2qvJo4FmCtqmKezb0WZFK4fkrZNsg==", - "dev": true - }, - "node_modules/nodemon": { - "version": "2.0.15", - "resolved": "https://registry.npmjs.org/nodemon/-/nodemon-2.0.15.tgz", - "integrity": "sha512-gdHMNx47Gw7b3kWxJV64NI+Q5nfl0y5DgDbiVtShiwa7Z0IZ07Ll4RLFo6AjrhzMtoEZn5PDE3/c2AbVsiCkpA==", - "dev": true, - "hasInstallScript": true, - "dependencies": { - "chokidar": "^3.5.2", - "debug": "^3.2.7", - "ignore-by-default": "^1.0.1", - "minimatch": "^3.0.4", - "pstree.remy": "^1.1.8", - "semver": "^5.7.1", - "supports-color": "^5.5.0", - "touch": "^3.1.0", - "undefsafe": "^2.0.5", - "update-notifier": "^5.1.0" - }, - "bin": { - "nodemon": "bin/nodemon.js" - }, - "engines": { - "node": ">=8.10.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/nodemon" - } - }, - "node_modules/nodemon/node_modules/debug": { - "version": "3.2.7", - 
"resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dev": true, - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/nodemon/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true - }, - "node_modules/nodemon/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "dev": true, - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/nopt": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz", - "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", - "dev": true, - "dependencies": { - "abbrev": "1" - }, - "bin": { - "nopt": "bin/nopt.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "dev": true, - "dependencies": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - } - }, - "node_modules/normalize-package-data/node_modules/hosted-git-info": { - "version": "2.8.9", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", - "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", - "dev": true - }, - "node_modules/normalize-package-data/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "dev": true, - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-range": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", - "integrity": "sha1-LRDAa9/TEuqXd2laTShDlFa3WUI=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-url": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.1.tgz", - "integrity": "sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm-bundled": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/npm-bundled/-/npm-bundled-1.1.2.tgz", - "integrity": "sha512-x5DHup0SuyQcmL3s7Rx/YQ8sbw/Hzg0rj48eN0dV7hf5cmQq5PXIeioroH3raV1QC1yh3uTYuMThvEQF3iKgGQ==", - "dev": true, - "dependencies": { - "npm-normalize-package-bin": "^1.0.1" - } - }, - "node_modules/npm-conf": { - "version": "1.1.3", - 
"resolved": "https://registry.npmjs.org/npm-conf/-/npm-conf-1.1.3.tgz", - "integrity": "sha512-Yic4bZHJOt9RCFbRP3GgpqhScOY4HH3V2P8yBj6CeYq118Qr+BLXqT2JvpJ00mryLESpgOxf5XlFv4ZjXxLScw==", - "dev": true, - "optional": true, - "dependencies": { - "config-chain": "^1.1.11", - "pify": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/npm-conf/node_modules/pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", - "dev": true, - "optional": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/npm-install-checks": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/npm-install-checks/-/npm-install-checks-4.0.0.tgz", - "integrity": "sha512-09OmyDkNLYwqKPOnbI8exiOZU2GVVmQp7tgez2BPi5OZC8M82elDAps7sxC4l//uSUtotWqoEIDwjRvWH4qz8w==", - "dev": true, - "dependencies": { - "semver": "^7.1.1" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/npm-normalize-package-bin": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/npm-normalize-package-bin/-/npm-normalize-package-bin-1.0.1.tgz", - "integrity": "sha512-EPfafl6JL5/rU+ot6P3gRSCpPDW5VmIzX959Ob1+ySFUuuYHWHekXpwdUZcKP5C+DS4GEtdJluwBjnsNDl+fSA==", - "dev": true - }, - "node_modules/npm-package-arg": { - "version": "8.1.5", - "resolved": "https://registry.npmjs.org/npm-package-arg/-/npm-package-arg-8.1.5.tgz", - "integrity": "sha512-LhgZrg0n0VgvzVdSm1oiZworPbTxYHUJCgtsJW8mGvlDpxTM1vSJc3m5QZeUkhAHIzbz3VCHd/R4osi1L1Tg/Q==", - "dev": true, - "dependencies": { - "hosted-git-info": "^4.0.1", - "semver": "^7.3.4", - "validate-npm-package-name": "^3.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/npm-packlist": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/npm-packlist/-/npm-packlist-3.0.0.tgz", - "integrity": "sha512-L/cbzmutAwII5glUcf2DBRNY/d0TFd4e/FnaZigJV6JD85RHZXJFGwCndjMWiiViiWSsWt3tiOLpI3ByTnIdFQ==", - "dev": true, - "dependencies": { - "glob": "^7.1.6", - "ignore-walk": "^4.0.1", - "npm-bundled": "^1.1.1", - "npm-normalize-package-bin": "^1.0.1" - }, - "bin": { - "npm-packlist": "bin/index.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/npm-pick-manifest": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/npm-pick-manifest/-/npm-pick-manifest-6.1.1.tgz", - "integrity": "sha512-dBsdBtORT84S8V8UTad1WlUyKIY9iMsAmqxHbLdeEeBNMLQDlDWWra3wYUx9EBEIiG/YwAy0XyNHDd2goAsfuA==", - "dev": true, - "dependencies": { - "npm-install-checks": "^4.0.0", - "npm-normalize-package-bin": "^1.0.1", - "npm-package-arg": "^8.1.2", - "semver": "^7.3.4" - } - }, - "node_modules/npm-registry-fetch": { - "version": "12.0.2", - "resolved": "https://registry.npmjs.org/npm-registry-fetch/-/npm-registry-fetch-12.0.2.tgz", - "integrity": "sha512-Df5QT3RaJnXYuOwtXBXS9BWs+tHH2olvkCLh6jcR/b/u3DvPMlp3J0TvvYwplPKxHMOwfg287PYih9QqaVFoKA==", - "dev": true, - "dependencies": { - "make-fetch-happen": "^10.0.1", - "minipass": "^3.1.6", - "minipass-fetch": "^1.4.1", - "minipass-json-stream": "^1.0.1", - "minizlib": "^2.1.2", - "npm-package-arg": "^8.1.5" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16" - } - }, - "node_modules/npm-registry-fetch/node_modules/@tootallnate/once": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", - "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", - "dev": true, - "engines": { - "node": ">= 10" - } - }, - 
"node_modules/npm-registry-fetch/node_modules/cacache": { - "version": "16.0.2", - "resolved": "https://registry.npmjs.org/cacache/-/cacache-16.0.2.tgz", - "integrity": "sha512-Q17j7s8X81i/QYVrKVQ/qwWGT+pYLfpTcZ+X+p/Qw9FULy9JEfb2FECYTTt6mPV6A/vk92nRZ80ncpKxiGTrIA==", - "dev": true, - "dependencies": { - "@npmcli/fs": "^1.0.0", - "@npmcli/move-file": "^1.1.2", - "chownr": "^2.0.0", - "fs-minipass": "^2.1.0", - "glob": "^7.2.0", - "infer-owner": "^1.0.4", - "lru-cache": "^7.5.1", - "minipass": "^3.1.6", - "minipass-collect": "^1.0.2", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "mkdirp": "^1.0.4", - "p-map": "^4.0.0", - "promise-inflight": "^1.0.1", - "rimraf": "^3.0.2", - "ssri": "^8.0.1", - "tar": "^6.1.11", - "unique-filename": "^1.1.1" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16" - } - }, - "node_modules/npm-registry-fetch/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/npm-registry-fetch/node_modules/http-proxy-agent": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", - "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", - "dev": true, - "dependencies": { - "@tootallnate/once": "2", - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/npm-registry-fetch/node_modules/lru-cache": { - "version": "7.7.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.7.1.tgz", - "integrity": "sha512-cRffBiTW8s73eH4aTXqBcTLU0xQnwGV3/imttRHGWCrbergmnK4D6JXQd8qin5z43HnDwRI+o7mVW0LEB+tpAw==", - "deprecated": "Please update to latest patch version to fix memory leak https://github.com/isaacs/node-lru-cache/issues/227", - "dev": true, - "engines": { - "node": ">=12" - } - }, - "node_modules/npm-registry-fetch/node_modules/make-fetch-happen": { - "version": "10.0.6", - "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-10.0.6.tgz", - "integrity": "sha512-4Gfh6lV3TLXmj7qz79hBFuvVqjYSMW6v2+sxtdX4LFQU0rK3V/txRjE0DoZb7X0IF3t9f8NO3CxPSWlvdckhVA==", - "dev": true, - "dependencies": { - "agentkeepalive": "^4.2.1", - "cacache": "^16.0.0", - "http-cache-semantics": "^4.1.0", - "http-proxy-agent": "^5.0.0", - "https-proxy-agent": "^5.0.0", - "is-lambda": "^1.0.1", - "lru-cache": "^7.5.1", - "minipass": "^3.1.6", - "minipass-collect": "^1.0.2", - "minipass-fetch": "^2.0.3", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.3", - "promise-retry": "^2.0.1", - "socks-proxy-agent": "^6.1.1", - "ssri": "^8.0.1" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16" - } - }, - "node_modules/npm-registry-fetch/node_modules/make-fetch-happen/node_modules/minipass-fetch": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-2.0.3.tgz", - "integrity": "sha512-VA+eiiUtaIvpQJXISwE3OiMvQwAWrgKb97F0aXlCS1Ahikr8fEQq8m3Hf7Kv9KT3nokuHigJKsDMB6atU04olQ==", - "dev": true, - "dependencies": { - "minipass": "^3.1.6", - "minipass-sized": "^1.0.3", - "minizlib": "^2.1.2" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16" - }, - 
"optionalDependencies": { - "encoding": "^0.1.13" - } - }, - "node_modules/npm-registry-fetch/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/npm-run-path": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", - "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", - "dev": true, - "dependencies": { - "path-key": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/npmlog": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-6.0.1.tgz", - "integrity": "sha512-BTHDvY6nrRHuRfyjt1MAufLxYdVXZfd099H4+i1f0lPywNQyI4foeNXJRObB/uy+TYqUW0vAD9gbdSOXPst7Eg==", - "dev": true, - "dependencies": { - "are-we-there-yet": "^3.0.0", - "console-control-strings": "^1.1.0", - "gauge": "^4.0.0", - "set-blocking": "^2.0.0" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16" - } - }, - "node_modules/nth-check": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.0.1.tgz", - "integrity": "sha512-it1vE95zF6dTT9lBsYbxvqh0Soy4SPowchj0UBGj/V6cTPnXXtQOPUbhZ6CmGzAD/rW22LQK6E96pcdJXk4A4w==", - "dev": true, - "dependencies": { - "boolbase": "^1.0.0" - }, - "funding": { - "url": "https://github.com/fb55/nth-check?sponsor=1" - } - }, - "node_modules/oauth-sign": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", - "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==", - "engines": { - "node": "*" - } - }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-is": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.5.tgz", - "integrity": "sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "dev": true, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object.assign": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", - "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.0", - "define-properties": "^1.1.3", - "has-symbols": "^1.0.1", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/obuf": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", - "integrity": 
"sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==", - "dev": true - }, - "node_modules/on-finished": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", - "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=", - "dependencies": { - "ee-first": "1.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/on-headers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", - "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", - "dev": true, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "dev": true, - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "dev": true, - "dependencies": { - "mimic-fn": "^2.1.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/open": { - "version": "8.4.0", - "resolved": "https://registry.npmjs.org/open/-/open-8.4.0.tgz", - "integrity": "sha512-XgFPPM+B28FtCCgSb9I+s9szOC1vZRSwgWsRUA5ylIxRTgKozqjOCrVOqGsYABPYK5qnfqClxZTFBa8PKt2v6Q==", - "dev": true, - "dependencies": { - "define-lazy-prop": "^2.0.0", - "is-docker": "^2.1.1", - "is-wsl": "^2.2.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ora": { - "version": "5.4.1", - "resolved": "https://registry.npmjs.org/ora/-/ora-5.4.1.tgz", - "integrity": "sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==", - "dev": true, - "dependencies": { - "bl": "^4.1.0", - "chalk": "^4.1.0", - "cli-cursor": "^3.1.0", - "cli-spinners": "^2.5.0", - "is-interactive": "^1.0.0", - "is-unicode-supported": "^0.1.0", - "log-symbols": "^4.1.0", - "strip-ansi": "^6.0.0", - "wcwidth": "^1.0.1" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ora/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/ora/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/ora/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": 
"sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/ora/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/ora/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/ora/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/os-tmpdir": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", - "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/p-cancelable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz", - "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dev": true, - "dependencies": { - "p-try": "^2.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "dev": true, - "dependencies": { - "p-limit": "^2.2.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/p-map": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", - "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", - "dev": true, - "dependencies": { - "aggregate-error": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-retry": { - "version": "4.6.1", - "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.1.tgz", - "integrity": "sha512-e2xXGNhZOZ0lfgR9kL34iGlU8N/KO0xZnQxVEwdeOvpqNDQfdnxIYizvWtK8RglUa3bGqI8g0R/BdfzLMxRkiA==", - "dev": true, - "dependencies": { - "@types/retry": "^0.12.0", - "retry": "^0.13.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - 
"node_modules/package-json": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz", - "integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==", - "dev": true, - "dependencies": { - "got": "^9.6.0", - "registry-auth-token": "^4.0.0", - "registry-url": "^5.0.0", - "semver": "^6.2.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/package-json/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/pacote": { - "version": "12.0.3", - "resolved": "https://registry.npmjs.org/pacote/-/pacote-12.0.3.tgz", - "integrity": "sha512-CdYEl03JDrRO3x18uHjBYA9TyoW8gy+ThVcypcDkxPtKlw76e4ejhYB6i9lJ+/cebbjpqPW/CijjqxwDTts8Ow==", - "dev": true, - "dependencies": { - "@npmcli/git": "^2.1.0", - "@npmcli/installed-package-contents": "^1.0.6", - "@npmcli/promise-spawn": "^1.2.0", - "@npmcli/run-script": "^2.0.0", - "cacache": "^15.0.5", - "chownr": "^2.0.0", - "fs-minipass": "^2.1.0", - "infer-owner": "^1.0.4", - "minipass": "^3.1.3", - "mkdirp": "^1.0.3", - "npm-package-arg": "^8.0.1", - "npm-packlist": "^3.0.0", - "npm-pick-manifest": "^6.0.0", - "npm-registry-fetch": "^12.0.0", - "promise-retry": "^2.0.1", - "read-package-json-fast": "^2.0.1", - "rimraf": "^3.0.2", - "ssri": "^8.0.1", - "tar": "^6.1.0" - }, - "bin": { - "pacote": "lib/bin.js" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16" - } - }, - "node_modules/pako": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", - "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==", - "dev": true - }, - "node_modules/parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", - "dev": true, - "dependencies": { - "callsites": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/parse-author": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/parse-author/-/parse-author-2.0.0.tgz", - "integrity": "sha1-00YL8d3Q367tQtp1QkLmX7aEqB8=", - "dev": true, - "dependencies": { - "author-regex": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/parse-json": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", - "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", - "dev": true, - "dependencies": { - "@babel/code-frame": "^7.0.0", - "error-ex": "^1.3.1", - "json-parse-even-better-errors": "^2.3.0", - "lines-and-columns": "^1.1.6" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/parse-node-version": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parse-node-version/-/parse-node-version-1.0.1.tgz", - "integrity": "sha512-3YHlOa/JgH6Mnpr05jP9eDG254US9ek25LyIxZlDItp2iJtwyaXQb57lBYLdT3MowkUFYEV2XXNAYIPlESvJlA==", - "dev": true, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/parse5": { - "version": "5.1.1", - "resolved": 
"https://registry.npmjs.org/parse5/-/parse5-5.1.1.tgz", - "integrity": "sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug==", - "optional": true - }, - "node_modules/parse5-html-rewriting-stream": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5-html-rewriting-stream/-/parse5-html-rewriting-stream-6.0.1.tgz", - "integrity": "sha512-vwLQzynJVEfUlURxgnf51yAJDQTtVpNyGD8tKi2Za7m+akukNHxCcUQMAa/mUGLhCeicFdpy7Tlvj8ZNKadprg==", - "dev": true, - "dependencies": { - "parse5": "^6.0.1", - "parse5-sax-parser": "^6.0.1" - } - }, - "node_modules/parse5-html-rewriting-stream/node_modules/parse5": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", - "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==", - "dev": true - }, - "node_modules/parse5-htmlparser2-tree-adapter": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz", - "integrity": "sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA==", - "dev": true, - "dependencies": { - "parse5": "^6.0.1" - } - }, - "node_modules/parse5-htmlparser2-tree-adapter/node_modules/parse5": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", - "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==", - "dev": true - }, - "node_modules/parse5-sax-parser": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5-sax-parser/-/parse5-sax-parser-6.0.1.tgz", - "integrity": "sha512-kXX+5S81lgESA0LsDuGjAlBybImAChYRMT+/uKCEXFBFOeEhS52qUCydGhU3qLRD8D9DVjaUo821WK7DM4iCeg==", - "dev": true, - "dependencies": { - "parse5": "^6.0.1" - } - }, - "node_modules/parse5-sax-parser/node_modules/parse5": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", - "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==", - "dev": true - }, - "node_modules/parseurl": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/path": { - "version": "0.12.7", - "resolved": "https://registry.npmjs.org/path/-/path-0.12.7.tgz", - "integrity": "sha1-1NwqUGxM4hl+tIHr/NWzbAFAsQ8=", - "dependencies": { - "process": "^0.11.1", - "util": "^0.10.3" - } - }, - "node_modules/path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - 
"node_modules/path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", - "dev": true - }, - "node_modules/path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=" - }, - "node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/pend": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", - "integrity": "sha1-elfrVQpng/kRUzH89GY9XI4AelA=", - "dev": true - }, - "node_modules/performance-now": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" - }, - "node_modules/picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", - "dev": true - }, - "node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "dev": true, - "engines": { - "node": ">=8.6" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true, - "optional": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/piscina": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/piscina/-/piscina-3.2.0.tgz", - "integrity": "sha512-yn/jMdHRw+q2ZJhFhyqsmANcbF6V2QwmD84c6xRau+QpQOmtrBCoRGdvTfeuFDYXB5W2m6MfLkjkvQa9lUSmIA==", - "dev": true, - "dependencies": { - "eventemitter-asyncresource": "^1.0.0", - "hdr-histogram-js": "^2.0.1", - "hdr-histogram-percentiles-obj": "^3.0.0" - }, - "optionalDependencies": { - "nice-napi": "^1.0.2" - } - }, - "node_modules/pkg-dir": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", - "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", - "dev": true, - "dependencies": { - "find-up": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/plist": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/plist/-/plist-3.0.4.tgz", - "integrity": "sha512-ksrr8y9+nXOxQB2osVNqrgvX/XQPOXaU4BQMKjYq8PvaY1U18mo+fKgBSwzK+luSyinOuPae956lSVcBwxlAMg==", - "dev": true, - "dependencies": { - "base64-js": "^1.5.1", - "xmlbuilder": "^9.0.7" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/portfinder": { - "version": "1.0.28", - "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.28.tgz", - "integrity": "sha512-Se+2isanIcEqf2XMHjyUKskczxbPH7dQnlMjXX6+dybayyHvAf/TCgyMRlzf/B6QDhAEFOGes0pzRo3by4AbMA==", - "dev": true, - "dependencies": { - "async": "^2.6.2", - "debug": 
"^3.1.1", - "mkdirp": "^0.5.5" - }, - "engines": { - "node": ">= 0.12.0" - } - }, - "node_modules/portfinder/node_modules/debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dev": true, - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/portfinder/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dev": true, - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/portfinder/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true - }, - "node_modules/postcss": { - "version": "8.4.5", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.5.tgz", - "integrity": "sha512-jBDboWM8qpaqwkMwItqTQTiFikhs/67OYVvblFFTM7MrZjt6yMKd6r2kgXizEbTTljacm4NldIlZnhbjr84QYg==", - "dev": true, - "dependencies": { - "nanoid": "^3.1.30", - "picocolors": "^1.0.0", - "source-map-js": "^1.0.1" - }, - "engines": { - "node": "^10 || ^12 || >=14" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - } - }, - "node_modules/postcss-attribute-case-insensitive": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/postcss-attribute-case-insensitive/-/postcss-attribute-case-insensitive-5.0.0.tgz", - "integrity": "sha512-b4g9eagFGq9T5SWX4+USfVyjIb3liPnjhHHRMP7FMB2kFVpYyfEscV0wP3eaXhKlcHKUut8lt5BGoeylWA/dBQ==", - "dev": true, - "dependencies": { - "postcss-selector-parser": "^6.0.2" - }, - "peerDependencies": { - "postcss": "^8.0.2" - } - }, - "node_modules/postcss-color-functional-notation": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/postcss-color-functional-notation/-/postcss-color-functional-notation-4.2.2.tgz", - "integrity": "sha512-DXVtwUhIk4f49KK5EGuEdgx4Gnyj6+t2jBSEmxvpIK9QI40tWrpS2Pua8Q7iIZWBrki2QOaeUdEaLPPa91K0RQ==", - "dev": true, - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-color-hex-alpha": { - "version": "8.0.3", - "resolved": "https://registry.npmjs.org/postcss-color-hex-alpha/-/postcss-color-hex-alpha-8.0.3.tgz", - "integrity": "sha512-fESawWJCrBV035DcbKRPAVmy21LpoyiXdPTuHUfWJ14ZRjY7Y7PA6P4g8z6LQGYhU1WAxkTxjIjurXzoe68Glw==", - "dev": true, - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-color-rebeccapurple": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/postcss-color-rebeccapurple/-/postcss-color-rebeccapurple-7.0.2.tgz", - "integrity": "sha512-SFc3MaocHaQ6k3oZaFwH8io6MdypkUtEy/eXzXEB1vEQlO3S3oDc/FSZA8AsS04Z25RirQhlDlHLh3dn7XewWw==", - "dev": true, - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.3" - } - }, - "node_modules/postcss-custom-media": { - "version": "8.0.0", - "resolved": 
"https://registry.npmjs.org/postcss-custom-media/-/postcss-custom-media-8.0.0.tgz", - "integrity": "sha512-FvO2GzMUaTN0t1fBULDeIvxr5IvbDXcIatt6pnJghc736nqNgsGao5NT+5+WVLAQiTt6Cb3YUms0jiPaXhL//g==", - "dev": true, - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-custom-properties": { - "version": "12.1.5", - "resolved": "https://registry.npmjs.org/postcss-custom-properties/-/postcss-custom-properties-12.1.5.tgz", - "integrity": "sha512-FHbbB/hRo/7cxLGkc2NS7cDRIDN1oFqQnUKBiyh4b/gwk8DD8udvmRDpUhEK836kB8ggUCieHVOvZDnF9XhI3g==", - "dev": true, - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-custom-selectors": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/postcss-custom-selectors/-/postcss-custom-selectors-6.0.0.tgz", - "integrity": "sha512-/1iyBhz/W8jUepjGyu7V1OPcGbc636snN1yXEQCinb6Bwt7KxsiU7/bLQlp8GwAXzCh7cobBU5odNn/2zQWR8Q==", - "dev": true, - "dependencies": { - "postcss-selector-parser": "^6.0.4" - }, - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "postcss": "^8.1.2" - } - }, - "node_modules/postcss-dir-pseudo-class": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/postcss-dir-pseudo-class/-/postcss-dir-pseudo-class-6.0.4.tgz", - "integrity": "sha512-I8epwGy5ftdzNWEYok9VjW9whC4xnelAtbajGv4adql4FIF09rnrxnA9Y8xSHN47y7gqFIv10C5+ImsLeJpKBw==", - "dev": true, - "dependencies": { - "postcss-selector-parser": "^6.0.9" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-double-position-gradients": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/postcss-double-position-gradients/-/postcss-double-position-gradients-3.1.1.tgz", - "integrity": "sha512-jM+CGkTs4FcG53sMPjrrGE0rIvLDdCrqMzgDC5fLI7JHDO7o6QG8C5TQBtExb13hdBdoH9C2QVbG4jo2y9lErQ==", - "dev": true, - "dependencies": { - "@csstools/postcss-progressive-custom-properties": "^1.1.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-env-function": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/postcss-env-function/-/postcss-env-function-4.0.6.tgz", - "integrity": "sha512-kpA6FsLra+NqcFnL81TnsU+Z7orGtDTxcOhl6pwXeEq1yFPpRMkCDpHhrz8CFQDr/Wfm0jLiNQ1OsGGPjlqPwA==", - "dev": true, - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-focus-visible": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/postcss-focus-visible/-/postcss-focus-visible-6.0.4.tgz", - "integrity": "sha512-QcKuUU/dgNsstIK6HELFRT5Y3lbrMLEOwG+A4s5cA+fx3A3y/JTq3X9LaOj3OC3ALH0XqyrgQIgey/MIZ8Wczw==", - "dev": true, - "dependencies": { - "postcss-selector-parser": "^6.0.9" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-focus-within": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/postcss-focus-within/-/postcss-focus-within-5.0.4.tgz", - "integrity": "sha512-vvjDN++C0mu8jz4af5d52CB184ogg/sSxAFS+oUJQq2SuCe7T5U2iIsVJtsCp2d6R4j0jr5+q3rPkBVZkXD9fQ==", - "dev": true, - "dependencies": { - "postcss-selector-parser": "^6.0.9" - }, - "engines": { - 
"node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-font-variant": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/postcss-font-variant/-/postcss-font-variant-5.0.0.tgz", - "integrity": "sha512-1fmkBaCALD72CK2a9i468mA/+tr9/1cBxRRMXOUaZqO43oWPR5imcyPjXwuv7PXbCid4ndlP5zWhidQVVa3hmA==", - "dev": true, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-gap-properties": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/postcss-gap-properties/-/postcss-gap-properties-3.0.3.tgz", - "integrity": "sha512-rPPZRLPmEKgLk/KlXMqRaNkYTUpE7YC+bOIQFN5xcu1Vp11Y4faIXv6/Jpft6FMnl6YRxZqDZG0qQOW80stzxQ==", - "dev": true, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-image-set-function": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/postcss-image-set-function/-/postcss-image-set-function-4.0.6.tgz", - "integrity": "sha512-KfdC6vg53GC+vPd2+HYzsZ6obmPqOk6HY09kttU19+Gj1nC3S3XBVEXDHxkhxTohgZqzbUb94bKXvKDnYWBm/A==", - "dev": true, - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-import": { - "version": "14.0.2", - "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-14.0.2.tgz", - "integrity": "sha512-BJ2pVK4KhUyMcqjuKs9RijV5tatNzNa73e/32aBVE/ejYPe37iH+6vAu9WvqUkB5OAYgLHzbSvzHnorybJCm9g==", - "dev": true, - "dependencies": { - "postcss-value-parser": "^4.0.0", - "read-cache": "^1.0.0", - "resolve": "^1.1.7" - }, - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "postcss": "^8.0.0" - } - }, - "node_modules/postcss-initial": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-initial/-/postcss-initial-4.0.1.tgz", - "integrity": "sha512-0ueD7rPqX8Pn1xJIjay0AZeIuDoF+V+VvMt/uOnn+4ezUKhZM/NokDeP6DwMNyIoYByuN/94IQnt5FEkaN59xQ==", - "dev": true, - "peerDependencies": { - "postcss": "^8.0.0" - } - }, - "node_modules/postcss-lab-function": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/postcss-lab-function/-/postcss-lab-function-4.1.2.tgz", - "integrity": "sha512-isudf5ldhg4fk16M8viAwAbg6Gv14lVO35N3Z/49NhbwPQ2xbiEoHgrRgpgQojosF4vF7jY653ktB6dDrUOR8Q==", - "dev": true, - "dependencies": { - "@csstools/postcss-progressive-custom-properties": "^1.1.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-loader": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-6.2.1.tgz", - "integrity": "sha512-WbbYpmAaKcux/P66bZ40bpWsBucjx/TTgVVzRZ9yUO8yQfVBlameJ0ZGVaPfH64hNSBh63a+ICP5nqOpBA0w+Q==", - "dev": true, - "dependencies": { - "cosmiconfig": "^7.0.0", - "klona": "^2.0.5", - "semver": "^7.3.5" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "postcss": "^7.0.0 || ^8.0.1", - "webpack": "^5.0.0" - } - }, - "node_modules/postcss-logical": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/postcss-logical/-/postcss-logical-5.0.4.tgz", - "integrity": "sha512-RHXxplCeLh9VjinvMrZONq7im4wjWGlRJAqmAVLXyZaXwfDWP73/oq4NdIp+OZwhQUMj0zjqDfM5Fj7qby+B4g==", - "dev": true, - "engines": { - "node": "^12 || ^14 || >=16" - }, - 
"peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-media-minmax": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/postcss-media-minmax/-/postcss-media-minmax-5.0.0.tgz", - "integrity": "sha512-yDUvFf9QdFZTuCUg0g0uNSHVlJ5X1lSzDZjPSFaiCWvjgsvu8vEVxtahPrLMinIDEEGnx6cBe6iqdx5YWz08wQ==", - "dev": true, - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-extract-imports": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz", - "integrity": "sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw==", - "dev": true, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-local-by-default": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.0.tgz", - "integrity": "sha512-sT7ihtmGSF9yhm6ggikHdV0hlziDTX7oFoXtuVWeDd3hHObNkcHRo9V3yg7vCAY7cONyxJC/XXCmmiHHcvX7bQ==", - "dev": true, - "dependencies": { - "icss-utils": "^5.0.0", - "postcss-selector-parser": "^6.0.2", - "postcss-value-parser": "^4.1.0" - }, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-scope": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz", - "integrity": "sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg==", - "dev": true, - "dependencies": { - "postcss-selector-parser": "^6.0.4" - }, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-values": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", - "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", - "dev": true, - "dependencies": { - "icss-utils": "^5.0.0" - }, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-nesting": { - "version": "10.1.3", - "resolved": "https://registry.npmjs.org/postcss-nesting/-/postcss-nesting-10.1.3.tgz", - "integrity": "sha512-wUC+/YCik4wH3StsbC5fBG1s2Z3ZV74vjGqBFYtmYKlVxoio5TYGM06AiaKkQPPlkXWn72HKfS7Cw5PYxnoXSw==", - "dev": true, - "dependencies": { - "postcss-selector-parser": "^6.0.9" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-overflow-shorthand": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/postcss-overflow-shorthand/-/postcss-overflow-shorthand-3.0.3.tgz", - "integrity": "sha512-CxZwoWup9KXzQeeIxtgOciQ00tDtnylYIlJBBODqkgS/PU2jISuWOL/mYLHmZb9ZhZiCaNKsCRiLp22dZUtNsg==", - "dev": true, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-page-break": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/postcss-page-break/-/postcss-page-break-3.0.4.tgz", - "integrity": "sha512-1JGu8oCjVXLa9q9rFTo4MbeeA5FMe00/9C7lN4va606Rdb+HkxXtXsmEDrIraQ11fGz/WvKWa8gMuCKkrXpTsQ==", - "dev": true, - "peerDependencies": { - "postcss": "^8" - } - }, - 
"node_modules/postcss-place": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/postcss-place/-/postcss-place-7.0.4.tgz", - "integrity": "sha512-MrgKeiiu5OC/TETQO45kV3npRjOFxEHthsqGtkh3I1rPbZSbXGD/lZVi9j13cYh+NA8PIAPyk6sGjT9QbRyvSg==", - "dev": true, - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-preset-env": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/postcss-preset-env/-/postcss-preset-env-7.2.3.tgz", - "integrity": "sha512-Ok0DhLfwrcNGrBn8sNdy1uZqWRk/9FId0GiQ39W4ILop5GHtjJs8bu1MY9isPwHInpVEPWjb4CEcEaSbBLpfwA==", - "dev": true, - "dependencies": { - "autoprefixer": "^10.4.2", - "browserslist": "^4.19.1", - "caniuse-lite": "^1.0.30001299", - "css-blank-pseudo": "^3.0.2", - "css-has-pseudo": "^3.0.3", - "css-prefers-color-scheme": "^6.0.2", - "cssdb": "^5.0.0", - "postcss-attribute-case-insensitive": "^5.0.0", - "postcss-color-functional-notation": "^4.2.1", - "postcss-color-hex-alpha": "^8.0.2", - "postcss-color-rebeccapurple": "^7.0.2", - "postcss-custom-media": "^8.0.0", - "postcss-custom-properties": "^12.1.2", - "postcss-custom-selectors": "^6.0.0", - "postcss-dir-pseudo-class": "^6.0.3", - "postcss-double-position-gradients": "^3.0.4", - "postcss-env-function": "^4.0.4", - "postcss-focus-visible": "^6.0.3", - "postcss-focus-within": "^5.0.3", - "postcss-font-variant": "^5.0.0", - "postcss-gap-properties": "^3.0.2", - "postcss-image-set-function": "^4.0.4", - "postcss-initial": "^4.0.1", - "postcss-lab-function": "^4.0.3", - "postcss-logical": "^5.0.3", - "postcss-media-minmax": "^5.0.0", - "postcss-nesting": "^10.1.2", - "postcss-overflow-shorthand": "^3.0.2", - "postcss-page-break": "^3.0.4", - "postcss-place": "^7.0.3", - "postcss-pseudo-class-any-link": "^7.0.2", - "postcss-replace-overflow-wrap": "^4.0.0", - "postcss-selector-not": "^5.0.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-pseudo-class-any-link": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/postcss-pseudo-class-any-link/-/postcss-pseudo-class-any-link-7.1.1.tgz", - "integrity": "sha512-JRoLFvPEX/1YTPxRxp1JO4WxBVXJYrSY7NHeak5LImwJ+VobFMwYDQHvfTXEpcn+7fYIeGkC29zYFhFWIZD8fg==", - "dev": true, - "dependencies": { - "postcss-selector-parser": "^6.0.9" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-replace-overflow-wrap": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/postcss-replace-overflow-wrap/-/postcss-replace-overflow-wrap-4.0.0.tgz", - "integrity": "sha512-KmF7SBPphT4gPPcKZc7aDkweHiKEEO8cla/GjcBK+ckKxiZslIu3C4GCRW3DNfL0o7yW7kMQu9xlZ1kXRXLXtw==", - "dev": true, - "peerDependencies": { - "postcss": "^8.0.3" - } - }, - "node_modules/postcss-selector-not": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-not/-/postcss-selector-not-5.0.0.tgz", - "integrity": "sha512-/2K3A4TCP9orP4TNS7u3tGdRFVKqz/E6pX3aGnriPG0jU78of8wsUcqE4QAhWEU0d+WnMSF93Ah3F//vUtK+iQ==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-selector-parser": { - "version": "6.0.9", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.9.tgz", - "integrity": 
"sha512-UO3SgnZOVTwu4kyLR22UQ1xZh086RyNZppb7lLAKBFK8a32ttG5i87Y/P3+2bRSjZNyJ1B7hfFNo273tKe9YxQ==", - "dev": true, - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-value-parser": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", - "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", - "dev": true - }, - "node_modules/prepend-http": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", - "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/pretty-bytes": { - "version": "5.6.0", - "resolved": "https://registry.npmjs.org/pretty-bytes/-/pretty-bytes-5.6.0.tgz", - "integrity": "sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg==", - "dev": true, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/process": { - "version": "0.11.10", - "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", - "integrity": "sha1-czIwDoQBYb2j5podHZGn1LwW8YI=", - "engines": { - "node": ">= 0.6.0" - } - }, - "node_modules/process-nextick-args": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", - "dev": true - }, - "node_modules/progress": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", - "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", - "dev": true, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/promise-inflight": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz", - "integrity": "sha1-mEcocL8igTL8vdhoEputEsPAKeM=", - "dev": true - }, - "node_modules/promise-retry": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/promise-retry/-/promise-retry-2.0.1.tgz", - "integrity": "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==", - "dev": true, - "dependencies": { - "err-code": "^2.0.2", - "retry": "^0.12.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/promise-retry/node_modules/retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", - "integrity": "sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs=", - "dev": true, - "engines": { - "node": ">= 4" - } - }, - "node_modules/proto-list": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", - "integrity": "sha1-IS1b/hMYMGpCD2QCuOJv85ZHqEk=", - "dev": true, - "optional": true - }, - "node_modules/proxy-addr": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", - "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", - "dependencies": { - "forwarded": "0.2.0", - "ipaddr.js": "1.9.1" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/prr": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/prr/-/prr-1.0.1.tgz", - "integrity": 
"sha1-0/wRS6BplaRexok/SEzrHXj19HY=", - "dev": true, - "optional": true - }, - "node_modules/psl": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", - "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==" - }, - "node_modules/pstree.remy": { - "version": "1.1.8", - "resolved": "https://registry.npmjs.org/pstree.remy/-/pstree.remy-1.1.8.tgz", - "integrity": "sha512-77DZwxQmxKnu3aR542U+X8FypNzbfJ+C5XQDk3uWjWxn6151aIMGthWYRXTqT1E5oJvg+ljaa2OJi+VfvCOQ8w==", - "dev": true - }, - "node_modules/pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "dev": true, - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "node_modules/punycode": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", - "engines": { - "node": ">=6" - } - }, - "node_modules/pupa": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz", - "integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==", - "dev": true, - "dependencies": { - "escape-goat": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/qjobs": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/qjobs/-/qjobs-1.2.0.tgz", - "integrity": "sha512-8YOJEHtxpySA3fFDyCRxA+UUV+fA+rTWnuWvylOK/NCjhY+b4ocCtmu8TtsWb+mYeU+GCHf/S66KZF/AsteKHg==", - "dev": true, - "engines": { - "node": ">=0.9" - } - }, - "node_modules/qs": { - "version": "6.9.7", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.9.7.tgz", - "integrity": "sha512-IhMFgUmuNpyRfxA90umL7ByLlgRXu6tIfKPpF5TmcfRLlLCckfP/g3IQmju6jjpu+Hh8rA+2p6A27ZSPOOHdKw==", - "engines": { - "node": ">=0.6" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/queue-microtask": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", - "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/randombytes": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", - "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", - "dev": true, - "dependencies": { - "safe-buffer": "^5.1.0" - } - }, - "node_modules/range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/raw-body": { - "version": "2.4.3", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.3.tgz", - "integrity": "sha512-UlTNLIcu0uzb4D2f4WltY6cVjLi+/jEN4lgEUj3E04tpMDpUlkBo/eSn6zou9hum2VMNpCCUone0O0WeJim07g==", - "dependencies": { - "bytes": "3.1.2", - "http-errors": "1.8.1", - 
"iconv-lite": "0.4.24", - "unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/rc": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "dev": true, - "dependencies": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - }, - "bin": { - "rc": "cli.js" - } - }, - "node_modules/rc/node_modules/ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", - "dev": true - }, - "node_modules/rcedit": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/rcedit/-/rcedit-3.0.1.tgz", - "integrity": "sha512-XM0Jv40/y4hVAqj/MO70o/IWs4uOsaSoo2mLyk3klFDW+SStLnCtzuQu+1OBTIMGlM8CvaK9ftlYCp6DJ+cMsw==", - "dev": true, - "dependencies": { - "cross-spawn-windows-exe": "^1.1.0" - }, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/read-cache": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", - "integrity": "sha1-5mTvMRYRZsl1HNvo28+GtftY93Q=", - "dev": true, - "dependencies": { - "pify": "^2.3.0" - } - }, - "node_modules/read-cache/node_modules/pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/read-config-file": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/read-config-file/-/read-config-file-6.2.0.tgz", - "integrity": "sha512-gx7Pgr5I56JtYz+WuqEbQHj/xWo+5Vwua2jhb1VwM4Wid5PqYmZ4i00ZB0YEGIfkVBsCv9UrjgyqCiQfS/Oosg==", - "dev": true, - "dependencies": { - "dotenv": "^9.0.2", - "dotenv-expand": "^5.1.0", - "js-yaml": "^4.1.0", - "json5": "^2.2.0", - "lazy-val": "^1.0.4" - }, - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/read-config-file/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, - "node_modules/read-config-file/node_modules/dotenv": { - "version": "9.0.2", - "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-9.0.2.tgz", - "integrity": "sha512-I9OvvrHp4pIARv4+x9iuewrWycX6CcZtoAu1XrzPxc5UygMJXJZYmBsynku8IkrJwgypE5DGNjDPmPRhDCptUg==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/read-config-file/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/read-package-json-fast": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/read-package-json-fast/-/read-package-json-fast-2.0.3.tgz", - "integrity": "sha512-W/BKtbL+dUjTuRL2vziuYhp76s5HZ9qQhd/dKfWIZveD0O40453QNyZhC0e63lqZrAQ4jiOapVoeJ7JrszenQQ==", - "dev": true, - "dependencies": { - "json-parse-even-better-errors": "^2.3.0", - "npm-normalize-package-bin": "^1.0.1" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/read-pkg": { - "version": "2.0.0", 
- "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-2.0.0.tgz", - "integrity": "sha1-jvHAYjxqbbDcZxPEv6xGMysjaPg=", - "dev": true, - "dependencies": { - "load-json-file": "^2.0.0", - "normalize-package-data": "^2.3.2", - "path-type": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-2.0.0.tgz", - "integrity": "sha1-a3KoBImE4MQeeVEP1en6mbO1Sb4=", - "dev": true, - "dependencies": { - "find-up": "^2.0.0", - "read-pkg": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/find-up": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", - "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", - "dev": true, - "dependencies": { - "locate-path": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/locate-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", - "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", - "dev": true, - "dependencies": { - "p-locate": "^2.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/p-limit": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", - "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", - "dev": true, - "dependencies": { - "p-try": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/p-locate": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", - "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", - "dev": true, - "dependencies": { - "p-limit": "^1.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/p-try": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", - "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg-up/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg/node_modules/path-type": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-2.0.0.tgz", - "integrity": "sha1-8BLMuEFbcJb8LaoQVMPXI4lZTHM=", - "dev": true, - "dependencies": { - "pify": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/read-pkg/node_modules/pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dev": true, - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/readdirp": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", - "integrity": 
"sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", - "dev": true, - "dependencies": { - "picomatch": "^2.2.1" - }, - "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/reflect-metadata": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.1.13.tgz", - "integrity": "sha512-Ts1Y/anZELhSsjMcU605fU9RE4Oi3p5ORujwbIKXfWa+0Zxs510Qrmrce5/Jowq3cHSZSJqBjypxmHarc+vEWg==", - "dev": true - }, - "node_modules/regenerate": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", - "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==", - "dev": true - }, - "node_modules/regenerate-unicode-properties": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.0.1.tgz", - "integrity": "sha512-vn5DU6yg6h8hP/2OkQo3K7uVILvY4iu0oI4t3HFa81UPkhGJwkRwM10JEc3upjdhHjs/k8GJY1sRBhk5sr69Bw==", - "dev": true, - "dependencies": { - "regenerate": "^1.4.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/regenerator-runtime": { - "version": "0.13.9", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz", - "integrity": "sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA==", - "dev": true - }, - "node_modules/regenerator-transform": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.14.5.tgz", - "integrity": "sha512-eOf6vka5IO151Jfsw2NO9WpGX58W6wWmefK3I1zEGr0lOD0u8rwPaNqQL1aRxUaxLeKO3ArNh3VYg1KbaD+FFw==", - "dev": true, - "dependencies": { - "@babel/runtime": "^7.8.4" - } - }, - "node_modules/regex-parser": { - "version": "2.2.11", - "resolved": "https://registry.npmjs.org/regex-parser/-/regex-parser-2.2.11.tgz", - "integrity": "sha512-jbD/FT0+9MBU2XAZluI7w2OBs1RBi6p9M83nkoZayQXXU9e8Robt69FcZc7wU4eJD/YFTjn1JdCk3rbMJajz8Q==", - "dev": true - }, - "node_modules/regexp.prototype.flags": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.4.1.tgz", - "integrity": "sha512-pMR7hBVUUGI7PMA37m2ofIdQCsomVnas+Jn5UPGAHQ+/LlwKm/aTLJHdasmHRzlfeZwHiAOaRSo2rbBDm3nNUQ==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/regexpu-core": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.0.1.tgz", - "integrity": "sha512-CriEZlrKK9VJw/xQGJpQM5rY88BtuL8DM+AEwvcThHilbxiTAy8vq4iJnd2tqq8wLmjbGZzP7ZcKFjbGkmEFrw==", - "dev": true, - "dependencies": { - "regenerate": "^1.4.2", - "regenerate-unicode-properties": "^10.0.1", - "regjsgen": "^0.6.0", - "regjsparser": "^0.8.2", - "unicode-match-property-ecmascript": "^2.0.0", - "unicode-match-property-value-ecmascript": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/registry-auth-token": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.1.tgz", - "integrity": "sha512-6gkSb4U6aWJB4SF2ZvLb76yCBjcvufXBqvvEx1HbmKPkutswjW1xNVRY0+daljIYRbogN7O0etYSlbiaEQyMyw==", - "dev": true, - "dependencies": { - "rc": "^1.2.8" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/registry-url": { - "version": "5.1.0", 
- "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz", - "integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==", - "dev": true, - "dependencies": { - "rc": "^1.2.8" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/regjsgen": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.6.0.tgz", - "integrity": "sha512-ozE883Uigtqj3bx7OhL1KNbCzGyW2NQZPl6Hs09WTvCuZD5sTI4JY58bkbQWa/Y9hxIsvJ3M8Nbf7j54IqeZbA==", - "dev": true - }, - "node_modules/regjsparser": { - "version": "0.8.4", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.8.4.tgz", - "integrity": "sha512-J3LABycON/VNEu3abOviqGHuB/LOtOQj8SKmfP9anY5GfAVw/SPjwzSjxGjbZXIxbGfqTHtJw58C2Li/WkStmA==", - "dev": true, - "dependencies": { - "jsesc": "~0.5.0" - }, - "bin": { - "regjsparser": "bin/parser" - } - }, - "node_modules/regjsparser/node_modules/jsesc": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=", - "dev": true, - "bin": { - "jsesc": "bin/jsesc" - } - }, - "node_modules/request": { - "version": "2.88.2", - "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", - "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", - "deprecated": "request has been deprecated, see https://github.com/request/request/issues/3142", - "dependencies": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.3", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - "tough-cookie": "~2.5.0", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/request/node_modules/qs": { - "version": "6.5.3", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz", - "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/require-from-string": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", - "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=", - "dev": true - }, - "node_modules/resolve": { - "version": "1.22.0", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.0.tgz", - "integrity": "sha512-Hhtrw0nLeSrFQ7phPp4OOcVjLPIeMnRlr5mcnVuMe7M/7eBn98A3hmFRLoFo3DLZkivSYwhRUJTyPyWAk56WLw==", - "dev": true, - "dependencies": { - "is-core-module": "^2.8.1", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": 
"^1.0.0" - }, - "bin": { - "resolve": "bin/resolve" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/resolve-from": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", - "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/resolve-url-loader": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/resolve-url-loader/-/resolve-url-loader-5.0.0.tgz", - "integrity": "sha512-uZtduh8/8srhBoMx//5bwqjQ+rfYOUq8zC9NrMUGtjBiGTtFJM42s58/36+hTqeqINcnYe08Nj3LkK9lW4N8Xg==", - "dev": true, - "dependencies": { - "adjust-sourcemap-loader": "^4.0.0", - "convert-source-map": "^1.7.0", - "loader-utils": "^2.0.0", - "postcss": "^8.2.14", - "source-map": "0.6.1" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/resolve-url-loader/node_modules/loader-utils": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.2.tgz", - "integrity": "sha512-TM57VeHptv569d/GKh6TAYdzKblwDNiumOdkFnejjD0XwTH87K90w3O7AiJRqdQoXygvi1VQTJTLGhJl7WqA7A==", - "dev": true, - "dependencies": { - "big.js": "^5.2.2", - "emojis-list": "^3.0.0", - "json5": "^2.1.2" - }, - "engines": { - "node": ">=8.9.0" - } - }, - "node_modules/resolve-url-loader/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/responselike": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", - "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=", - "dev": true, - "dependencies": { - "lowercase-keys": "^1.0.0" - } - }, - "node_modules/restore-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", - "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", - "dev": true, - "dependencies": { - "onetime": "^5.1.0", - "signal-exit": "^3.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/retry": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", - "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", - "dev": true, - "engines": { - "node": ">= 4" - } - }, - "node_modules/reusify": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", - "dev": true, - "engines": { - "iojs": ">=1.0.0", - "node": ">=0.10.0" - } - }, - "node_modules/rfdc": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.3.0.tgz", - "integrity": "sha512-V2hovdzFbOi77/WajaSMXk2OLm+xNIeQdMMuB7icj7bk6zi2F8GGAxigcnDFpJHbNyNcgyJDiP+8nOrY5cZGrA==", - "dev": true - }, - "node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "dev": true, - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - 
"url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/roarr": { - "version": "2.15.4", - "resolved": "https://registry.npmjs.org/roarr/-/roarr-2.15.4.tgz", - "integrity": "sha512-CHhPh+UNHD2GTXNYhPWLnU8ONHdI+5DI+4EYIAOaiD63rHeYlZvyh8P+in5999TTSFgUYuKUAjzRI4mdh/p+2A==", - "dev": true, - "optional": true, - "dependencies": { - "boolean": "^3.0.1", - "detect-node": "^2.0.4", - "globalthis": "^1.0.1", - "json-stringify-safe": "^5.0.1", - "semver-compare": "^1.0.0", - "sprintf-js": "^1.1.2" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/roarr/node_modules/sprintf-js": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz", - "integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==", - "dev": true, - "optional": true - }, - "node_modules/run-async": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz", - "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==", - "dev": true, - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "dependencies": { - "queue-microtask": "^1.2.2" - } - }, - "node_modules/rxjs": { - "version": "7.5.5", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.5.5.tgz", - "integrity": "sha512-sy+H0pQofO95VDmFLzyaw9xNJU4KTRSwQIGM6+iG3SypAtCiLDzpeG8sJrNCWn2Up9km+KhkvTdbkrdy+yzZdw==", - "dependencies": { - "tslib": "^2.1.0" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" - }, - "node_modules/sanitize-filename": { - "version": "1.6.3", - "resolved": "https://registry.npmjs.org/sanitize-filename/-/sanitize-filename-1.6.3.tgz", - "integrity": "sha512-y/52Mcy7aw3gRm7IrcGDFx/bCk4AhRh2eI9luHOQM86nZsqwiRkkq2GekHXBBD+SmPidc8i2PqtYZl+pWJ8Oeg==", - "dev": true, - "dependencies": { - "truncate-utf8-bytes": "^1.0.0" - } - }, - "node_modules/sass": { - "version": "1.49.0", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.49.0.tgz", - "integrity": "sha512-TVwVdNDj6p6b4QymJtNtRS2YtLJ/CqZriGg0eIAbAKMlN8Xy6kbv33FsEZSF7FufFFM705SQviHjjThfaQ4VNw==", - "dev": true, - "dependencies": { - "chokidar": ">=3.0.0 <4.0.0", - "immutable": "^4.0.0", - "source-map-js": ">=0.6.2 <2.0.0" - }, - "bin": { - "sass": "sass.js" - }, - "engines": { - "node": ">=8.9.0" - } - }, - 
"node_modules/sass-loader": { - "version": "12.4.0", - "resolved": "https://registry.npmjs.org/sass-loader/-/sass-loader-12.4.0.tgz", - "integrity": "sha512-7xN+8khDIzym1oL9XyS6zP6Ges+Bo2B2xbPrjdMHEYyV3AQYhd/wXeru++3ODHF0zMjYmVadblSKrPrjEkL8mg==", - "dev": true, - "dependencies": { - "klona": "^2.0.4", - "neo-async": "^2.6.2" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "fibers": ">= 3.1.0", - "node-sass": "^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0", - "sass": "^1.3.0", - "webpack": "^5.0.0" - }, - "peerDependenciesMeta": { - "fibers": { - "optional": true - }, - "node-sass": { - "optional": true - }, - "sass": { - "optional": true - } - } - }, - "node_modules/sax": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==", - "dev": true - }, - "node_modules/schema-utils": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz", - "integrity": "sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==", - "dev": true, - "dependencies": { - "@types/json-schema": "^7.0.5", - "ajv": "^6.12.4", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 8.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/select-hose": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", - "integrity": "sha1-Yl2GWPhlr0Psliv8N2o3NZpJlMo=", - "dev": true - }, - "node_modules/selfsigned": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.0.0.tgz", - "integrity": "sha512-cUdFiCbKoa1mZ6osuJs2uDHrs0k0oprsKveFiiaBKCNq3SYyb5gs2HxhQyDNLCmL51ZZThqi4YNDpCK6GOP1iQ==", - "dev": true, - "dependencies": { - "node-forge": "^1.2.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/semver": { - "version": "7.3.5", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz", - "integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==", - "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/semver-compare": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/semver-compare/-/semver-compare-1.0.0.tgz", - "integrity": "sha1-De4hahyUGrN+nvsXiPavxf9VN/w=", - "dev": true, - "optional": true - }, - "node_modules/semver-diff": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz", - "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==", - "dev": true, - "dependencies": { - "semver": "^6.3.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/semver-diff/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/send": { - "version": "0.17.2", - "resolved": "https://registry.npmjs.org/send/-/send-0.17.2.tgz", - "integrity": 
"sha512-UJYB6wFSJE3G00nEivR5rgWp8c2xXvJ3OPWPhmuteU0IKj8nKbG3DrjiOmLwpnHGYWAVwA69zmTm++YG0Hmwww==", - "dependencies": { - "debug": "2.6.9", - "depd": "~1.1.2", - "destroy": "~1.0.4", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "fresh": "0.5.2", - "http-errors": "1.8.1", - "mime": "1.6.0", - "ms": "2.1.3", - "on-finished": "~2.3.0", - "range-parser": "~1.2.1", - "statuses": "~1.5.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/send/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - }, - "node_modules/serialize-error": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/serialize-error/-/serialize-error-7.0.1.tgz", - "integrity": "sha512-8I8TjW5KMOKsZQTvoxjuSIa7foAwPWGOts+6o7sgjz41/qMD9VQHEDxi6PBvK2l0MXUmqZyNpUK+T2tQaaElvw==", - "dev": true, - "optional": true, - "dependencies": { - "type-fest": "^0.13.1" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/serialize-error/node_modules/type-fest": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.13.1.tgz", - "integrity": "sha512-34R7HTnG0XIJcBSn5XhDd7nNFPRcXYRZrBB2O2jdKqYODldSzBAqzsWoZYYvduky73toYS/ESqxPvkDf/F0XMg==", - "dev": true, - "optional": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/serialize-javascript": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", - "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", - "dev": true, - "dependencies": { - "randombytes": "^2.1.0" - } - }, - "node_modules/serve-index": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", - "integrity": "sha1-03aNabHn2C5c4FD/9bRTvqEqkjk=", - "dev": true, - "dependencies": { - "accepts": "~1.3.4", - "batch": "0.6.1", - "debug": "2.6.9", - "escape-html": "~1.0.3", - "http-errors": "~1.6.2", - "mime-types": "~2.1.17", - "parseurl": "~1.3.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/serve-index/node_modules/http-errors": { - "version": "1.6.3", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", - "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=", - "dev": true, - "dependencies": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.0", - "statuses": ">= 1.4.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-index/node_modules/inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=", - "dev": true - }, - "node_modules/serve-index/node_modules/setprototypeof": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", - "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==", - "dev": true - }, - "node_modules/serve-static": { - "version": "1.14.2", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.2.tgz", - "integrity": "sha512-+TMNA9AFxUEGuC0z2mevogSnn9MXKb4fa7ngeRMJaaGv8vTwnIEkKi+QGvPt33HSnf8pRS+WGM0EbMtCJLKMBQ==", - 
"dependencies": { - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "parseurl": "~1.3.3", - "send": "0.17.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/set-blocking": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", - "dev": true - }, - "node_modules/setprototypeof": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", - "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" - }, - "node_modules/shallow-clone": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", - "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", - "dev": true, - "dependencies": { - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "dev": true - }, - "node_modules/slash": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/slice-ansi": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-3.0.0.tgz", - "integrity": "sha512-pSyv7bSTC7ig9Dcgbw9AuRNUb5k5V6oDudjZoMBSr13qpLBG7tB+zgCkARjq7xIUgdz5P1Qe8u+rSGdouOOIyQ==", - "dev": true, - "optional": true, - "dependencies": { - "ansi-styles": "^4.0.0", - "astral-regex": "^2.0.0", - "is-fullwidth-code-point": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/slice-ansi/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "optional": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/slice-ansi/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "optional": true, - "dependencies": { - "color-name": 
"~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/slice-ansi/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true, - "optional": true - }, - "node_modules/smart-buffer": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", - "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", - "dev": true, - "engines": { - "node": ">= 6.0.0", - "npm": ">= 3.0.0" - } - }, - "node_modules/socket.io": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.4.1.tgz", - "integrity": "sha512-s04vrBswdQBUmuWJuuNTmXUVJhP0cVky8bBDhdkf8y0Ptsu7fKU2LuLbts9g+pdmAdyMMn8F/9Mf1/wbtUN0fg==", - "dev": true, - "dependencies": { - "accepts": "~1.3.4", - "base64id": "~2.0.0", - "debug": "~4.3.2", - "engine.io": "~6.1.0", - "socket.io-adapter": "~2.3.3", - "socket.io-parser": "~4.0.4" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/socket.io-adapter": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-2.3.3.tgz", - "integrity": "sha512-Qd/iwn3VskrpNO60BeRyCyr8ZWw9CPZyitW4AQwmRZ8zCiyDiL+znRnWX6tDHXnWn1sJrM1+b6Mn6wEDJJ4aYQ==", - "dev": true - }, - "node_modules/socket.io-parser": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.0.4.tgz", - "integrity": "sha512-t+b0SS+IxG7Rxzda2EVvyBZbvFPBCjJoyHuE0P//7OAsN23GItzDRdWa6ALxZI/8R5ygK7jAR6t028/z+7295g==", - "dev": true, - "dependencies": { - "@types/component-emitter": "^1.2.10", - "component-emitter": "~1.3.0", - "debug": "~4.3.1" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/socket.io-parser/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/socket.io-parser/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/socket.io/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/socket.io/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/sockjs": { - "version": "0.3.24", - "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", - "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", - "dev": 
true, - "dependencies": { - "faye-websocket": "^0.11.3", - "uuid": "^8.3.2", - "websocket-driver": "^0.7.4" - } - }, - "node_modules/sockjs/node_modules/uuid": { - "version": "8.3.2", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", - "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", - "dev": true, - "bin": { - "uuid": "dist/bin/uuid" - } - }, - "node_modules/socks": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/socks/-/socks-2.6.2.tgz", - "integrity": "sha512-zDZhHhZRY9PxRruRMR7kMhnf3I8hDs4S3f9RecfnGxvcBHQcKcIH/oUcEWffsfl1XxdYlA7nnlGbbTvPz9D8gA==", - "dev": true, - "dependencies": { - "ip": "^1.1.5", - "smart-buffer": "^4.2.0" - }, - "engines": { - "node": ">= 10.13.0", - "npm": ">= 3.0.0" - } - }, - "node_modules/socks-proxy-agent": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-6.1.1.tgz", - "integrity": "sha512-t8J0kG3csjA4g6FTbsMOWws+7R7vuRC8aQ/wy3/1OWmsgwA68zs/+cExQ0koSitUDXqhufF/YJr9wtNMZHw5Ew==", - "dev": true, - "dependencies": { - "agent-base": "^6.0.2", - "debug": "^4.3.1", - "socks": "^2.6.1" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/socks-proxy-agent/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/socks-proxy-agent/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/source-map": { - "version": "0.7.3", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.3.tgz", - "integrity": "sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==", - "dev": true, - "engines": { - "node": ">= 8" - } - }, - "node_modules/source-map-js": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", - "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-loader": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/source-map-loader/-/source-map-loader-3.0.1.tgz", - "integrity": "sha512-Vp1UsfyPvgujKQzi4pyDiTOnE3E4H+yHvkVRN3c/9PJmQS4CQJExvcDvaX/D+RV+xQben9HJ56jMJS3CgUeWyA==", - "dev": true, - "dependencies": { - "abab": "^2.0.5", - "iconv-lite": "^0.6.3", - "source-map-js": "^1.0.1" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - } - }, - "node_modules/source-map-loader/node_modules/iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "dev": true, - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-resolve": { 
- "version": "0.6.0", - "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.6.0.tgz", - "integrity": "sha512-KXBr9d/fO/bWo97NXsPIAW1bFSBOuCnjbNTBMO7N59hsv5i9yzRDfcYwwt0l04+VqnKC+EwzvJZIP/qkuMgR/w==", - "deprecated": "See https://github.com/lydell/source-map-resolve#deprecated", - "dev": true, - "dependencies": { - "atob": "^2.1.2", - "decode-uri-component": "^0.2.0" - } - }, - "node_modules/source-map-support": { - "version": "0.5.21", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", - "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", - "dev": true, - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "node_modules/source-map-support/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/sourcemap-codec": { - "version": "1.4.8", - "resolved": "https://registry.npmjs.org/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz", - "integrity": "sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==", - "deprecated": "Please use @jridgewell/sourcemap-codec instead", - "dev": true - }, - "node_modules/spdx-correct": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz", - "integrity": "sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==", - "dev": true, - "dependencies": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - "node_modules/spdx-exceptions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", - "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==", - "dev": true - }, - "node_modules/spdx-expression-parse": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", - "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", - "dev": true, - "dependencies": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "node_modules/spdx-license-ids": { - "version": "3.0.11", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.11.tgz", - "integrity": "sha512-Ctl2BrFiM0X3MANYgj3CkygxhRmr9mi6xhejbdO960nF6EDJApTYpn0BQnDKlnNBULKiCN1n3w9EBkHK8ZWg+g==", - "dev": true - }, - "node_modules/spdy": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", - "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", - "dev": true, - "dependencies": { - "debug": "^4.1.0", - "handle-thing": "^2.0.0", - "http-deceiver": "^1.2.7", - "select-hose": "^2.0.0", - "spdy-transport": "^3.0.0" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/spdy-transport": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", - "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", - "dev": true, - "dependencies": { - "debug": "^4.1.0", - "detect-node": "^2.0.4", - 
"hpack.js": "^2.1.6", - "obuf": "^1.1.2", - "readable-stream": "^3.0.6", - "wbuf": "^1.7.3" - } - }, - "node_modules/spdy-transport/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/spdy-transport/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/spdy/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/spdy/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", - "dev": true - }, - "node_modules/sshpk": { - "version": "1.17.0", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.17.0.tgz", - "integrity": "sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ==", - "dependencies": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - }, - "bin": { - "sshpk-conv": "bin/sshpk-conv", - "sshpk-sign": "bin/sshpk-sign", - "sshpk-verify": "bin/sshpk-verify" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ssri": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/ssri/-/ssri-8.0.1.tgz", - "integrity": "sha512-97qShzy1AiyxvPNIkLWoGua7xoQzzPjQ0HAH4B0rWKo7SZ6USuPcrUiAFrws0UH8RrbWmgq3LMTObhPIHbbBeQ==", - "dev": true, - "dependencies": { - "minipass": "^3.1.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/stat-mode": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/stat-mode/-/stat-mode-1.0.0.tgz", - "integrity": "sha512-jH9EhtKIjuXZ2cWxmXS8ZP80XyC3iasQxMDV8jzhNJpfDb7VbQLVW4Wvsxz9QZvzV+G4YoSfBUVKDOyxLzi/sg==", - "dev": true, - "engines": { - "node": ">= 6" - } - }, - "node_modules/statuses": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", - "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/streamroller": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/streamroller/-/streamroller-3.0.6.tgz", - "integrity": "sha512-Qz32plKq/MZywYyhEatxyYc8vs994Gz0Hu2MSYXXLD233UyPeIeRBZARIIGwFer4Mdb8r3Y2UqKkgyDghM6QCg==", - "dev": true, - "dependencies": { - "date-format": "^4.0.6", - "debug": "^4.3.4", - "fs-extra": "^10.0.1" - }, - "engines": 
{ - "node": ">=8.0" - } - }, - "node_modules/streamroller/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/streamroller/node_modules/fs-extra": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.1.tgz", - "integrity": "sha512-NbdoVMZso2Lsrn/QwLXOy6rm0ufY2zEOKCDzJR/0kBsb0E6qed0P3iYK+Ath3BfvXEeu4JhEtXLgILx5psUfag==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/streamroller/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/streamroller/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/streamroller/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "dev": true, - "dependencies": { - "safe-buffer": "~5.2.0" - } - }, - "node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/strip-final-newline": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", - "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - 
"node_modules/strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-outer": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/strip-outer/-/strip-outer-1.0.1.tgz", - "integrity": "sha512-k55yxKHwaXnpYGsOzg4Vl8+tDrWylxDEpknGjhTiZB8dFRU5rTo9CAzeycivxV3s+zlTKwrs6WxMxR95n26kwg==", - "dev": true, - "dependencies": { - "escape-string-regexp": "^1.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/stylus": { - "version": "0.56.0", - "resolved": "https://registry.npmjs.org/stylus/-/stylus-0.56.0.tgz", - "integrity": "sha512-Ev3fOb4bUElwWu4F9P9WjnnaSpc8XB9OFHSFZSKMFL1CE1oM+oFXWEgAqPmmZIyhBihuqIQlFsVTypiiS9RxeA==", - "dev": true, - "dependencies": { - "css": "^3.0.0", - "debug": "^4.3.2", - "glob": "^7.1.6", - "safer-buffer": "^2.1.2", - "sax": "~1.2.4", - "source-map": "^0.7.3" - }, - "bin": { - "stylus": "bin/stylus" - }, - "engines": { - "node": "*" - } - }, - "node_modules/stylus-loader": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/stylus-loader/-/stylus-loader-6.2.0.tgz", - "integrity": "sha512-5dsDc7qVQGRoc6pvCL20eYgRUxepZ9FpeK28XhdXaIPP6kXr6nI1zAAKFQgP5OBkOfKaURp4WUpJzspg1f01Gg==", - "dev": true, - "dependencies": { - "fast-glob": "^3.2.7", - "klona": "^2.0.4", - "normalize-path": "^3.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "stylus": ">=0.52.4", - "webpack": "^5.0.0" - } - }, - "node_modules/stylus/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/stylus/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/sumchecker": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/sumchecker/-/sumchecker-3.0.1.tgz", - "integrity": "sha512-MvjXzkz/BOfyVDkG0oFOtBxHX2u3gKbMHIF/dXblZsgD3BWOFLmHovIpZY7BykJdAjcqRCBi1WYBNdEC9yI7vg==", - "dev": true, - "dependencies": { - "debug": "^4.1.0" - }, - "engines": { - "node": ">= 8.0" - } - }, - "node_modules/sumchecker/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/sumchecker/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/supports-color": { - "version": "5.5.0", - "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dev": true, - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/supports-preserve-symlinks-flag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", - "dev": true, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/symbol-observable": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/symbol-observable/-/symbol-observable-4.0.0.tgz", - "integrity": "sha512-b19dMThMV4HVFynSAM1++gBHAbk2Tc/osgLIBZMKsyqh34jb2e8Os7T6ZW/Bt3pJFdBTd2JwAnAAEQV7rSNvcQ==", - "dev": true, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/tapable": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", - "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/tar": { - "version": "6.1.11", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.11.tgz", - "integrity": "sha512-an/KZQzQUkZCkuoAA64hM92X0Urb6VpRhAFllDzz44U2mcD5scmT3zBc4VgVpkugF580+DQn8eAFSyoQt0tznA==", - "dev": true, - "dependencies": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^3.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/temp-file": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/temp-file/-/temp-file-3.4.0.tgz", - "integrity": "sha512-C5tjlC/HCtVUOi3KWVokd4vHVViOmGjtLwIh4MuzPo/nMYTV/p1urt3RnMz2IWXDdKEGJH3k5+KPxtqRsUYGtg==", - "dev": true, - "dependencies": { - "async-exit-hook": "^2.0.1", - "fs-extra": "^10.0.0" - } - }, - "node_modules/temp-file/node_modules/fs-extra": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.1.tgz", - "integrity": "sha512-NbdoVMZso2Lsrn/QwLXOy6rm0ufY2zEOKCDzJR/0kBsb0E6qed0P3iYK+Ath3BfvXEeu4JhEtXLgILx5psUfag==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/temp-file/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/temp-file/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/terser": { - "version": "5.11.0", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.11.0.tgz", - "integrity": "sha512-uCA9DLanzzWSsN1UirKwylhhRz3aKPInlfmpGfw8VN6jHsAtu8HJtIpeeHHK23rxnE/cDc+yvmq5wqkIC6Kn0A==", - "dev": true, - "dependencies": { - 
"acorn": "^8.5.0", - "commander": "^2.20.0", - "source-map": "~0.7.2", - "source-map-support": "~0.5.20" - }, - "bin": { - "terser": "bin/terser" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/terser-webpack-plugin": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.1.tgz", - "integrity": "sha512-GvlZdT6wPQKbDNW/GDQzZFg/j4vKU96yl2q6mcUkzKOgW4gwf1Z8cZToUCrz31XHlPWH8MVb1r2tFtdDtTGJ7g==", - "dev": true, - "dependencies": { - "jest-worker": "^27.4.5", - "schema-utils": "^3.1.1", - "serialize-javascript": "^6.0.0", - "source-map": "^0.6.1", - "terser": "^5.7.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.1.0" - }, - "peerDependenciesMeta": { - "@swc/core": { - "optional": true - }, - "esbuild": { - "optional": true - }, - "uglify-js": { - "optional": true - } - } - }, - "node_modules/terser-webpack-plugin/node_modules/schema-utils": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.1.1.tgz", - "integrity": "sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw==", - "dev": true, - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/terser-webpack-plugin/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/test-exclude": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", - "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", - "dev": true, - "dependencies": { - "@istanbuljs/schema": "^0.1.2", - "glob": "^7.1.4", - "minimatch": "^3.0.4" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=", - "dev": true - }, - "node_modules/through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=", - "dev": true - }, - "node_modules/thunky": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", - "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==", - "dev": true - }, - "node_modules/tmp": { - "version": "0.0.33", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", - "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", - "dev": true, - "dependencies": { - "os-tmpdir": "~1.0.2" - }, - "engines": { - "node": ">=0.6.0" - } - }, - "node_modules/tmp-promise": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/tmp-promise/-/tmp-promise-3.0.3.tgz", - "integrity": "sha512-RwM7MoPojPxsOBYnyd2hy0bxtIlVrihNs9pj5SUvY8Zz1sQcQG2tG1hSr8PDxfgEB8RNKDhqbIlroIarSNDNsQ==", - "dev": true, - "dependencies": { 
- "tmp": "^0.2.0" - } - }, - "node_modules/tmp-promise/node_modules/tmp": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz", - "integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==", - "dev": true, - "dependencies": { - "rimraf": "^3.0.0" - }, - "engines": { - "node": ">=8.17.0" - } - }, - "node_modules/to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/to-readable-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz", - "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, - "dependencies": { - "is-number": "^7.0.0" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/toidentifier": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", - "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/touch": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/touch/-/touch-3.1.0.tgz", - "integrity": "sha512-WBx8Uy5TLtOSRtIq+M03/sKDrXCLHxwDcquSP2c43Le03/9serjQBIztjRz6FkJez9D/hleyAXTBGLwwZUw9lA==", - "dev": true, - "dependencies": { - "nopt": "~1.0.10" - }, - "bin": { - "nodetouch": "bin/nodetouch.js" - } - }, - "node_modules/touch/node_modules/nopt": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-1.0.10.tgz", - "integrity": "sha1-bd0hvSoxQXuScn3Vhfim83YI6+4=", - "dev": true, - "dependencies": { - "abbrev": "1" - }, - "bin": { - "nopt": "bin/nopt.js" - }, - "engines": { - "node": "*" - } - }, - "node_modules/tough-cookie": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", - "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", - "dependencies": { - "psl": "^1.1.28", - "punycode": "^2.1.1" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/tree-kill": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/tree-kill/-/tree-kill-1.2.2.tgz", - "integrity": "sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==", - "dev": true, - "bin": { - "tree-kill": "cli.js" - } - }, - "node_modules/trim-repeated": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/trim-repeated/-/trim-repeated-1.0.0.tgz", - "integrity": "sha1-42RqLqTokTEr9+rObPsFOAvAHCE=", - "dev": true, - "dependencies": { - "escape-string-regexp": "^1.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/truncate-utf8-bytes": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/truncate-utf8-bytes/-/truncate-utf8-bytes-1.0.2.tgz", - "integrity": "sha1-QFkjkJWS1W94pYGENLC3hInKXys=", - "dev": true, - "dependencies": { - "utf8-byte-length": "^1.0.1" - } - }, - 
"node_modules/tslib": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.3.1.tgz", - "integrity": "sha512-77EbyPPpMz+FRFRuAFlWMtmgUWGe9UOG2Z25NqCwiIjRhOf5iKGuzSe5P2w1laq+FkRy4p+PCuVkJSGkzTEKVw==" - }, - "node_modules/tunnel": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/tunnel/-/tunnel-0.0.6.tgz", - "integrity": "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==", - "dev": true, - "optional": true, - "engines": { - "node": ">=0.6.11 <=0.7.0 || >=0.7.3" - } - }, - "node_modules/tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", - "dependencies": { - "safe-buffer": "^5.0.1" - }, - "engines": { - "node": "*" - } - }, - "node_modules/tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" - }, - "node_modules/type-fest": { - "version": "0.21.3", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", - "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/type-is": { - "version": "1.6.18", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", - "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", - "dependencies": { - "media-typer": "0.3.0", - "mime-types": "~2.1.24" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/typed-assert": { - "version": "1.0.9", - "resolved": "https://registry.npmjs.org/typed-assert/-/typed-assert-1.0.9.tgz", - "integrity": "sha512-KNNZtayBCtmnNmbo5mG47p1XsCyrx6iVqomjcZnec/1Y5GGARaxPs6r49RnSPeUP3YjNYiU9sQHAtY4BBvnZwg==", - "dev": true - }, - "node_modules/typedarray": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", - "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=", - "dev": true - }, - "node_modules/typedarray-to-buffer": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", - "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", - "dev": true, - "dependencies": { - "is-typedarray": "^1.0.0" - } - }, - "node_modules/typescript": { - "version": "4.5.5", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.5.5.tgz", - "integrity": "sha512-TCTIul70LyWe6IJWT8QSYeA54WQe8EjQFU4wY52Fasj5UKx88LNYKCgBEHcOMOrFF1rKGbD8v/xcNWVUq9SymA==", - "dev": true, - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=4.2.0" - } - }, - "node_modules/ua-parser-js": { - "version": "0.7.31", - "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.31.tgz", - "integrity": "sha512-qLK/Xe9E2uzmYI3qLeOmI0tEOt+TBBQyUIAh4aAgU05FVYzeZrKUdkAZfBNVGRaHVgV0TDkdEngJSw/SyQchkQ==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/ua-parser-js" - }, - { - "type": "paypal", - "url": "https://paypal.me/faisalman" - } - ], - "engines": { - "node": "*" - } - }, - "node_modules/undefsafe": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/undefsafe/-/undefsafe-2.0.5.tgz", 
- "integrity": "sha512-WxONCrssBM8TSPRqN5EmsjVrsv4A8X12J4ArBiiayv3DyyG3ZlIg6yysuuSYdZsVz3TKcTg2fd//Ujd4CHV1iA==", - "dev": true - }, - "node_modules/unicode-canonical-property-names-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", - "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", - "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", - "dev": true, - "dependencies": { - "unicode-canonical-property-names-ecmascript": "^2.0.0", - "unicode-property-aliases-ecmascript": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-value-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.0.0.tgz", - "integrity": "sha512-7Yhkc0Ye+t4PNYzOGKedDhXbYIBe1XEQYQxOPyhcXNMJ0WCABqqj6ckydd6pWRZTHV4GuCPKdBAUiMc60tsKVw==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-property-aliases-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.0.0.tgz", - "integrity": "sha512-5Zfuy9q/DFr4tfO7ZPeVXb1aPoeQSdeFMLpYuFebehDAhbuevLs5yxSZmIFN1tP5F9Wl4IpJrYojg85/zgyZHQ==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/unique-filename": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz", - "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==", - "dev": true, - "dependencies": { - "unique-slug": "^2.0.0" - } - }, - "node_modules/unique-slug": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz", - "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==", - "dev": true, - "dependencies": { - "imurmurhash": "^0.1.4" - } - }, - "node_modules/unique-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz", - "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==", - "dev": true, - "dependencies": { - "crypto-random-string": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", - "dev": true, - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/unpipe": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/update-notifier": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-5.1.0.tgz", - "integrity": 
"sha512-ItnICHbeMh9GqUy31hFPrD1kcuZ3rpxDZbf4KUDavXwS0bW5m7SLbDQpGX3UYr072cbrF5hFUs3r5tUsPwjfHw==", - "dev": true, - "dependencies": { - "boxen": "^5.0.0", - "chalk": "^4.1.0", - "configstore": "^5.0.1", - "has-yarn": "^2.1.0", - "import-lazy": "^2.1.0", - "is-ci": "^2.0.0", - "is-installed-globally": "^0.4.0", - "is-npm": "^5.0.0", - "is-yarn-global": "^0.3.0", - "latest-version": "^5.1.0", - "pupa": "^2.1.1", - "semver": "^7.3.4", - "semver-diff": "^3.1.1", - "xdg-basedir": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/yeoman/update-notifier?sponsor=1" - } - }, - "node_modules/update-notifier/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/update-notifier/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/update-notifier/node_modules/ci-info": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz", - "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==", - "dev": true - }, - "node_modules/update-notifier/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/update-notifier/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/update-notifier/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/is-ci": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz", - "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==", - "dev": true, - "dependencies": { - "ci-info": "^2.0.0" - }, - "bin": { - "is-ci": "bin.js" - } - }, - "node_modules/update-notifier/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - 
"engines": { - "node": ">=8" - } - }, - "node_modules/uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/url-parse-lax": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", - "integrity": "sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=", - "dev": true, - "dependencies": { - "prepend-http": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/utf8-byte-length": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/utf8-byte-length/-/utf8-byte-length-1.0.4.tgz", - "integrity": "sha1-9F8VDExm7uloGGUFq5P8u4rWv2E=", - "dev": true - }, - "node_modules/util": { - "version": "0.10.4", - "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz", - "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==", - "dependencies": { - "inherits": "2.0.3" - } - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", - "dev": true - }, - "node_modules/util/node_modules/inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" - }, - "node_modules/utils-merge": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", - "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=", - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/uuid": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", - "deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. 
See https://v8.dev/blog/math-random for details.", - "bin": { - "uuid": "bin/uuid" - } - }, - "node_modules/validate-npm-package-license": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", - "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", - "dev": true, - "dependencies": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" - } - }, - "node_modules/validate-npm-package-name": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/validate-npm-package-name/-/validate-npm-package-name-3.0.0.tgz", - "integrity": "sha1-X6kS2B630MdK/BQN5zF/DKffQ34=", - "dev": true, - "dependencies": { - "builtins": "^1.0.3" - } - }, - "node_modules/vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/verror": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", - "engines": [ - "node >=0.6.0" - ], - "dependencies": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - } - }, - "node_modules/void-elements": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-2.0.1.tgz", - "integrity": "sha1-wGavtYK7HLQSjWDqkjkulNXp2+w=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/watchpack": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.3.1.tgz", - "integrity": "sha512-x0t0JuydIo8qCNctdDrn1OzH/qDzk2+rdCOC3YzumZ42fiMqmQ7T3xQurykYMhYfHaPHTp4ZxAx2NfUo1K6QaA==", - "dev": true, - "dependencies": { - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.1.2" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/wbuf": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", - "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", - "dev": true, - "dependencies": { - "minimalistic-assert": "^1.0.0" - } - }, - "node_modules/wcwidth": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", - "integrity": "sha1-8LDc+RW8X/FSivrbLA4XtTLaL+g=", - "dev": true, - "dependencies": { - "defaults": "^1.0.3" - } - }, - "node_modules/webpack": { - "version": "5.67.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.67.0.tgz", - "integrity": "sha512-LjFbfMh89xBDpUMgA1W9Ur6Rn/gnr2Cq1jjHFPo4v6a79/ypznSYbAyPgGhwsxBtMIaEmDD1oJoA7BEYw/Fbrw==", - "dev": true, - "dependencies": { - "@types/eslint-scope": "^3.7.0", - "@types/estree": "^0.0.50", - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/wasm-edit": "1.11.1", - "@webassemblyjs/wasm-parser": "1.11.1", - "acorn": "^8.4.1", - "acorn-import-assertions": "^1.7.6", - "browserslist": "^4.14.5", - "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^5.8.3", - "es-module-lexer": "^0.9.0", - "eslint-scope": "5.1.1", - "events": "^3.2.0", - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.2.9", - "json-parse-better-errors": "^1.0.2", - "loader-runner": "^4.2.0", - "mime-types": "^2.1.27", - "neo-async": "^2.6.2", - "schema-utils": "^3.1.0", - "tapable": "^2.1.1", - "terser-webpack-plugin": "^5.1.3", - "watchpack": "^2.3.1", - "webpack-sources": "^3.2.3" - }, - "bin": { - "webpack": 
"bin/webpack.js" - }, - "engines": { - "node": ">=10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependenciesMeta": { - "webpack-cli": { - "optional": true - } - } - }, - "node_modules/webpack-dev-middleware": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.0.tgz", - "integrity": "sha512-MouJz+rXAm9B1OTOYaJnn6rtD/lWZPy2ufQCH3BPs8Rloh/Du6Jze4p7AeLYHkVi0giJnYLaSGDC7S+GM9arhg==", - "dev": true, - "dependencies": { - "colorette": "^2.0.10", - "memfs": "^3.2.2", - "mime-types": "^2.1.31", - "range-parser": "^1.2.1", - "schema-utils": "^4.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^4.0.0 || ^5.0.0" - } - }, - "node_modules/webpack-dev-middleware/node_modules/ajv": { - "version": "8.10.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.10.0.tgz", - "integrity": "sha512-bzqAEZOjkrUMl2afH8dknrq5KEk2SrwdBROR+vH1EKVQTqaUbJVPdc/gEdggTMM0Se+s+Ja4ju4TlNcStKl2Hw==", - "dev": true, - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/webpack-dev-middleware/node_modules/ajv-keywords": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", - "dev": true, - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/webpack-dev-middleware/node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", - "dev": true - }, - "node_modules/webpack-dev-middleware/node_modules/schema-utils": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", - "dev": true, - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/webpack-dev-server": { - "version": "4.7.3", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.7.3.tgz", - "integrity": "sha512-mlxq2AsIw2ag016nixkzUkdyOE8ST2GTy34uKSABp1c4nhjZvH90D5ZRR+UOLSsG4Z3TFahAi72a3ymRtfRm+Q==", - "dev": true, - "dependencies": { - "@types/bonjour": "^3.5.9", - "@types/connect-history-api-fallback": "^1.3.5", - "@types/serve-index": "^1.9.1", - "@types/sockjs": "^0.3.33", - "@types/ws": "^8.2.2", - "ansi-html-community": "^0.0.8", - "bonjour": "^3.5.0", - "chokidar": "^3.5.2", - "colorette": "^2.0.10", - "compression": "^1.7.4", - "connect-history-api-fallback": "^1.6.0", - "default-gateway": "^6.0.3", - "del": "^6.0.0", - "express": "^4.17.1", - "graceful-fs": "^4.2.6", - "html-entities": "^2.3.2", - "http-proxy-middleware": "^2.0.0", - 
"ipaddr.js": "^2.0.1", - "open": "^8.0.9", - "p-retry": "^4.5.0", - "portfinder": "^1.0.28", - "schema-utils": "^4.0.0", - "selfsigned": "^2.0.0", - "serve-index": "^1.9.1", - "sockjs": "^0.3.21", - "spdy": "^4.0.2", - "strip-ansi": "^7.0.0", - "webpack-dev-middleware": "^5.3.0", - "ws": "^8.1.0" - }, - "bin": { - "webpack-dev-server": "bin/webpack-dev-server.js" - }, - "engines": { - "node": ">= 12.13.0" - }, - "peerDependencies": { - "webpack": "^4.37.0 || ^5.0.0" - }, - "peerDependenciesMeta": { - "webpack-cli": { - "optional": true - } - } - }, - "node_modules/webpack-dev-server/node_modules/ajv": { - "version": "8.10.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.10.0.tgz", - "integrity": "sha512-bzqAEZOjkrUMl2afH8dknrq5KEk2SrwdBROR+vH1EKVQTqaUbJVPdc/gEdggTMM0Se+s+Ja4ju4TlNcStKl2Hw==", - "dev": true, - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/webpack-dev-server/node_modules/ajv-keywords": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", - "dev": true, - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/webpack-dev-server/node_modules/ansi-regex": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/webpack-dev-server/node_modules/ipaddr.js": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.0.1.tgz", - "integrity": "sha512-1qTgH9NG+IIJ4yfKs2e6Pp1bZg8wbDbKHT21HrLIeYBTRLgMYKnMTPAuI3Lcs61nfx5h1xlXnbJtH1kX5/d/ng==", - "dev": true, - "engines": { - "node": ">= 10" - } - }, - "node_modules/webpack-dev-server/node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", - "dev": true - }, - "node_modules/webpack-dev-server/node_modules/schema-utils": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", - "dev": true, - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/webpack-dev-server/node_modules/strip-ansi": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.0.1.tgz", - "integrity": "sha512-cXNxvT8dFNRVfhVME3JAe98mkXDYN2O1l7jmcwMnOslDeESg1rF/OZMtK0nRAhiari1unG5cD4jG3rapUAkLbw==", - "dev": true, - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": 
"https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/webpack-merge": { - "version": "5.8.0", - "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.8.0.tgz", - "integrity": "sha512-/SaI7xY0831XwP6kzuwhKWVKDP9t1QY1h65lAFLbZqMPIuYcD9QAW4u9STIbU9kaJbPBB/geU/gLr1wDjOhQ+Q==", - "dev": true, - "dependencies": { - "clone-deep": "^4.0.1", - "wildcard": "^2.0.0" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/webpack-sources": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", - "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", - "dev": true, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/webpack-subresource-integrity": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/webpack-subresource-integrity/-/webpack-subresource-integrity-5.1.0.tgz", - "integrity": "sha512-sacXoX+xd8r4WKsy9MvH/q/vBtEHr86cpImXwyg74pFIpERKt6FmB8cXpeuh0ZLgclOlHI4Wcll7+R5L02xk9Q==", - "dev": true, - "dependencies": { - "typed-assert": "^1.0.8" - }, - "engines": { - "node": ">= 12" - }, - "peerDependencies": { - "html-webpack-plugin": ">= 5.0.0-beta.1 < 6", - "webpack": "^5.12.0" - }, - "peerDependenciesMeta": { - "html-webpack-plugin": { - "optional": true - } - } - }, - "node_modules/webpack/node_modules/schema-utils": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.1.1.tgz", - "integrity": "sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw==", - "dev": true, - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/websocket-driver": { - "version": "0.7.4", - "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", - "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", - "dev": true, - "dependencies": { - "http-parser-js": ">=0.5.1", - "safe-buffer": ">=5.1.0", - "websocket-extensions": ">=0.1.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/websocket-extensions": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", - "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", - "dev": true, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/wide-align": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.5.tgz", - "integrity": "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==", - "dev": true, - "dependencies": { - "string-width": "^1.0.2 || 2 || 3 || 4" - } - }, - "node_modules/widest-line": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz", - "integrity": 
"sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==", - "dev": true, - "dependencies": { - "string-width": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/wildcard": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.0.tgz", - "integrity": "sha512-JcKqAHLPxcdb9KM49dufGXn2x3ssnfjbcaQdLlfZsL9rH9wgDQjUtDxbo8NE0F6SFvydeu1VhZe7hZuHsB2/pw==", - "dev": true - }, - "node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/wrap-ansi/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", - "dev": true - }, - "node_modules/write-file-atomic": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", - "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", - "dev": true, - "dependencies": { - "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - "typedarray-to-buffer": "^3.1.5" - } - }, - "node_modules/ws": { - "version": "8.5.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.5.0.tgz", - "integrity": "sha512-BWX0SWVgLPzYwF8lTzEy1egjhS4S4OEAHfsO8o65WOVsrnSRGaSiUaa9e0ggGlkMTtBlmOpEXiie9RUcBO86qg==", - "dev": true, - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": "^5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - }, - "node_modules/xdg-basedir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", - "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/xmlbuilder": { - "version": "9.0.7", - "resolved": 
"https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz", - "integrity": "sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0=", - "dev": true, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - }, - "node_modules/yaml": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", - "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", - "dev": true, - "engines": { - "node": ">= 6" - } - }, - "node_modules/yargs": { - "version": "17.4.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.4.0.tgz", - "integrity": "sha512-WJudfrk81yWFSOkZYpAZx4Nt7V4xp7S/uJkX0CnxovMCt1wCE8LNftPpNuF9X/u9gN5nsD7ycYtRcDf2pL3UiA==", - "dev": true, - "dependencies": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/yargs-parser": { - "version": "21.0.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.0.1.tgz", - "integrity": "sha512-9BK1jFpLzJROCI5TzwZL/TU4gqjK5xiHV/RfWLOahrjAko/e4DJkRDZQXfvqAsiZzzYhgAzbgz6lg48jcm4GLg==", - "dev": true, - "engines": { - "node": ">=12" - } - }, - "node_modules/yauzl": { - "version": "2.10.0", - "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", - "integrity": "sha1-x+sXyT4RLLEIb6bY5R+wZnt5pfk=", - "dev": true, - "dependencies": { - "buffer-crc32": "~0.2.3", - "fd-slicer": "~1.1.0" - } - }, - "node_modules/zone.js": { - "version": "0.11.5", - "resolved": "https://registry.npmjs.org/zone.js/-/zone.js-0.11.5.tgz", - "integrity": "sha512-D1/7VxEuQ7xk6z/kAROe4SUbd9CzxY4zOwVGnGHerd/SgLIVU5f4esDzQUsOCeArn933BZfWMKydH7l7dPEp0g==", - "dependencies": { - "tslib": "^2.3.0" - } - } - } -} diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/package.json b/ai-solutions/windows/angular-app-nlp/Electron app UI/package.json deleted file mode 100644 index 7d6aad41..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/package.json +++ /dev/null @@ -1,101 +0,0 @@ -{ - "name": "QA_App", - "author": "", - "homepage": "./", - "version": "1.0.0", - "description": "", - "main": "main.js", - "build": { - "appId": "com.electron.QA_App", - "productName": "QA_App", - "files": [ - "**/*", - "dist/**/*" - ], - "directories": { - "output": "release", - "buildResources": "dist" - }, - "asar": true, - "dmg": { - "contents": [ - { - "x": 110, - "y": 150 - }, - { - "x": 240, - "y": 150, - "type": "link", - "path": "/Applications" - } - ] - }, - "linux": { - "target": [ - "AppImage", - "deb" - ], - "icon": "" - }, - "win": { - "target": "nsis", - "icon": "" - }, - "nsis": { - "oneClick": false, - "perMachine": false, - "installerIcon": "", - "uninstallerIcon": "", - "createDesktopShortcut": "always" - }, - "afterPack": "./removeLocales.js" - }, - "postinstall": "electron-builder install-app-deps", - "scripts": { - "ng": "ng", - "start": "ng serve 
--proxy-config proxy.conf.json", - "dist": "electron-builder", - "electron": "electron .", - "build": "ng build --prod" - }, - "private": true, - "dependencies": { - "@angular/animations": "~13.2.0", - "@angular/common": "~13.2.0", - "@angular/compiler": "~13.2.0", - "@angular/core": "~13.2.0", - "@angular/forms": "~13.2.0", - "@angular/platform-browser": "~13.2.0", - "@angular/platform-browser-dynamic": "~13.2.0", - "@angular/router": "~13.2.0", - "body-parser": "^1.19.0", - "dotenv": "^10.0.0", - "express": "^4.17.1", - "@angular/material": "^12.2.8", - "@angular/cdk": "^12.2.7", - "path": "^0.12.7", - "request": "^2.88.2", - "rxjs": "~7.5.0", - "tslib": "^2.3.0", - "zone.js": "~0.11.4" - }, - "devDependencies": { - "@angular-devkit/build-angular": "~13.2.4", - "@angular/cli": "~13.2.4", - "@angular/compiler-cli": "~13.2.0", - "@types/jasmine": "~3.10.0", - "@types/node": "^12.11.1", - "jasmine-core": "~4.0.0", - "karma": "~6.3.0", - "karma-chrome-launcher": "~3.1.0", - "karma-coverage": "~2.1.0", - "karma-jasmine": "~4.0.0", - "karma-jasmine-html-reporter": "~1.7.0", - "typescript": "~4.5.2", - "electron": "^17.0.1", - "electron-builder": "^22.11.7", - "electron-packager": "^15.4.0", - "nodemon": "^2.0.13" - } -} diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/proxy.conf.json b/ai-solutions/windows/angular-app-nlp/Electron app UI/proxy.conf.json deleted file mode 100644 index 192e7e4f..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/proxy.conf.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "/api": { - "target": "http://localhost:3000", - "secure": false - } -} \ No newline at end of file diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/removeLocales.js b/ai-solutions/windows/angular-app-nlp/Electron app UI/removeLocales.js deleted file mode 100644 index 1e8442ed..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/removeLocales.js +++ /dev/null @@ -1,38 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -/******************************************************************************* -# -# Copyright (c) 2021 Qualcomm Technologies, Inc. -# All Rights Reserved. -# Confidential and Proprietary - Qualcomm Technologies, Inc. -# -# All data and information contained in or disclosed by this document are -# confidential and proprietary information of Qualcomm Technologies, Inc., and -# all rights therein are expressly reserved. By accepting this material, the -# recipient agrees that this material and the information contained therein -# are held in confidence and in trust and will not be used, copied, reproduced -# in whole or in part, nor its contents revealed in any manner to others -# without the express written permission of Qualcomm Technologies, Inc. 
-# -*******************************************************************************/ - -exports.default = async function(context) { - var fs = require('fs'); - var localeDir = context.appOutDir+'/locales/'; - fs.readdir(localeDir, function(err, files){ - if(!(files && files.length)) return; - for (var i = 0, len = files.length; i < len; i++) { - var match = files[i].match(/en-US\.pak/); - if(match === null){ - fs.unlinkSync(localeDir+files[i]); - } - } - }); -} \ No newline at end of file diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/server.js b/ai-solutions/windows/angular-app-nlp/Electron app UI/server.js deleted file mode 100644 index 55dbedd8..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/server.js +++ /dev/null @@ -1,90 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -const express = require('express'); -const app = express(); -const path = require('path'); -const bodyParser = require('body-parser'); -const request = require('request'); -require('dotenv').config({path:path.join(__dirname, "/.env")}); -const fs = require('fs'); -const { execFile ,exec} = require('child_process'); - -app.use(bodyParser.urlencoded({ extended: false })); -app.use(bodyParser.json()); - -const topicsPath = fs.existsSync(path.join(__dirname, "/dist/assets/")) ? path.join(__dirname, "/dist/assets/") : path.join(__dirname, "/src/assets/") - -const PORT = process.env.PORT || 3000; - -app.use(express.static(path.join(__dirname, '/dist'))); - - -app.get('/api/getTopics', (req, res) => { - try{ - fs.readFile(topicsPath+'QA_List.json', (err, data) => { - if(err) { - console.log(err) - return res.status(400).json(err) - } - let jsonData = JSON.parse(data); - res.status(200).json(jsonData); - }); - } - catch(err){ - res.status(400).json(err); - } -}) - -app.get('/api/serverDetails', (req, res) => { - let info; - if (req.query.type === 'ip') { - info = process.env.IP - } - if (req.query.type === 'port') { - info = process.env.AGENT_PORT - } - let resp = {status:"successfull",info:info} - res.status(200).json(resp); -}) - -app.post('/api/fetchPredictionResults', (req, res) => { - const options = { - body: req.body.input, - json: true, - url: req.body.urlDetails.url, - method: req.body.urlDetails.method, - }; - request(options, function(err, response, body) { - if (err) { - console.log(err) - res.status(400).json(err); - return - } - let inference = body - let resp = {} - resp['question'] = req.body.input.question; - resp['answer'] = inference.answer; - resp['time'] = new Date(); - resp['executionTime'] = inference.exec_time; - resp['error'] = inference.error; - res.status(200).json(resp); - }); -}) - -app.get('/*',(req, res) => { - res.sendFile(path.join(__dirname, 'dist/index.html')); -}); - - -app.listen(PORT,() => { - console.log(`Running on port ${PORT}`) -}) - -module.exports = app; \ No newline at end of file diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/Thumbs.db b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/Thumbs.db deleted file mode 100644 index 7da08e4a..00000000 Binary files a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/Thumbs.db and /dev/null differ diff --git 
a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app-routing.module.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app-routing.module.ts deleted file mode 100644 index a257954e..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app-routing.module.ts +++ /dev/null @@ -1,27 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -import { NgModule } from '@angular/core'; -import { RouterModule, Routes } from '@angular/router'; -import { PredictionComponent } from './prediction/prediction.component'; -import { ResultsComponent } from './results/results.component'; -import { TopicsComponent } from './topics/topics.component'; - -const routes: Routes = [ - { path: 'prediction', component: PredictionComponent }, - { path: 'results', component: ResultsComponent }, - { path: 'topics', component: TopicsComponent }, -]; - -@NgModule({ - imports: [RouterModule.forRoot(routes)], - exports: [RouterModule] -}) -export class AppRoutingModule { } diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app.component.css b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app.component.css deleted file mode 100644 index 5681c580..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app.component.css +++ /dev/null @@ -1,75 +0,0 @@ -.example-spacer { - flex: 1 1 auto; -} -.toolbar-items { - margin-left: 1%; - font-size: medium; - font-family: 'Segoe UI Italic 400', sans-serif; -} -.toolbar-title { - margin-right: 0.5%; - font-size: x-large; - font-family: 'Segoe UI Italic 400', sans-serif; -} -.drawer-container { - width: 100%; - height: 100%; -} -/*.drawer-sidenav-content { - display: flex; - height: 100%; - align-items: center; - justify-content: center; -}*/ - -.drawer-heading { - margin-left: 6%; -} - -.drawer-subheading { - font-size: 25px; - font-family: 'Segoe UI Italic 400', sans-serif; -} - -.drawer-headindicon { - pointer-events: none; -} -.drawer-sidenav { - padding: 10px; - width: 15%; -} -.drawer-icon { - float: left; - margin-top: 15%; - margin-right: 0.35em; - font-size: 25px; -} -.drawer-icon1 { - float: left; - margin-top: 15%; - margin-right: 0.35em; - font-size: 30px; -} -.drawer-item { - font-size: 20px; - font-family: 'Segoe UI Italic 400', sans-serif; -} -.open-file-dialog { - display: none; -} -.tool-bar{ - color:white; - height:5%; -} -.netron-false{ - top: 10%; - left: 15%; - position: absolute; - display: none; -} -.netron-true{ - top: 10%; - left: 15%; - position: absolute; - display: block; -} diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app.component.html b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app.component.html deleted file mode 100644 index a0c80740..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app.component.html +++ /dev/null @@ -1,5 +0,0 @@ - - Q&A Application - - - \ No newline at end of file diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app.component.spec.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app.component.spec.ts deleted file mode 100644 index 77241a69..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron 
app UI/src/app/app.component.spec.ts +++ /dev/null @@ -1,45 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -import { TestBed } from '@angular/core/testing'; -import { RouterTestingModule } from '@angular/router/testing'; -import { AppComponent } from './app.component'; - -describe('AppComponent', () => { - beforeEach(async () => { - await TestBed.configureTestingModule({ - imports: [ - RouterTestingModule - ], - declarations: [ - AppComponent - ], - }).compileComponents(); - }); - - it('should create the app', () => { - const fixture = TestBed.createComponent(AppComponent); - const app = fixture.componentInstance; - expect(app).toBeTruthy(); - }); - - it(`should have as title 'testApp'`, () => { - const fixture = TestBed.createComponent(AppComponent); - const app = fixture.componentInstance; - expect(app.title).toEqual('testApp'); - }); - - it('should render title', () => { - const fixture = TestBed.createComponent(AppComponent); - fixture.detectChanges(); - const compiled = fixture.nativeElement as HTMLElement; - expect(compiled.querySelector('.content span')?.textContent).toContain('testApp app is running!'); - }); -}); diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app.component.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app.component.ts deleted file mode 100644 index c26064f4..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app.component.ts +++ /dev/null @@ -1,30 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -import { Component,OnInit } from '@angular/core'; -import { Router } from '@angular/router'; - -@Component({ - selector: 'app-root', - templateUrl: './app.component.html', - styleUrls: ['./app.component.css'] -}) -export class AppComponent implements OnInit{ - title = 'QNN UI'; - - constructor(private router: Router) { - } - - ngOnInit(): void { - this.router.navigateByUrl('/topics'); - } -} - - diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app.module.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app.module.ts deleted file mode 100644 index b89f9dd1..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/app.module.ts +++ /dev/null @@ -1,69 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -import { NgModule } from '@angular/core'; -import { BrowserModule } from '@angular/platform-browser'; -import { BrowserAnimationsModule } from '@angular/platform-browser/animations'; -import { HttpClientModule } from '@angular/common/http'; - -import { AppRoutingModule } from './app-routing.module'; -import { AppComponent } from './app.component'; - -import {MatToolbarModule} from '@angular/material/toolbar'; -import {MatIconModule} from '@angular/material/icon'; -import {MatButtonModule} from '@angular/material/button'; -import {MatCardModule} from '@angular/material/card'; -import {MatInputModule} from '@angular/material/input'; -import {MatFormFieldModule} from '@angular/material/form-field'; -import {FormsModule, ReactiveFormsModule } from '@angular/forms'; -import {MatProgressSpinnerModule} from '@angular/material/progress-spinner'; -import {MatProgressBarModule} from '@angular/material/progress-bar'; -import { PredictionComponent } from './prediction/prediction.component'; -import { ResultsComponent } from './results/results.component'; -import {MatTableModule} from '@angular/material/table'; -import { TopicsComponent } from './topics/topics.component'; -import {MatListModule} from '@angular/material/list'; -import {MatCheckboxModule} from '@angular/material/checkbox'; -import {MatChipsModule} from '@angular/material/chips'; -import {MatSelectModule} from '@angular/material/select'; - - -@NgModule({ - declarations: [ - AppComponent, - PredictionComponent, - ResultsComponent, - TopicsComponent - ], - imports: [ - BrowserAnimationsModule, - BrowserModule, - AppRoutingModule, - MatToolbarModule, - MatIconModule, - MatButtonModule, - MatCardModule, - MatInputModule, - MatFormFieldModule, - FormsModule, - ReactiveFormsModule, - MatProgressSpinnerModule, - MatProgressBarModule, - MatTableModule, - HttpClientModule, - MatListModule, - MatCheckboxModule, - MatChipsModule, - MatSelectModule - ], - providers: [], - bootstrap: [AppComponent] -}) -export class AppModule { } diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/backend.service.spec.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/backend.service.spec.ts deleted file mode 100644 index e99261c5..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/backend.service.spec.ts +++ /dev/null @@ -1,26 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -import { TestBed } from '@angular/core/testing'; - -import { BackendService } from './backend.service'; - -describe('BackendService', () => { - let service: BackendService; - - beforeEach(() => { - TestBed.configureTestingModule({}); - service = TestBed.inject(BackendService); - }); - - it('should be created', () => { - expect(service).toBeTruthy(); - }); -}); diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/backend.service.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/backend.service.ts deleted file mode 100644 index d204e13e..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/backend.service.ts +++ /dev/null @@ -1,79 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -import { Injectable } from '@angular/core'; -import { HttpClient } from '@angular/common/http'; -import { Observable } from 'rxjs'; -import { map } from 'rxjs/operators'; -import { Router } from '@angular/router'; - -@Injectable({ - providedIn: 'root' -}) -export class BackendService { - - public results:any = []; - public topicContetnt:any; - public selectedTopic:any; - - constructor(private http: HttpClient, private router: Router) { } - - private request(method: 'post'|'get'|'patch'|'delete', type: any,data:any,param:any) { - let base; - if (method === 'post') - { - base = this.http.post(`http://127.0.0.1:9081/api/${type}`,data); - } - else if (method === 'patch') - { - base = this.http.patch(`/api/${type}/`+param,data); - } - else if (method === 'delete') { - base = this.http.delete(`/api/${type}`); - } - else - { - - base = this.http.get(`/api/${type}`); - - } - const request = base.pipe(map((data) => {return data})); - console.log("request"+request); - return request; - } - - - public getTopics(){ - return this.request('get', 'getTopics',null,null); - } - - public preprocess(data:any){ - return this.request('post', 'preprocess',data,null); - } - - public fetchPredictionResults(data:any){ - return this.request('post', 'fetchPredictionResults',data,null); - } - public BuildModel(data:any){ - return this.request('post', 'BuildModel',data,null); - } - - public postProcess(data:any){ - return this.request('post', 'postProcess',data,null); - } - - public dummyAPI(data:any){ - return this.request('post', 'dummyAPI',data,null); - } - - public fetchServerDetails(type:any){ - return this.request('get', 'serverDetails?type='+type,null,null); - } -} diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/prediction/prediction.component.css b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/prediction/prediction.component.css deleted file mode 100644 index 6ef992e0..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/prediction/prediction.component.css +++ /dev/null @@ -1,7 +0,0 @@ -::ng-deep .mat-chip-list-wrapper{ - width: 100%; -} - -.highlight { - background-color: yellow; -} \ No newline at end of file diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/prediction/prediction.component.html 
b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/prediction/prediction.component.html deleted file mode 100644 index f738c297..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/prediction/prediction.component.html +++ /dev/null @@ -1,39 +0,0 @@ - -

{{auth.selectedTopic[0]}}

-
-
- {{content}} -
-
-
- - - - - Runtime Option - - CPU - DSP - - -
- - - {{question}} - - -
-
- - -
- -
-
-
-
- -
-

Q&A Predicted Results

- -
diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/prediction/prediction.component.spec.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/prediction/prediction.component.spec.ts deleted file mode 100644 index 57633839..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/prediction/prediction.component.spec.ts +++ /dev/null @@ -1,35 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -import { ComponentFixture, TestBed } from '@angular/core/testing'; - -import { PredictionComponent } from './prediction.component'; - -describe('PredictionComponent', () => { - let component: PredictionComponent; - let fixture: ComponentFixture; - - beforeEach(async () => { - await TestBed.configureTestingModule({ - declarations: [ PredictionComponent ] - }) - .compileComponents(); - }); - - beforeEach(() => { - fixture = TestBed.createComponent(PredictionComponent); - component = fixture.componentInstance; - fixture.detectChanges(); - }); - - it('should create', () => { - expect(component).toBeTruthy(); - }); -}); diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/prediction/prediction.component.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/prediction/prediction.component.ts deleted file mode 100644 index 49f30a24..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/prediction/prediction.component.ts +++ /dev/null @@ -1,135 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -import { Component, OnInit, ViewChild } from '@angular/core'; -import { ResultsComponent } from '../results/results.component'; -import { FormBuilder, FormControl, FormGroup, FormArray, Validators,AbstractControl} from '@angular/forms'; -import { BackendService } from '../backend.service'; -import { MatTableDataSource } from "@angular/material/table"; -import { Router } from '@angular/router'; -import {LayoutModule} from '@angular/cdk/layout'; - - -@Component({ - providers:[ResultsComponent], - selector: 'app-prediction', - templateUrl: './prediction.component.html', - styleUrls: ['./prediction.component.css'] -}) -export class PredictionComponent implements OnInit { - - public PredictionFormGroup:FormGroup; - public showProgress:boolean = false; - public inferOutput:any; - public fetchedResults :any = []; - public content:any; - public questionList:any = []; - results = new MatTableDataSource(this.fetchedResults); - - constructor(private fb: FormBuilder,public auth: BackendService,public resultInfo:ResultsComponent,private router: Router) { } - - ngOnInit(): void { - this.PredictionFormGroup = this.PredictionFormGroupFn(); - let topicIndex = this.auth.topicContetnt.map( (x:any) => { return x.topic; }).indexOf(this.auth.selectedTopic[0]); - this.content = this.auth.topicContetnt[topicIndex].content; - this.questionList = this.auth.topicContetnt[topicIndex].sampleQuestions; - this.auth.results = []; - } - - PredictionFormGroupFn(){ - return this.fb.group({ - question: ['',Validators.required], - //htp:['Cloud AI 100'] - htp:['DSP'] - }); - } - - Back(){ - this.showProgress = false; - this.resetContent() - this.router.navigateByUrl('/topics'); - } - - async Start(){ - this.resetContent() - this.showProgress = true; - let inputInfo = {question:this.PredictionFormGroup.get('question')!.value,paragraph:this.content,runtime:this.PredictionFormGroup.get('htp')!.value} - - - try{ - console.log(inputInfo) - let ipInfo:any = await this.auth.fetchServerDetails('ip').toPromise(); - let portInfo:any = await this.auth.fetchServerDetails('port').toPromise(); - let ip = ipInfo.info; - let port = portInfo.info; - let method = 'POST'; - let api = '/predict' - let protocol = 'http://' - let urlDetails ={ - url: protocol+ip+':'+port+api, - method:method - } - let data = { - urlDetails:urlDetails, - input:inputInfo - } - console.log(urlDetails) - this.inferOutput = await this.auth.fetchPredictionResults(data).toPromise(); - console.log("******************* inferOutput ********************") - console.log(this.inferOutput) - - console.log("Before Fetched Results",this.fetchedResults) - this.fetchedResults.push(this.inferOutput) - console.log("After Fetched Results") - console.log(this.fetchedResults) - this.auth.results = this.fetchedResults; - this.fetchedResults = this.fetchedResults.sort(function compare(a:any, b:any) { - var dateA:any = new Date(a.time); - var dateB:any = new Date(b.time); - return dateB - dateA; - }); - console.log("After Sorting",this.fetchedResults) - this.results = new MatTableDataSource(this.fetchedResults); - - console.log("Results",this.results) - - console.log("this.auth") - this.highlight(this.inferOutput.answer) - this.showProgress = false; - this.resultInfo.results=this.auth.results; - - console.log("ResultComponent",this.resultInfo.results); - } - catch (err:any){ - console.log(err) - alert(err.message) - this.showProgress = 
false; - } - } - - updateQuestion(question:any){ - this.PredictionFormGroup.patchValue({ question: question }); - } - - highlight(text:any) { - var inputText = document.getElementById("style-4"); - var innerHTML = inputText!.innerHTML.toLowerCase(); - var index = innerHTML.indexOf(text); - if (index >= 0) { - innerHTML = innerHTML.substring(0,index) + "" + innerHTML.substring(index,index+text.length) + "" + innerHTML.substring(index + text.length); - inputText!.innerHTML = innerHTML; - } - } - - resetContent(){ - var inputText = document.getElementById("style-4"); - inputText!.innerHTML = this.content; - } -} diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/results/results.component.css b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/results/results.component.css deleted file mode 100644 index b7d8fd16..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/results/results.component.css +++ /dev/null @@ -1,64 +0,0 @@ -.time{ - color: #747474; - font-weight: 500; - display: inline-block; - margin-bottom: 0px; -} - - -.executionTime{ - color: #bd3232; - font-weight: 500; - display: inline-block; - float: right; - margin-bottom: 0px; -} - - -.input{ - font-weight: 500; - color: black; - font-style: italic; - font-size: 18px; - margin-bottom: 0px; -} - -.example-h2 { - margin: 10px; -} - -.example-section { - display: flex; - align-content: center; - align-items: center; - height: 40px; - width: 50%; -} - -.mat-progress-bar { - height: 15px; -} - -table { - width: 100%; -} - -.green{ - background-color: #a3e7a3; -} - -.red{ - background-color: #ff9a9a; -} - -.center { - margin: 0; - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); -} - -td{ - text-align: center; -} diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/results/results.component.html b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/results/results.component.html deleted file mode 100644 index 8c2969aa..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/results/results.component.html +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - -
Date {{element.time | date}} Question {{element.question}} Answer {{element.answer}} Execution Time {{element.executionTime}}
-
\ No newline at end of file diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/results/results.component.spec.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/results/results.component.spec.ts deleted file mode 100644 index d1444ad9..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/results/results.component.spec.ts +++ /dev/null @@ -1,35 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -import { ComponentFixture, TestBed } from '@angular/core/testing'; - -import { ResultsComponent } from './results.component'; - -describe('ResultsComponent', () => { - let component: ResultsComponent; - let fixture: ComponentFixture; - - beforeEach(async () => { - await TestBed.configureTestingModule({ - declarations: [ ResultsComponent ] - }) - .compileComponents(); - }); - - beforeEach(() => { - fixture = TestBed.createComponent(ResultsComponent); - component = fixture.componentInstance; - fixture.detectChanges(); - }); - - it('should create', () => { - expect(component).toBeTruthy(); - }); -}); diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/results/results.component.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/results/results.component.ts deleted file mode 100644 index e42c7c45..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/results/results.component.ts +++ /dev/null @@ -1,38 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -import { Component, OnInit,Input } from '@angular/core'; -import { BackendService } from '../backend.service'; -import { MatTableDataSource } from "@angular/material/table"; - -@Component({ - selector: 'app-results', - templateUrl: './results.component.html', - styleUrls: ['./results.component.css'] -}) - -export class ResultsComponent implements OnInit { - @Input() results: any; - - displayedColumns: string[] = ['Date', 'Question', 'Answer','Execution Time (ms)']; - - dataSource:MatTableDataSource; - - constructor(public auth: BackendService) { - this.dataSource=new MatTableDataSource(this.results); - console.log("datasource",this.dataSource) - } - - ngOnInit(): void { - - console.log("Inside Results Components",this.auth.results,this.dataSource) - } - -} diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/topics/topics.component.css b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/topics/topics.component.css deleted file mode 100644 index 11e1ffa8..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/topics/topics.component.css +++ /dev/null @@ -1,7 +0,0 @@ -.center { - margin: 0; - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); -} \ No newline at end of file diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/topics/topics.component.html b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/topics/topics.component.html deleted file mode 100644 index b3521dda..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/topics/topics.component.html +++ /dev/null @@ -1,23 +0,0 @@ -
-

Please choose an article from the list

- - - - {{topic}} - - - - -
-
- - Model Option - - mobile_bert - electrabase - distilbert - bert_base - - -
-
diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/topics/topics.component.spec.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/topics/topics.component.spec.ts deleted file mode 100644 index f4555ed5..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/topics/topics.component.spec.ts +++ /dev/null @@ -1,35 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -import { ComponentFixture, TestBed } from '@angular/core/testing'; - -import { TopicsComponent } from './topics.component'; - -describe('TopicsComponent', () => { - let component: TopicsComponent; - let fixture: ComponentFixture; - - beforeEach(async () => { - await TestBed.configureTestingModule({ - declarations: [ TopicsComponent ] - }) - .compileComponents(); - }); - - beforeEach(() => { - fixture = TestBed.createComponent(TopicsComponent); - component = fixture.componentInstance; - fixture.detectChanges(); - }); - - it('should create', () => { - expect(component).toBeTruthy(); - }); -}); diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/topics/topics.component.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/topics/topics.component.ts deleted file mode 100644 index bdafbf6e..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/app/topics/topics.component.ts +++ /dev/null @@ -1,85 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -import { Component, OnInit } from '@angular/core'; -import { BackendService } from '../backend.service'; -import { Router } from '@angular/router'; -import { FormBuilder, FormControl, FormGroup, FormArray, Validators,AbstractControl} from '@angular/forms'; - -@Component({ - selector: 'app-topics', - templateUrl: './topics.component.html', - styleUrls: ['./topics.component.css'] -}) -export class TopicsComponent implements OnInit { - [x: string]: any; - - public topics: any[]; - public ModelFormGroup:FormGroup; - - constructor(public auth: BackendService,private router: Router,private fb: FormBuilder) {} - - ngOnInit(): void { - this.fetchTopics(); - this.ModelFormGroup = this.ModelFormGroupFn(); - } - ModelFormGroupFn(){ - return this.fb.group({ - //htp: ['',Validators.required], - htp:['mobile_bert'] - }); - } - - fetchTopics(){ - this.auth.getTopics().subscribe((response:any) => { - this.auth.topicContetnt = response.topics; - this.topics = [...new Set(this.auth.topicContetnt.map((element :any) => element.topic))]; - },(err) => { - console.log(err); - }); - - - } - async Start(){ - - } - - Next(){ - let inputInfo = {model:this.ModelFormGroup.get('htp')!.value} - try{ - console.log(inputInfo) - let ipInfo:any = this.auth.fetchServerDetails('ip').toPromise(); - let portInfo:any = this.auth.fetchServerDetails('port').toPromise(); - let ip = ipInfo.info; - let port = portInfo.info; - let method = 'POST'; - let api = '/predict' - let protocol = 'http://' - let urlDetails ={ - url: protocol+ip+':'+port+api, - method:method - } - let data = { - urlDetails:urlDetails, - input:inputInfo - } - console.log(urlDetails) - this.auth.BuildModel(data).toPromise(); - - } - catch (err:any){ - console.log(err) - alert(err.message) - - } - this.router.navigateByUrl('/prediction'); - } - -} diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/assets/QA_List.json b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/assets/QA_List.json deleted file mode 100644 index 41942b15..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/assets/QA_List.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "topics": [ - { - "topic":"Qualcomm_Wiki", - "content":"Qualcomm is an American multinational corporation headquartered in San Diego, California, and incorporated in Delaware.It creates semiconductors, software, and services related to wireless technology. It owns patents critical to the 5G,4G,CDMA2000, TD-SCDMA and WCDMA mobile communications standards.Qualcomm was established in 1985 by Irwin M. Jacobs and six other co-founders. Its early research into CDMA wireless cell phone technology was funded by selling a two-way mobile digital satellite communications system known as Omnitracs. After a heated debate in the wireless industry, the 2G standard was adopted with Qualcomm's CDMA patents incorporated.Afterwards there was a series of legal disputes about pricing for licensing patents required by the standard.Over the years, Qualcomm has expanded into selling semiconductor products in a predominantly fabless manufacturing model. 
It also developed semiconductor components or software for vehicles, watches, laptops, wi-fi, smartphones, and other devices.", - "sampleQuestions":["When was Qualcomm founded ?","What is Omnitracs ?","Where is Qualcomm headquartered ?","What does Qualcomm create ?","Who is the founder of Qualcomm"] - }, - { - "topic":"Super Bowl 50", - "content":"Super Bowl 50 was an American football game to determine the champion of the National Football League (NFL) for the 2015 season. The American Football Conference (AFC) champion Denver Broncos defeated the National Football Conference (NFC) champion Carolina Panthers. The game was played on February 7, 2016, at Levi's Stadium in Santa Clara, California, in the San Francisco Bay Area. As this was the 50th Super Bowl game, the league emphasized the golden anniversary with various gold-themed initiatives during the 2015 season, as well as suspending the tradition of naming each Super Bowl game with Roman numerals (under which the game would have been known as Super Bowl), so the logo could prominently feature the number 50 in more familiar Arabic numerals", - "sampleQuestions":["What is Super Bowl 50 ?","When was Super Bowl 50 played ?","Where was the Super Bowl 50 played ?","Who on Super Bowl 50 ?"] - }, - { - "topic":"Warsaw", - "content":"Warsaw, officially the Capital City of Warsaw,is the capital and largest city of Poland. The metropolis stands on the River Vistula in east-central Poland and its population is officially estimated at 1.8 million residents within a greater metropolitan area of 3.1 million residents,which makes Warsaw the 7th most-populous capital city in the European Union. The city area measures 517 km2 (200 sq mi) and comprises 18 boroughs, while the metropolitan area covers 6,100 km2 (2,355 sq mi).Warsaw is an alpha- global city,a major cultural, political and economic hub, and the country's seat of government. Its historical Old Town was designated a UNESCO World Heritage Site.", - "sampleQuestions":["Which is the capital of Poland ?","What is the population of Warsaw city ?","On which river bank does the city stand ?","How big is Warsaw city ?"] - }, - { - "topic":"Normans", - "content":"Norman, member of those Vikings, or Norsemen, who settled in northern France (or the Frankish kingdom), together with their descendants. The Normans founded the duchy of Normandy and sent out expeditions of conquest and colonization to southern Italy and Sicily and to England, Wales, Scotland, and Ireland.", - "sampleQuestions":["Who are Normans ?","Who found the Duchy of Normandy ?","Where did Normans send out thr expiditions of conquest ?"] - }, - { - "topic":"Nikola Tesla", - "content":"Nikola Tesla was a Serbian-American inventor, electrical engineer, mechanical engineer, and futurist best known for his contributions to the design of the modern alternating current (AC) electricity supply system.Born and raised in the Austrian Empire, Tesla studied engineering and physics in the 1870s without receiving a degree, gaining practical experience in the early 1880s working in telephony and at Continental Edison in the new electric power industry. In 1884 he emigrated to the United States, where he became a naturalized citizen. He worked for a short time at the Edison Machine Works in New York City before he struck out on his own. With the help of partners to finance and market his ideas, Tesla set up laboratories and companies in New York to develop a range of electrical and mechanical devices. 
His alternating current (AC) induction motor and related polyphase AC patents, licensed by Westinghouse Electric in 1888, earned him a considerable amount of money and became the cornerstone of the polyphase system which that company eventually marketed.", - "sampleQuestions":["Who is Nikola Tesla ?","Where was Nikola Tesla born ?","When did Nikola Tesla migrate to The United States ?","Why did Nikola Tesla set up laboratories ?"] - }, - { - "topic":"Computational Complexity Theory", - "content":"Computational complexity theory focuses on classifying computational problems according to their resource usage, and relating these classes to each other. A computational problem is a task solved by a computer. A computation problem is solvable by mechanical application of mathematical steps, such as an algorithm.A problem is regarded as inherently difficult if its solution requires significant resources, whatever the algorithm used. The theory formalizes this intuition, by introducing mathematical models of computation to study these problems and quantifying their computational complexity, i.e., the amount of resources needed to solve them, such as time and storage. Other measures of complexity are also used, such as the amount of communication (used in communication complexity), the number of gates in a circuit (used in circuit complexity) and the number of processors (used in parallel computing). One of the roles of computational complexity theory is to determine the practical limits on what computers can and cannot do. The P versus NP problem, one of the seven Millennium Prize Problems, is dedicated to the field of computational complexity.", - "sampleQuestions":["What is Computational complexity theory ?","What is the need of Computational complexity theory ?","What is a computational problem ?","Which problem is dedicated to the field of computational complexity ?"] - }, - { - "topic":"Teacher", - "content":"A teacher, also called a schoolteacher or formally an educator, is a person who helps students to acquire knowledge, competence or virtue.Informally the role of teacher may be taken on by anyone (e.g. when showing a colleague how to perform a specific task). In some countries, teaching young people of school age may be carried out in an informal setting, such as within the family (homeschooling), rather than in a formal setting such as a school or college. Some other professions may involve a significant amount of teaching (e.g. youth worker, pastor).In most countries, formal teaching of students is usually carried out by paid professional teachers.", - "sampleQuestions":["Who is a teacher ?","What is a teacher formally called ?","Which professions invlove significnt amount of teaching ?"] - }, - { - "topic":"India", - "content":"India, officially the Republic of India ,is a country in South Asia. It is the seventh-largest country by area, the second-most populous country, and the most populous democracy in the world. Bounded by the Indian Ocean on the south, the Arabian Sea on the southwest, and the Bay of Bengal on the southeast, it shares land borders with Pakistan to the west;[f] China, Nepal, and Bhutan to the north; and Bangladesh and Myanmar to the east. 
In the Indian Ocean, India is in the vicinity of Sri Lanka and the Maldives; its Andaman and Nicobar Islands share a maritime border with Thailand, Myanmar and Indonesia.", - "sampleQuestions":["Which is the 7th largest country in the world ?","Which is the second moast populous country in the world ?","Which are the countires which shares the land border with India ?","Which are the 3 water bodies surounding India ?"] - }, - { - "topic":"Martin Luther", - "content":"Martin Luther was a German priest, theologian, author and hymnwriter. A former Augustinian friar,[3] he is best known as the seminal figure in the Protestant Reformation and as the namesake of Lutheranism.Luther was ordained to the priesthood in 1507. He came to reject several teachings and practices of the Roman Catholic Church; in particular, he disputed the view on indulgences. Luther proposed an academic discussion of the practice and efficacy of indulgences in his Ninety-five Theses of 1517. His refusal to renounce all of his writings at the demand of Pope Leo X in 1520 and the Holy Roman Emperor Charles V at the Diet of Worms in 1521 resulted in his excommunication by the pope and condemnation as an outlaw by the Holy Roman Emperor.", - "sampleQuestions":["Who is Martin Luther ?","When was Martin Luther born ?","When did Martin Luther die ?","When was Martin Luther ordained to the priesthood ?","What is Martin Luther known for ?"] - }, - { - "topic":"Mahatma Gandhi", - "content":"Mohandas Karamchand Gandhi, was an Indian lawyer,anti-colonial nationalist[5] and political ethicist who employed nonviolent resistance to lead the successful campaign for India's independence from British rule, and to later inspire movements for civil rights and freedom across the world. The honorific Mahātmā , first applied to him in 1914 in South Africa, is now used throughout the world.Born and raised in a Hindu family in coastal Gujarat, Gandhi trained in the law at the Inner Temple, London, and was called to the bar at age 22 in June 1891. After two uncertain years in India, where he was unable to start a successful law practice, he moved to South Africa in 1893 to represent an Indian merchant in a lawsuit. He went on to live in South Africa for 21 years. It was here that Gandhi raised a family and first employed nonviolent resistance in a campaign for civil rights. 
In 1915, aged 45, he returned to India and soon set about organising peasants, farmers, and urban labourers to protest against excessive land-tax and discrimination.", - "sampleQuestions":["Who is Mahatma Gandhi ?","When was Mahatma Gandhi born ?","When did Mahatma Gandhi die ?","What is the birth palce of Mahatma Gandhi ?","Where did Mahatma Gandhi train Law ?","When did Mahatma Gandhi move to South Arfica ?","When did Mahatma Gandhi return to India ?"] - } - ] -} \ No newline at end of file diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/assets/logo.png b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/assets/logo.png deleted file mode 100644 index 246afa0b..00000000 Binary files a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/assets/logo.png and /dev/null differ diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/environments/environment.prod.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/environments/environment.prod.ts deleted file mode 100644 index d524d510..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/environments/environment.prod.ts +++ /dev/null @@ -1,13 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -export const environment = { - production: true -}; diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/environments/environment.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/environments/environment.ts deleted file mode 100644 index 0790c962..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/environments/environment.ts +++ /dev/null @@ -1,26 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -// This file can be replaced during build by using the `fileReplacements` array. -// `ng build` replaces `environment.ts` with `environment.prod.ts`. -// The list of file replacements can be found in `angular.json`. - -export const environment = { - production: false -}; - -/* - * For easier debugging in development mode, you can import the following file - * to ignore zone related error stack frames such as `zone.run`, `zoneDelegate.invokeTask`. - * - * This import should be commented out in production mode because it will have a negative impact - * on performance if an error is thrown. - */ -// import 'zone.js/plugins/zone-error'; // Included with Angular CLI. 
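The asset removed just above, `src/assets/QA_List.json`, pairs each reading-comprehension topic with its context paragraph and a list of sample questions, and the deleted `TopicsComponent` and `PredictionComponent` read those fields directly. As a minimal TypeScript sketch of that data contract (the interface and function names below are hypothetical; only the field names `topic`, `content`, and `sampleQuestions` come from the deleted file):

```typescript
// Hypothetical typings for the removed src/assets/QA_List.json payload.
// Field names mirror the deleted asset; the interface names are illustrative only.
interface TopicEntry {
  topic: string;             // e.g. "Qualcomm_Wiki"
  content: string;           // context paragraph fed to the question-answering model
  sampleQuestions: string[];  // quick-pick questions offered in the UI
}

interface TopicsFile {
  topics: TopicEntry[];
}

// The deleted TopicsComponent derived the unique topic names for its picker
// roughly like this (a sketch, not the original implementation):
function uniqueTopicNames(data: TopicsFile): string[] {
  return [...new Set(data.topics.map(entry => entry.topic))];
}
```

Typing the payload this way would have let the components avoid the `any` casts used when indexing the topic list, but the snippet is only an illustration of the data shape, not part of the original application.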
diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/favicon.ico b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/favicon.ico deleted file mode 100644 index 997406ad..00000000 Binary files a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/favicon.ico and /dev/null differ diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/index.html b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/index.html deleted file mode 100644 index d6dede4a..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/index.html +++ /dev/null @@ -1,17 +0,0 @@ - - - - - QNN UI - - - - - - - - - - - - diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/main.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/main.ts deleted file mode 100644 index 9e9a896e..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/main.ts +++ /dev/null @@ -1,22 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -import { enableProdMode } from '@angular/core'; -import { platformBrowserDynamic } from '@angular/platform-browser-dynamic'; - -import { AppModule } from './app/app.module'; -import { environment } from './environments/environment'; - -if (environment.production) { - enableProdMode(); -} - -platformBrowserDynamic().bootstrapModule(AppModule) - .catch(err => console.error(err)); diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/polyfills.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/polyfills.ts deleted file mode 100644 index 0081e0da..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/polyfills.ts +++ /dev/null @@ -1,63 +0,0 @@ -// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -/** - * This file includes polyfills needed by Angular and is loaded before the app. - * You can add your own extra polyfills to this file. - * - * This file is divided into 2 sections: - * 1. Browser polyfills. These are applied before loading ZoneJS and are sorted by browsers. - * 2. Application imports. Files imported after ZoneJS that should be loaded before your main - * file. - * - * The current setup is for so-called "evergreen" browsers; the last versions of browsers that - * automatically update themselves. This includes recent versions of Safari, Chrome (including - * Opera), Edge on the desktop, and iOS and Chrome on mobile. 
- * - * Learn more in https://angular.io/guide/browser-support - */ - -/*************************************************************************************************** - * BROWSER POLYFILLS - */ - -/** - * By default, zone.js will patch all possible macroTask and DomEvents - * user can disable parts of macroTask/DomEvents patch by setting following flags - * because those flags need to be set before `zone.js` being loaded, and webpack - * will put import in the top of bundle, so user need to create a separate file - * in this directory (for example: zone-flags.ts), and put the following flags - * into that file, and then add the following code before importing zone.js. - * import './zone-flags'; - * - * The flags allowed in zone-flags.ts are listed here. - * - * The following flags will work for all browsers. - * - * (window as any).__Zone_disable_requestAnimationFrame = true; // disable patch requestAnimationFrame - * (window as any).__Zone_disable_on_property = true; // disable patch onProperty such as onclick - * (window as any).__zone_symbol__UNPATCHED_EVENTS = ['scroll', 'mousemove']; // disable patch specified eventNames - * - * in IE/Edge developer tools, the addEventListener will also be wrapped by zone.js - * with the following flag, it will bypass `zone.js` patch for IE/Edge - * - * (window as any).__Zone_enable_cross_context_check = true; - * - */ - -/*************************************************************************************************** - * Zone JS is required by default for Angular itself. - */ -import 'zone.js'; // Included with Angular CLI. - - -/*************************************************************************************************** - * APPLICATION IMPORTS - */ diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/styles.css b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/styles.css deleted file mode 100644 index ca908b71..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/styles.css +++ /dev/null @@ -1,72 +0,0 @@ -html, body { height: 100%; overflow: auto; } -body { margin: 0; font-family: Roboto, "Helvetica Neue", sans-serif; } - -#style-3::-webkit-scrollbar-track -{ - -webkit-box-shadow: inset 0 0 6px rgba(0,0,0,0.3); - background-color: #F5F5F5; -} - -#style-3::-webkit-scrollbar -{ - width: 6px; - background-color: #F5F5F5; -} - -#style-3::-webkit-scrollbar-thumb -{ - background-color: #C5C5C5; -} - -.scrollbar -{ - overflow: auto; -} - -#style-4::-webkit-scrollbar-track -{ - -webkit-box-shadow: inset 0 0 6px rgba(0,0,0,0.3); - background-color: #F5F5F5; -} - -#style-4::-webkit-scrollbar -{ - width: 10px; - background-color: #F5F5F5; -} - -#style-4::-webkit-scrollbar-thumb -{ - background-color: #bbb9b9; -} - -.snackbar-error{ - background-color: rgb(255, 45, 45); - color: white; -} - -.snackbar-success{ - background-color: rgb(0, 175, 15); - color: white; -} - -.mat-simple-snackbar-action button { - color: white!important; -} - -#style-44::-webkit-scrollbar-track -{ - -webkit-box-shadow: inset 0 0 6px rgba(0,0,0,0.3); - background-color: #F5F5F5; -} - -#style-44::-webkit-scrollbar -{ - width: 10px; - background-color: #F5F5F5; -} - -#style-44::-webkit-scrollbar-thumb -{ - background-color: #bbb9b9; -} diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/test.ts b/ai-solutions/windows/angular-app-nlp/Electron app UI/src/test.ts deleted file mode 100644 index ffabf805..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/src/test.ts +++ /dev/null @@ -1,36 +0,0 @@ 
-// -*- mode: ts -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= - -// This file is required by karma.conf.js and loads recursively all the .spec and framework files - -import 'zone.js/testing'; -import { getTestBed } from '@angular/core/testing'; -import { - BrowserDynamicTestingModule, - platformBrowserDynamicTesting -} from '@angular/platform-browser-dynamic/testing'; - -declare const require: { - context(path: string, deep?: boolean, filter?: RegExp): { - (id: string): T; - keys(): string[]; - }; -}; - -// First, initialize the Angular testing environment. -getTestBed().initTestEnvironment( - BrowserDynamicTestingModule, - platformBrowserDynamicTesting(), -); - -// Then we find all the tests. -const context = require.context('./', true, /\.spec\.ts$/); -// And load the modules. -context.keys().map(context); diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/tsconfig.app.json b/ai-solutions/windows/angular-app-nlp/Electron app UI/tsconfig.app.json deleted file mode 100644 index 1bb47992..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/tsconfig.app.json +++ /dev/null @@ -1,15 +0,0 @@ -/* To learn more about this file see: https://angular.io/config/tsconfig. */ -{ - "extends": "./tsconfig.json", - "compilerOptions": { - "outDir": "./out-tsc/app", - "types": [] - }, - "files": [ - "src/main.ts", - "src/polyfills.ts" - ], - "include": [ - "src/**/*.d.ts" - ] -} diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/tsconfig.json b/ai-solutions/windows/angular-app-nlp/Electron app UI/tsconfig.json deleted file mode 100644 index ee192750..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/tsconfig.json +++ /dev/null @@ -1,31 +0,0 @@ -/* To learn more about this file see: https://angular.io/config/tsconfig. */ -{ - "compileOnSave": false, - "compilerOptions": { - "baseUrl": "./", - "outDir": "./dist/out-tsc", - "forceConsistentCasingInFileNames": true, - "strict": true, - "noImplicitReturns": false, - "strictPropertyInitialization": false, - "noFallthroughCasesInSwitch": true, - "sourceMap": true, - "declaration": false, - "downlevelIteration": true, - "experimentalDecorators": true, - "moduleResolution": "node", - "importHelpers": true, - "target": "es2017", - "module": "es2020", - "lib": [ - "es2020", - "dom" - ] - }, - "angularCompilerOptions": { - "enableI18nLegacyMessageIdFormat": false, - "strictInjectionParameters": true, - "strictInputAccessModifiers": true, - "strictTemplates": true - } -} diff --git a/ai-solutions/windows/angular-app-nlp/Electron app UI/tsconfig.spec.json b/ai-solutions/windows/angular-app-nlp/Electron app UI/tsconfig.spec.json deleted file mode 100644 index 46d75eb0..00000000 --- a/ai-solutions/windows/angular-app-nlp/Electron app UI/tsconfig.spec.json +++ /dev/null @@ -1,18 +0,0 @@ -/* To learn more about this file see: https://angular.io/config/tsconfig. 
*/ -{ - "extends": "./tsconfig.json", - "compilerOptions": { - "outDir": "./out-tsc/spec", - "types": [ - "jasmine" - ] - }, - "files": [ - "src/test.ts", - "src/polyfills.ts" - ], - "include": [ - "src/**/*.spec.ts", - "src/**/*.d.ts" - ] -} diff --git a/ai-solutions/windows/angular-app-nlp/Generate_DLC.ipynb b/ai-solutions/windows/angular-app-nlp/Generate_DLC.ipynb deleted file mode 100644 index 9e95cf81..00000000 --- a/ai-solutions/windows/angular-app-nlp/Generate_DLC.ipynb +++ /dev/null @@ -1,1376 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "416d10e9-51d0-45da-b016-970e1db53d26", - "metadata": {}, - "source": [ - "# Preparaing the dataset\n", - "- [ https://rajpurkar.github.io/SQuAD-explorer/ ] (Dataset link)\n", - "- Download the dataset from the above link" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "807742c0-4c3b-4a8c-af79-04fb2f5fdf7c", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "--2023-10-03 10:45:46-- https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json\n", - "Resolving rajpurkar.github.io (rajpurkar.github.io)... 185.199.111.153, 185.199.108.153, 185.199.109.153, ...\n", - "Connecting to rajpurkar.github.io (rajpurkar.github.io)|185.199.111.153|:443... connected.\n", - "HTTP request sent, awaiting response... 200 OK\n", - "Length: 4370528 (4.2M) [application/json]\n", - "Saving to: ‘dev-v2.0.json’\n", - "\n", - " 0K .......... .......... .......... .......... .......... 1% 879K 5s\n", - " 50K .......... .......... .......... .......... .......... 2% 1.07M 4s\n", - " 100K .......... .......... .......... .......... .......... 3% 4.82M 3s\n", - " 150K .......... .......... .......... .......... .......... 4% 1.64M 3s\n", - " 200K .......... .......... .......... .......... .......... 5% 5.56M 2s\n", - " 250K .......... .......... .......... .......... .......... 7% 7.47M 2s\n", - " 300K .......... .......... .......... .......... .......... 8% 8.04M 2s\n", - " 350K .......... .......... .......... .......... .......... 9% 9.28M 2s\n", - " 400K .......... .......... .......... .......... .......... 10% 2.22M 2s\n", - " 450K .......... .......... .......... .......... .......... 11% 10.6M 1s\n", - " 500K .......... .......... .......... .......... .......... 12% 12.8M 1s\n", - " 550K .......... .......... .......... .......... .......... 14% 15.8M 1s\n", - " 600K .......... .......... .......... .......... .......... 15% 12.5M 1s\n", - " 650K .......... .......... .......... .......... .......... 16% 11.4M 1s\n", - " 700K .......... .......... .......... .......... .......... 17% 22.4M 1s\n", - " 750K .......... .......... .......... .......... .......... 18% 15.9M 1s\n", - " 800K .......... .......... .......... .......... .......... 19% 19.6M 1s\n", - " 850K .......... .......... .......... .......... .......... 21% 2.66M 1s\n", - " 900K .......... .......... .......... .......... .......... 22% 16.9M 1s\n", - " 950K .......... .......... .......... .......... .......... 23% 19.1M 1s\n", - " 1000K .......... .......... .......... .......... .......... 24% 23.0M 1s\n", - " 1050K .......... .......... .......... .......... .......... 25% 46.0M 1s\n", - " 1100K .......... .......... .......... .......... .......... 26% 25.4M 1s\n", - " 1150K .......... .......... .......... .......... .......... 28% 20.3M 1s\n", - " 1200K .......... .......... .......... .......... .......... 29% 21.9M 1s\n", - " 1250K .......... .......... .......... .......... .......... 
30% 40.2M 1s\n", - " 1300K .......... .......... .......... .......... .......... 31% 23.9M 1s\n", - " 1350K .......... .......... .......... .......... .......... 32% 43.6M 1s\n", - " 1400K .......... .......... .......... .......... .......... 33% 32.1M 1s\n", - " 1450K .......... .......... .......... .......... .......... 35% 28.0M 0s\n", - " 1500K .......... .......... .......... .......... .......... 36% 46.8M 0s\n", - " 1550K .......... .......... .......... .......... .......... 37% 20.3M 0s\n", - " 1600K .......... .......... .......... .......... .......... 38% 52.7M 0s\n", - " 1650K .......... .......... .......... .......... .......... 39% 41.2M 0s\n", - " 1700K .......... .......... .......... .......... .......... 41% 30.8M 0s\n", - " 1750K .......... .......... .......... .......... .......... 42% 2.81M 0s\n", - " 1800K .......... .......... .......... .......... .......... 43% 92.1M 0s\n", - " 1850K .......... .......... .......... .......... .......... 44% 45.8M 0s\n", - " 1900K .......... .......... .......... .......... .......... 45% 30.9M 0s\n", - " 1950K .......... .......... .......... .......... .......... 46% 42.5M 0s\n", - " 2000K .......... .......... .......... .......... .......... 48% 23.5M 0s\n", - " 2050K .......... .......... .......... .......... .......... 49% 100M 0s\n", - " 2100K .......... .......... .......... .......... .......... 50% 48.9M 0s\n", - " 2150K .......... .......... .......... .......... .......... 51% 41.3M 0s\n", - " 2200K .......... .......... .......... .......... .......... 52% 51.3M 0s\n", - " 2250K .......... .......... .......... .......... .......... 53% 40.3M 0s\n", - " 2300K .......... .......... .......... .......... .......... 55% 54.1M 0s\n", - " 2350K .......... .......... .......... .......... .......... 56% 60.7M 0s\n", - " 2400K .......... .......... .......... .......... .......... 57% 43.9M 0s\n", - " 2450K .......... .......... .......... .......... .......... 58% 51.5M 0s\n", - " 2500K .......... .......... .......... .......... .......... 59% 48.7M 0s\n", - " 2550K .......... .......... .......... .......... .......... 60% 68.6M 0s\n", - " 2600K .......... .......... .......... .......... .......... 62% 65.9M 0s\n", - " 2650K .......... .......... .......... .......... .......... 63% 50.6M 0s\n", - " 2700K .......... .......... .......... .......... .......... 64% 49.7M 0s\n", - " 2750K .......... .......... .......... .......... .......... 65% 49.8M 0s\n", - " 2800K .......... .......... .......... .......... .......... 66% 111M 0s\n", - " 2850K .......... .......... .......... .......... .......... 67% 45.0M 0s\n", - " 2900K .......... .......... .......... .......... .......... 69% 74.9M 0s\n", - " 2950K .......... .......... .......... .......... .......... 70% 49.2M 0s\n", - " 3000K .......... .......... .......... .......... .......... 71% 57.3M 0s\n", - " 3050K .......... .......... .......... .......... .......... 72% 108M 0s\n", - " 3100K .......... .......... .......... .......... .......... 73% 70.3M 0s\n", - " 3150K .......... .......... .......... .......... .......... 74% 57.5M 0s\n", - " 3200K .......... .......... .......... .......... .......... 76% 78.2M 0s\n", - " 3250K .......... .......... .......... .......... .......... 77% 53.0M 0s\n", - " 3300K .......... .......... .......... .......... .......... 78% 77.7M 0s\n", - " 3350K .......... .......... .......... .......... .......... 79% 87.4M 0s\n", - " 3400K .......... .......... .......... .......... .......... 
80% 54.8M 0s\n", - " 3450K .......... .......... .......... .......... .......... 82% 77.5M 0s\n", - " 3500K .......... .......... .......... .......... .......... 83% 84.2M 0s\n", - " 3550K .......... .......... .......... .......... .......... 84% 79.8M 0s\n", - " 3600K .......... .......... .......... .......... .......... 85% 3.06M 0s\n", - " 3650K .......... .......... .......... .......... .......... 86% 159M 0s\n", - " 3700K .......... .......... .......... .......... .......... 87% 62.4M 0s\n", - " 3750K .......... .......... .......... .......... .......... 89% 88.1M 0s\n", - " 3800K .......... .......... .......... .......... .......... 90% 89.0M 0s\n", - " 3850K .......... .......... .......... .......... .......... 91% 52.6M 0s\n", - " 3900K .......... .......... .......... .......... .......... 92% 30.1M 0s\n", - " 3950K .......... .......... .......... .......... .......... 93% 110M 0s\n", - " 4000K .......... .......... .......... .......... .......... 94% 84.8M 0s\n", - " 4050K .......... .......... .......... .......... .......... 96% 102M 0s\n", - " 4100K .......... .......... .......... .......... .......... 97% 168M 0s\n", - " 4150K .......... .......... .......... .......... .......... 98% 98.2M 0s\n", - " 4200K .......... .......... .......... .......... .......... 99% 138M 0s\n", - " 4250K .......... ........ 100% 59.3M=0.3s\n", - "\n", - "2023-10-03 10:45:47 (12.3 MB/s) - ‘dev-v2.0.json’ saved [4370528/4370528]\n", - "\n" - ] - } - ], - "source": [ - "%%bash\n", - "\n", - "wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "d8cbcc78-e8ba-43c2-b351-f86695041835", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
contextquestionanswers
0The Normans (Norman: Nourmands; French: Norman...In what country is Normandy located?France
1The Normans (Norman: Nourmands; French: Norman...When were the Normans in Normandy?10th and 11th centuries
2The Normans (Norman: Nourmands; French: Norman...From which countries did the Norse originate?Denmark, Iceland and Norway
\n", - "
" - ], - "text/plain": [ - " context \\\n", - "0 The Normans (Norman: Nourmands; French: Norman... \n", - "1 The Normans (Norman: Nourmands; French: Norman... \n", - "2 The Normans (Norman: Nourmands; French: Norman... \n", - "\n", - " question answers \n", - "0 In what country is Normandy located? France \n", - "1 When were the Normans in Normandy? 10th and 11th centuries \n", - "2 From which countries did the Norse originate? Denmark, Iceland and Norway " - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import json\n", - "import pandas as pd\n", - "\n", - "data_path=\"dev-v2.0.json\"\n", - "\n", - "with open(data_path,\"r\") as f:\n", - " squad_data=json.load(f)\n", - "\n", - "context_qa_triples=[]\n", - "\n", - "for article in squad_data['data']:\n", - " for paragraph in article['paragraphs']:\n", - " context=paragraph['context']\n", - " for qa in paragraph['qas']:\n", - " question=qa['question']\n", - " if qa['answers']:\n", - " answer=qa['answers'][0]['text']\n", - " elif qa['plausible_answers']:\n", - " plausible_answers=qa['plausible_answers']\n", - " answer=plausible_answers[0]['text']\n", - " else:\n", - " answer=''\n", - "\n", - " context_qa_triples.append({'context':context,'question':question,'answers':answer})\n", - "\n", - "df=pd.DataFrame(context_qa_triples[:30])\n", - "df.head(3)" - ] - }, - { - "cell_type": "markdown", - "id": "e4a9faed-f7ac-4f45-a557-d5adc8762dca", - "metadata": {}, - "source": [ - "# Generating Albert Model\n", - "- [Albert Model](https://huggingface.co/docs/transformers/model_doc/albert) You can Learn More about this model from this link\n", - "- You can also check different version of Albert for different usecases from here." - ] - }, - { - "cell_type": "markdown", - "id": "fb83206f-1109-4b87-aea2-11bfede84967", - "metadata": {}, - "source": [ - "### Converting the Model to ONNX format using optimum\n", - "- [ https://github.com/huggingface/optimum ] (Link for optimum)\n", - "- Using optimum we can directly convert any pytorch or tensorflow model to onnx format.\n", - "- Then from this onnx file we can convert to DLC format using SNPE" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "83500540-ebc7-45aa-a675-ea863f1db3b2", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n", - "To disable this warning, you can either:\n", - "\t- Avoid using `tokenizers` before the fork if possible\n", - "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-09-30 23:14:16.106219: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-09-30 23:14:16.180686: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. 
To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-09-30 23:14:16.197835: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-09-30 23:14:16.507801: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 23:14:16.507840: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 23:14:16.507843: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Framework not specified. Using pt to export to ONNX.\n", - "Some weights of the model checkpoint at twmkn9/albert-base-v2-squad2 were not used when initializing AlbertForQuestionAnswering: ['albert.pooler.weight', 'albert.pooler.bias']\n", - "- This IS expected if you are initializing AlbertForQuestionAnswering from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", - "- This IS NOT expected if you are initializing AlbertForQuestionAnswering from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", - "Automatic task detection to question-answering.\n", - "Using framework PyTorch: 2.0.1+cu117\n", - "Post-processing the exported models...\n", - "Validating models in subprocesses...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "============= Diagnostic Run torch.onnx.export version 2.0.1+cu117 =============\n", - "verbose: False, log level: Level.ERROR\n", - "======================= 0 NONE 0 NOTE 0 WARNING 0 ERROR ========================\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-09-30 23:14:21.945196: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-09-30 23:14:22.021237: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. 
To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-09-30 23:14:22.039349: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-09-30 23:14:22.350549: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 23:14:22.350586: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 23:14:22.350590: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Validating ONNX model alberta-onnx/model.onnx...\n", - "\t-[✓] ONNX model output names match reference model (start_logits, end_logits)\n", - "\t- Validating ONNX Model output \"start_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "\t- Validating ONNX Model output \"end_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "The ONNX export succeeded and the exported model was saved at: alberta-onnx\n" - ] - } - ], - "source": [ - "%%bash\n", - "optimum-cli export onnx --model twmkn9/albert-base-v2-squad2 alberta-onnx/" - ] - }, - { - "cell_type": "markdown", - "id": "0f23e961-0113-4ee6-87d9-6391f9105fe2", - "metadata": {}, - "source": [ - "### DLC Conversion with fixed size\n", - "- Now as we get the ONNX Model we'll now convert this to DLC Format" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "5f98873c-afd1-4ba0-b3c2-1f28a5e0b574", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n", - "To disable this warning, you can either:\n", - "\t- Avoid using `tokenizers` before the fork if possible\n", - "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n", - "\u001b[1;31mWARNING: The argument `input_shapes` is deprecated. Please use \u001b[0m\n", - "\u001b[1;31m`overwrite_input_shapes` and/or `test_input_shapes` instead. An error will be \u001b[0m\n", - "\u001b[1;31mraised in the future.\u001b[0m\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-09-30 23:14:31,801 - 235 - INFO - Successfully simplified the onnx model in child process\n", - "2023-09-30 23:14:31,937 - 235 - INFO - Successfully receive the simplified onnx model in main process\n", - "2023-09-30 23:14:32,000 - 235 - INFO - Successfully run shape inference in child process\n", - "2023-09-30 23:14:32,152 - 235 - INFO - Successfully receive the inferred model in main process\n", - "2023-09-30 23:14:32,165 - 240 - WARNING - WARNING_CAST_TYPE: Only numerical type cast is supported. 
The op: /albert/Cast will be interpreted at conversion time\n", - "2023-09-30 23:14:33,860 - 235 - INFO - INFO_INITIALIZATION_SUCCESS: \n", - "2023-09-30 23:14:34,016 - 235 - INFO - INFO_CONVERSION_SUCCESS: Conversion completed successfully\n", - "2023-09-30 23:14:34,076 - 235 - INFO - INFO_WRITE_SUCCESS: \n" - ] - } - ], - "source": [ - "%%bash\n", - "snpe-onnx-to-dlc -i alberta-onnx/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o alberta.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "ca0c0356-9a15-4ef1-9f63-3f1baf41eed7", - "metadata": {}, - "source": [ - "### Creating FP16 Model\n", - "1. First of all we need to create the RAW File\n", - "2. Then we'll convert this FP32 DLC to FP16 DLC" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a0710645-b945-486f-9050-945e1058faaa", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "mkdir input_ids\n", - "mkdir attention_mask\n", - "mkdir token_type_ids" - ] - }, - { - "cell_type": "markdown", - "id": "9ebe8f2a-e140-4828-ba36-5171fbba9b6e", - "metadata": {}, - "source": [ - "#### Creating the RAW Files" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7a27bfe7-d501-4b57-9b55-a3e3374e2dec", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, AlbertForQuestionAnswering\n", - "import torch\n", - "\n", - "# Getting the tokenizer to convert it to particular inputs that the model needed\n", - "tokenizer = AutoTokenizer.from_pretrained(\"twmkn9/albert-base-v2-squad2\")\n", - "\n", - "question_token={}\n", - "\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "markdown", - "id": "31780ce6-882d-48dc-b589-e893e429d9fd", - "metadata": {}, - "source": [ - "#### Creating the List " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0ca58728-3401-4653-8542-b823f735264f", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "total_iter = 30\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "\n", - "with open(\"tf_raw_list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"input_ids:=input_ids/inp_ids_{}.raw attention_mask:=attention_mask/attn_mask_{}.raw token_type_ids:=token_type_ids/token_type_id_{}.raw\\n\".format(i,i,i))\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "id": "13f4ca35-caf7-4ffa-a954-bc8405386276", - "metadata": {}, - "source": [ - "### Creating the FP16 Model\n", - "- This cached model is optimized for sm8550\n", - "- if you've different processor please change it accordingly" - ] - }, - { - 
"cell_type": "code", - "execution_count": 24, - "id": "d8c82fc7-3b70-4898-be64-0ad9d0dd0312", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[INFO] InitializeStderr: DebugLog initialized.\n", - "[INFO] SNPE HTP Offline Prepare: Attempting to create cache for SM8550\n", - "[INFO] Attempting to open dynamically linked lib: libHtpPrepare.so\n", - "[INFO] dlopen libHtpPrepare.so SUCCESS handle 0x2e303b0\n", - "[INFO] Found Interface Provider (v2.8)\n", - "[USER_WARNING] QnnDsp Initializing HtpProvider\n", - "[USER_WARNING] QnnDsp Cost Based unsupported on soc SM8550\n", - "[USER_INFO] FP16 precision enabled for graph with id=0\n", - "[USER_INFO] Offline Prepare VTCM size(MB) selected = 8\n", - "[USER_INFO] Offline Prepare DLBC enablement passed = 0\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n", - "To disable this warning, you can either:\n", - "\t- Avoid using `tokenizers` before the fork if possible\n", - "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[USER_INFO] Cleaning up backend manager resources\n", - "[USER_INFO] Cleaning up Contexts\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] SNPE HTP Offline Prepare: Successfully created cache for SM8550\n", - "[INFO] SNPE HTP Offline Prepare: Saved cached DLC to alberta_float.dlc\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] DebugLog shutting down.\n" - ] - } - ], - "source": [ - "%%bash\n", - "\n", - "snpe-dlc-graph-prepare --input_dlc alberta.dlc --input_list tf_raw_list.txt --output_dlc alberta_float.dlc --set_output_tensors end_logits,start_logits --use_float_io --htp_socs sm8550" - ] - }, - { - "cell_type": "markdown", - "id": "fd975495-2aef-4f3d-87be-9370bbf605da", - "metadata": {}, - "source": [ - "# Generating Mobilebert Model\n", - "- [Mobile bert ](https://huggingface.co/csarron/mobilebert-uncased-squad-v2/tree/main) You can Learn More about this model from this link\n", - "- To check more about different use cases of Mobilebert you can use this [link](https://huggingface.co/docs/transformers/model_doc/mobilebert)" - ] - }, - { - "cell_type": "markdown", - "id": "bccfc9e5-4134-4656-94fb-f3641848818f", - "metadata": {}, - "source": [ - "### Generating the ONNX Model" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "650cebf7-1697-4a9d-bb53-4903a7b304b4", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. 
Disabling parallelism to avoid deadlocks...\n", - "To disable this warning, you can either:\n", - "\t- Avoid using `tokenizers` before the fork if possible\n", - "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-09-30 21:59:36.311479: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-09-30 21:59:36.385244: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-09-30 21:59:36.402226: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-09-30 21:59:36.706875: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 21:59:36.706914: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 21:59:36.706918: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Framework not specified. Using pt to export to ONNX.\n", - "Automatic task detection to question-answering.\n", - "Using framework PyTorch: 2.0.1+cu117\n", - "/local/mnt/workspace/sahinhos/sahinenv/lib/python3.8/site-packages/transformers/models/mobilebert/modeling_mobilebert.py:549: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n", - " torch.tensor(1000),\n", - "/local/mnt/workspace/sahinhos/sahinenv/lib/python3.8/site-packages/torch/onnx/_internal/jit_utils.py:306: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. (Triggered internally at ../torch/csrc/jit/passes/onnx/constant_fold.cpp:179.)\n", - " _C._jit_pass_onnx_node_shape_type_inference(node, params_dict, opset_version)\n", - "/local/mnt/workspace/sahinhos/sahinenv/lib/python3.8/site-packages/torch/onnx/utils.py:689: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. 
(Triggered internally at ../torch/csrc/jit/passes/onnx/constant_fold.cpp:179.)\n", - " _C._jit_pass_onnx_graph_shape_type_inference(\n", - "/local/mnt/workspace/sahinhos/sahinenv/lib/python3.8/site-packages/torch/onnx/utils.py:1186: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. (Triggered internally at ../torch/csrc/jit/passes/onnx/constant_fold.cpp:179.)\n", - " _C._jit_pass_onnx_graph_shape_type_inference(\n", - "Post-processing the exported models...\n", - "Validating models in subprocesses...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "============= Diagnostic Run torch.onnx.export version 2.0.1+cu117 =============\n", - "verbose: False, log level: Level.ERROR\n", - "======================= 0 NONE 0 NOTE 0 WARNING 0 ERROR ========================\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-09-30 21:59:51.377048: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-09-30 21:59:51.452021: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-09-30 21:59:51.469676: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-09-30 21:59:51.779129: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 21:59:51.779165: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 21:59:51.779169: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. 
If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Validating ONNX model mobilebert-onnx/model.onnx...\n", - "\t-[✓] ONNX model output names match reference model (end_logits, start_logits)\n", - "\t- Validating ONNX Model output \"start_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "\t- Validating ONNX Model output \"end_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "The ONNX export succeeded and the exported model was saved at: mobilebert-onnx\n" - ] - } - ], - "source": [ - "%%bash\n", - "optimum-cli export onnx --model csarron/mobilebert-uncased-squad-v2 mobilebert-onnx/" - ] - }, - { - "cell_type": "markdown", - "id": "a4c6f6ef-1088-4ac2-b4fe-8aeceaa5d42a", - "metadata": {}, - "source": [ - "### Converting to DLC" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "219fc000-a9bb-483e-891e-2e9db570ea28", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n", - "To disable this warning, you can either:\n", - "\t- Avoid using `tokenizers` before the fork if possible\n", - "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n", - "\u001b[1;31mWARNING: The argument `input_shapes` is deprecated. Please use \u001b[0m\n", - "\u001b[1;31m`overwrite_input_shapes` and/or `test_input_shapes` instead. An error will be \u001b[0m\n", - "\u001b[1;31mraised in the future.\u001b[0m\n", - "WARNING: the simplification stopped because of timeout. Please set environment variable `ONNXSIM_FIXED_POINT_ITERS` to a number higher than 50if you want further simplification.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-09-30 22:00:38,259 - 235 - INFO - Successfully simplified the onnx model in child process\n", - "2023-09-30 22:00:38,608 - 235 - INFO - Successfully receive the simplified onnx model in main process\n", - "2023-09-30 22:00:38,756 - 235 - INFO - Successfully run shape inference in child process\n", - "2023-09-30 22:00:39,093 - 235 - INFO - Successfully receive the inferred model in main process\n", - "2023-09-30 22:00:39,127 - 240 - WARNING - WARNING_CAST_TYPE: Only numerical type cast is supported. 
The op: /mobilebert/Cast will be interpreted at conversion time\n", - "2023-09-30 22:00:45,555 - 235 - INFO - INFO_INITIALIZATION_SUCCESS: \n", - "2023-09-30 22:00:46,066 - 235 - INFO - INFO_CONVERSION_SUCCESS: Conversion completed successfully\n", - "2023-09-30 22:00:46,188 - 235 - INFO - INFO_WRITE_SUCCESS: \n" - ] - } - ], - "source": [ - "%%bash\n", - "snpe-onnx-to-dlc -i mobilebert-onnx/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o mobile_bert.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "f75c34ca-0e44-4e4f-9fed-d67da3e83a3b", - "metadata": {}, - "source": [ - "### Creating the RAW file" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "7aca1352-6a30-4614-9734-f3930774f388", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, MobileBertForQuestionAnswering\n", - "import torch\n", - "\n", - "tokenizer = AutoTokenizer.from_pretrained(\"csarron/mobilebert-uncased-squad-v2\")\n", - "\n", - "\n", - "question_token={}\n", - "\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "bf159fd3-32b9-4416-ad89-6fe1b3516ded", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Generating input_list \"small_raw_list.txt\" with 30 iterations\n" - ] - } - ], - "source": [ - "\n", - "total_iter = 30\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "\n", - "with open(\"tf_raw_list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"input_ids:=input_ids/inp_ids_{}.raw attention_mask:=attention_mask/attn_mask_{}.raw token_type_ids:=token_type_ids/token_type_id_{}.raw\\n\".format(i,i,i))\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "id": "d3f7c71a-ac10-4a31-bcc2-3ddba9dc0a86", - "metadata": {}, - "source": [ - "#### Creating the FP 16 Model" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "d6f2d588-01e0-4a0e-88d8-7b4823e89143", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[INFO] InitializeStderr: DebugLog initialized.\n", - "[INFO] SNPE HTP Offline Prepare: Attempting to create cache for SM8550\n", - "[INFO] Attempting to open dynamically linked lib: libHtpPrepare.so\n", - "[INFO] dlopen libHtpPrepare.so SUCCESS handle 0x17b9a90\n", - "[INFO] Found Interface Provider (v2.8)\n", - "[USER_WARNING] QnnDsp Initializing HtpProvider\n", - "[USER_WARNING] QnnDsp Cost Based unsupported on soc SM8550\n", - "[USER_INFO] FP16 precision enabled for graph with id=0\n", - "[USER_INFO] 
Offline Prepare VTCM size(MB) selected = 8\n", - "[USER_INFO] Offline Prepare DLBC enablement passed = 0\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n", - "To disable this warning, you can either:\n", - "\t- Avoid using `tokenizers` before the fork if possible\n", - "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[USER_INFO] Cleaning up backend manager resources\n", - "[USER_INFO] Cleaning up Contexts\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] SNPE HTP Offline Prepare: Successfully created cache for SM8550\n", - "[INFO] SNPE HTP Offline Prepare: Saved cached DLC to mobile_bert_float.dlc\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] DebugLog shutting down.\n" - ] - } - ], - "source": [ - "%%bash\n", - "snpe-dlc-graph-prepare --input_dlc mobile_bert.dlc --input_list tf_raw_list.txt --output_dlc mobile_bert_float.dlc --use_float_io --set_output_tensors end_logits,start_logits --htp_socs sm8550" - ] - }, - { - "cell_type": "markdown", - "id": "f6941fa4-a8f4-4bd3-997c-823d44c2338b", - "metadata": {}, - "source": [ - "# Generating DistilBert Model\n", - "- [Mobile bert ](https://huggingface.co/csarron/mobilebert-uncased-squad-v2/tree/main) You can Learn More about this model from this link\n", - "- To check more about different use cases of Mobilebert you can use this [link](https://huggingface.co/docs/transformers/model_doc/mobilebert)" - ] - }, - { - "cell_type": "markdown", - "id": "707c5d40-452d-4f15-a5e8-5adfb60a6b87", - "metadata": {}, - "source": [ - "### Generating the ONNX Model" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "6c159f63-d842-481f-9f41-ebf34c522cb8", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-10-03 10:44:52.392730: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-10-03 10:44:52.517845: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. 
To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-10-03 10:44:52.558733: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-10-03 10:44:53.007277: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:44:53.007318: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:44:53.007322: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Framework not specified. Using pt to export to ONNX.\n", - "Automatic task detection to question-answering.\n", - "Using framework PyTorch: 2.0.1+cu117\n", - "/local/mnt/workspace/sahinhos/sahinenv/lib/python3.8/site-packages/transformers/models/distilbert/modeling_distilbert.py:223: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n", - " mask, torch.tensor(torch.finfo(scores.dtype).min)\n", - "Post-processing the exported models...\n", - "Validating models in subprocesses...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "============= Diagnostic Run torch.onnx.export version 2.0.1+cu117 =============\n", - "verbose: False, log level: Level.ERROR\n", - "======================= 0 NONE 0 NOTE 0 WARNING 0 ERROR ========================\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-10-03 10:45:02.060867: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-10-03 10:45:02.135422: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. 
To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-10-03 10:45:02.153106: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-10-03 10:45:02.470090: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:45:02.470129: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:45:02.470133: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Validating ONNX model distilbert-uncased-onnx/model.onnx...\n", - "\t-[✓] ONNX model output names match reference model (end_logits, start_logits)\n", - "\t- Validating ONNX Model output \"start_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "\t- Validating ONNX Model output \"end_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "The ONNX export succeeded and the exported model was saved at: distilbert-uncased-onnx\n" - ] - } - ], - "source": [ - "%%bash\n", - "optimum-cli export onnx --model distilbert-base-uncased-distilled-squad distilbert-uncased-onnx/" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "366aff04-6160-4f85-b0ea-711132fdc06d", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "NodeArg(name='input_ids', type='tensor(int64)', shape=['batch_size', 'sequence_length'])\n", - "NodeArg(name='attention_mask', type='tensor(int64)', shape=['batch_size', 'sequence_length'])\n" - ] - } - ], - "source": [ - "import onnxruntime\n", - "\n", - "model_path='distilbert-uncased-onnx/model.onnx'\n", - "sess=onnxruntime.InferenceSession(model_path)\n", - "\n", - "input_layer_names=sess.get_inputs()\n", - "\n", - "for input_layer in input_layer_names:\n", - " print(input_layer)" - ] - }, - { - "cell_type": "markdown", - "id": "45293bb1-fb07-457b-85a7-7b590b0cc483", - "metadata": {}, - "source": [ - "### Generating the DLC Model" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "64e4b107-1a84-41d9-9301-b42b094f4e3b", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[1;31mWARNING: The argument `input_shapes` is deprecated. Please use \u001b[0m\n", - "\u001b[1;31m`overwrite_input_shapes` and/or `test_input_shapes` instead. 
An error will be \u001b[0m\n", - "\u001b[1;31mraised in the future.\u001b[0m\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-10-03 10:45:21,467 - 235 - INFO - Successfully simplified the onnx model in child process\n", - "2023-10-03 10:45:22,316 - 235 - INFO - Successfully receive the simplified onnx model in main process\n", - "2023-10-03 10:45:22,710 - 235 - INFO - Successfully run shape inference in child process\n", - "2023-10-03 10:45:23,659 - 235 - INFO - Successfully receive the inferred model in main process\n", - "2023-10-03 10:45:24,924 - 235 - INFO - INFO_INITIALIZATION_SUCCESS: \n", - "2023-10-03 10:45:25,704 - 235 - INFO - INFO_CONVERSION_SUCCESS: Conversion completed successfully\n", - "2023-10-03 10:45:26,041 - 235 - INFO - INFO_WRITE_SUCCESS: \n" - ] - } - ], - "source": [ - "%%bash\n", - "snpe-onnx-to-dlc -i distilbert-uncased-onnx/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -o distilbert.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "c662141a-548d-4035-8c58-e3dd289d942d", - "metadata": {}, - "source": [ - "### Generating the RAW file" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "abc71553-f164-4ca6-9f38-31e487d68e6d", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import DistilBertTokenizer, TFDistilBertForQuestionAnswering\n", - "import tensorflow as tf\n", - "tokenizer = DistilBertTokenizer.from_pretrained(\"distilbert-base-uncased-distilled-squad\")\n", - "\n", - "question_token={}\n", - "\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)" - ] - }, - { - "cell_type": "markdown", - "id": "d3943565-f0eb-4786-b07f-53990b47e131", - "metadata": {}, - "source": [ - "#### Creating the list" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "74089306-1087-476e-953a-c3ee1c24f2c0", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Generating input_list \"small_raw_list.txt\" with 30 iterations\n" - ] - } - ], - "source": [ - "\n", - "total_iter = 30\n", - "print(\"Generating input_list \\\"tf_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "\n", - "with open(\"tf_raw_list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"input_ids:=input_ids/inp_ids_{}.raw attention_mask:=attention_mask/attn_mask_{}.raw\\n\".format(i,i)) # add token mask if needed\n" - ] - }, - { - "cell_type": "markdown", - "id": "5438afa2-7d0c-431a-a430-14c3c93df69f", - "metadata": {}, - "source": [ - "### Creating the FP16 DLC Model" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "e4420c91-d0a8-4e9d-913c-014485696284", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[INFO] InitializeStderr: DebugLog initialized.\n", - "[INFO] SNPE HTP Offline Prepare: Attempting to create cache for 
SM8550\n", - "[INFO] Attempting to open dynamically linked lib: libHtpPrepare.so\n", - "[INFO] dlopen libHtpPrepare.so SUCCESS handle 0x15d8c70\n", - "[INFO] Found Interface Provider (v2.8)\n", - "[USER_WARNING] QnnDsp Initializing HtpProvider\n", - "[USER_WARNING] QnnDsp Cost Based unsupported on soc SM8550\n", - "[USER_INFO] FP16 precision enabled for graph with id=0\n", - "[USER_INFO] Offline Prepare VTCM size(MB) selected = 8\n", - "[USER_INFO] Offline Prepare DLBC enablement passed = 0\n", - "[USER_INFO] Cleaning up backend manager resources\n", - "[USER_INFO] Cleaning up Contexts\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] SNPE HTP Offline Prepare: Successfully created cache for SM8550\n", - "[INFO] SNPE HTP Offline Prepare: Saved cached DLC to distilbert_float.dlc\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] DebugLog shutting down.\n" - ] - } - ], - "source": [ - "%%bash\n", - "snpe-dlc-graph-prepare --input_dlc distilbert.dlc --input_list tf_raw_list.txt --output_dlc distilbert_float.dlc --use_float_io --set_output_tensors end_logits,start_logits --htp_socs sm8550" - ] - }, - { - "cell_type": "markdown", - "id": "86af6701-8ec8-4f69-bf10-6fe0272596f4", - "metadata": {}, - "source": [ - "# Generating Bertbase Model\n", - "- [Bert Base ](https://huggingface.co/bert-base-uncased) You can Learn More about this model from this link\n", - "- To check more about different use cases of Mobilebert you can use this [link](https://huggingface.co/docs/transformers/model_doc/bert)" - ] - }, - { - "cell_type": "markdown", - "id": "bed7ff7a-c1fd-4ffc-8d0f-fc273d8d6c98", - "metadata": {}, - "source": [ - "### Generating the ONNX Model" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "591c04a2-ad03-4920-ae01-97796d712f4f", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-10-03 10:46:14.040440: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-10-03 10:46:14.116542: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. 
To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-10-03 10:46:14.134480: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-10-03 10:46:14.449560: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:46:14.449599: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:46:14.449602: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Framework not specified. Using pt to export to ONNX.\n", - "Downloading (…)lve/main/config.json: 100%|██████████| 508/508 [00:00<00:00, 171kB/s]\n", - "Downloading model.safetensors: 100%|██████████| 433M/433M [00:03<00:00, 114MB/s] \n", - "Some weights of the model checkpoint at deepset/bert-base-cased-squad2 were not used when initializing BertForQuestionAnswering: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight']\n", - "- This IS expected if you are initializing BertForQuestionAnswering from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", - "- This IS NOT expected if you are initializing BertForQuestionAnswering from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", - "Automatic task detection to question-answering.\n", - "Downloading (…)okenizer_config.json: 100%|██████████| 152/152 [00:00<00:00, 27.5kB/s]\n", - "Downloading (…)solve/main/vocab.txt: 100%|██████████| 213k/213k [00:00<00:00, 543kB/s]\n", - "Downloading (…)cial_tokens_map.json: 100%|██████████| 112/112 [00:00<00:00, 68.4kB/s]\n", - "Using framework PyTorch: 2.0.1+cu117\n", - "Overriding 1 configuration item(s)\n", - "\t- use_cache -> False\n", - "Post-processing the exported models...\n", - "Validating models in subprocesses...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "============= Diagnostic Run torch.onnx.export version 2.0.1+cu117 =============\n", - "verbose: False, log level: Level.ERROR\n", - "======================= 0 NONE 0 NOTE 0 WARNING 0 ERROR ========================\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-10-03 10:46:36.885804: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-10-03 10:46:36.962408: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. 
You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-10-03 10:46:36.979785: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-10-03 10:46:37.302037: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:46:37.302087: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:46:37.302090: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Validating ONNX model bertbase-onnx-2/model.onnx...\n", - "\t-[✓] ONNX model output names match reference model (end_logits, start_logits)\n", - "\t- Validating ONNX Model output \"start_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "\t- Validating ONNX Model output \"end_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "The ONNX export succeeded and the exported model was saved at: bertbase-onnx-2\n" - ] - } - ], - "source": [ - "%%bash\n", - "optimum-cli export onnx --model deepset/bert-base-cased-squad2 bertbase-onnx-2/" - ] - }, - { - "cell_type": "markdown", - "id": "0cb359c2-c903-458f-ad73-eaf49f64f439", - "metadata": {}, - "source": [ - "### Generating the DLC Model" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "4ab009ba-bb55-4f6e-8d4d-cc8f72722944", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[1;31mWARNING: The argument `input_shapes` is deprecated. Please use \u001b[0m\n", - "\u001b[1;31m`overwrite_input_shapes` and/or `test_input_shapes` instead. An error will be \u001b[0m\n", - "\u001b[1;31mraised in the future.\u001b[0m\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-10-03 10:47:35,407 - 235 - INFO - Successfully simplified the onnx model in child process\n", - "2023-10-03 10:47:36,773 - 235 - INFO - Successfully receive the simplified onnx model in main process\n", - "2023-10-03 10:47:37,477 - 235 - INFO - Successfully run shape inference in child process\n", - "2023-10-03 10:47:39,019 - 235 - INFO - Successfully receive the inferred model in main process\n", - "2023-10-03 10:47:39,186 - 240 - WARNING - WARNING_CAST_TYPE: Only numerical type cast is supported. 
The op: /bert/Cast will be interpreted at conversion time\n", - "2023-10-03 10:47:41,322 - 235 - INFO - INFO_INITIALIZATION_SUCCESS: \n", - "2023-10-03 10:47:42,763 - 235 - INFO - INFO_CONVERSION_SUCCESS: Conversion completed successfully\n", - "2023-10-03 10:47:43,292 - 235 - INFO - INFO_WRITE_SUCCESS: \n" - ] - } - ], - "source": [ - "%%bash\n", - "snpe-onnx-to-dlc -i bertbase-onnx-2/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o bert_base.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "22ba7189-d6e4-4008-baa8-6985a2dd6eec", - "metadata": {}, - "source": [ - "### Creating the RAW files " - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "26693698-46f1-4083-a141-7f664480a8df", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, BertForQuestionAnswering\n", - "import torch\n", - "\n", - "tokenizer = AutoTokenizer.from_pretrained(\"deepset/bert-base-cased-squad2\")\n", - "\n", - "question_token={}\n", - "\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "76bc5298-bd90-41a5-ad5f-5fac45b00540", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Generating input_list \"small_raw_list.txt\" with 30 iterations\n" - ] - } - ], - "source": [ - "\n", - "total_iter = 30\n", - "print(\"Generating input_list \\\"sma_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "\n", - "with open(\"tf_raw_list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"input_ids:=input_ids/inp_ids_{}.raw attention_mask:=attention_mask/attn_mask_{}.raw token_type_ids:=token_type_ids/token_type_id_{}.raw\\n\".format(i,i,i))\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "daba430f-ebed-4d10-bfc2-c7254c5a4dad", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[INFO] InitializeStderr: DebugLog initialized.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. 
Disabling parallelism to avoid deadlocks...\n", - "To disable this warning, you can either:\n", - "\t- Avoid using `tokenizers` before the fork if possible\n", - "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[INFO] SNPE HTP Offline Prepare: Attempting to create cache for SM8550\n", - "[INFO] Attempting to open dynamically linked lib: libHtpPrepare.so\n", - "[INFO] dlopen libHtpPrepare.so SUCCESS handle 0x18f87a0\n", - "[INFO] Found Interface Provider (v2.8)\n", - "[USER_WARNING] QnnDsp Initializing HtpProvider\n", - "[USER_WARNING] QnnDsp Cost Based unsupported on soc SM8550\n", - "[USER_INFO] FP16 precision enabled for graph with id=0\n", - "[USER_INFO] Offline Prepare VTCM size(MB) selected = 8\n", - "[USER_INFO] Offline Prepare DLBC enablement passed = 0\n", - "[USER_INFO] Cleaning up backend manager resources\n", - "[USER_INFO] Cleaning up Contexts\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] SNPE HTP Offline Prepare: Successfully created cache for SM8550\n", - "[INFO] SNPE HTP Offline Prepare: Saved cached DLC to bert_base_float.dlc\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] DebugLog shutting down.\n" - ] - } - ], - "source": [ - "%%bash\n", - "\n", - "snpe-dlc-graph-prepare --input_dlc bert_base.dlc --input_list tf_raw_list.txt --output_dlc bert_base_float.dlc --set_output_tensors end_logits,start_logits --use_float_io --htp_socs sm8550" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f01fc02b-a5fd-4367-817e-f44ff61506d8", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4611b710-a8e2-47bb-950d-652b88da8abd", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6033aeec-8756-4291-82df-6f0f10f59382", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.17" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/ai-solutions/windows/angular-app-nlp/Python_flask_server/server.py b/ai-solutions/windows/angular-app-nlp/Python_flask_server/server.py deleted file mode 100644 index f2a6b751..00000000 --- a/ai-solutions/windows/angular-app-nlp/Python_flask_server/server.py +++ /dev/null @@ -1,310 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -import datetime -from sqlite3 import Date -from flask import Flask, render_template, request, jsonify, make_response, send_file -from flask_cors import CORS -from PIL import Image -from empatches import EMPatches -import io, os -import cv2 -import numpy as np -import time -import functools -import zmq -import sys -import torch -import tensorflow as tf - -model=None -app = Flask(__name__, - static_url_path='', - static_folder='static') -CORS(app) - -time_taken_model = "" -upscaled_img_dims = "" -old_runtime = "" -old_model_name = "" - -def pyinstaller_absolute_path(relative_path): - """ For PyInstaller, getting absolute path of resources""" - base_path = getattr( sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__))) - abs_path = os.path.join(base_path, relative_path) - return abs_path - -def func(start_logits,end_logits,inputs,tokenizer): - answer_start_index = int(tf.math.argmax(start_logits, axis=-1)[0]) - answer_end_index = int(tf.math.argmax(end_logits, axis=-1)[0]) - - start=min(answer_start_index,answer_end_index) - end=max(answer_start_index,answer_end_index) - print("start_index:",answer_start_index,"end index:",answer_end_index) - predict_answer_tokens = inputs.input_ids[0, start : end+ 1] - return tokenizer.decode(predict_answer_tokens) - - -@app.route('/api/BuildModel', methods=['POST']) -def getModelName(): - - data=request.json - - inp=data['input'] - global model - model=inp['model'] - print(model,data) - return model - - - -def buildnetwork(socket, model_name, run_time): - - print("BUILDING NETWORK") - first_str = b"networkbuild" - - runtime_name_decoder={'DSP':b"DSP",'GPU':b"GPU", 'CPU':b"CPU"} - dlc_name_decoder={'distilbert':'distilbert2_float.dlc','alberta':'alberta_float.dlc','mobile_bert':'mobile_bert_updated_float.dlc','electrabase':'electrabase_float.dlc','bert_base':'bert_base2_float.dlc'} - dlc_path = bytes(pyinstaller_absolute_path(os.path.join("dlc", dlc_name_decoder.get(model_name))),'utf-8') - - socket.send_multipart([first_str,dlc_path, runtime_name_decoder.get(run_time)]) - - print("Messages sent for building network, waiting for reply") - message_build = socket.recv() - print(message_build) - - -def preprocess(question,text,model_name,tokenizer,socket): - inputs = tokenizer(question, text, return_tensors="np", - padding='max_length', - truncation="longest_first", - max_length=384) - if model_name!='distilbert': - attention_mask =inputs['attention_mask'].tobytes() - input_ids=inputs['input_ids'].tobytes() - token_type_ids=inputs['token_type_ids'].tobytes() - #Sending multiple messages to the snpe cpp server - socket.send_multipart([b"infer",b"3",attention_mask,input_ids,token_type_ids]) - - - else: - attention_mask =inputs['attention_mask'].tobytes() - input_ids=inputs['input_ids'].tobytes() - #Sending multiple messages to the snpe cpp server - socket.send_multipart([b"infer",b"2",input_ids,attention_mask]) - - return inputs - - - -def predict(socket,question,text, model_name, run_time ): - - - runtime_name_decoder={'DSP':"--use_dsp",'GPU':"--use_gpu", 'CPU':""} - - if model_name=='alberta': - from transformers import AutoTokenizer, AlbertForQuestionAnswering - tokenizer = AutoTokenizer.from_pretrained("twmkn9/albert-base-v2-squad2") - - elif model_name=='distilbert': - from transformers import DistilBertTokenizer, TFDistilBertForQuestionAnswering - tokenizer = 
DistilBertTokenizer.from_pretrained("distilbert-base-uncased-distilled-squad") - inputs=preprocess(question,text,'distilbert',tokenizer,socket) - - elif model_name=='electrabase': - from transformers import AutoTokenizer, ElectraForQuestionAnswering - tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/electra-base-squad2") - inputs=preprocess(question,text,'electrabase',tokenizer,socket) - elif model_name=='mobile_bert': - from transformers import AutoTokenizer, MobileBertForQuestionAnswering - tokenizer = AutoTokenizer.from_pretrained("csarron/mobilebert-uncased-squad-v2") - inputs=preprocess(question,text,'mobile_bert',tokenizer,socket) - - elif model_name=='bert_base': - from transformers import AutoTokenizer, BertForQuestionAnswering - tokenizer = AutoTokenizer.from_pretrained("deepset/bert-base-cased-squad2") - inputs=preprocess(question,text,'bert_base',tokenizer,socket) - - dlc_name_decoder={'distilbert':'distilbert2_float.dlc','alberta':'alberta_float.dlc','mobile_bert':'mobile_bert_updated_float.dlc','electrabase':'electrabase_float.dlc','bert_base':'bert_base2_float.dlc'} - dlc_path = os.path.join("dlc", dlc_name_decoder.get(model_name)) - - print("Messages sent, waiting for reply") - message_img_out1 = socket.recv() - message_img_out2 = socket.recv() - - end_logits= np.frombuffer(message_img_out1, dtype=np.float32) - start_logits= np.frombuffer(message_img_out2, dtype=np.float32) - - start_logits = start_logits.reshape(1,384) - end_logits = end_logits.reshape(1,384) - - result=func(start_logits,end_logits,inputs,tokenizer) - print("Result is :",result) - socket.send(b"get_infer_time") - message_infer_time = socket.recv() - print("message_infer_time", message_infer_time.decode('UTF-8')) - return result,message_infer_time.decode('UTF-8') - - - -# Serve INDEX HTML file -@app.route('/') -def index(): - return render_template('index.html') - -# Endpoint for super resolution -@app.route('/timer_string', methods=['POST']) -def timer_string(): - print("Fetching image data from the POST request") - - -@app.route('/api/fetchPredictionResults', methods=['POST']) -def initialization(): - print("Fetching image data from the POST request") - data=request.json - - inp=data['input'] - question=inp['question'] - paragraph=inp['paragraph'] - runtime=inp['runtime'] - print("question:",question) - print("paragraph:",paragraph) - - - print("Model Name",model) - old_model_name='mobilebert' - old_runtime="DSP" - ## MAKING CONNECTION WITH SNPE EXE ## - context = zmq.Context() - # Create a REQ (request) socket - socket = context.socket(zmq.REQ) - server_address = "tcp://localhost:5555" # Replace with your server's address - socket.connect(server_address) - if model != old_model_name or runtime != old_runtime: - print("___________________BUILDINGNETWORK________________") - print("old_model_name: ", old_model_name, "::model_name: ",model) - print("old_runtime: ", old_runtime, "::runtime: ",runtime) - buildnetwork(socket, model, runtime) ##build network when there is some change other than image - old_model_name = model - old_runtime = runtime - - result,infer_time=predict(socket,question,paragraph, model,runtime ) - - current_time=datetime.datetime.now().strftime('%Y-%m-%d %H:%M') - response_data={ - 'question':question, - 'answer':result, - 'time':current_time, - 'executionTime':infer_time, - 'error_message':None - } - print(response_data) - response=make_response(jsonify(response_data),200) - return response - # # image_data = request.files['imageData'] - - # model_name = 
request.form['model_name'] - # print("MODEL NAME:",model_name) - - # runtime = request.form['runtime'] - # print("RUN TIME:",runtime) - - # print("load as PIL IMG") - # image_data = Image.open(image_data) - # #image_data.save("input_img.png") - # width, height = image_data.size - # print(f"Received img height = {height} ; width = {width}") - - - # ## MAKING CONNECTION WITH SNPE EXE ## - # context = zmq.Context() - # # Create a REQ (request) socket - # socket = context.socket(zmq.REQ) - # server_address = "tcp://localhost:5555" # Replace with your server's address - # socket.connect(server_address) - - - # ## BUILDING NETWORK ## - # global old_model_name - # global old_runtime - - # if model_name != old_model_name or runtime != old_runtime: - # print("___________________BUILDINGNETWORK________________") - # print("old_model_name: ", old_model_name, "::model_name: ",model_name) - # print("old_runtime: ", old_runtime, "::runtime: ",runtime) - # buildnetwork(socket, model_name, runtime) ##build network when there is some change other than image - # old_model_name = model_name - # old_runtime = runtime - - - # ## INFERENCING ON NETWORK ## - - - # # Step 0: Set upscaling params - # patch_size = 128 - # overlap_factor = 0.1 - # scaling_factor= 4 - - - # # Step 1: Read Image and Extract 128x128 patches from the image - # image_np = np.array(image_data) - - # # Dividing image into small patches - # emp = EMPatches() - # img_patches, indices = emp.extract_patches(image_np, patchsize=patch_size, overlap=overlap_factor) - # print(f"Num of patches of 128 = {len(img_patches)}") - - - # # Step 2: Upscale each patch by a factor of 4 - # upscaled_patches= [] - # infer_time_list = [] - # time_taken = 0 - # for patch in img_patches: - # pt, single_infer_time = upscale_patch(socket, patch, model_name, runtime) - # upscaled_patches.append(pt) - # time_taken = time_taken + single_infer_time ##Adding time for all patches - - # print("Received upscaled patches") - - # global time_taken_model - # global upscaled_img_dims - # time_taken_model = str(f'{time_taken*1000:.2f}')+" ms" - - - - # # Step 3: Stitch back the upscaled patches into a single image - - # # Calculate the upscaled stiching indices - # up_img = np.zeros((image_np.shape[0]*scaling_factor, image_np.shape[1]*scaling_factor, image_np.shape[2]), np.uint8) - # _, new_indices = emp.extract_patches(up_img, patchsize=patch_size*scaling_factor, overlap=overlap_factor) - - # # merge with new_indices - # merged_img = emp.merge_patches(upscaled_patches, new_indices, mode='min') - # upscaled_img_dims = str(merged_img.shape[0]) + " x " +str(merged_img.shape[1]); - # print("upscaled_img_dims: ",upscaled_img_dims) - - # merged_img = Image.fromarray(np.uint8(merged_img)) - # #merged_img.save("upscaled.png") - - # # Convert the upscaled image to a binary response - # output_buffer = io.BytesIO() - - # merged_img.save(output_buffer, format='PNG') - - # print("Sending upscaled image as output to electron ...") - # output_buffer.seek(0) - # return send_file(output_buffer, mimetype='image/png') - - # except Exception as e: - # print("#############EXCEPTION####################") - # print(str(e)) - # return jsonify({'error': str(e)}), 400 - -if __name__ == '__main__': - app.run(host='0.0.0.0', port=9081, debug=True) diff --git a/ai-solutions/windows/angular-app-nlp/README.md b/ai-solutions/windows/angular-app-nlp/README.md deleted file mode 100644 index 51861a2f..00000000 --- a/ai-solutions/windows/angular-app-nlp/README.md +++ /dev/null @@ -1,96 +0,0 @@ -### For Internal 
Purpose -* dlc: Paste your dlc model in the pyton_flask_server directory -* SNPE LIBS: paste at any appropriate location and mention the path in CMakelists.txt -* SNPE INCLUE: paste at any appropriate location and mention the path in CMakelists.txt -* ZeroMQ LIBS: paste it in C:\Program Files (x86) and mention the path in CMakelists.txt -* cppzmq: paste it in C:\Program Files (x86) -* SNPE_CPP_CODE: paste all files present in **\\upagrah\hyderabad_qidk\sahinhos\Language Models\Windows Asset** directory to SNPE_CPP_CODE folder of this github repo, After that you can simply run from SNPE_CPP_CODE/build/Debug/snpe-sample.exe. - -### Dependencies: -* Python 3.8 is installed. -* Visual Studio 2022 is installed based on instructions given at https://docs.qualcomm.com/bundle/publicresource/topics/80-62010-1/Install-Visual-Studio-2022.html?product=Windows%20on%20Snapdragon -* This Windows Application has to run on SM8550 or Above Architecture. - - -## Directory Structure : -This Repo Contains 3 directories, which handle different aspects of this project. - -1. **Electron app UI**: This directory contains UI code and handles the part to start UI and connecting it to flask server. Here, User provides input question and the corresponding paragraph,selects the AI model and runtime for their use. All this information is sent to python using ajax request. - -2. **Python Flask Server**: Electron UI acts as foreground, and Flask server works in background to handle request from elecron UI. It takes all information given by elecron UI and pre-process the received question and paragraph here, and then give the processed inputs to SNPE_CPP_CODE for running the selected model. SNPE_CPP_CODE returns the output of the model and then we process the data given by model into human understandable form and return that back to Electron UI for display. - -3. **SNPE_CPP_CODE**: This works as a service for flask server. This runs the preprocessed inputs on network and return the output given by model back to Flask Server. -4. **Generating_DLC**: This .ipynb file is to generate the DLC. - - -
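To make the request flow described above concrete, here is a minimal client-side sketch of the two Flask endpoints defined in `server.py` (shown earlier in this diff). The endpoint paths, port 9081, and the payload keys come from that file; the use of the `requests` library and the sample question/paragraph are illustrative assumptions, not part of the app.

```python
# Illustrative client for the Flask server in server.py (port 9081).
# The Electron UI performs the same calls via ajax; `requests` is used
# here only as a stand-in client.
import requests

BASE = "http://localhost:9081"

# 1. Select the model; server.py stores this choice globally.
requests.post(f"{BASE}/api/BuildModel", json={"input": {"model": "mobile_bert"}})

# 2. Send a question, its context paragraph, and the target runtime.
resp = requests.post(
    f"{BASE}/api/fetchPredictionResults",
    json={"input": {"question": "Who maintains this project?",        # sample text
                    "paragraph": "Qualcomm Innovation Center maintains it.",
                    "runtime": "DSP"}},
)
print(resp.json()["answer"], resp.json()["executionTime"])
```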

- -

- -## Commands for Making Standalone - -* In python_flask_server: - - Python pkg dependencies : `pip install empatches flask opencv-python pillow flask_cors zmq pyinstaller transformer` - - Create dlc Directory and put DLC(s) there, Please follow Generating_DLC.ipynb to generate dlc. - - To start flask server, please run - `python server.py` - - It will start server at port : 9081 - - To view webpage on browser, please use this URL : http://localhost:9081 - -* In SNPE_CPP_Code: - - **Apply zmq_support.patch to the SNPE SampleCode_Windows present in SNPE sdk. After that please copy all the files in that folder to SNPE_CPP_CODE folder in this github repo.** - - For ZeroMQ, clone following gits and use their instructions to build those libs for your system, or you can follow below instructions to build. - - For libzmq: - - `git clone https://github.com/zeromq/libzmq.git` - - `cd libzmq` - - `mkdir build` - - `cd build` - - `cmake ../. -G "Visual Studio 17 2022" -D WITH_PERF_TOOL=OFF -D ZMQ_BUILD_TESTS=OFF -D ENABLE_CPACK=OFF -D CMAKE_BUILD_TYPE=Release` - - Open _ZeroMQ.sln_ in Visual Studio - - In Solution Directory, right click on INSTALL and _build_ solution - - See that the _ZeroMQ_ is installed in C Drive. - - - For Cppzmq: - - `git clone https://github.com/zeromq/cppzmq.git` - - `cd cppzmq` - - `mkdir build` - - `cd build` - - `cmake ../. -G "Visual Studio 17 2022" - - Open _cppZMQ.sln_ in Visual Studio - - In Solution Directory, right click on INSTALL and _build_ solution - - Confirm that _cppzmq_ is installed in C Drive. - - - - Change following paths in CmakeLists.txt of SNPE_CPP_Code according to your setup: - - set (SNPE_INCLUDE_DIR _"C:/Qualcomm/AIStack/SNPE/2.12.0.230626/include/SNPE"_) - - set (SNPE_LIB_PREFIX _"C:/Qualcomm/AIStack/SNPE/2.12.0.230626/lib"_) - - set (ZERO_MQ_PATH _"C:/Program Files (x86)/ZeroMQ"_) - - Change DLL filename, according to your setup: get_filename_component(ZMQ_DLL_PATH "${ZERO_MQ_PATH}/bin/_libzmq-v143-mt-gd-4_3_5.dll_" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) - - Create a build folder and build files. - - `Mkdir build` - - `Cd build` - - `cmake ../ -G "Visual Studio 17 2022" -A ARM64` - - `cmake --build ./ --config Release` - - - For running, please go to build/Release folder and run snpe-sample.exe - - * Inside electron_app_ui: - - Execute `npm install`. This will make node modules directory which will contain all necessary npm packages. - - Then Execute `npm run build` - - Then Execute `npm run dist` - - After this it'll create .exe file inside release folder, please install the app then it's ready to run. - - If you want to check in browser just use `npm start` command, this will open the Application in the broswer. - -
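The Flask server talks to `snpe-sample.exe` over ZeroMQ using a small multipart protocol (`networkbuild`, `infer`, `get_infer_time`), as can be seen in `server.py` earlier in this diff. As a rough aid for exercising the Python side without a device build, the sketch below is a hypothetical stand-in that answers with zero-filled logits; it is not part of the repository, and pyzmq plus the dummy replies are assumptions for illustration.

```python
# Hypothetical mock of snpe-sample.exe for local testing only: it speaks the
# same message framing that server.py uses, but returns zero logits instead
# of running SNPE.
import numpy as np
import zmq

ctx = zmq.Context()
sock = ctx.socket(zmq.REP)
sock.bind("tcp://*:5555")                    # endpoint server.py connects to

while True:
    frames = sock.recv_multipart()
    if frames[0] == b"networkbuild":         # [tag, dlc_path, runtime]
        sock.send(b"network built")
    elif frames[0] == b"infer":              # [tag, n_inputs, input buffers...]
        zeros = np.zeros((1, 384), dtype=np.float32).tobytes()
        sock.send_multipart([zeros, zeros])  # end_logits, start_logits frames
    elif frames[0] == b"get_infer_time":
        sock.send(b"0 ms")
```

With something like this listening on port 5555, the `buildnetwork` and `predict` calls in `server.py` complete end to end, which makes it easier to debug tokenization and logits post-processing in isolation before wiring up the real SNPE executable.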


- - -https://github.qualcomm.com/storage/user/11796/files/2f187531-98ea-4671-b3c2-bf0b07ac216d - - -Note: Make sure that you have resolved all dependencies mentioned in "Making Standalone" Section, like setting SNPE and ZMQ libs, installing python packages etc. - diff --git a/ai-solutions/windows/angular-app-nlp/SNPE_CPP_Code/zmq_support.patch b/ai-solutions/windows/angular-app-nlp/SNPE_CPP_Code/zmq_support.patch deleted file mode 100644 index cdc5e6dc..00000000 --- a/ai-solutions/windows/angular-app-nlp/SNPE_CPP_Code/zmq_support.patch +++ /dev/null @@ -1,1422 +0,0 @@ -diff --git a/CMakeLists.txt b/CMakeLists.txt -index c9be8f0..81b1636 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -1,73 +1,111 @@ --#============================================================================== --# --# Copyright (c) 2020-2023 Qualcomm Technologies, Inc. --# All Rights Reserved. --# Confidential and Proprietary - Qualcomm Technologies, Inc. --# --#============================================================================== -- --cmake_minimum_required (VERSION 3.14) --project (snpe-sample) --set (APP "snpe-sample") -- --# CMake option to control whether to build with VCRuntime libraries --option(BUILD_WITH_VCRUNTIME "Build the snpe-sample with static vcruntime libraries." OFF) --message("Build snpe-sample with vcruntime: ${BUILD_WITH_VCRUNTIME}") -- --set( APP_SOURCES -- "main.cpp" -- "Util.cpp" -- "SetBuilderOptions.cpp" -- "SetBuilderOptions.hpp" -- "SaveOutputTensor.cpp" -- "GetOpt.cpp" -- "GetOpt.hpp" -- "SaveOutputTensor.hpp" -- "Util.hpp" -- "NV21Load.cpp" -- "NV21Load.hpp" -- "LoadInputTensor.cpp" -- "LoadInputTensor.hpp" -- "CheckRuntime.cpp" -- "CheckRuntime.hpp" -- "LoadContainer.cpp" -- "LoadContainer.hpp" -- "PreprocessInput.cpp" -- "PreprocessInput.hpp" -- "CreateUserBuffer.cpp" -- "CreateUserBuffer.hpp" --) -- --set (SNPE_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../../../include/SNPE) --set (SNPE_LIB_PREFIX ../../../../lib) --set (_dtuple_POSTFIX windows-msvc) -- --if(CMAKE_GENERATOR_PLATFORM STREQUAL "x64") -- message("Linking with x64 SNPE") -- get_filename_component(SNPE_DLL_PATH "${SNPE_LIB_PREFIX}/x86_64-${_dtuple_POSTFIX}/SNPE.dll" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -- get_filename_component(SNPE_IMPLIB_PATH "${SNPE_LIB_PREFIX}/x86_64-${_dtuple_POSTFIX}/SNPE.lib" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) --elseif(CMAKE_GENERATOR_PLATFORM STREQUAL "ARM64") -- message("Linking with ARM64 SNPE") -- get_filename_component(SNPE_DLL_PATH "${SNPE_LIB_PREFIX}/aarch64-${_dtuple_POSTFIX}/SNPE.dll" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -- get_filename_component(SNPE_IMPLIB_PATH "${SNPE_LIB_PREFIX}/aarch64-${_dtuple_POSTFIX}/SNPE.lib" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) --else() -- message(FATAL "Not Supported Platform") --endif() -- --add_library( SNPE SHARED IMPORTED ) --set_target_properties(SNPE PROPERTIES -- IMPORTED_LOCATION ${SNPE_DLL_PATH} -- IMPORTED_IMPLIB ${SNPE_IMPLIB_PATH} -- INTERFACE_INCLUDE_DIRECTORIES ${SNPE_INCLUDE_DIR} --) -- --add_executable(${APP} ${APP_SOURCES}) --target_compile_definitions(${APP} PUBLIC -D_CRT_SECURE_NO_WARNINGS) --if(${BUILD_WITH_VCRUNTIME}) -- target_compile_options(${APP} PUBLIC /MT) --endif() --target_link_libraries (${APP} SNPE) --add_custom_command(TARGET ${APP} POST_BUILD -- COMMAND ${CMAKE_COMMAND} -E copy_if_different -- ${SNPE_DLL_PATH} -- $) -+#============================================================================== -+# -+# Copyright (c) 2020-2023 Qualcomm Technologies, Inc. 
-+# All Rights Reserved. -+# Confidential and Proprietary - Qualcomm Technologies, Inc. -+# -+#============================================================================== -+ -+cmake_minimum_required (VERSION 3.14) -+project (snpe-sample) -+set (APP "snpe-sample") -+ -+# CMake option to control whether to build with VCRuntime libraries -+option(BUILD_WITH_VCRUNTIME "Build the snpe-sample with static vcruntime libraries." OFF) -+message("Build snpe-sample with vcruntime: ${BUILD_WITH_VCRUNTIME}") -+ -+set( APP_SOURCES -+ "main.cpp" -+ "Util.cpp" -+ "SetBuilderOptions.cpp" -+ "SetBuilderOptions.hpp" -+ "SaveOutputTensor.cpp" -+ "GetOpt.cpp" -+ "GetOpt.hpp" -+ "SaveOutputTensor.hpp" -+ "Util.hpp" -+ "NV21Load.cpp" -+ "NV21Load.hpp" -+ "LoadInputTensor.cpp" -+ "LoadInputTensor.hpp" -+ "CheckRuntime.cpp" -+ "CheckRuntime.hpp" -+ "LoadContainer.cpp" -+ "LoadContainer.hpp" -+ "PreprocessInput.cpp" -+ "PreprocessInput.hpp" -+ "CreateUserBuffer.cpp" -+ "CreateUserBuffer.hpp" -+) -+ -+set (SNPE_INCLUDE_DIR "C:/Users/HCKTest/Desktop/sahinhos/QIDK_AI_Suite_Artifacts/Assets_ZIP/SNPE_include/SNPE") -+set (SNPE_LIB_PREFIX "C:/Users/HCKTest/Desktop/sahinhos/QIDK_AI_Suite_Artifacts/Assets_ZIP/SNPE_lib/SNPE_lib/lib") -+set (ZERO_MQ_PATH "C:/Program Files (x86)/ZeroMQ") -+set (_dtuple_POSTFIX windows-msvc) -+ -+if(CMAKE_GENERATOR_PLATFORM STREQUAL "x64") -+ message("Linking with x64 SNPE") -+ get_filename_component(SNPE_DLL_PATH "${SNPE_LIB_PREFIX}/x86_64-${_dtuple_POSTFIX}/SNPE.dll" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ get_filename_component(SNPE_IMPLIB_PATH "${SNPE_LIB_PREFIX}/x86_64-${_dtuple_POSTFIX}/SNPE.lib" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+elseif(CMAKE_GENERATOR_PLATFORM STREQUAL "ARM64") -+ message("Linking with ARM64 SNPE") -+ get_filename_component(SNPE_DLL_PATH "${SNPE_LIB_PREFIX}/aarch64-${_dtuple_POSTFIX}/SNPE.dll" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ get_filename_component(SNPE_IMPLIB_PATH "${SNPE_LIB_PREFIX}/aarch64-${_dtuple_POSTFIX}/SNPE.lib" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ get_filename_component(SNPE_STUB66_PATH "${SNPE_LIB_PREFIX}/aarch64-${_dtuple_POSTFIX}/SnpeDspV66Stub.dll" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ get_filename_component(SNPE_STUB68_PATH "${SNPE_LIB_PREFIX}/aarch64-${_dtuple_POSTFIX}/SnpeHtpV68Stub.dll" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ get_filename_component(SNPE_HTP_PATH "${SNPE_LIB_PREFIX}/aarch64-${_dtuple_POSTFIX}/SnpeHtpPrepare.dll" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ -+ get_filename_component(ZMQ_DLL_PATH "${ZERO_MQ_PATH}/bin/libzmq-v143-mt-4_3_5.dll" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ get_filename_component(SNPE_SKEL66_PATH "${SNPE_LIB_PREFIX}/hexagon-v66/unsigned/libSnpeDspV66Skel.so" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ get_filename_component(SNPE_SKEL68_PATH "${SNPE_LIB_PREFIX}/hexagon-v68/unsigned/libSnpeHtpV68Skel.so" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ -+else() -+ message(FATAL "Not Supported Platform") -+endif() -+ -+add_library( SNPE SHARED IMPORTED ) -+set_target_properties(SNPE PROPERTIES -+ IMPORTED_LOCATION ${SNPE_DLL_PATH} -+ IMPORTED_IMPLIB ${SNPE_IMPLIB_PATH} -+ INTERFACE_INCLUDE_DIRECTORIES ${SNPE_INCLUDE_DIR} -+) -+ -+find_package(cppzmq) -+add_executable(${APP} ${APP_SOURCES}) -+target_compile_definitions(${APP} PUBLIC -D_CRT_SECURE_NO_WARNINGS) -+if(${BUILD_WITH_VCRUNTIME}) -+ target_compile_options(${APP} PUBLIC /MT) -+endif() -+target_link_libraries (${APP} SNPE cppzmq) -+add_custom_command(TARGET 
${APP} POST_BUILD -+ COMMAND ${CMAKE_COMMAND} -E copy_if_different -+ ${SNPE_DLL_PATH} -+ $) -+ -+add_custom_command(TARGET ${APP} POST_BUILD -+ COMMAND ${CMAKE_COMMAND} -E copy_if_different -+ ${SNPE_STUB66_PATH} -+ $) -+ -+add_custom_command(TARGET ${APP} POST_BUILD -+ COMMAND ${CMAKE_COMMAND} -E copy_if_different -+ ${SNPE_STUB68_PATH} -+ $) -+ -+add_custom_command(TARGET ${APP} POST_BUILD -+ COMMAND ${CMAKE_COMMAND} -E copy_if_different -+ ${SNPE_HTP_PATH} -+ $) -+ -+add_custom_command(TARGET ${APP} POST_BUILD -+ COMMAND ${CMAKE_COMMAND} -E copy_if_different -+ ${SNPE_SKEL66_PATH} -+ $) -+add_custom_command(TARGET ${APP} POST_BUILD -+ COMMAND ${CMAKE_COMMAND} -E copy_if_different -+ ${SNPE_SKEL68_PATH} -+ $) -+add_custom_command(TARGET ${APP} POST_BUILD -+ COMMAND ${CMAKE_COMMAND} -E copy_if_different -+ ${ZMQ_DLL_PATH} -+ $) -\ No newline at end of file -diff --git a/CreateUserBuffer.cpp b/CreateUserBuffer.cpp -index fa500e5..d2a5bbe 100644 ---- a/CreateUserBuffer.cpp -+++ b/CreateUserBuffer.cpp -@@ -199,3 +199,131 @@ void createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, - createUserBuffer(inputMap, applicationBuffers, snpeUserBackedBuffers, snpe, name); - } - } -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+void createUserBuffer(zdl::DlSystem::UserBufferMap& userBufferMap, -+ std::unordered_map>& applicationBuffers, -+ std::vector>& snpeUserBackedBuffers, -+ std::unique_ptr& snpe, -+ const char * name, -+ const bool isTfNBuffer, -+ bool staticQuantization, -+ int bitWidth) -+{ -+ // get attributes of buffer by name -+ auto bufferAttributesOpt = snpe->getInputOutputBufferAttributes(name); -+ if (!bufferAttributesOpt) throw std::runtime_error(std::string("Error obtaining attributes for input tensor ") + name); -+ -+ // calculate the size of buffer required by the input tensor -+ const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); -+ -+ size_t bufferElementSize = 0; -+ if (isTfNBuffer) { -+ bufferElementSize = bitWidth / 8; -+ } -+ else { -+ bufferElementSize = sizeof(float); -+ } -+ -+ // Calculate the stride based on buffer strides. -+ // Note: Strides = Number of bytes to advance to the next element in each dimension. -+ // For example, if a float tensor of dimension 2x4x3 is tightly packed in a buffer of 96 bytes, then the strides would be (48,12,4) -+ // Note: Buffer stride is usually known and does not need to be calculated. -+ std::vector strides(bufferShape.rank()); -+ strides[strides.size() - 1] = bufferElementSize; -+ size_t stride = strides[strides.size() - 1]; -+ for (size_t i = bufferShape.rank() - 1; i > 0; i--) -+ { -+ (bufferShape[i] == 0) ? 
stride *= getResizableDim() : stride *= bufferShape[i]; -+ strides[i-1] = stride; -+ } -+ -+ size_t bufSize = calcSizeFromDims(bufferShape.getDimensions(), bufferShape.rank(), bufferElementSize); -+ -+ // set the buffer encoding type -+ std::unique_ptr userBufferEncoding; -+ if (isTfNBuffer) -+ { -+ if((*bufferAttributesOpt)->getEncodingType() == zdl::DlSystem::UserBufferEncoding::ElementType_t::FLOAT && staticQuantization){ -+ std::cerr << "ERROR: Quantization parameters not present in model" << std::endl; -+ std::exit(EXIT_FAILURE); -+ } -+ -+ const zdl::DlSystem::UserBufferEncodingTfN* ubeTfN = dynamic_cast((*bufferAttributesOpt)->getEncoding()); -+ uint64_t stepEquivalentTo0 = ubeTfN->getStepExactly0(); -+ float quantizedStepSize = ubeTfN->getQuantizedStepSize(); -+ userBufferEncoding = std::unique_ptr(new zdl::DlSystem::UserBufferEncodingTfN(stepEquivalentTo0,quantizedStepSize, bitWidth)); -+ } -+ else -+ { -+ userBufferEncoding = std::unique_ptr(new zdl::DlSystem::UserBufferEncodingFloat()); -+ } -+ -+ // create user-backed storage to load input data onto it -+ applicationBuffers.emplace(name, std::vector(bufSize)); -+ -+ // create SNPE user buffer from the user-backed buffer -+ zdl::DlSystem::IUserBufferFactory& ubFactory = zdl::SNPE::SNPEFactory::getUserBufferFactory(); -+ snpeUserBackedBuffers.push_back(ubFactory.createUserBuffer(applicationBuffers.at(name).data(), -+ bufSize, -+ strides, -+ userBufferEncoding.get())); -+ if (snpeUserBackedBuffers.back() == nullptr) -+ { -+ std::cerr << "Error while creating user buffer." << std::endl; -+ } -+ // add the user-backed buffer to the inputMap, which is later on fed to the network for execution -+ userBufferMap.add(name, snpeUserBackedBuffers.back().get()); -+} -+ -+void createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, -+ std::unordered_map>& applicationBuffers, -+ std::vector>& snpeUserBackedBuffers, -+ std::unique_ptr& snpe, -+ bool isTfNBuffer, -+ bool staticQuantization, -+ int bitWidth) -+{ -+ // get input tensor names of the network that need to be populated -+ const auto& inputNamesOpt = snpe->getInputTensorNames(); -+ if (!inputNamesOpt) throw std::runtime_error("Error obtaining input tensor names"); -+ const zdl::DlSystem::StringList& inputNames = *inputNamesOpt; -+ assert(inputNames.size() > 0); -+ -+ // create SNPE user buffers for each application storage buffer -+ for (const char *name : inputNames) { -+ createUserBuffer(inputMap, applicationBuffers, snpeUserBackedBuffers, snpe, name, isTfNBuffer, staticQuantization, bitWidth); -+ } -+} -+ -+void createOutputBufferMap(zdl::DlSystem::UserBufferMap& outputMap, -+ std::unordered_map>& applicationBuffers, -+ std::vector>& snpeUserBackedBuffers, -+ std::unique_ptr& snpe, -+ bool isTfNBuffer, -+ int bitWidth) -+{ -+ // get input tensor names of the network that need to be populated -+ const auto& outputNamesOpt = snpe->getOutputTensorNames(); -+ if (!outputNamesOpt) throw std::runtime_error("Error obtaining output tensor names"); -+ const zdl::DlSystem::StringList& outputNames = *outputNamesOpt; -+ -+ // create SNPE user buffers for each application storage buffer -+ for (const char *name : outputNames) { -+ createUserBuffer(outputMap, applicationBuffers, snpeUserBackedBuffers, snpe, name, isTfNBuffer, false, bitWidth); -+ } -+} -\ No newline at end of file -diff --git a/CreateUserBuffer.hpp b/CreateUserBuffer.hpp -index 138a7c4..4a6cf60 100644 ---- a/CreateUserBuffer.hpp -+++ b/CreateUserBuffer.hpp -@@ -53,3 +53,33 @@ void 
createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, - std::unordered_map& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe); -+ -+ -+ -+ -+ -+void createUserBuffer(zdl::DlSystem::UserBufferMap& userBufferMap, -+ std::unordered_map>& applicationBuffers, -+ std::vector>& snpeUserBackedBuffers, -+ std::unique_ptr& snpe, -+ const char * name, -+ const bool isTfNBuffer, -+ bool staticQuantization, -+ int bitWidth); -+ -+// Create a UserBufferMap of the SNPE network inputs -+void createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, -+ std::unordered_map>& applicationBuffers, -+ std::vector>& snpeUserBackedBuffers, -+ std::unique_ptr& snpe, -+ const bool isTfNBuffer, -+ bool staticQuantization, -+ int bitWidth); -+ -+// Create a UserBufferMap of the SNPE network outputs -+void createOutputBufferMap(zdl::DlSystem::UserBufferMap& outputMap, -+ std::unordered_map>& applicationBuffers, -+ std::vector>& snpeUserBackedBuffers, -+ std::unique_ptr& snpe, -+ const bool isTfNBuffer, -+ int bitWidth); -diff --git a/SetBuilderOptions.cpp b/SetBuilderOptions.cpp -index 20bbeca..3ee29a6 100644 ---- a/SetBuilderOptions.cpp -+++ b/SetBuilderOptions.cpp -@@ -1,41 +1,42 @@ --//============================================================================== --// --// Copyright (c) 2020 Qualcomm Technologies, Inc. --// All Rights Reserved. --// Confidential and Proprietary - Qualcomm Technologies, Inc. --// --//============================================================================== -- --#include "SetBuilderOptions.hpp" -- --#include "SNPE/SNPE.hpp" --#include "DlContainer/IDlContainer.hpp" --#include "SNPE/SNPEBuilder.hpp" -- --/* Windows Modification --* Remove UDL code --*/ -- --std::unique_ptr setBuilderOptions(std::unique_ptr & container, -- zdl::DlSystem::Runtime_t runtime, -- zdl::DlSystem::RuntimeList runtimeList, -- bool useUserSuppliedBuffers, -- zdl::DlSystem::PlatformConfig platformConfig, -- bool useCaching) --{ -- std::unique_ptr snpe; -- zdl::SNPE::SNPEBuilder snpeBuilder(container.get()); -- -- if(runtimeList.empty()) -- { -- runtimeList.add(runtime); -- } -- -- snpe = snpeBuilder.setOutputLayers({}) -- .setRuntimeProcessorOrder(runtimeList) -- .setUseUserSuppliedBuffers(useUserSuppliedBuffers) -- .setPlatformConfig(platformConfig) -- .setInitCacheMode(useCaching) -- .build(); -- return snpe; --} -+//============================================================================== -+// -+// Copyright (c) 2020 Qualcomm Technologies, Inc. -+// All Rights Reserved. -+// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-+// -+//============================================================================== -+ -+#include "SetBuilderOptions.hpp" -+ -+#include "SNPE/SNPE.hpp" -+#include "DlContainer/IDlContainer.hpp" -+#include "SNPE/SNPEBuilder.hpp" -+ -+/* Windows Modification -+* Remove UDL code -+*/ -+ -+std::unique_ptr setBuilderOptions(std::unique_ptr & container, -+ zdl::DlSystem::Runtime_t runtime, -+ zdl::DlSystem::RuntimeList runtimeList, -+ bool useUserSuppliedBuffers, -+ zdl::DlSystem::PlatformConfig platformConfig, -+ bool useCaching) -+{ -+ std::unique_ptr snpe; -+ zdl::SNPE::SNPEBuilder snpeBuilder(container.get()); -+ -+ if(runtimeList.empty()) -+ { -+ runtimeList.add(runtime); -+ } -+ -+ snpe = snpeBuilder.setOutputLayers({}) -+ .setRuntimeProcessorOrder(runtimeList) -+ .setUseUserSuppliedBuffers(useUserSuppliedBuffers) -+ .setPlatformConfig(platformConfig) -+ .setInitCacheMode(useCaching) -+ .setUnconsumedTensorsAsOutputs(true) -+ .build(); -+ return snpe; -+} -diff --git a/main.cpp b/main.cpp -index 729e03b..05ed9a8 100644 ---- a/main.cpp -+++ b/main.cpp -@@ -9,7 +9,7 @@ - // This file contains an example application that loads and executes a neural - // network using the SNPE C++ API and saves the layer output to a file. - // Inputs to and outputs from the network are conveyed in binary form as single --// precision floating point values. -+// precision uint8_ting point values. - // - - #include -@@ -20,6 +20,9 @@ - #include - #include - #include -+#include -+#include -+#include - - #include "GetOpt.hpp" - #include "CheckRuntime.hpp" -@@ -36,232 +39,79 @@ - #include "DlSystem/IUserBuffer.hpp" - #include "DlContainer/IDlContainer.hpp" - #include "SNPE/SNPE.hpp" -- -+using namespace std; - /* Windows Modification - * Replace to and refactor the "Process command line arguments" part - */ -- - const int FAILURE = 1; - const int SUCCESS = 0; - --int main(int argc, char** argv) -+enum { UNKNOWN, USERBUFFER_FLOAT, USERBUFFER_TF8, ITENSOR, USERBUFFER_TF16 }; -+enum { CPUBUFFER, GLBUFFER }; -+bool useUserSuppliedBuffers = false; -+int bufferType; -+int bitWidth = 0; -+ -+std::string getCurrentDir() { -+ char buff[MAX_PATH]; -+ GetModuleFileName(NULL, buff, MAX_PATH); -+ std::string::size_type position = std::string(buff).find_last_of("\\/"); -+ return std::string(buff).substr(0, position); -+} -+ -+std::unique_ptr build_network(std::string dlc, static zdl::DlSystem::Runtime_t runtime, bool runtimeSpecified, bool usingInitCaching, std::string bufferTypeStr, std::string userBufferSourceStr, std::string staticQuantizationStr) - { -- enum {UNKNOWN, USERBUFFER_FLOAT, USERBUFFER_TF8, ITENSOR, USERBUFFER_TF16}; -- enum {CPUBUFFER, GLBUFFER}; - -- // Command line arguments -- static std::string dlc = ""; -- static std::string OutputDir = "./output/"; -- const char* inputFile = ""; -- std::string bufferTypeStr = "ITENSOR"; -- std::string userBufferSourceStr = "CPUBUFFER"; -- std::string staticQuantizationStr = "false"; -- static zdl::DlSystem::Runtime_t runtime = zdl::DlSystem::Runtime_t::CPU; - static zdl::DlSystem::RuntimeList runtimeList; -- bool runtimeSpecified = false; -- bool execStatus = false; -- bool usingInitCaching = false; -- bool staticQuantization = false; -- -- // Process command line arguments -- int opt = 0; -- enum OPTIONS -+ bool staticQuantization; -+ -+ if (staticQuantizationStr == "true") - { -- OPT_HELP = 0, -- OPT_CONTAINER = 1, -- OPT_INPUT_LIST = 2, -- OPT_OUTPUT_DIR = 3, -- OPT_USERBUFFER = 4, -- OPT_RUNTIME = 5, -- OPT_RESIZABLE_DIM = 6, -- OPT_INITBLOBSCACHE = 7, -- 
OPT_RUNTIME_ORDER = 8, -- OPT_STATIC_QUANTIZATION = 9, -- }; -- static struct WinOpt::option long_options[] = { -- {"help", WinOpt::no_argument, NULL, OPT_HELP}, -- {"container", WinOpt::required_argument, NULL, OPT_CONTAINER}, -- {"input_list", WinOpt::required_argument, NULL, OPT_INPUT_LIST}, -- {"output_dir", WinOpt::required_argument, NULL, OPT_OUTPUT_DIR}, -- {"userbuffer", WinOpt::required_argument, NULL, OPT_USERBUFFER}, -- {"runtime", WinOpt::required_argument, NULL, OPT_RUNTIME}, -- {"resizable_dim", WinOpt::required_argument, NULL, OPT_RESIZABLE_DIM}, -- {"enable_init_cache", WinOpt::no_argument, NULL, OPT_INITBLOBSCACHE}, -- {"runtime_order", WinOpt::required_argument, NULL, OPT_RUNTIME_ORDER}, -- {"static_quantization", WinOpt::required_argument, NULL, OPT_STATIC_QUANTIZATION}, -- {NULL, 0, NULL, 0 } -- }; -- int long_index = 0; -- while ((opt = WinOpt::GetOptLongOnly(argc, argv, "", long_options, &long_index)) != -1) -+ staticQuantization = true; -+ } -+ else if (staticQuantizationStr == "false") - { -- switch (opt) -- { -- case OPT_HELP: -- std::cout -- << "\nDESCRIPTION:\n" -- << "------------\n" -- << "Example application demonstrating how to load and execute a neural network\n" -- << "using the SNPE C++ API.\n" -- << "\n\n" -- << "REQUIRED ARGUMENTS:\n" -- << "-------------------\n" -- << " --container Path to the DL container containing the network.\n" -- << " --input_list Path to a file listing the inputs for the network.\n" -- << " --output_dir Path to directory to store output results.\n" -- << "\n" -- << "OPTIONAL ARGUMENTS:\n" -- << "-------------------\n" -- << " --userbuffer Type of buffers to use [USERBUFFER_FLOAT, USERBUFFER_TF8, ITENSOR, USERBUFFER_TF16] (" << bufferTypeStr << " is default).\n" -- << " --static_quantization Specifies to use static quantization parameters from the model instead of input specific quantization [true, false]. Used in conjunction with USERBUFFER_TF8. \n" -- << " --runtime The runtime to be used [gpu, dsp, aip, cpu] (cpu is default). \n" -- << " --resizable_dim The maximum number that resizable dimensions can grow into. \n" -- << " Used as a hint to create UserBuffers for models with dynamic sized outputs. Should be a positive integer and is not applicable when using ITensor. \n" -- << " --enable_init_cache Enable init caching to accelerate the initialization process of SNPE. Defaults to disable.\n" -- << " --runtime_order Specifies the order of precedence for runtime e.g cpu_float32, dsp_fixed8_tf etc. 
Valid values are:- \n" -- << " cpu_float32 (Snapdragon CPU) = Data & Math: float 32bit \n" -- << " gpu_float32_16_hybrid (Adreno GPU) = Data: float 16bit Math: float 32bit \n" -- << " dsp_fixed8_tf (Hexagon DSP) = Data & Math: 8bit fixed point Tensorflow style format \n" -- << " gpu_float16 (Adreno GPU) = Data: float 16bit Math: float 16bit \n" -- << " cpu (Snapdragon CPU) = Same as cpu_float32 \n" -- << " gpu (Adreno GPU) = Same as gpu_float32_16_hybrid \n" -- << " dsp (Hexagon DSP) = Same as dsp_fixed8_tf \n" -- << std::endl; -- -- std::exit(SUCCESS); -- case OPT_CONTAINER: -- dlc = WinOpt::optarg; -- break; -- case OPT_INPUT_LIST: -- inputFile = WinOpt::optarg; -- break; -- case OPT_OUTPUT_DIR: -- OutputDir = WinOpt::optarg; -- break; -- case OPT_USERBUFFER: -- bufferTypeStr = WinOpt::optarg; -- break; -- case OPT_RESIZABLE_DIM: -- setResizableDim(atoi(WinOpt::optarg)); -- break; -- case OPT_RUNTIME: -- runtimeSpecified = true; -- if (strcmp(WinOpt::optarg, "gpu") == 0) -- { -- runtime = zdl::DlSystem::Runtime_t::GPU; -- } -- else if (strcmp(WinOpt::optarg, "aip") == 0) -- { -- runtime = zdl::DlSystem::Runtime_t::AIP_FIXED8_TF; -- } -- else if (strcmp(WinOpt::optarg, "dsp") == 0) -- { -- runtime = zdl::DlSystem::Runtime_t::DSP; -- } -- else if (strcmp(WinOpt::optarg, "cpu") == 0) -- { -- runtime = zdl::DlSystem::Runtime_t::CPU; -- } -- else -- { -- std::cerr << "The runtime option provide is not valid. Defaulting to the CPU runtime." << std::endl; -- -- } -- break; -- -- case OPT_RUNTIME_ORDER: -- { -- std::string inputString = WinOpt::optarg; -- //std::cout<<"Input String: "< runtimeStrVector; -- split(runtimeStrVector, inputString, ','); -- -- //Check for dups -- for(auto it = runtimeStrVector.begin(); it != runtimeStrVector.end()-1; it++) -- { -- auto found = std::find(it+1, runtimeStrVector.end(), *it); -- if(found != runtimeStrVector.end()) -- { -- std::cerr << "Error: Invalid values passed to the argument "<< argv[WinOpt::optind-2] << ". Duplicate entries in runtime order" << std::endl; -- std::exit(FAILURE); -- } -- } -- -- runtimeList.clear(); -- for(auto& runtimeStr : runtimeStrVector) -- { -- //std::cout< container = loadContainerFromFile(dlc); - if (container == nullptr) - { - std::cerr << "Error while opening the container file." << std::endl; -- return EXIT_FAILURE; -+ return nullptr; - } - -- bool useUserSuppliedBuffers = (bufferType == USERBUFFER_FLOAT || -+ useUserSuppliedBuffers = (bufferType == USERBUFFER_FLOAT || - bufferType == USERBUFFER_TF8 || - bufferType == USERBUFFER_TF16); - - std::unique_ptr snpe; - zdl::DlSystem::PlatformConfig platformConfig; - -+ std::cout << "\nbuilding..................\n"; - snpe = setBuilderOptions(container, runtime, runtimeList, useUserSuppliedBuffers, platformConfig, usingInitCaching); -- if (snpe == nullptr) -- { -- std::cerr << "Error while building SNPE object." 
<< std::endl; -- return EXIT_FAILURE; -- } -- if (usingInitCaching) -- { -- if (container->save(dlc)) -- { -- std::cout << "Saved container into archive successfully" << std::endl; -- } -- else -- { -- std::cout << "Failed to save container into archive" << std::endl; -- } -- } -+ return snpe; -+ -+} -+int main() -+{ -+ // Initialize a ZeroMQ context -+ zmq::context_t context(1); -+ -+ // Create a REP (reply) socket -+ zmq::socket_t socket(context, ZMQ_REP); -+ // zmq::socket_t socket(context, ZMQ_PULL); -+ -+ // Bind the socket to a TCP address -+ std::string serverAddress = "tcp://*:5555"; // Replace with your desired address -+ socket.bind(serverAddress.c_str()); -+ -+ -+ enum {UNKNOWN, USERBUFFER_FLOAT, USERBUFFER_TF8, ITENSOR, USERBUFFER_TF16}; -+ enum {CPUBUFFER, GLBUFFER}; -+ -+ std::unique_ptr snpe; -+ -+ zmq::message_t first_msg; //TODO: 5 is hardcoded -+ zmq::message_t infer_time_reply; -+ //struct timeval start_time, end_time; -+ //float seconds, useconds, milli_time; -+ -+ -+ while(true) -+ { -+ //Waiting for first msg from client; -+ std::cout << "Waiting for first msg from socket:"<DSP -+ -+ bool runtimeSpecified = true; //shubham change false -> true -+ bool usingInitCaching = false; -+ -+ if (runtime_socket.compare("GPU") == 0) -+ { -+ runtime = zdl::DlSystem::Runtime_t::GPU; -+ } -+ else if (runtime_socket.compare("DSP") == 0) -+ { -+ runtime = zdl::DlSystem::Runtime_t::DSP; -+ } -+ else if (runtime_socket.compare("CPU") == 0) -+ { -+ runtime = zdl::DlSystem::Runtime_t::CPU; -+ } -+ else -+ { -+ std::cerr<<"\nCorrect Runtime not specified, choosing default(CPU)"<getInputDimensions(); -+ size_t batchSize = tensorShape.getDimensions()[0]; -+ std::cout << "Batch size for the container is " << batchSize << std::endl; -+ -+ // Open the input file listing and group input files into batches -+ // std::vector> inputs = preprocessInput(inputFile, batchSize); -+ -+ try { -+ std::cout << "Waiting for socket:" << std::endl; -+ zmq::message_t messages1; -+ zmq::message_t messages2; -+ -+ //Receiving 2 messages -+ //first message is input_ids -+ //second message is attention_mask -+ socket.recv(&messages1); -+ socket.recv(&messages2); -+ std::cout << "Message received:" << std::endl; -+ -+ //Creating vector for the 2 messages -+ std::vector receivedData1(static_cast(messages1.data()), static_cast(messages1.data()) + messages1.size() / sizeof(int)); -+ std::vector receivedData2(static_cast(messages2.data()), static_cast(messages2.data()) + messages2.size() / sizeof(int)); -+ std::cout << "\n Inputs ids size:" << messages1.size() << std::endl; -+ std::cout << "\n Attention Masks size:" << messages2.size() << std::endl; -+ -+ -+ -+ -+ if (useUserSuppliedBuffers) -+ { -+ // SNPE allows its input and output buffers that are fed to the network -+ // to come from user-backed buffers. First, SNPE buffers are created from -+ // user-backed storage. These SNPE buffers are then supplied to the network -+ // and the results are stored in user-backed output buffers. This allows for -+ // reusing the same buffers for multiple inputs and outputs. 
-+ -+ zdl::DlSystem::UserBufferMap inputMap, outputMap; -+ std::vector > snpeUserBackedInputBuffers, snpeUserBackedOutputBuffers; -+ // std::unordered_map > applicationOutputBuffers; -+ std::unordered_map > applicationOutputBuffersFloat; -+ -+ if (bufferType == USERBUFFER_FLOAT) -+ { -+ std::cout << "userbuffer float" << std::endl; -+ // createOutputBufferMap(outputMap, applicationOutputBuffers, snpeUserBackedOutputBuffers, snpe, false, bitWidth); -+ createOutputBufferMap(outputMap, applicationOutputBuffersFloat, snpeUserBackedOutputBuffers, snpe, false, bitWidth); -+ std::cout << "createOutputBufferMap done" << std::endl; -+ -+ // std::unordered_map > applicationInputBuffers; -+ std::unordered_map > applicationInputBuffersFloat; -+ std::cout << "creating Inputbuffermap" << std::endl; -+ // createInputBufferMap(inputMap, applicationInputBuffers, snpeUserBackedInputBuffers, snpe, false, false, bitWidth); -+ createInputBufferMap(inputMap, applicationInputBuffersFloat, snpeUserBackedInputBuffers, snpe, false, false, bitWidth); -+ std::cout << "createInputBufferMap done" << std::endl; -+ -+ -+ -+ const auto& outputNamesOpt = snpe->getOutputTensorNames(); -+ const zdl::DlSystem::StringList& outputNames = *outputNamesOpt; -+ const char* out_name1 = outputNames.at(0); -+ const char* out_name2 = outputNames.at(1); -+ -+ std::cout << "Out Name 1 " << outputNames.at(0) << std::endl; -+ std::cout << "Out Name 2 " << outputNames.at(1) << std::endl; -+ const auto& inputNamesOpt = snpe->getInputTensorNames(); -+ const zdl::DlSystem::StringList& inputNames = *inputNamesOpt; -+ const char* in_name1 = inputNames.at(0); -+ const char* in_name2 = inputNames.at(1); -+ -+ std::cout << "IN Name 1 " << inputNames.at(0) << std::endl; -+ std::cout << "IN Name 2 " << inputNames.at(1) << std::endl; -+ -+ applicationInputBuffersFloat.at(in_name1) = receivedData1; //shubham change -+ applicationInputBuffersFloat.at(in_name2) = receivedData2; -+ std::cout << "wrote data" << std::endl; -+ std::cout << "Received input" << std::endl; -+ for (size_t i = 0; i < batchSize; i++) -+ { -+ // Load input user buffer(s) with values from file(s) -+ if (batchSize > 1) -+ std::cout << "Batch " << i << ":" << std::endl; -+ -+ uint64_t start_ms = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); -+ -+ // Execute the input buffer map on the model with SNPE -+ execStatus = snpe->execute(inputMap, outputMap); -+ std::cout << "\nExecStatius: " << execStatus << std::endl; -+ -+ uint64_t end_ms = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); -+ -+ int infer_time = end_ms - start_ms; -+ -+ std::string mill_str = std::to_string(infer_time); -+ std::cout << "Inference time:" << mill_str << std::endl; -+ infer_time_reply.rebuild(mill_str.size()); -+ memcpy((void*)infer_time_reply.data(), (mill_str.c_str()), mill_str.size()); -+ -+ // Prepare the vectors(execution result) to be sent -+ std::vector vector1 = applicationOutputBuffersFloat.at(out_name1); -+ std::vector vector2 = applicationOutputBuffersFloat.at(out_name2); -+ //for (int i = vector1.size(); i < vector1.size()+5; i++) { -+ // std::cout << vector1[i] << std::endl; -+ //} -+ -+ // Send the result -+ if (execStatus == true) -+ { -+ std::cout << "execStatus is true" << std::endl; -+ -+ -+ zmq::message_t message1(vector1.size()); -+ zmq::message_t message2(vector2.size()); -+ std::cout << "Copying msg" << std::endl; -+ -+ std::cout << "vector1 size: " << vector1.size() << std::endl; -+ std::cout << "vector2 
size: " << vector2.size() << std::endl; -+ std::memcpy(message1.data(), vector1.data(), vector1.size()); -+ std::memcpy(message2.data(), vector2.data(), vector2.size()); -+ //socket.send(message1); -+ socket.send(message1, zmq::send_flags::sndmore); -+ socket.send(message2); -+ //socket.send_multipart([b"infer", input_ids, attention_mask]) -+ //std::cout << "image sent"<getInputDimensions(); -+ size_t batchSize = tensorShape.getDimensions()[0]; -+ std::cout << "Batch size for the container is " << batchSize << std::endl; -+ -+ // Open the input file listing and group input files into batches -+ // std::vector> inputs = preprocessInput(inputFile, batchSize); -+ -+ try { -+ std::cout << "Waiting for socket:" << std::endl; -+ zmq::message_t messages1; -+ zmq::message_t messages2; -+ zmq::message_t messages3; -+ -+ //Receiving 2 messages -+ //first message is input_ids -+ //second message is attention_mask -+ socket.recv(&messages1); -+ socket.recv(&messages2); -+ socket.recv(&messages3); -+ std::cout << "Message received:" << std::endl; -+ -+ //Creating vector for the 2 messages -+ std::vector receivedData1(static_cast(messages1.data()), static_cast(messages1.data()) + messages1.size() / sizeof(int)); -+ std::vector receivedData2(static_cast(messages2.data()), static_cast(messages2.data()) + messages2.size() / sizeof(int)); -+ std::vector receivedData3(static_cast(messages3.data()), static_cast(messages3.data()) + messages3.size() / sizeof(int)); -+ std::cout << "\n Inputs ids size:" << messages1.size() << std::endl; -+ std::cout << "\n Attention Masks size:" << messages2.size() << std::endl; -+ std::cout << "\n Token Type Ids size:" << messages3.size() << std::endl; -+ -+ -+ -+ -+ if (useUserSuppliedBuffers) -+ { -+ // SNPE allows its input and output buffers that are fed to the network -+ // to come from user-backed buffers. First, SNPE buffers are created from -+ // user-backed storage. These SNPE buffers are then supplied to the network -+ // and the results are stored in user-backed output buffers. This allows for -+ // reusing the same buffers for multiple inputs and outputs. 
-+ -+ zdl::DlSystem::UserBufferMap inputMap, outputMap; -+ std::vector > snpeUserBackedInputBuffers, snpeUserBackedOutputBuffers; -+ // std::unordered_map > applicationOutputBuffers; -+ std::unordered_map > applicationOutputBuffersFloat; -+ -+ if (bufferType == USERBUFFER_FLOAT) -+ { -+ std::cout << "userbuffer float" << std::endl; -+ // createOutputBufferMap(outputMap, applicationOutputBuffers, snpeUserBackedOutputBuffers, snpe, false, bitWidth); -+ createOutputBufferMap(outputMap, applicationOutputBuffersFloat, snpeUserBackedOutputBuffers, snpe, false, bitWidth); -+ std::cout << "createOutputBufferMap done" << std::endl; -+ -+ // std::unordered_map > applicationInputBuffers; -+ std::unordered_map > applicationInputBuffersFloat; -+ std::cout << "creating Inputbuffermap" << std::endl; -+ // createInputBufferMap(inputMap, applicationInputBuffers, snpeUserBackedInputBuffers, snpe, false, false, bitWidth); -+ createInputBufferMap(inputMap, applicationInputBuffersFloat, snpeUserBackedInputBuffers, snpe, false, false, bitWidth); -+ std::cout << "createInputBufferMap done" << std::endl; -+ -+ -+ -+ const auto& outputNamesOpt = snpe->getOutputTensorNames(); -+ const zdl::DlSystem::StringList& outputNames = *outputNamesOpt; -+ const char* out_name1 = outputNames.at(0); -+ const char* out_name2 = outputNames.at(1); -+ -+ std::cout << "Out Name 1 " << outputNames.at(0) << std::endl; -+ std::cout << "Out Name 2 " << outputNames.at(1) << std::endl; -+ const auto& inputNamesOpt = snpe->getInputTensorNames(); -+ const zdl::DlSystem::StringList& inputNames = *inputNamesOpt; -+ const char* in_name1 = inputNames.at(0); -+ const char* in_name2 = inputNames.at(1); -+ const char* in_name3 = inputNames.at(2); -+ -+ std::cout << "IN Name 1 " << inputNames.at(0) << std::endl; -+ std::cout << "IN Name 2 " << inputNames.at(1) << std::endl; -+ std::cout << "IN Name 3 " << inputNames.at(2) << std::endl; -+ applicationInputBuffersFloat.at(in_name1) = receivedData1; -+ applicationInputBuffersFloat.at(in_name2) = receivedData2; -+ applicationInputBuffersFloat.at(in_name3) = receivedData3; -+ std::cout << "wrote data" << std::endl; -+ std::cout << "Received input" << std::endl; -+ for (size_t i = 0; i < batchSize; i++) -+ { -+ // Load input user buffer(s) with values from file(s) -+ if (batchSize > 1) -+ std::cout << "Batch " << i << ":" << std::endl; -+ -+ uint64_t start_ms = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); -+ -+ // Execute the input buffer map on the model with SNPE -+ execStatus = snpe->execute(inputMap, outputMap); -+ std::cout << "\nExecStatius: " << execStatus << std::endl; -+ -+ uint64_t end_ms = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); -+ -+ int infer_time = end_ms - start_ms; -+ -+ std::string mill_str = std::to_string(infer_time); -+ std::cout << "Inference time:" << mill_str << std::endl; -+ infer_time_reply.rebuild(mill_str.size()); -+ memcpy((void*)infer_time_reply.data(), (mill_str.c_str()), mill_str.size()); -+ -+ // Prepare the vectors(execution result) to be sent -+ std::vector vector1 = applicationOutputBuffersFloat.at(out_name1); -+ std::vector vector2 = applicationOutputBuffersFloat.at(out_name2); -+ //for (int i = vector1.size(); i < vector1.size()+5; i++) { -+ // std::cout << vector1[i] << std::endl; -+ //} -+ -+ // Send the result -+ if (execStatus == true) -+ { -+ std::cout << "execStatus is true" << std::endl; -+ -+ -+ zmq::message_t message1(vector1.size()); -+ zmq::message_t 
message2(vector2.size()); -+ std::cout << "Copying msg" << std::endl; -+ -+ std::cout << "vector1 size: " << vector1.size() << std::endl; -+ std::cout << "vector2 size: " << vector2.size() << std::endl; -+ std::memcpy(message1.data(), vector1.data(), vector1.size()); -+ std::memcpy(message2.data(), vector2.data(), vector2.size()); -+ //socket.send(message1); -+ socket.send(message1, zmq::send_flags::sndmore); -+ socket.send(message2); -+ //socket.send_multipart([b"infer", input_ids, attention_mask]) -+ //std::cout << "image sent"<getInputDimensions(); -- size_t batchSize = tensorShape.getDimensions()[0]; -- std::cout << "Batch size for the container is " << batchSize << std::endl; -- -- // Open the input file listing and group input files into batches -- std::vector> inputs = preprocessInput(inputFile, batchSize); -- -- // Load contents of input file batches ino a SNPE tensor or user buffer, -- // user buffer include cpu buffer and OpenGL buffer, -- // execute the network with the input and save each of the returned output to a file. -- if(useUserSuppliedBuffers) -- { -- // SNPE allows its input and output buffers that are fed to the network -- // to come from user-backed buffers. First, SNPE buffers are created from -- // user-backed storage. These SNPE buffers are then supplied to the network -- // and the results are stored in user-backed output buffers. This allows for -- // reusing the same buffers for multiple inputs and outputs. -- zdl::DlSystem::UserBufferMap inputMap, outputMap; -- std::vector > snpeUserBackedInputBuffers, snpeUserBackedOutputBuffers; -- std::unordered_map > applicationOutputBuffers; -- -- if( bufferType == USERBUFFER_TF8 || bufferType == USERBUFFER_TF16 ) -- { -- createOutputBufferMap(outputMap, applicationOutputBuffers, snpeUserBackedOutputBuffers, snpe, true, bitWidth); -- -- std::unordered_map > applicationInputBuffers; -- createInputBufferMap(inputMap, applicationInputBuffers, snpeUserBackedInputBuffers, snpe, true, staticQuantization, bitWidth); -- -- for( size_t i = 0; i < inputs.size(); i++ ) -- { -- // Load input user buffer(s) with values from file(s) -- if( batchSize > 1 ) -- std::cout << "Batch " << i << ":" << std::endl; -- if(!loadInputUserBufferTfN(applicationInputBuffers, snpe, inputs[i], inputMap, staticQuantization, bitWidth)) -- { -- return EXIT_FAILURE; -- } -- // Execute the input buffer map on the model with SNPE -- execStatus = snpe->execute(inputMap, outputMap); -- // Save the execution results only if successful -- if (execStatus == true) -- { -- if(!saveOutput(outputMap, applicationOutputBuffers, OutputDir, i * batchSize, batchSize, true, bitWidth)) -- { -- return EXIT_FAILURE; -- } -- -- } -- else -- { -- std::cerr << "Error while executing the network." 
<< std::endl; -- } -- } -- } -- else if( bufferType == USERBUFFER_FLOAT ) -- { -- createOutputBufferMap(outputMap, applicationOutputBuffers, snpeUserBackedOutputBuffers, snpe, false, bitWidth); -- -- if( userBufferSourceType == CPUBUFFER ) -- { -- std::unordered_map > applicationInputBuffers; -- createInputBufferMap(inputMap, applicationInputBuffers, snpeUserBackedInputBuffers, snpe, false, false, bitWidth); -- -- for( size_t i = 0; i < inputs.size(); i++ ) -- { -- // Load input user buffer(s) with values from file(s) -- if( batchSize > 1 ) -- std::cout << "Batch " << i << ":" << std::endl; -- if(!loadInputUserBufferFloat(applicationInputBuffers, snpe, inputs[i])) -- { -- return EXIT_FAILURE; -- } -- // Execute the input buffer map on the model with SNPE -- execStatus = snpe->execute(inputMap, outputMap); -- // Save the execution results only if successful -- if (execStatus == true) -- { -- if(!saveOutput(outputMap, applicationOutputBuffers, OutputDir, i * batchSize, batchSize, false, bitWidth)) -- { -- return EXIT_FAILURE; -- } -- } -- else -- { -- std::cerr << "Error while executing the network." << std::endl; -- } -- } -- } -- } -- } -- else if(bufferType == ITENSOR) -- { -- // A tensor map for SNPE execution outputs -- zdl::DlSystem::TensorMap outputTensorMap; -- //Get input names and number -- const auto& inputTensorNamesRef = snpe->getInputTensorNames(); -- if (!inputTensorNamesRef) throw std::runtime_error("Error obtaining Input tensor names"); -- const auto &inputTensorNames = *inputTensorNamesRef; -- -- for (size_t i = 0; i < inputs.size(); i++) { -- // Load input/output buffers with ITensor -- if(batchSize > 1) -- std::cout << "Batch " << i << ":" << std::endl; -- if (inputTensorNames.size() == 1) -- { -- // Load input/output buffers with ITensor -- std::unique_ptr inputTensor = loadInputTensor(snpe, inputs[i], inputTensorNames); -- if(!inputTensor) -- { -- return EXIT_FAILURE; -- } -- // Execute the input tensor on the model with SNPE -- execStatus = snpe->execute(inputTensor.get(), outputTensorMap); -- } -- else -- { -- std::vector> inputTensors(inputTensorNames.size()); -- zdl::DlSystem::TensorMap inputTensorMap; -- bool inputLoadStatus = false; -- // Load input/output buffers with TensorMap -- std::tie(inputTensorMap, inputLoadStatus) = loadMultipleInput(snpe, inputs[i], inputTensorNames, inputTensors); -- if(!inputLoadStatus) -- { -- return EXIT_FAILURE; -- } -- // Execute the multiple input tensorMap on the model with SNPE -- execStatus = snpe->execute(inputTensorMap, outputTensorMap); -- } -- // Save the execution results if execution successful -- if (execStatus == true) -- { -- if(!saveOutput(outputTensorMap, OutputDir, i * batchSize, batchSize)) -- { -- return EXIT_FAILURE; -- } -- } -- else -- { -- std::cerr << "Error while executing the network." 
<< std::endl; -- } -- } -- } - // Freeing of snpe object - snpe.reset(); - return SUCCESS; diff --git a/ai-solutions/windows/angular-app-nlp/generating_model.ipynb b/ai-solutions/windows/angular-app-nlp/generating_model.ipynb deleted file mode 100644 index 52ce514a..00000000 --- a/ai-solutions/windows/angular-app-nlp/generating_model.ipynb +++ /dev/null @@ -1,1376 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "416d10e9-51d0-45da-b016-970e1db53d26", - "metadata": {}, - "source": [ - "# Preparaing the dataset\n", - "- [ https://rajpurkar.github.io/SQuAD-explorer/ ] (Dataset link)\n", - "- Download the dataset from the above link" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "807742c0-4c3b-4a8c-af79-04fb2f5fdf7c", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "--2023-10-03 10:45:46-- https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json\n", - "Resolving rajpurkar.github.io (rajpurkar.github.io)... 185.199.111.153, 185.199.108.153, 185.199.109.153, ...\n", - "Connecting to rajpurkar.github.io (rajpurkar.github.io)|185.199.111.153|:443... connected.\n", - "HTTP request sent, awaiting response... 200 OK\n", - "Length: 4370528 (4.2M) [application/json]\n", - "Saving to: ‘dev-v2.0.json’\n", - "\n", - " 0K .......... .......... .......... .......... .......... 1% 879K 5s\n", - " 50K .......... .......... .......... .......... .......... 2% 1.07M 4s\n", - " 100K .......... .......... .......... .......... .......... 3% 4.82M 3s\n", - " 150K .......... .......... .......... .......... .......... 4% 1.64M 3s\n", - " 200K .......... .......... .......... .......... .......... 5% 5.56M 2s\n", - " 250K .......... .......... .......... .......... .......... 7% 7.47M 2s\n", - " 300K .......... .......... .......... .......... .......... 8% 8.04M 2s\n", - " 350K .......... .......... .......... .......... .......... 9% 9.28M 2s\n", - " 400K .......... .......... .......... .......... .......... 10% 2.22M 2s\n", - " 450K .......... .......... .......... .......... .......... 11% 10.6M 1s\n", - " 500K .......... .......... .......... .......... .......... 12% 12.8M 1s\n", - " 550K .......... .......... .......... .......... .......... 14% 15.8M 1s\n", - " 600K .......... .......... .......... .......... .......... 15% 12.5M 1s\n", - " 650K .......... .......... .......... .......... .......... 16% 11.4M 1s\n", - " 700K .......... .......... .......... .......... .......... 17% 22.4M 1s\n", - " 750K .......... .......... .......... .......... .......... 18% 15.9M 1s\n", - " 800K .......... .......... .......... .......... .......... 19% 19.6M 1s\n", - " 850K .......... .......... .......... .......... .......... 21% 2.66M 1s\n", - " 900K .......... .......... .......... .......... .......... 22% 16.9M 1s\n", - " 950K .......... .......... .......... .......... .......... 23% 19.1M 1s\n", - " 1000K .......... .......... .......... .......... .......... 24% 23.0M 1s\n", - " 1050K .......... .......... .......... .......... .......... 25% 46.0M 1s\n", - " 1100K .......... .......... .......... .......... .......... 26% 25.4M 1s\n", - " 1150K .......... .......... .......... .......... .......... 28% 20.3M 1s\n", - " 1200K .......... .......... .......... .......... .......... 29% 21.9M 1s\n", - " 1250K .......... .......... .......... .......... .......... 30% 40.2M 1s\n", - " 1300K .......... .......... .......... .......... .......... 31% 23.9M 1s\n", - " 1350K .......... .......... .......... .......... 
.......... 32% 43.6M 1s\n", - " 1400K .......... .......... .......... .......... .......... 33% 32.1M 1s\n", - " 1450K .......... .......... .......... .......... .......... 35% 28.0M 0s\n", - " 1500K .......... .......... .......... .......... .......... 36% 46.8M 0s\n", - " 1550K .......... .......... .......... .......... .......... 37% 20.3M 0s\n", - " 1600K .......... .......... .......... .......... .......... 38% 52.7M 0s\n", - " 1650K .......... .......... .......... .......... .......... 39% 41.2M 0s\n", - " 1700K .......... .......... .......... .......... .......... 41% 30.8M 0s\n", - " 1750K .......... .......... .......... .......... .......... 42% 2.81M 0s\n", - " 1800K .......... .......... .......... .......... .......... 43% 92.1M 0s\n", - " 1850K .......... .......... .......... .......... .......... 44% 45.8M 0s\n", - " 1900K .......... .......... .......... .......... .......... 45% 30.9M 0s\n", - " 1950K .......... .......... .......... .......... .......... 46% 42.5M 0s\n", - " 2000K .......... .......... .......... .......... .......... 48% 23.5M 0s\n", - " 2050K .......... .......... .......... .......... .......... 49% 100M 0s\n", - " 2100K .......... .......... .......... .......... .......... 50% 48.9M 0s\n", - " 2150K .......... .......... .......... .......... .......... 51% 41.3M 0s\n", - " 2200K .......... .......... .......... .......... .......... 52% 51.3M 0s\n", - " 2250K .......... .......... .......... .......... .......... 53% 40.3M 0s\n", - " 2300K .......... .......... .......... .......... .......... 55% 54.1M 0s\n", - " 2350K .......... .......... .......... .......... .......... 56% 60.7M 0s\n", - " 2400K .......... .......... .......... .......... .......... 57% 43.9M 0s\n", - " 2450K .......... .......... .......... .......... .......... 58% 51.5M 0s\n", - " 2500K .......... .......... .......... .......... .......... 59% 48.7M 0s\n", - " 2550K .......... .......... .......... .......... .......... 60% 68.6M 0s\n", - " 2600K .......... .......... .......... .......... .......... 62% 65.9M 0s\n", - " 2650K .......... .......... .......... .......... .......... 63% 50.6M 0s\n", - " 2700K .......... .......... .......... .......... .......... 64% 49.7M 0s\n", - " 2750K .......... .......... .......... .......... .......... 65% 49.8M 0s\n", - " 2800K .......... .......... .......... .......... .......... 66% 111M 0s\n", - " 2850K .......... .......... .......... .......... .......... 67% 45.0M 0s\n", - " 2900K .......... .......... .......... .......... .......... 69% 74.9M 0s\n", - " 2950K .......... .......... .......... .......... .......... 70% 49.2M 0s\n", - " 3000K .......... .......... .......... .......... .......... 71% 57.3M 0s\n", - " 3050K .......... .......... .......... .......... .......... 72% 108M 0s\n", - " 3100K .......... .......... .......... .......... .......... 73% 70.3M 0s\n", - " 3150K .......... .......... .......... .......... .......... 74% 57.5M 0s\n", - " 3200K .......... .......... .......... .......... .......... 76% 78.2M 0s\n", - " 3250K .......... .......... .......... .......... .......... 77% 53.0M 0s\n", - " 3300K .......... .......... .......... .......... .......... 78% 77.7M 0s\n", - " 3350K .......... .......... .......... .......... .......... 79% 87.4M 0s\n", - " 3400K .......... .......... .......... .......... .......... 80% 54.8M 0s\n", - " 3450K .......... .......... .......... .......... .......... 82% 77.5M 0s\n", - " 3500K .......... .......... .......... .......... .......... 
83% 84.2M 0s\n", - " 3550K .......... .......... .......... .......... .......... 84% 79.8M 0s\n", - " 3600K .......... .......... .......... .......... .......... 85% 3.06M 0s\n", - " 3650K .......... .......... .......... .......... .......... 86% 159M 0s\n", - " 3700K .......... .......... .......... .......... .......... 87% 62.4M 0s\n", - " 3750K .......... .......... .......... .......... .......... 89% 88.1M 0s\n", - " 3800K .......... .......... .......... .......... .......... 90% 89.0M 0s\n", - " 3850K .......... .......... .......... .......... .......... 91% 52.6M 0s\n", - " 3900K .......... .......... .......... .......... .......... 92% 30.1M 0s\n", - " 3950K .......... .......... .......... .......... .......... 93% 110M 0s\n", - " 4000K .......... .......... .......... .......... .......... 94% 84.8M 0s\n", - " 4050K .......... .......... .......... .......... .......... 96% 102M 0s\n", - " 4100K .......... .......... .......... .......... .......... 97% 168M 0s\n", - " 4150K .......... .......... .......... .......... .......... 98% 98.2M 0s\n", - " 4200K .......... .......... .......... .......... .......... 99% 138M 0s\n", - " 4250K .......... ........ 100% 59.3M=0.3s\n", - "\n", - "2023-10-03 10:45:47 (12.3 MB/s) - ‘dev-v2.0.json’ saved [4370528/4370528]\n", - "\n" - ] - } - ], - "source": [ - "%%bash\n", - "\n", - "wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "d8cbcc78-e8ba-43c2-b351-f86695041835", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
contextquestionanswers
0The Normans (Norman: Nourmands; French: Norman...In what country is Normandy located?France
1The Normans (Norman: Nourmands; French: Norman...When were the Normans in Normandy?10th and 11th centuries
2The Normans (Norman: Nourmands; French: Norman...From which countries did the Norse originate?Denmark, Iceland and Norway
\n", - "
" - ], - "text/plain": [ - " context \\\n", - "0 The Normans (Norman: Nourmands; French: Norman... \n", - "1 The Normans (Norman: Nourmands; French: Norman... \n", - "2 The Normans (Norman: Nourmands; French: Norman... \n", - "\n", - " question answers \n", - "0 In what country is Normandy located? France \n", - "1 When were the Normans in Normandy? 10th and 11th centuries \n", - "2 From which countries did the Norse originate? Denmark, Iceland and Norway " - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import json\n", - "import pandas as pd\n", - "\n", - "data_path=\"dev-v2.0.json\"\n", - "\n", - "with open(data_path,\"r\") as f:\n", - " squad_data=json.load(f)\n", - "\n", - "context_qa_triples=[]\n", - "\n", - "for article in squad_data['data']:\n", - " for paragraph in article['paragraphs']:\n", - " context=paragraph['context']\n", - " for qa in paragraph['qas']:\n", - " question=qa['question']\n", - " if qa['answers']:\n", - " answer=qa['answers'][0]['text']\n", - " elif qa['plausible_answers']:\n", - " plausible_answers=qa['plausible_answers']\n", - " answer=plausible_answers[0]['text']\n", - " else:\n", - " answer=''\n", - "\n", - " context_qa_triples.append({'context':context,'question':question,'answers':answer})\n", - "\n", - "df=pd.DataFrame(context_qa_triples[:30])\n", - "df.head(3)" - ] - }, - { - "cell_type": "markdown", - "id": "e4a9faed-f7ac-4f45-a557-d5adc8762dca", - "metadata": {}, - "source": [ - "# Generating Albert Model\n", - "- [Albert Model](https://huggingface.co/docs/transformers/model_doc/albert) You can Learn More about this model from this link\n", - "- You can also check different version of Albert for different usecases from here." - ] - }, - { - "cell_type": "markdown", - "id": "fb83206f-1109-4b87-aea2-11bfede84967", - "metadata": {}, - "source": [ - "### Converting the Model to ONNX format using optimum\n", - "- [ https://github.com/huggingface/optimum ] (Link for optimum)\n", - "- Using optimum we can directly convert any pytorch or tensorflow model to onnx format.\n", - "- Then from this onnx file we can convert to DLC format using SNPE" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "83500540-ebc7-45aa-a675-ea863f1db3b2", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n", - "To disable this warning, you can either:\n", - "\t- Avoid using `tokenizers` before the fork if possible\n", - "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-09-30 23:14:16.106219: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-09-30 23:14:16.180686: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. 
To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-09-30 23:14:16.197835: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-09-30 23:14:16.507801: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 23:14:16.507840: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 23:14:16.507843: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Framework not specified. Using pt to export to ONNX.\n", - "Some weights of the model checkpoint at twmkn9/albert-base-v2-squad2 were not used when initializing AlbertForQuestionAnswering: ['albert.pooler.weight', 'albert.pooler.bias']\n", - "- This IS expected if you are initializing AlbertForQuestionAnswering from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", - "- This IS NOT expected if you are initializing AlbertForQuestionAnswering from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", - "Automatic task detection to question-answering.\n", - "Using framework PyTorch: 2.0.1+cu117\n", - "Post-processing the exported models...\n", - "Validating models in subprocesses...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "============= Diagnostic Run torch.onnx.export version 2.0.1+cu117 =============\n", - "verbose: False, log level: Level.ERROR\n", - "======================= 0 NONE 0 NOTE 0 WARNING 0 ERROR ========================\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-09-30 23:14:21.945196: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-09-30 23:14:22.021237: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. 
To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-09-30 23:14:22.039349: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-09-30 23:14:22.350549: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 23:14:22.350586: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 23:14:22.350590: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Validating ONNX model alberta-onnx/model.onnx...\n", - "\t-[✓] ONNX model output names match reference model (start_logits, end_logits)\n", - "\t- Validating ONNX Model output \"start_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "\t- Validating ONNX Model output \"end_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "The ONNX export succeeded and the exported model was saved at: alberta-onnx\n" - ] - } - ], - "source": [ - "%%bash\n", - "optimum-cli export onnx --model twmkn9/albert-base-v2-squad2 alberta-onnx/" - ] - }, - { - "cell_type": "markdown", - "id": "0f23e961-0113-4ee6-87d9-6391f9105fe2", - "metadata": {}, - "source": [ - "### DLC Conversion with fixed size\n", - "- Now as we get the ONNX Model we'll now convert this to DLC Format" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "5f98873c-afd1-4ba0-b3c2-1f28a5e0b574", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n", - "To disable this warning, you can either:\n", - "\t- Avoid using `tokenizers` before the fork if possible\n", - "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n", - "\u001b[1;31mWARNING: The argument `input_shapes` is deprecated. Please use \u001b[0m\n", - "\u001b[1;31m`overwrite_input_shapes` and/or `test_input_shapes` instead. An error will be \u001b[0m\n", - "\u001b[1;31mraised in the future.\u001b[0m\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-09-30 23:14:31,801 - 235 - INFO - Successfully simplified the onnx model in child process\n", - "2023-09-30 23:14:31,937 - 235 - INFO - Successfully receive the simplified onnx model in main process\n", - "2023-09-30 23:14:32,000 - 235 - INFO - Successfully run shape inference in child process\n", - "2023-09-30 23:14:32,152 - 235 - INFO - Successfully receive the inferred model in main process\n", - "2023-09-30 23:14:32,165 - 240 - WARNING - WARNING_CAST_TYPE: Only numerical type cast is supported. 
The op: /albert/Cast will be interpreted at conversion time\n", - "2023-09-30 23:14:33,860 - 235 - INFO - INFO_INITIALIZATION_SUCCESS: \n", - "2023-09-30 23:14:34,016 - 235 - INFO - INFO_CONVERSION_SUCCESS: Conversion completed successfully\n", - "2023-09-30 23:14:34,076 - 235 - INFO - INFO_WRITE_SUCCESS: \n" - ] - } - ], - "source": [ - "%%bash\n", - "snpe-onnx-to-dlc -i alberta-onnx/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o alberta.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "ca0c0356-9a15-4ef1-9f63-3f1baf41eed7", - "metadata": {}, - "source": [ - "### Creating FP16 Model\n", - "1. First of all we need to create the RAW File\n", - "2. Then we'll convert this FP32 DLC to FP16 DLC" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a0710645-b945-486f-9050-945e1058faaa", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "mkdir input_ids\n", - "mkdir attention_mask\n", - "mkdir token_type_ids" - ] - }, - { - "cell_type": "markdown", - "id": "9ebe8f2a-e140-4828-ba36-5171fbba9b6e", - "metadata": {}, - "source": [ - "#### Creating the RAW Files" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7a27bfe7-d501-4b57-9b55-a3e3374e2dec", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, AlbertForQuestionAnswering\n", - "import torch\n", - "\n", - "# Getting the tokenizer to convert it to particular inputs that the model needed\n", - "tokenizer = AutoTokenizer.from_pretrained(\"twmkn9/albert-base-v2-squad2\")\n", - "\n", - "question_token={}\n", - "\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "markdown", - "id": "31780ce6-882d-48dc-b589-e893e429d9fd", - "metadata": {}, - "source": [ - "#### Creating the List " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0ca58728-3401-4653-8542-b823f735264f", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "total_iter = 30\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "\n", - "with open(\"tf_raw_list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"input_ids:=input_ids/inp_ids_{}.raw attention_mask:=attention_mask/attn_mask_{}.raw token_type_ids:=token_type_ids/token_type_id_{}.raw\\n\".format(i,i,i))\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "id": "13f4ca35-caf7-4ffa-a954-bc8405386276", - "metadata": {}, - "source": [ - "### Creating the FP16 Model\n", - "- This cached model is optimized for sm8550\n", - "- if you've different processor please change it accordingly" - ] - }, - { - 
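A quick note on the graph-prepare step in the cell below: `--htp_socs sm8550` pins the offline-prepared HTP cache to a Snapdragon 8 Gen 2 class SoC. As a hedged illustration (the exact SoC identifiers accepted by `--htp_socs` should be checked against your SNPE SDK documentation, and `sm8450` here is an assumed example, not part of the original notebook), retargeting the same DLC for a different chipset would only change that flag:

```bash
# Hypothetical variant of the cell below: prepare the FP16 cache for a different SoC.
# Only --htp_socs changes; all other flags mirror the original command.
snpe-dlc-graph-prepare --input_dlc alberta.dlc \
                       --input_list tf_raw_list.txt \
                       --output_dlc alberta_float.dlc \
                       --set_output_tensors end_logits,start_logits \
                       --use_float_io \
                       --htp_socs sm8450
```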
"cell_type": "code", - "execution_count": 24, - "id": "d8c82fc7-3b70-4898-be64-0ad9d0dd0312", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[INFO] InitializeStderr: DebugLog initialized.\n", - "[INFO] SNPE HTP Offline Prepare: Attempting to create cache for SM8550\n", - "[INFO] Attempting to open dynamically linked lib: libHtpPrepare.so\n", - "[INFO] dlopen libHtpPrepare.so SUCCESS handle 0x2e303b0\n", - "[INFO] Found Interface Provider (v2.8)\n", - "[USER_WARNING] QnnDsp Initializing HtpProvider\n", - "[USER_WARNING] QnnDsp Cost Based unsupported on soc SM8550\n", - "[USER_INFO] FP16 precision enabled for graph with id=0\n", - "[USER_INFO] Offline Prepare VTCM size(MB) selected = 8\n", - "[USER_INFO] Offline Prepare DLBC enablement passed = 0\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n", - "To disable this warning, you can either:\n", - "\t- Avoid using `tokenizers` before the fork if possible\n", - "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[USER_INFO] Cleaning up backend manager resources\n", - "[USER_INFO] Cleaning up Contexts\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] SNPE HTP Offline Prepare: Successfully created cache for SM8550\n", - "[INFO] SNPE HTP Offline Prepare: Saved cached DLC to alberta_float.dlc\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] DebugLog shutting down.\n" - ] - } - ], - "source": [ - "%%bash\n", - "\n", - "snpe-dlc-graph-prepare --input_dlc alberta.dlc --input_list tf_raw_list.txt --output_dlc alberta_float.dlc --set_output_tensors end_logits,start_logits --use_float_io --htp_socs sm8550" - ] - }, - { - "cell_type": "markdown", - "id": "fd975495-2aef-4f3d-87be-9370bbf605da", - "metadata": {}, - "source": [ - "# Generating Mobilebert Model\n", - "- [Mobile bert ](https://huggingface.co/csarron/mobilebert-uncased-squad-v2/tree/main) You can Learn More about this model from this link\n", - "- To check more about different use cases of Mobilebert you can use this [link](https://huggingface.co/docs/transformers/model_doc/mobilebert)" - ] - }, - { - "cell_type": "markdown", - "id": "bccfc9e5-4134-4656-94fb-f3641848818f", - "metadata": {}, - "source": [ - "### Generating the ONNX Model" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "650cebf7-1697-4a9d-bb53-4903a7b304b4", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. 
Disabling parallelism to avoid deadlocks...\n", - "To disable this warning, you can either:\n", - "\t- Avoid using `tokenizers` before the fork if possible\n", - "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-09-30 21:59:36.311479: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-09-30 21:59:36.385244: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-09-30 21:59:36.402226: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-09-30 21:59:36.706875: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 21:59:36.706914: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 21:59:36.706918: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Framework not specified. Using pt to export to ONNX.\n", - "Automatic task detection to question-answering.\n", - "Using framework PyTorch: 2.0.1+cu117\n", - "/local/mnt/workspace/sahinhos/sahinenv/lib/python3.8/site-packages/transformers/models/mobilebert/modeling_mobilebert.py:549: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n", - " torch.tensor(1000),\n", - "/local/mnt/workspace/sahinhos/sahinenv/lib/python3.8/site-packages/torch/onnx/_internal/jit_utils.py:306: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. (Triggered internally at ../torch/csrc/jit/passes/onnx/constant_fold.cpp:179.)\n", - " _C._jit_pass_onnx_node_shape_type_inference(node, params_dict, opset_version)\n", - "/local/mnt/workspace/sahinhos/sahinenv/lib/python3.8/site-packages/torch/onnx/utils.py:689: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. 
(Triggered internally at ../torch/csrc/jit/passes/onnx/constant_fold.cpp:179.)\n", - " _C._jit_pass_onnx_graph_shape_type_inference(\n", - "/local/mnt/workspace/sahinhos/sahinenv/lib/python3.8/site-packages/torch/onnx/utils.py:1186: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. (Triggered internally at ../torch/csrc/jit/passes/onnx/constant_fold.cpp:179.)\n", - " _C._jit_pass_onnx_graph_shape_type_inference(\n", - "Post-processing the exported models...\n", - "Validating models in subprocesses...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "============= Diagnostic Run torch.onnx.export version 2.0.1+cu117 =============\n", - "verbose: False, log level: Level.ERROR\n", - "======================= 0 NONE 0 NOTE 0 WARNING 0 ERROR ========================\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-09-30 21:59:51.377048: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-09-30 21:59:51.452021: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-09-30 21:59:51.469676: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-09-30 21:59:51.779129: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 21:59:51.779165: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-09-30 21:59:51.779169: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. 
If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Validating ONNX model mobilebert-onnx/model.onnx...\n", - "\t-[✓] ONNX model output names match reference model (end_logits, start_logits)\n", - "\t- Validating ONNX Model output \"start_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "\t- Validating ONNX Model output \"end_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "The ONNX export succeeded and the exported model was saved at: mobilebert-onnx\n" - ] - } - ], - "source": [ - "%%bash\n", - "optimum-cli export onnx --model csarron/mobilebert-uncased-squad-v2 mobilebert-onnx/" - ] - }, - { - "cell_type": "markdown", - "id": "a4c6f6ef-1088-4ac2-b4fe-8aeceaa5d42a", - "metadata": {}, - "source": [ - "### Converting to DLC" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "219fc000-a9bb-483e-891e-2e9db570ea28", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n", - "To disable this warning, you can either:\n", - "\t- Avoid using `tokenizers` before the fork if possible\n", - "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n", - "\u001b[1;31mWARNING: The argument `input_shapes` is deprecated. Please use \u001b[0m\n", - "\u001b[1;31m`overwrite_input_shapes` and/or `test_input_shapes` instead. An error will be \u001b[0m\n", - "\u001b[1;31mraised in the future.\u001b[0m\n", - "WARNING: the simplification stopped because of timeout. Please set environment variable `ONNXSIM_FIXED_POINT_ITERS` to a number higher than 50if you want further simplification.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-09-30 22:00:38,259 - 235 - INFO - Successfully simplified the onnx model in child process\n", - "2023-09-30 22:00:38,608 - 235 - INFO - Successfully receive the simplified onnx model in main process\n", - "2023-09-30 22:00:38,756 - 235 - INFO - Successfully run shape inference in child process\n", - "2023-09-30 22:00:39,093 - 235 - INFO - Successfully receive the inferred model in main process\n", - "2023-09-30 22:00:39,127 - 240 - WARNING - WARNING_CAST_TYPE: Only numerical type cast is supported. 
The op: /mobilebert/Cast will be interpreted at conversion time\n", - "2023-09-30 22:00:45,555 - 235 - INFO - INFO_INITIALIZATION_SUCCESS: \n", - "2023-09-30 22:00:46,066 - 235 - INFO - INFO_CONVERSION_SUCCESS: Conversion completed successfully\n", - "2023-09-30 22:00:46,188 - 235 - INFO - INFO_WRITE_SUCCESS: \n" - ] - } - ], - "source": [ - "%%bash\n", - "snpe-onnx-to-dlc -i mobilebert-onnx/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o mobile_bert.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "f75c34ca-0e44-4e4f-9fed-d67da3e83a3b", - "metadata": {}, - "source": [ - "### Creating the RAW file" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "7aca1352-6a30-4614-9734-f3930774f388", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, MobileBertForQuestionAnswering\n", - "import torch\n", - "\n", - "tokenizer = AutoTokenizer.from_pretrained(\"csarron/mobilebert-uncased-squad-v2\")\n", - "\n", - "\n", - "question_token={}\n", - "\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "bf159fd3-32b9-4416-ad89-6fe1b3516ded", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Generating input_list \"small_raw_list.txt\" with 30 iterations\n" - ] - } - ], - "source": [ - "\n", - "total_iter = 30\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "\n", - "with open(\"tf_raw_list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"input_ids:=input_ids/inp_ids_{}.raw attention_mask:=attention_mask/attn_mask_{}.raw token_type_ids:=token_type_ids/token_type_id_{}.raw\\n\".format(i,i,i))\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "id": "d3f7c71a-ac10-4a31-bcc2-3ddba9dc0a86", - "metadata": {}, - "source": [ - "#### Creating the FP 16 Model" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "d6f2d588-01e0-4a0e-88d8-7b4823e89143", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[INFO] InitializeStderr: DebugLog initialized.\n", - "[INFO] SNPE HTP Offline Prepare: Attempting to create cache for SM8550\n", - "[INFO] Attempting to open dynamically linked lib: libHtpPrepare.so\n", - "[INFO] dlopen libHtpPrepare.so SUCCESS handle 0x17b9a90\n", - "[INFO] Found Interface Provider (v2.8)\n", - "[USER_WARNING] QnnDsp Initializing HtpProvider\n", - "[USER_WARNING] QnnDsp Cost Based unsupported on soc SM8550\n", - "[USER_INFO] FP16 precision enabled for graph with id=0\n", - "[USER_INFO] 
Offline Prepare VTCM size(MB) selected = 8\n", - "[USER_INFO] Offline Prepare DLBC enablement passed = 0\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n", - "To disable this warning, you can either:\n", - "\t- Avoid using `tokenizers` before the fork if possible\n", - "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[USER_INFO] Cleaning up backend manager resources\n", - "[USER_INFO] Cleaning up Contexts\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] SNPE HTP Offline Prepare: Successfully created cache for SM8550\n", - "[INFO] SNPE HTP Offline Prepare: Saved cached DLC to mobile_bert_float.dlc\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] DebugLog shutting down.\n" - ] - } - ], - "source": [ - "%%bash\n", - "snpe-dlc-graph-prepare --input_dlc mobile_bert.dlc --input_list tf_raw_list.txt --output_dlc mobile_bert_float.dlc --use_float_io --set_output_tensors end_logits,start_logits --htp_socs sm8550" - ] - }, - { - "cell_type": "markdown", - "id": "f6941fa4-a8f4-4bd3-997c-823d44c2338b", - "metadata": {}, - "source": [ - "# Generating DistilBert Model\n", - "- [DistilBert](https://huggingface.co/distilbert-base-uncased) - you can learn more about this model from this link\n", - "- To check different use cases of DistilBert, you can use this [link](https://huggingface.co/docs/transformers/model_doc/distilbert)" - ] - }, - { - "cell_type": "markdown", - "id": "707c5d40-452d-4f15-a5e8-5adfb60a6b87", - "metadata": {}, - "source": [ - "### Generating the ONNX Model" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "6c159f63-d842-481f-9f41-ebf34c522cb8", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-10-03 10:44:52.392730: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-10-03 10:44:52.517845: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. 
To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-10-03 10:44:52.558733: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-10-03 10:44:53.007277: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:44:53.007318: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:44:53.007322: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Framework not specified. Using pt to export to ONNX.\n", - "Automatic task detection to question-answering.\n", - "Using framework PyTorch: 2.0.1+cu117\n", - "/local/mnt/workspace/sahinhos/sahinenv/lib/python3.8/site-packages/transformers/models/distilbert/modeling_distilbert.py:223: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n", - " mask, torch.tensor(torch.finfo(scores.dtype).min)\n", - "Post-processing the exported models...\n", - "Validating models in subprocesses...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "============= Diagnostic Run torch.onnx.export version 2.0.1+cu117 =============\n", - "verbose: False, log level: Level.ERROR\n", - "======================= 0 NONE 0 NOTE 0 WARNING 0 ERROR ========================\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-10-03 10:45:02.060867: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-10-03 10:45:02.135422: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. 
To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-10-03 10:45:02.153106: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-10-03 10:45:02.470090: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:45:02.470129: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:45:02.470133: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Validating ONNX model distilbert-uncased-onnx/model.onnx...\n", - "\t-[✓] ONNX model output names match reference model (end_logits, start_logits)\n", - "\t- Validating ONNX Model output \"start_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "\t- Validating ONNX Model output \"end_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "The ONNX export succeeded and the exported model was saved at: distilbert-uncased-onnx\n" - ] - } - ], - "source": [ - "%%bash\n", - "optimum-cli export onnx --model distilbert-base-uncased-distilled-squad distilbert-uncased-onnx/" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "366aff04-6160-4f85-b0ea-711132fdc06d", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "NodeArg(name='input_ids', type='tensor(int64)', shape=['batch_size', 'sequence_length'])\n", - "NodeArg(name='attention_mask', type='tensor(int64)', shape=['batch_size', 'sequence_length'])\n" - ] - } - ], - "source": [ - "import onnxruntime\n", - "\n", - "model_path='distilbert-uncased-onnx/model.onnx'\n", - "sess=onnxruntime.InferenceSession(model_path)\n", - "\n", - "input_layer_names=sess.get_inputs()\n", - "\n", - "for input_layer in input_layer_names:\n", - " print(input_layer)" - ] - }, - { - "cell_type": "markdown", - "id": "45293bb1-fb07-457b-85a7-7b590b0cc483", - "metadata": {}, - "source": [ - "### Generating the DLC Model" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "64e4b107-1a84-41d9-9301-b42b094f4e3b", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[1;31mWARNING: The argument `input_shapes` is deprecated. Please use \u001b[0m\n", - "\u001b[1;31m`overwrite_input_shapes` and/or `test_input_shapes` instead. 
An error will be \u001b[0m\n", - "\u001b[1;31mraised in the future.\u001b[0m\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-10-03 10:45:21,467 - 235 - INFO - Successfully simplified the onnx model in child process\n", - "2023-10-03 10:45:22,316 - 235 - INFO - Successfully receive the simplified onnx model in main process\n", - "2023-10-03 10:45:22,710 - 235 - INFO - Successfully run shape inference in child process\n", - "2023-10-03 10:45:23,659 - 235 - INFO - Successfully receive the inferred model in main process\n", - "2023-10-03 10:45:24,924 - 235 - INFO - INFO_INITIALIZATION_SUCCESS: \n", - "2023-10-03 10:45:25,704 - 235 - INFO - INFO_CONVERSION_SUCCESS: Conversion completed successfully\n", - "2023-10-03 10:45:26,041 - 235 - INFO - INFO_WRITE_SUCCESS: \n" - ] - } - ], - "source": [ - "%%bash\n", - "snpe-onnx-to-dlc -i distilbert-uncased-onnx/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -o distilbert.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "c662141a-548d-4035-8c58-e3dd289d942d", - "metadata": {}, - "source": [ - "### Generating the RAW file" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "abc71553-f164-4ca6-9f38-31e487d68e6d", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import DistilBertTokenizer, TFDistilBertForQuestionAnswering\n", - "import tensorflow as tf\n", - "tokenizer = DistilBertTokenizer.from_pretrained(\"distilbert-base-uncased-distilled-squad\")\n", - "\n", - "question_token={}\n", - "\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)" - ] - }, - { - "cell_type": "markdown", - "id": "d3943565-f0eb-4786-b07f-53990b47e131", - "metadata": {}, - "source": [ - "#### Creating the list" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "74089306-1087-476e-953a-c3ee1c24f2c0", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Generating input_list \"small_raw_list.txt\" with 30 iterations\n" - ] - } - ], - "source": [ - "\n", - "total_iter = 30\n", - "print(\"Generating input_list \\\"tf_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "\n", - "with open(\"tf_raw_list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"input_ids:=input_ids/inp_ids_{}.raw attention_mask:=attention_mask/attn_mask_{}.raw\\n\".format(i,i)) # add token mask if needed\n" - ] - }, - { - "cell_type": "markdown", - "id": "5438afa2-7d0c-431a-a430-14c3c93df69f", - "metadata": {}, - "source": [ - "### Creating the FP16 DLC Model" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "e4420c91-d0a8-4e9d-913c-014485696284", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[INFO] InitializeStderr: DebugLog initialized.\n", - "[INFO] SNPE HTP Offline Prepare: Attempting to create cache for 
SM8550\n", - "[INFO] Attempting to open dynamically linked lib: libHtpPrepare.so\n", - "[INFO] dlopen libHtpPrepare.so SUCCESS handle 0x15d8c70\n", - "[INFO] Found Interface Provider (v2.8)\n", - "[USER_WARNING] QnnDsp Initializing HtpProvider\n", - "[USER_WARNING] QnnDsp Cost Based unsupported on soc SM8550\n", - "[USER_INFO] FP16 precision enabled for graph with id=0\n", - "[USER_INFO] Offline Prepare VTCM size(MB) selected = 8\n", - "[USER_INFO] Offline Prepare DLBC enablement passed = 0\n", - "[USER_INFO] Cleaning up backend manager resources\n", - "[USER_INFO] Cleaning up Contexts\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] SNPE HTP Offline Prepare: Successfully created cache for SM8550\n", - "[INFO] SNPE HTP Offline Prepare: Saved cached DLC to distilbert_float.dlc\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] DebugLog shutting down.\n" - ] - } - ], - "source": [ - "%%bash\n", - "snpe-dlc-graph-prepare --input_dlc distilbert.dlc --input_list tf_raw_list.txt --output_dlc distilbert_float.dlc --use_float_io --set_output_tensors end_logits,start_logits --htp_socs sm8550" - ] - }, - { - "cell_type": "markdown", - "id": "86af6701-8ec8-4f69-bf10-6fe0272596f4", - "metadata": {}, - "source": [ - "# Generating Bertbase Model\n", - "- [Bert Base ](https://huggingface.co/bert-base-uncased) You can Learn More about this model from this link\n", - "- To check more about different use cases of Mobilebert you can use this [link](https://huggingface.co/docs/transformers/model_doc/bert)" - ] - }, - { - "cell_type": "markdown", - "id": "bed7ff7a-c1fd-4ffc-8d0f-fc273d8d6c98", - "metadata": {}, - "source": [ - "### Generating the ONNX Model" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "591c04a2-ad03-4920-ae01-97796d712f4f", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-10-03 10:46:14.040440: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-10-03 10:46:14.116542: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. 
To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-10-03 10:46:14.134480: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-10-03 10:46:14.449560: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:46:14.449599: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:46:14.449602: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Framework not specified. Using pt to export to ONNX.\n", - "Downloading (…)lve/main/config.json: 100%|██████████| 508/508 [00:00<00:00, 171kB/s]\n", - "Downloading model.safetensors: 100%|██████████| 433M/433M [00:03<00:00, 114MB/s] \n", - "Some weights of the model checkpoint at deepset/bert-base-cased-squad2 were not used when initializing BertForQuestionAnswering: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight']\n", - "- This IS expected if you are initializing BertForQuestionAnswering from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", - "- This IS NOT expected if you are initializing BertForQuestionAnswering from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", - "Automatic task detection to question-answering.\n", - "Downloading (…)okenizer_config.json: 100%|██████████| 152/152 [00:00<00:00, 27.5kB/s]\n", - "Downloading (…)solve/main/vocab.txt: 100%|██████████| 213k/213k [00:00<00:00, 543kB/s]\n", - "Downloading (…)cial_tokens_map.json: 100%|██████████| 112/112 [00:00<00:00, 68.4kB/s]\n", - "Using framework PyTorch: 2.0.1+cu117\n", - "Overriding 1 configuration item(s)\n", - "\t- use_cache -> False\n", - "Post-processing the exported models...\n", - "Validating models in subprocesses...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "============= Diagnostic Run torch.onnx.export version 2.0.1+cu117 =============\n", - "verbose: False, log level: Level.ERROR\n", - "======================= 0 NONE 0 NOTE 0 WARNING 0 ERROR ========================\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-10-03 10:46:36.885804: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX_VNNI FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-10-03 10:46:36.962408: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. 
You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", - "2023-10-03 10:46:36.979785: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2023-10-03 10:46:37.302037: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:46:37.302087: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /local/mnt/workspace/snpe/snpe-2.14.0.230828/lib/x86_64-linux-clang\n", - "2023-10-03 10:46:37.302090: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", - "Validating ONNX model bertbase-onnx-2/model.onnx...\n", - "\t-[✓] ONNX model output names match reference model (end_logits, start_logits)\n", - "\t- Validating ONNX Model output \"start_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "\t- Validating ONNX Model output \"end_logits\":\n", - "\t\t-[✓] (2, 16) matches (2, 16)\n", - "\t\t-[✓] all values close (atol: 0.0001)\n", - "The ONNX export succeeded and the exported model was saved at: bertbase-onnx-2\n" - ] - } - ], - "source": [ - "%%bash\n", - "optimum-cli export onnx --model deepset/bert-base-cased-squad2 bertbase-onnx-2/" - ] - }, - { - "cell_type": "markdown", - "id": "0cb359c2-c903-458f-ad73-eaf49f64f439", - "metadata": {}, - "source": [ - "### Generating the DLC Model" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "4ab009ba-bb55-4f6e-8d4d-cc8f72722944", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[1;31mWARNING: The argument `input_shapes` is deprecated. Please use \u001b[0m\n", - "\u001b[1;31m`overwrite_input_shapes` and/or `test_input_shapes` instead. An error will be \u001b[0m\n", - "\u001b[1;31mraised in the future.\u001b[0m\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-10-03 10:47:35,407 - 235 - INFO - Successfully simplified the onnx model in child process\n", - "2023-10-03 10:47:36,773 - 235 - INFO - Successfully receive the simplified onnx model in main process\n", - "2023-10-03 10:47:37,477 - 235 - INFO - Successfully run shape inference in child process\n", - "2023-10-03 10:47:39,019 - 235 - INFO - Successfully receive the inferred model in main process\n", - "2023-10-03 10:47:39,186 - 240 - WARNING - WARNING_CAST_TYPE: Only numerical type cast is supported. 
The op: /bert/Cast will be interpreted at conversion time\n", - "2023-10-03 10:47:41,322 - 235 - INFO - INFO_INITIALIZATION_SUCCESS: \n", - "2023-10-03 10:47:42,763 - 235 - INFO - INFO_CONVERSION_SUCCESS: Conversion completed successfully\n", - "2023-10-03 10:47:43,292 - 235 - INFO - INFO_WRITE_SUCCESS: \n" - ] - } - ], - "source": [ - "%%bash\n", - "snpe-onnx-to-dlc -i bertbase-onnx-2/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o bert_base.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "22ba7189-d6e4-4008-baa8-6985a2dd6eec", - "metadata": {}, - "source": [ - "### Creating the RAW files " - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "26693698-46f1-4083-a141-7f664480a8df", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, BertForQuestionAnswering\n", - "import torch\n", - "\n", - "tokenizer = AutoTokenizer.from_pretrained(\"deepset/bert-base-cased-squad2\")\n", - "\n", - "question_token={}\n", - "\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "76bc5298-bd90-41a5-ad5f-5fac45b00540", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Generating input_list \"small_raw_list.txt\" with 30 iterations\n" - ] - } - ], - "source": [ - "\n", - "total_iter = 30\n", - "print(\"Generating input_list \\\"sma_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "\n", - "with open(\"tf_raw_list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"input_ids:=input_ids/inp_ids_{}.raw attention_mask:=attention_mask/attn_mask_{}.raw token_type_ids:=token_type_ids/token_type_id_{}.raw\\n\".format(i,i,i))\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "daba430f-ebed-4d10-bfc2-c7254c5a4dad", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[INFO] InitializeStderr: DebugLog initialized.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. 
Disabling parallelism to avoid deadlocks...\n", - "To disable this warning, you can either:\n", - "\t- Avoid using `tokenizers` before the fork if possible\n", - "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[INFO] SNPE HTP Offline Prepare: Attempting to create cache for SM8550\n", - "[INFO] Attempting to open dynamically linked lib: libHtpPrepare.so\n", - "[INFO] dlopen libHtpPrepare.so SUCCESS handle 0x18f87a0\n", - "[INFO] Found Interface Provider (v2.8)\n", - "[USER_WARNING] QnnDsp Initializing HtpProvider\n", - "[USER_WARNING] QnnDsp Cost Based unsupported on soc SM8550\n", - "[USER_INFO] FP16 precision enabled for graph with id=0\n", - "[USER_INFO] Offline Prepare VTCM size(MB) selected = 8\n", - "[USER_INFO] Offline Prepare DLBC enablement passed = 0\n", - "[USER_INFO] Cleaning up backend manager resources\n", - "[USER_INFO] Cleaning up Contexts\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] SNPE HTP Offline Prepare: Successfully created cache for SM8550\n", - "[INFO] SNPE HTP Offline Prepare: Saved cached DLC to bert_base_float.dlc\n", - "[USER_INFO] BackendTerminate triggered\n", - "[INFO] DebugLog shutting down.\n" - ] - } - ], - "source": [ - "%%bash\n", - "\n", - "snpe-dlc-graph-prepare --input_dlc bert_base.dlc --input_list tf_raw_list.txt --output_dlc bert_base_float.dlc --set_output_tensors end_logits,start_logits --use_float_io --htp_socs sm8550" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f01fc02b-a5fd-4367-817e-f44ff61506d8", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4611b710-a8e2-47bb-950d-652b88da8abd", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6033aeec-8756-4291-82df-6f0f10f59382", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.17" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/ai-solutions/windows/angular-app-nlp/readme_assets/1.png b/ai-solutions/windows/angular-app-nlp/readme_assets/1.png deleted file mode 100644 index 973b4b69..00000000 Binary files a/ai-solutions/windows/angular-app-nlp/readme_assets/1.png and /dev/null differ diff --git a/ai-solutions/windows/angular-app-nlp/readme_assets/2.png b/ai-solutions/windows/angular-app-nlp/readme_assets/2.png deleted file mode 100644 index 5591adb2..00000000 Binary files a/ai-solutions/windows/angular-app-nlp/readme_assets/2.png and /dev/null differ diff --git a/ai-solutions/windows/angular-app-nlp/readme_assets/3.png b/ai-solutions/windows/angular-app-nlp/readme_assets/3.png deleted file mode 100644 index 5f4ff333..00000000 Binary files a/ai-solutions/windows/angular-app-nlp/readme_assets/3.png and /dev/null differ diff --git a/ai-solutions/windows/angular-app-nlp/readme_assets/4.png b/ai-solutions/windows/angular-app-nlp/readme_assets/4.png deleted file mode 100644 index 260051dc..00000000 Binary files a/ai-solutions/windows/angular-app-nlp/readme_assets/4.png and /dev/null differ diff --git 
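To close out the notebook above: it stops after `snpe-dlc-graph-prepare`, so the prepared DLCs are never actually executed there. A minimal sketch of the next step, assuming the SNPE SDK's `snpe-net-run` tool is on the PATH (the output directory name is an illustrative choice, and flag behavior should be confirmed against your SNPE version):

```bash
# Illustrative only: run one of the prepared DLCs over the RAW inputs listed in
# tf_raw_list.txt. On an x86 host this executes on CPU; on a target device the
# runtime can be switched to the accelerator the cache was prepared for.
snpe-net-run --container alberta_float.dlc \
             --input_list tf_raw_list.txt \
             --output_dir output_alberta
# Each input-list line produces a Result_<n> folder containing start_logits.raw
# and end_logits.raw, which can be read back with numpy.fromfile(..., dtype=np.float32).
```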
a/ai-solutions/windows/angular-app-nlp/readme_assets/QA.gif b/ai-solutions/windows/angular-app-nlp/readme_assets/QA.gif deleted file mode 100644 index c40befc3..00000000 Binary files a/ai-solutions/windows/angular-app-nlp/readme_assets/QA.gif and /dev/null differ diff --git a/ai-solutions/windows/angular-app-nlp/readme_assets/Thumbs.db b/ai-solutions/windows/angular-app-nlp/readme_assets/Thumbs.db deleted file mode 100644 index fb4d173f..00000000 Binary files a/ai-solutions/windows/angular-app-nlp/readme_assets/Thumbs.db and /dev/null differ diff --git a/ai-solutions/windows/electron-app-cv/Demo/DemoWithTileVideo.mp4 b/ai-solutions/windows/electron-app-cv/Demo/DemoWithTileVideo.mp4 deleted file mode 100644 index 4ad11a17..00000000 Binary files a/ai-solutions/windows/electron-app-cv/Demo/DemoWithTileVideo.mp4 and /dev/null differ diff --git a/ai-solutions/windows/electron-app-cv/Docs_assets/FlowChart.png b/ai-solutions/windows/electron-app-cv/Docs_assets/FlowChart.png deleted file mode 100644 index 70ba1cf0..00000000 Binary files a/ai-solutions/windows/electron-app-cv/Docs_assets/FlowChart.png and /dev/null differ diff --git a/ai-solutions/windows/electron-app-cv/README.md b/ai-solutions/windows/electron-app-cv/README.md deleted file mode 100644 index afaa06c3..00000000 --- a/ai-solutions/windows/electron-app-cv/README.md +++ /dev/null @@ -1,137 +0,0 @@ -## Table of Contents - -- [Table of Contents](#table-of-contents) -- [Environment Setup](#environment-setup) -- [Dependencies](#dependencies) -- [Directory Structure](#directory-structure) -- [Prepare Executable - Windows-on-Snapdragon](#prepare-executable---windows-on-snapdragon) - * [Prepare stand-alone executable - Windows](#prepare-stand-alone-executable---windows) - * [Prepare Installer](#prepare-installer) - -## Environment Setup - -Prepare required assets to build the application - -* DLC: Paste it in "C:\Qualcomm\AIStack\AI_Solutions" directory -* SNPE LIBS: paste at any appropriate location and mention the path in CMakeLists.txt -* SNPE INCLUDE: paste at any appropriate location and mention the path in CMakeLists.txt -* ZeroMQ LIBS: paste it in C:\Program Files (x86) and mention the path in CMakeLists.txt -* cppzmq: paste it in C:\Program Files (x86) -* SNPE_CPP_CODE: paste all files present in this directory to SNPE_CPP_CODE folder of this github repo. - -## Dependencies -* Python 3.8 is installed. -* Visual Studio 2022 is installed based on instructions given at https://docs.qualcomm.com/bundle/publicresource/topics/80-62010-1/Install-Visual-Studio-2022.html?product=Windows%20on%20Snapdragon -* Download and install the latest Visual C++ Redistributable. -* Models used in this solution need to be generated, using steps mentioned at https://github.com/quic/ai-stack-models/tree/main/models-for-solutions - - -## Directory Structure -This repo contains 3 directories, which handle different aspects of this project. - -1. Electron app UI: This directory contains the UI code; it starts the UI and connects it to the Flask server. Here the user provides an input image and selects the AI model to use. All this information is sent to Python using an AJAX request. - -2. Python Flask Server : The Electron UI acts as the foreground, and the Flask server works in the background to handle requests from the Electron UI. It takes all information given by the Electron UI, pre-processes the received image, and then gives the processed image to SNPE_CPP_CODE for running the selected model. 
SNPE_CPP_CODE returns the output of the model and then we process the data given by model into human understandable form and return that back to Electron UI for display. - -3. SNPE_CPP_CODE: This works as a service for flask server. This runs the preprocessed image on network and return the output given by model back to Flask Server. - -![Flowchart](Docs_assets/FlowChart.png) - -## Prepare Executable - Windows-on-Snapdragon - -### Prepare stand-alone executable - Windows -* In python_flask_server: - - Python pkg dependencies : - ```bash - pip install empatches flask opencv-python pillow flask_cors zmq pyinstaller waitress torch - ``` - - Create DLC Directory and put DLC(s) under there respective folders at "C:\Qualcomm\AIStack\AI_Solutions\DLC". For example, path of superresolution dlc will be "DLC/superesolution" and ImageEnhancement be "DLC/imageenhancement". Please follow relevant section for generating DLC. <--TODO. - - To start flask server, please run: - ```bash - python server.py - ``` - - It will start server at port : 9081 - - To view webpage on browser, please use this URL : http://localhost:9081 - -* In SNPE_CPP_Code: - - Apply zmq_support.patch to the SNPE SampleCode_Windows present in SNPE sdk. After that please copy all the files in that folder to SNPE_CPP_CODE folder in this github repo. - - For ZeroMQ, clone following gits and use their instructions to build those libs for your system, or you can follow below instructions to build. - - For libzmq: - - ```bash - git clone https://github.com/zeromq/libzmq.git - git reset --hard 9d31965548b5c734f1edc01742c39f984e9cedd8 - cd libzmq - mkdir build - cd build - cmake ../. -G "Visual Studio 17 2022" -D WITH_PERF_TOOL=OFF -D ZMQ_BUILD_TESTS=OFF -D ENABLE_CPACK=OFF -D CMAKE_BUILD_TYPE=Release - ``` - - - Open _ZeroMQ.sln_ in Visual Studio - - In Solution Directory, right click on INSTALL and _build_ solution - - See that the _ZeroMQ_ is installed in C Drive. - - - For Cppzmq: - - ```bash - git clone https://github.com/zeromq/cppzmq.git - git reset --hard 160ac8ec61298508093933a9f53bfedfb6ba0425 - cd cppzmq - mkdir build - cd build - cmake ../. -G "Visual Studio 17 2022" - ``` - - - Open _cppZMQ.sln_ in Visual Studio - - In Solution Directory, right click on INSTALL and _build_ solution - - Confirm that _cppzmq_ is installed in C Drive. - - - - Change following paths in CmakeLists.txt of SNPE_CPP_Code according to your setup: - ```bash - set (SNPE_INCLUDE_DIR "C:/Qualcomm/AIStack/SNPE/2.12.0.230626/include/SNPE") - set (SNPE_LIB_PREFIX "C:/Qualcomm/AIStack/SNPE/2.12.0.230626/lib") - set (ZERO_MQ_PATH "C:/Program Files (x86)/ZeroMQ") - ``` - - Change DLL filename, according to your setup: get_filename_component(ZMQ_DLL_PATH "${ZERO_MQ_PATH}/bin/_libzmq-v143-mt-gd-4_3_6.dll_" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) - - Create a build folder and build files. - ```bash - mkdir build - cd build - cmake ../ -G "Visual Studio 17 2022" -A ARM64 -DCHIPSET=SC8380 - cmake --build ./ --config Release - ``` - - - For running, please go to build/Release folder and run snpe-sample.exe - - * In electron_app_ui: - - Execute - ```bash - npm install - ``` - This will make node modules directory which will contain all necessary npm packages. - - To start UI, please run : - ```bash - npm start - ``` - -### Prepare Installer - -Please execute following commands. These will generate "dist" directory which will contain all your packaged data. 
-```bash -npm install -npm run package -``` - -Note: Make sure that you have resolved all dependencies mentioned in [Prepare stand-alone executable](#prepare-stand-alone-executable) section, like setting SNPE and ZMQ libs, installing python packages etc. - -### Supported Platforms - -This solution is verified on following compute platforms - -- Snapdragon X Elite -- Althorugh not verified, this application, and procedure is applicable to : SC8280XP as well. - -## Demo - -https://github.com/quic/ai-stack-models/assets/121066912/b5ebc51b-083b-4abc-86a7-b1f3a6554d24 diff --git a/ai-solutions/windows/electron-app-cv/SNPE_CPP_Code/zmq_support.patch b/ai-solutions/windows/electron-app-cv/SNPE_CPP_Code/zmq_support.patch deleted file mode 100644 index d141f93c..00000000 --- a/ai-solutions/windows/electron-app-cv/SNPE_CPP_Code/zmq_support.patch +++ /dev/null @@ -1,1165 +0,0 @@ -diff --git a/CMakeLists.txt b/CMakeLists.txt -index f4686c3..e39b176 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -14,6 +14,8 @@ set (APP "snpe-sample") - option(BUILD_WITH_VCRUNTIME "Build the snpe-sample with static vcruntime libraries." OFF) - message("Build snpe-sample with vcruntime: ${BUILD_WITH_VCRUNTIME}") - -+message("CHIPSET= ${CHIPSET}") -+ - set( APP_SOURCES - "main.cpp" - "Util.cpp" -@@ -38,8 +40,9 @@ set( APP_SOURCES - "CreateUserBuffer.hpp" - ) - --set (SNPE_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../../../include/SNPE) --set (SNPE_LIB_PREFIX ../../../../lib) -+set (SNPE_INCLUDE_DIR "C:/Qualcomm/AIStack/SNPE/2.14.1.230828/include/SNPE") -+set (SNPE_LIB_PREFIX "C:/Qualcomm/AIStack/SNPE/2.14.1.230828/lib") -+set (ZERO_MQ_PATH "C:/Program Files (x86)/ZeroMQ") - set (_dtuple_POSTFIX windows-msvc) - - if(CMAKE_GENERATOR_PLATFORM STREQUAL "x64") -@@ -48,8 +51,20 @@ if(CMAKE_GENERATOR_PLATFORM STREQUAL "x64") - get_filename_component(SNPE_IMPLIB_PATH "${SNPE_LIB_PREFIX}/x86_64-${_dtuple_POSTFIX}/SNPE.lib" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) - elseif(CMAKE_GENERATOR_PLATFORM STREQUAL "ARM64") - message("Linking with ARM64 SNPE") -- get_filename_component(SNPE_DLL_PATH "${SNPE_LIB_PREFIX}/aarch64-${_dtuple_POSTFIX}/SNPE.dll" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -- get_filename_component(SNPE_IMPLIB_PATH "${SNPE_LIB_PREFIX}/aarch64-${_dtuple_POSTFIX}/SNPE.lib" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ get_filename_component(SNPE_DLL_PATH "${SNPE_LIB_PREFIX}/arm64x-${_dtuple_POSTFIX}/SNPE.dll" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ get_filename_component(SNPE_IMPLIB_PATH "${SNPE_LIB_PREFIX}/arm64x-${_dtuple_POSTFIX}/SNPE.lib" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ get_filename_component(ZMQ_DLL_PATH "${ZERO_MQ_PATH}/bin/libzmq-v143-mt-gd-4_3_6.dll" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ get_filename_component(SNPE_HTP_PATH "${SNPE_LIB_PREFIX}/arm64x-${_dtuple_POSTFIX}/SnpeHtpPrepare.dll" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ -+ if(CHIPSET STREQUAL "SC8380") -+ message("SC8380 is selected") -+ get_filename_component(SNPE_STUB_PATH "${SNPE_LIB_PREFIX}/arm64x-${_dtuple_POSTFIX}/SnpeHtpV73Stub.dll" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ get_filename_component(SNPE_SKEL_PATH "${SNPE_LIB_PREFIX}/hexagon-v73/unsigned/libSnpeHtpV73Skel.so" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ else() -+ message("Default is selected") -+ get_filename_component(SNPE_STUB_PATH "${SNPE_LIB_PREFIX}/arm64x-${_dtuple_POSTFIX}/SnpeHtpV68Stub.dll" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ get_filename_component(SNPE_SKEL_PATH 
"${SNPE_LIB_PREFIX}/hexagon-v68/unsigned/libSnpeHtpV68Skel.so" REALPATH BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -+ endif() - else() - message(FATAL "Not Supported Platform") - endif() -@@ -61,13 +76,33 @@ set_target_properties(SNPE PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES ${SNPE_INCLUDE_DIR} - ) - -+find_package(cppzmq) - add_executable(${APP} ${APP_SOURCES}) - target_compile_definitions(${APP} PUBLIC -D_CRT_SECURE_NO_WARNINGS) - if(${BUILD_WITH_VCRUNTIME}) - target_compile_options(${APP} PUBLIC /MT) - endif() --target_link_libraries (${APP} SNPE) -+target_link_libraries (${APP} SNPE cppzmq) - add_custom_command(TARGET ${APP} POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_if_different - ${SNPE_DLL_PATH} - $) -+ -+add_custom_command(TARGET ${APP} POST_BUILD -+ COMMAND ${CMAKE_COMMAND} -E copy_if_different -+ ${SNPE_STUB_PATH} -+ $) -+ -+add_custom_command(TARGET ${APP} POST_BUILD -+ COMMAND ${CMAKE_COMMAND} -E copy_if_different -+ ${SNPE_HTP_PATH} -+ $) -+ -+add_custom_command(TARGET ${APP} POST_BUILD -+ COMMAND ${CMAKE_COMMAND} -E copy_if_different -+ ${SNPE_SKEL_PATH} -+ $) -+add_custom_command(TARGET ${APP} POST_BUILD -+ COMMAND ${CMAKE_COMMAND} -E copy_if_different -+ ${ZMQ_DLL_PATH} -+ $) -\ No newline at end of file -diff --git a/CreateUserBuffer.cpp b/CreateUserBuffer.cpp -index fa500e5..d2a5bbe 100644 ---- a/CreateUserBuffer.cpp -+++ b/CreateUserBuffer.cpp -@@ -199,3 +199,131 @@ void createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, - createUserBuffer(inputMap, applicationBuffers, snpeUserBackedBuffers, snpe, name); - } - } -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+void createUserBuffer(zdl::DlSystem::UserBufferMap& userBufferMap, -+ std::unordered_map>& applicationBuffers, -+ std::vector>& snpeUserBackedBuffers, -+ std::unique_ptr& snpe, -+ const char * name, -+ const bool isTfNBuffer, -+ bool staticQuantization, -+ int bitWidth) -+{ -+ // get attributes of buffer by name -+ auto bufferAttributesOpt = snpe->getInputOutputBufferAttributes(name); -+ if (!bufferAttributesOpt) throw std::runtime_error(std::string("Error obtaining attributes for input tensor ") + name); -+ -+ // calculate the size of buffer required by the input tensor -+ const zdl::DlSystem::TensorShape& bufferShape = (*bufferAttributesOpt)->getDims(); -+ -+ size_t bufferElementSize = 0; -+ if (isTfNBuffer) { -+ bufferElementSize = bitWidth / 8; -+ } -+ else { -+ bufferElementSize = sizeof(float); -+ } -+ -+ // Calculate the stride based on buffer strides. -+ // Note: Strides = Number of bytes to advance to the next element in each dimension. -+ // For example, if a float tensor of dimension 2x4x3 is tightly packed in a buffer of 96 bytes, then the strides would be (48,12,4) -+ // Note: Buffer stride is usually known and does not need to be calculated. -+ std::vector strides(bufferShape.rank()); -+ strides[strides.size() - 1] = bufferElementSize; -+ size_t stride = strides[strides.size() - 1]; -+ for (size_t i = bufferShape.rank() - 1; i > 0; i--) -+ { -+ (bufferShape[i] == 0) ? 
stride *= getResizableDim() : stride *= bufferShape[i]; -+ strides[i-1] = stride; -+ } -+ -+ size_t bufSize = calcSizeFromDims(bufferShape.getDimensions(), bufferShape.rank(), bufferElementSize); -+ -+ // set the buffer encoding type -+ std::unique_ptr userBufferEncoding; -+ if (isTfNBuffer) -+ { -+ if((*bufferAttributesOpt)->getEncodingType() == zdl::DlSystem::UserBufferEncoding::ElementType_t::FLOAT && staticQuantization){ -+ std::cerr << "ERROR: Quantization parameters not present in model" << std::endl; -+ std::exit(EXIT_FAILURE); -+ } -+ -+ const zdl::DlSystem::UserBufferEncodingTfN* ubeTfN = dynamic_cast((*bufferAttributesOpt)->getEncoding()); -+ uint64_t stepEquivalentTo0 = ubeTfN->getStepExactly0(); -+ float quantizedStepSize = ubeTfN->getQuantizedStepSize(); -+ userBufferEncoding = std::unique_ptr(new zdl::DlSystem::UserBufferEncodingTfN(stepEquivalentTo0,quantizedStepSize, bitWidth)); -+ } -+ else -+ { -+ userBufferEncoding = std::unique_ptr(new zdl::DlSystem::UserBufferEncodingFloat()); -+ } -+ -+ // create user-backed storage to load input data onto it -+ applicationBuffers.emplace(name, std::vector(bufSize)); -+ -+ // create SNPE user buffer from the user-backed buffer -+ zdl::DlSystem::IUserBufferFactory& ubFactory = zdl::SNPE::SNPEFactory::getUserBufferFactory(); -+ snpeUserBackedBuffers.push_back(ubFactory.createUserBuffer(applicationBuffers.at(name).data(), -+ bufSize, -+ strides, -+ userBufferEncoding.get())); -+ if (snpeUserBackedBuffers.back() == nullptr) -+ { -+ std::cerr << "Error while creating user buffer." << std::endl; -+ } -+ // add the user-backed buffer to the inputMap, which is later on fed to the network for execution -+ userBufferMap.add(name, snpeUserBackedBuffers.back().get()); -+} -+ -+void createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, -+ std::unordered_map>& applicationBuffers, -+ std::vector>& snpeUserBackedBuffers, -+ std::unique_ptr& snpe, -+ bool isTfNBuffer, -+ bool staticQuantization, -+ int bitWidth) -+{ -+ // get input tensor names of the network that need to be populated -+ const auto& inputNamesOpt = snpe->getInputTensorNames(); -+ if (!inputNamesOpt) throw std::runtime_error("Error obtaining input tensor names"); -+ const zdl::DlSystem::StringList& inputNames = *inputNamesOpt; -+ assert(inputNames.size() > 0); -+ -+ // create SNPE user buffers for each application storage buffer -+ for (const char *name : inputNames) { -+ createUserBuffer(inputMap, applicationBuffers, snpeUserBackedBuffers, snpe, name, isTfNBuffer, staticQuantization, bitWidth); -+ } -+} -+ -+void createOutputBufferMap(zdl::DlSystem::UserBufferMap& outputMap, -+ std::unordered_map>& applicationBuffers, -+ std::vector>& snpeUserBackedBuffers, -+ std::unique_ptr& snpe, -+ bool isTfNBuffer, -+ int bitWidth) -+{ -+ // get input tensor names of the network that need to be populated -+ const auto& outputNamesOpt = snpe->getOutputTensorNames(); -+ if (!outputNamesOpt) throw std::runtime_error("Error obtaining output tensor names"); -+ const zdl::DlSystem::StringList& outputNames = *outputNamesOpt; -+ -+ // create SNPE user buffers for each application storage buffer -+ for (const char *name : outputNames) { -+ createUserBuffer(outputMap, applicationBuffers, snpeUserBackedBuffers, snpe, name, isTfNBuffer, false, bitWidth); -+ } -+} -\ No newline at end of file -diff --git a/CreateUserBuffer.hpp b/CreateUserBuffer.hpp -index 138a7c4..4a6cf60 100644 ---- a/CreateUserBuffer.hpp -+++ b/CreateUserBuffer.hpp -@@ -53,3 +53,33 @@ void 
createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, - std::unordered_map& applicationBuffers, - std::vector>& snpeUserBackedBuffers, - std::unique_ptr& snpe); -+ -+ -+ -+ -+ -+void createUserBuffer(zdl::DlSystem::UserBufferMap& userBufferMap, -+ std::unordered_map>& applicationBuffers, -+ std::vector>& snpeUserBackedBuffers, -+ std::unique_ptr& snpe, -+ const char * name, -+ const bool isTfNBuffer, -+ bool staticQuantization, -+ int bitWidth); -+ -+// Create a UserBufferMap of the SNPE network inputs -+void createInputBufferMap(zdl::DlSystem::UserBufferMap& inputMap, -+ std::unordered_map>& applicationBuffers, -+ std::vector>& snpeUserBackedBuffers, -+ std::unique_ptr& snpe, -+ const bool isTfNBuffer, -+ bool staticQuantization, -+ int bitWidth); -+ -+// Create a UserBufferMap of the SNPE network outputs -+void createOutputBufferMap(zdl::DlSystem::UserBufferMap& outputMap, -+ std::unordered_map>& applicationBuffers, -+ std::vector>& snpeUserBackedBuffers, -+ std::unique_ptr& snpe, -+ const bool isTfNBuffer, -+ int bitWidth); -diff --git a/SetBuilderOptions.cpp b/SetBuilderOptions.cpp -index 20bbeca..75284fb 100644 ---- a/SetBuilderOptions.cpp -+++ b/SetBuilderOptions.cpp -@@ -36,6 +36,7 @@ std::unique_ptr setBuilderOptions(std::unique_ptr - #include - #include -@@ -20,6 +21,10 @@ - #include - #include - #include -+#include -+#include -+#include -+#include - - #include "GetOpt.hpp" - #include "CheckRuntime.hpp" -@@ -40,455 +45,419 @@ - /* Windows Modification - * Replace to and refactor the "Process command line arguments" part - */ -- - const int FAILURE = 1; - const int SUCCESS = 0; - --int main(int argc, char** argv) -+enum { UNKNOWN, USERBUFFER_FLOAT, USERBUFFER_TF8, ITENSOR, USERBUFFER_TF16 }; -+enum { CPUBUFFER, GLBUFFER }; -+bool useUserSuppliedBuffers = false; -+int bufferType; -+int bitWidth = 0; -+ -+std::string getCurrentDir() { -+ char buff[MAX_PATH]; -+ GetModuleFileName(NULL, buff, MAX_PATH); -+ std::string::size_type position = std::string(buff).find_last_of("\\/"); -+ return std::string(buff).substr(0, position); -+} -+ -+std::unique_ptr build_network(std::string dlc, static zdl::DlSystem::Runtime_t runtime, bool runtimeSpecified, bool usingInitCaching, std::string bufferTypeStr, std::string userBufferSourceStr, std::string staticQuantizationStr) -+{ -+ -+ static zdl::DlSystem::RuntimeList runtimeList; -+ bool staticQuantization; -+ -+ if (staticQuantizationStr == "true") -+ { -+ staticQuantization = true; -+ } -+ else if (staticQuantizationStr == "false") -+ { -+ staticQuantization = false; -+ } -+ else -+ { -+ std::cout << "\nStatic quantization value is not valid. Please run snpe-sample with the -h flag for more details" -+ << std::endl; -+ return nullptr; -+ } -+ -+ if (runtimeList.empty() == false) -+ { -+ std::cout << "runtimelist not empty" << std::endl; -+ } -+ -+ if (runtimeSpecified) -+ { -+ std::cout << "runtime is specificed" << std::endl; -+ } -+ -+ // Check if given arguments represent valid files -+ std::ifstream dlcFile(dlc); -+ if (!dlcFile) { -+ std::cout << "\nInput list or dlc file not valid. Please ensure that you have provided a valid input list and dlc for processing. Run snpe-sample with the -h flag for more details" << std::endl; -+ return nullptr; -+ } -+ -+ // Check if given buffer type is valid -+ if (bufferTypeStr == "USERBUFFER_FLOAT") -+ { -+ bufferType = USERBUFFER_FLOAT; -+ } -+ else -+ { -+ std::cout << "\nBuffer type is not valid. 
Please run snpe-sample with the -h flag for more details" << std::endl; -+ return nullptr; -+ } -+ -+ //Check if given user buffer source type is valid -+ int userBufferSourceType; -+ -+ // CPUBUFFER / GLBUFFER supported only for USERBUFFER_FLOAT -+ if (bufferType == USERBUFFER_FLOAT) -+ { -+ if (userBufferSourceStr == "CPUBUFFER") -+ { -+ userBufferSourceType = CPUBUFFER; -+ } -+ else if (userBufferSourceStr == "GLBUFFER") -+ { -+ std::cout << "\nGLBUFFER mode is only supported on Android OS" << std::endl; -+ return nullptr; -+ userBufferSourceType = GLBUFFER; -+ } -+ else -+ { -+ std::cout -+ << "\nSource of user buffer type is not valid. Please run snpe-sample with the -h flag for more details" -+ << std::endl; -+ return nullptr; -+ } -+ } -+ -+ if (staticQuantizationStr == "true") -+ { -+ staticQuantization = true; -+ } -+ else if (staticQuantizationStr == "false") -+ { -+ staticQuantization = false; -+ } -+ else -+ { -+ std::cout << "\nStatic quantization value is not valid. Please run snpe-sample with the -h flag for more details" -+ << std::endl; -+ return nullptr; -+ } -+ -+ //Check if both runtimelist and runtime are passed in -+ if (runtimeSpecified && (runtimeList.empty() == false)) -+ { -+ std::cout << "\nInvalid option cannot mix runtime order -l with runtime -r " << std::endl; -+ std::exit(FAILURE); -+ } -+ -+ if (runtimeSpecified) -+ { -+ runtime = checkRuntime(runtime, staticQuantization); -+ std::cout << "runtime is checked " << std::endl; -+ } -+ -+ std::unique_ptr container = loadContainerFromFile(dlc); -+ if (container == nullptr) -+ { -+ std::cerr << "Error while opening the container file." << std::endl; -+ return nullptr; -+ } -+ -+ useUserSuppliedBuffers = (bufferType == USERBUFFER_FLOAT || -+ bufferType == USERBUFFER_TF8 || -+ bufferType == USERBUFFER_TF16); -+ -+ std::unique_ptr snpe; -+ zdl::DlSystem::PlatformConfig platformConfig; -+ -+ std::cout << "\nSettingbuilderoptions..................\n"; -+ snpe = setBuilderOptions(container, runtime, runtimeList, useUserSuppliedBuffers, platformConfig, usingInitCaching); -+ return snpe; -+ -+} -+int main() - { -- enum {UNKNOWN, USERBUFFER_FLOAT, USERBUFFER_TF8, ITENSOR, USERBUFFER_TF16}; -- enum {CPUBUFFER, GLBUFFER}; -- -- // Command line arguments -- static std::string dlc = ""; -- static std::string OutputDir = "./output/"; -- const char* inputFile = ""; -- std::string bufferTypeStr = "ITENSOR"; -- std::string userBufferSourceStr = "CPUBUFFER"; -- std::string staticQuantizationStr = "false"; -- static zdl::DlSystem::Runtime_t runtime = zdl::DlSystem::Runtime_t::CPU; -- static zdl::DlSystem::RuntimeList runtimeList; -- bool runtimeSpecified = false; -- bool execStatus = false; -- bool usingInitCaching = false; -- bool staticQuantization = false; -- -- // Process command line arguments -- int opt = 0; -- enum OPTIONS -- { -- OPT_HELP = 0, -- OPT_CONTAINER = 1, -- OPT_INPUT_LIST = 2, -- OPT_OUTPUT_DIR = 3, -- OPT_USERBUFFER = 4, -- OPT_RUNTIME = 5, -- OPT_RESIZABLE_DIM = 6, -- OPT_INITBLOBSCACHE = 7, -- OPT_RUNTIME_ORDER = 8, -- OPT_STATIC_QUANTIZATION = 9, -- }; -- static struct WinOpt::option long_options[] = { -- {"help", WinOpt::no_argument, NULL, OPT_HELP}, -- {"container", WinOpt::required_argument, NULL, OPT_CONTAINER}, -- {"input_list", WinOpt::required_argument, NULL, OPT_INPUT_LIST}, -- {"output_dir", WinOpt::required_argument, NULL, OPT_OUTPUT_DIR}, -- {"userbuffer", WinOpt::required_argument, NULL, OPT_USERBUFFER}, -- {"runtime", WinOpt::required_argument, NULL, OPT_RUNTIME}, -- {"resizable_dim", 
WinOpt::required_argument, NULL, OPT_RESIZABLE_DIM}, -- {"enable_init_cache", WinOpt::no_argument, NULL, OPT_INITBLOBSCACHE}, -- {"runtime_order", WinOpt::required_argument, NULL, OPT_RUNTIME_ORDER}, -- {"static_quantization", WinOpt::required_argument, NULL, OPT_STATIC_QUANTIZATION}, -- {NULL, 0, NULL, 0 } -- }; -- int long_index = 0; -- while ((opt = WinOpt::GetOptLongOnly(argc, argv, "", long_options, &long_index)) != -1) -- { -- switch (opt) -- { -- case OPT_HELP: -- std::cout -- << "\nDESCRIPTION:\n" -- << "------------\n" -- << "Example application demonstrating how to load and execute a neural network\n" -- << "using the SNPE C++ API.\n" -- << "\n\n" -- << "REQUIRED ARGUMENTS:\n" -- << "-------------------\n" -- << " --container Path to the DL container containing the network.\n" -- << " --input_list Path to a file listing the inputs for the network.\n" -- << " --output_dir Path to directory to store output results.\n" -- << "\n" -- << "OPTIONAL ARGUMENTS:\n" -- << "-------------------\n" -- << " --userbuffer Type of buffers to use [USERBUFFER_FLOAT, USERBUFFER_TF8, ITENSOR, USERBUFFER_TF16] (" << bufferTypeStr << " is default).\n" -- << " --static_quantization Specifies to use static quantization parameters from the model instead of input specific quantization [true, false]. Used in conjunction with USERBUFFER_TF8. \n" -- << " --runtime The runtime to be used [gpu, dsp, aip, cpu] (cpu is default). \n" -- << " --resizable_dim The maximum number that resizable dimensions can grow into. \n" -- << " Used as a hint to create UserBuffers for models with dynamic sized outputs. Should be a positive integer and is not applicable when using ITensor. \n" -- << " --enable_init_cache Enable init caching to accelerate the initialization process of SNPE. Defaults to disable.\n" -- << " --runtime_order Specifies the order of precedence for runtime e.g cpu_float32, dsp_fixed8_tf etc. Valid values are:- \n" -- << " cpu_float32 (Snapdragon CPU) = Data & Math: float 32bit \n" -- << " gpu_float32_16_hybrid (Adreno GPU) = Data: float 16bit Math: float 32bit \n" -- << " dsp_fixed8_tf (Hexagon DSP) = Data & Math: 8bit fixed point Tensorflow style format \n" -- << " gpu_float16 (Adreno GPU) = Data: float 16bit Math: float 16bit \n" -- << " cpu (Snapdragon CPU) = Same as cpu_float32 \n" -- << " gpu (Adreno GPU) = Same as gpu_float32_16_hybrid \n" -- << " dsp (Hexagon DSP) = Same as dsp_fixed8_tf \n" -- << std::endl; -- -- std::exit(SUCCESS); -- case OPT_CONTAINER: -- dlc = WinOpt::optarg; -- break; -- case OPT_INPUT_LIST: -- inputFile = WinOpt::optarg; -- break; -- case OPT_OUTPUT_DIR: -- OutputDir = WinOpt::optarg; -- break; -- case OPT_USERBUFFER: -- bufferTypeStr = WinOpt::optarg; -- break; -- case OPT_RESIZABLE_DIM: -- setResizableDim(atoi(WinOpt::optarg)); -- break; -- case OPT_RUNTIME: -- runtimeSpecified = true; -- if (strcmp(WinOpt::optarg, "gpu") == 0) -- { -- runtime = zdl::DlSystem::Runtime_t::GPU; -- } -- else if (strcmp(WinOpt::optarg, "aip") == 0) -- { -- runtime = zdl::DlSystem::Runtime_t::AIP_FIXED8_TF; -- } -- else if (strcmp(WinOpt::optarg, "dsp") == 0) -- { -- runtime = zdl::DlSystem::Runtime_t::DSP; -- } -- else if (strcmp(WinOpt::optarg, "cpu") == 0) -- { -- runtime = zdl::DlSystem::Runtime_t::CPU; -- } -- else -- { -- std::cerr << "The runtime option provide is not valid. Defaulting to the CPU runtime." 
<< std::endl; -- -- } -- break; -- -- case OPT_RUNTIME_ORDER: -- { -- std::string inputString = WinOpt::optarg; -- //std::cout<<"Input String: "< runtimeStrVector; -- split(runtimeStrVector, inputString, ','); -- -- //Check for dups -- for(auto it = runtimeStrVector.begin(); it != runtimeStrVector.end()-1; it++) -- { -- auto found = std::find(it+1, runtimeStrVector.end(), *it); -- if(found != runtimeStrVector.end()) -- { -- std::cerr << "Error: Invalid values passed to the argument "<< argv[WinOpt::optind-2] << ". Duplicate entries in runtime order" << std::endl; -- std::exit(FAILURE); -- } -- } -- -- runtimeList.clear(); -- for(auto& runtimeStr : runtimeStrVector) -- { -- //std::cout< container = loadContainerFromFile(dlc); -- if (container == nullptr) -- { -- std::cerr << "Error while opening the container file." << std::endl; -- return EXIT_FAILURE; -- } -- -- bool useUserSuppliedBuffers = (bufferType == USERBUFFER_FLOAT || -- bufferType == USERBUFFER_TF8 || -- bufferType == USERBUFFER_TF16); -- -- std::unique_ptr snpe; -- zdl::DlSystem::PlatformConfig platformConfig; -- -- snpe = setBuilderOptions(container, runtime, runtimeList, useUserSuppliedBuffers, platformConfig, usingInitCaching); -- if (snpe == nullptr) -- { -- std::cerr << "Error while building SNPE object." << std::endl; -- return EXIT_FAILURE; -- } -- if (usingInitCaching) -- { -- if (container->save(dlc)) -- { -- std::cout << "Saved container into archive successfully" << std::endl; -- } -- else -- { -- std::cout << "Failed to save container into archive" << std::endl; -- } -- } -- -- // Check the batch size for the container -- // SNPE 1.16.0 (and newer) assumes the first dimension of the tensor shape -- // is the batch size. -- zdl::DlSystem::TensorShape tensorShape; -- tensorShape = snpe->getInputDimensions(); -- size_t batchSize = tensorShape.getDimensions()[0]; -- std::cout << "Batch size for the container is " << batchSize << std::endl; -- -- // Open the input file listing and group input files into batches -- std::vector> inputs = preprocessInput(inputFile, batchSize); -- -- // Load contents of input file batches ino a SNPE tensor or user buffer, -- // user buffer include cpu buffer and OpenGL buffer, -- // execute the network with the input and save each of the returned output to a file. -- if(useUserSuppliedBuffers) -- { -- // SNPE allows its input and output buffers that are fed to the network -- // to come from user-backed buffers. First, SNPE buffers are created from -- // user-backed storage. These SNPE buffers are then supplied to the network -- // and the results are stored in user-backed output buffers. This allows for -- // reusing the same buffers for multiple inputs and outputs. 
-- zdl::DlSystem::UserBufferMap inputMap, outputMap; -- std::vector > snpeUserBackedInputBuffers, snpeUserBackedOutputBuffers; -- std::unordered_map > applicationOutputBuffers; -- -- if( bufferType == USERBUFFER_TF8 || bufferType == USERBUFFER_TF16 ) -- { -- createOutputBufferMap(outputMap, applicationOutputBuffers, snpeUserBackedOutputBuffers, snpe, true, bitWidth); -- -- std::unordered_map > applicationInputBuffers; -- createInputBufferMap(inputMap, applicationInputBuffers, snpeUserBackedInputBuffers, snpe, true, staticQuantization, bitWidth); -- -- for( size_t i = 0; i < inputs.size(); i++ ) -- { -- // Load input user buffer(s) with values from file(s) -- if( batchSize > 1 ) -- std::cout << "Batch " << i << ":" << std::endl; -- if(!loadInputUserBufferTfN(applicationInputBuffers, snpe, inputs[i], inputMap, staticQuantization, bitWidth)) -- { -- return EXIT_FAILURE; -- } -- // Execute the input buffer map on the model with SNPE -- execStatus = snpe->execute(inputMap, outputMap); -- // Save the execution results only if successful -- if (execStatus == true) -- { -- if(!saveOutput(outputMap, applicationOutputBuffers, OutputDir, i * batchSize, batchSize, true, bitWidth)) -- { -- return EXIT_FAILURE; -- } -- -- } -- else -- { -- std::cerr << "Error while executing the network." << std::endl; -- } -- } -- } -- else if( bufferType == USERBUFFER_FLOAT ) -- { -- createOutputBufferMap(outputMap, applicationOutputBuffers, snpeUserBackedOutputBuffers, snpe, false, bitWidth); -- -- if( userBufferSourceType == CPUBUFFER ) -- { -- std::unordered_map > applicationInputBuffers; -- createInputBufferMap(inputMap, applicationInputBuffers, snpeUserBackedInputBuffers, snpe, false, false, bitWidth); -- -- for( size_t i = 0; i < inputs.size(); i++ ) -- { -- // Load input user buffer(s) with values from file(s) -- if( batchSize > 1 ) -- std::cout << "Batch " << i << ":" << std::endl; -- if(!loadInputUserBufferFloat(applicationInputBuffers, snpe, inputs[i])) -- { -- return EXIT_FAILURE; -- } -- // Execute the input buffer map on the model with SNPE -- execStatus = snpe->execute(inputMap, outputMap); -- // Save the execution results only if successful -- if (execStatus == true) -- { -- if(!saveOutput(outputMap, applicationOutputBuffers, OutputDir, i * batchSize, batchSize, false, bitWidth)) -- { -- return EXIT_FAILURE; -- } -- } -- else -- { -- std::cerr << "Error while executing the network." 
<< std::endl; -- } -- } -- } -- } -- } -- else if(bufferType == ITENSOR) -- { -- // A tensor map for SNPE execution outputs -- zdl::DlSystem::TensorMap outputTensorMap; -- //Get input names and number -- const auto& inputTensorNamesRef = snpe->getInputTensorNames(); -- if (!inputTensorNamesRef) throw std::runtime_error("Error obtaining Input tensor names"); -- const auto &inputTensorNames = *inputTensorNamesRef; -- -- for (size_t i = 0; i < inputs.size(); i++) { -- // Load input/output buffers with ITensor -- if(batchSize > 1) -- std::cout << "Batch " << i << ":" << std::endl; -- if (inputTensorNames.size() == 1) -- { -- // Load input/output buffers with ITensor -- std::unique_ptr inputTensor = loadInputTensor(snpe, inputs[i], inputTensorNames); -- if(!inputTensor) -- { -- return EXIT_FAILURE; -- } -- // Execute the input tensor on the model with SNPE -- execStatus = snpe->execute(inputTensor.get(), outputTensorMap); -- } -- else -- { -- std::vector> inputTensors(inputTensorNames.size()); -- zdl::DlSystem::TensorMap inputTensorMap; -- bool inputLoadStatus = false; -- // Load input/output buffers with TensorMap -- std::tie(inputTensorMap, inputLoadStatus) = loadMultipleInput(snpe, inputs[i], inputTensorNames, inputTensors); -- if(!inputLoadStatus) -- { -- return EXIT_FAILURE; -- } -- // Execute the multiple input tensorMap on the model with SNPE -- execStatus = snpe->execute(inputTensorMap, outputTensorMap); -- } -- // Save the execution results if execution successful -- if (execStatus == true) -- { -- if(!saveOutput(outputTensorMap, OutputDir, i * batchSize, batchSize)) -- { -- return EXIT_FAILURE; -- } -- } -- else -- { -- std::cerr << "Error while executing the network." << std::endl; -- } -- } -- } -- // Freeing of snpe object -- snpe.reset(); -- return SUCCESS; -+ // Initialize a ZeroMQ context -+ zmq::context_t context(1); -+ -+ // Create a REP (reply) socket -+ zmq::socket_t socket(context, ZMQ_REP); -+ // zmq::socket_t socket(context, ZMQ_PULL); -+ -+ // Bind the socket to a TCP address -+ std::string serverAddress = "tcp://*:5555"; // Replace with your desired address -+ socket.bind(serverAddress.c_str()); -+ -+ -+ enum { UNKNOWN, USERBUFFER_FLOAT, USERBUFFER_TF8, ITENSOR, USERBUFFER_TF16 }; -+ enum { CPUBUFFER, GLBUFFER }; -+ -+ std::unique_ptr snpe; -+ -+ // zmq::message_t first_msg; //TODO: 5 is hardcoded -+ zmq::message_t infer_time_reply; -+ //struct timeval start_time, end_time; -+ //float seconds, useconds, milli_time; -+ useUserSuppliedBuffers = true; //TODO: hardcoded but take it from builder functions -+ const char* in_name; -+ const char* out_name; -+ size_t batchSize; -+ zdl::DlSystem::StringList outputNames; -+ zdl::DlSystem::StringList inputNames; -+ zdl::DlSystem::UserBufferMap inputMap, outputMap; -+ std::vector > snpeUserBackedInputBuffers, snpeUserBackedOutputBuffers; -+ std::unordered_map > applicationOutputBuffers; -+ std::unordered_map > applicationOutputBuffersFloat; -+ -+ std::unordered_map > applicationInputBuffers; -+ std::unordered_map > applicationInputBuffersFloat; -+ -+ while (true) -+ { -+ try{ -+ std::vector msgsfromserver; -+ msgsfromserver.clear(); -+ //Waiting for first msg from client; -+ std::cout << "Waiting for first msg from socket:" << std::endl; -+ const auto ret = zmq::recv_multipart(socket, std::back_inserter(msgsfromserver)); -+ -+ //std::string receivedData_function = msgsfromserver[0].to_string(); -+ //std::cout<<"#################"<DSP -+ -+ bool runtimeSpecified = true; //shubham change false -> true -+ bool usingInitCaching = false; 
-+ -+ if (runtime_socket.compare("GPU") == 0) -+ { -+ runtime = zdl::DlSystem::Runtime_t::GPU; -+ } -+ else if (runtime_socket.compare("DSP") == 0) -+ { -+ runtime = zdl::DlSystem::Runtime_t::DSP; -+ } -+ else if (runtime_socket.compare("CPU") == 0) -+ { -+ runtime = zdl::DlSystem::Runtime_t::CPU; -+ } -+ else -+ { -+ std::cerr << "\nCorrect Runtime not specified, choosing default(CPU)" << std::endl; -+ runtime = zdl::DlSystem::Runtime_t::CPU; -+ } -+ // std::cout<<"\nmsg2.size(): "<getInputDimensions(); -+ batchSize = tensorShape.getDimensions()[0]; -+ -+ -+ outputNames = snpe->getOutputTensorNames(); -+ out_name = outputNames.at(0); //Only one output is present -+ std::cout << "shubham EXP: out_name: " << out_name; -+ -+ //const auto& inputNamesOpt = snpe->getInputTensorNames(); -+ inputNames = snpe->getInputTensorNames(); -+ in_name = inputNames.at(0); //Only one input is present -+ -+ -+ build_status_str = "build is successful"; -+ // goto startwaiting; -+ } -+ -+ std::cout << build_status_str << std::endl; -+ zmq::message_t message(build_status_str.size()); -+ memcpy(message.data(), build_status_str.c_str(), build_status_str.size()); -+ socket.send(message); -+ -+ } -+ else if (msgsfromserver[0].to_string() == "infer") -+ { -+ //make inference -+ bool execStatus = false; -+ -+ if (snpe == nullptr) -+ { -+ std::cerr << "Error while building SNPE object." << std::endl; -+ std::string build_status_str = "Error while building SNPE object."; -+ zmq::message_t message(build_status_str.size()); -+ memcpy(message.data(), build_status_str.c_str(), build_status_str.size()); -+ socket.send(message); -+ continue; -+ } -+ -+ std::cout << "\nMAKING INFERENCE" << std::endl; -+ -+ -+ -+ -+ // Check the batch size for the container -+ // SNPE 1.16.0 (and newer) assumes the first dimension of the tensor shape -+ // is the batch size. 
-+ -+ // std::cout << "Batch size for the container is " << batchSize << std::endl; -+ -+ // Open the input file listing and group input files into batches -+ // std::vector> inputs = preprocessInput(inputFile, batchSize); -+ -+ try { -+ // std::cout << "Waiting for socket:"< receivedData(static_cast(msgsfromserver[1].data()), static_cast(msgsfromserver[1].data()) + msgsfromserver[1].size() / sizeof(float)); -+ std::vector receivedData(static_cast(msgsfromserver[1].data()), static_cast(msgsfromserver[1].data()) + msgsfromserver[1].size()); -+ -+ // std::cout << "\n Size:" << messages.size() << "data: " << messages.data()< 1) -+ std::cout << "Batch " << i << ":" << std::endl; -+ -+ auto start = std::chrono::high_resolution_clock::now(); -+ -+ // Execute the input buffer map on the model with SNPE -+ execStatus = snpe->execute(inputMap, outputMap); -+ auto end = std::chrono::high_resolution_clock::now(); -+ auto infer_time = std::chrono::duration_cast(end - start).count(); -+ std::cout << "\nExecStatius: " << execStatus << std::endl; -+ -+ -+ std::cout << "Inference time:" << infer_time << std::endl; -+ -+ std::string mill_str = std::to_string(infer_time); -+ infer_time_reply.rebuild(mill_str.size()); -+ memcpy((void *) infer_time_reply.data(), (mill_str.c_str()), mill_str.size()); -+ -+ // Prepare the vectors(execution result) to be sent -+ //std::vector vector; // = applicationOutputBuffersFloat.at(out_name); -+ std::vector vector; // = applicationOutputBuffers.at(out_name); -+ start = std::chrono::high_resolution_clock::now(); -+ for (const char* outputName : outputNames) -+ { -+ std::cout << "\nOutputNames: " << outputName << std::endl; -+ //vector.insert(vector.end(), applicationOutputBuffersFloat.at(outputName).begin(), applicationOutputBuffersFloat.at(outputName).end()); -+ vector.insert(vector.end(), applicationOutputBuffers.at(outputName).begin(), applicationOutputBuffers.at(outputName).end()); -+ -+ } -+ end = std::chrono::high_resolution_clock::now(); -+ std::cout << "\nMerge time:" << std::chrono::duration_cast(end - start).count() << std::endl; -+ -+ // Send the result -+ if (execStatus) -+ { -+ // std::cout << "execStatus is true"< - - - - - - - - - - - - - - - - AI Solutions - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - -
-
-

AI Solutions

-
Select Use Case to see relevant solutions across platforms
- -
-
- -
-
-
- Mobirise Website Builder -
-
-
- Super Resolution
-
-

- Upscale image with AI

- -
-
-
-
-
- Mobirise Website Builder -
-
-
- Low Light Enhancement
-
-

Enhance Low Light image with AI

- -
-
-
-
-
- Mobirise Website Builder -
-
-
- Object Detection
-
-

Multi-Class Object Detection on real-time camera feed

- -
-
-
-
-
- Mobirise Website Builder -
-
-
- Image Segmentation
-
-

Multi-Class Image Segmentation on real-time camera feed

- -
-
-
-
-
-
- - - - - \ No newline at end of file diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/Sample1_lowlight.jpg b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/Sample1_lowlight.jpg deleted file mode 100644 index 583f4dbd..00000000 Binary files a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/Sample1_lowlight.jpg and /dev/null differ diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/Sample1_sr.jpg b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/Sample1_sr.jpg deleted file mode 100644 index f0ef1300..00000000 Binary files a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/Sample1_sr.jpg and /dev/null differ diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/Sample2_lowlight.jpg b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/Sample2_lowlight.jpg deleted file mode 100644 index 7fae3ae6..00000000 Binary files a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/Sample2_lowlight.jpg and /dev/null differ diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/Sample2_sr.jpg b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/Sample2_sr.jpg deleted file mode 100644 index 962b02c0..00000000 Binary files a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/Sample2_sr.jpg and /dev/null differ diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/animatecss/animate.css b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/animatecss/animate.css deleted file mode 100644 index 9eb921a4..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/animatecss/animate.css +++ /dev/null @@ -1,4074 +0,0 @@ - -@charset "UTF-8"; -/*! - * animate.css - https://animate.style/ - * Version - 4.1.0 - * Licensed under the MIT license - http://opensource.org/licenses/MIT - * - * Copyright (c) 2020 Animate.css - */ -:root { - --animate-duration: 0.4s; - --animate-delay: 0.1s; - --animate-repeat: 1; -} -.animate__animated { - -webkit-animation-duration: var(--animate-duration)s; - animation-duration: var(--animate-duration); - -webkit-animation-duration: var(--animate-duration); - animation-duration: var(--animate-duration); - -webkit-animation-fill-mode: both; - animation-fill-mode: both; -} -.animate__animated.animate__infinite { - -webkit-animation-iteration-count: infinite; - animation-iteration-count: infinite; -} -.animate__animated.animate__repeat-1 { - -webkit-animation-iteration-count: 1; - animation-iteration-count: 1; - -webkit-animation-iteration-count: var(--animate-repeat); - animation-iteration-count: var(--animate-repeat); -} -.animate__animated.animate__repeat-2 { - -webkit-animation-iteration-count: calc(1 * 2); - animation-iteration-count: calc(1 * 2); - -webkit-animation-iteration-count: calc(var(--animate-repeat) * 2); - animation-iteration-count: calc(var(--animate-repeat) * 2); -} -.animate__animated.animate__repeat-3 { - -webkit-animation-iteration-count: calc(1 * 3); - animation-iteration-count: calc(1 * 3); - -webkit-animation-iteration-count: calc(var(--animate-repeat) * 3); - animation-iteration-count: calc(var(--animate-repeat) * 3); -} -.animate__animated.animate__delay-1s { - -webkit-animation-delay: 1s; - animation-delay: 1s; - -webkit-animation-delay: var(--animate-delay); - animation-delay: var(--animate-delay); -} -.animate__animated.animate__delay-2s { - -webkit-animation-delay: calc(1s * 2); - animation-delay: calc(1s * 2); - -webkit-animation-delay: 
calc(var(--animate-delay) * 2); - animation-delay: calc(var(--animate-delay) * 2); -} -.animate__animated.animate__delay-3s { - -webkit-animation-delay: calc(1s * 3); - animation-delay: calc(1s * 3); - -webkit-animation-delay: calc(var(--animate-delay) * 3); - animation-delay: calc(var(--animate-delay) * 3); -} -.animate__animated.animate__delay-4s { - -webkit-animation-delay: calc(1s * 4); - animation-delay: calc(1s * 4); - -webkit-animation-delay: calc(var(--animate-delay) * 4); - animation-delay: calc(var(--animate-delay) * 4); -} -.animate__animated.animate__delay-5s { - -webkit-animation-delay: calc(1s * 5); - animation-delay: calc(1s * 5); - -webkit-animation-delay: calc(var(--animate-delay) * 5); - animation-delay: calc(var(--animate-delay) * 5); -} -.animate__animated.animate__faster { - -webkit-animation-duration: calc(1s / 2); - animation-duration: calc(1s / 2); - -webkit-animation-duration: calc(var(--animate-duration) / 2); - animation-duration: calc(var(--animate-duration) / 2); -} -.animate__animated.animate__fast { - -webkit-animation-duration: calc(1s * 0.8); - animation-duration: calc(1s * 0.8); - -webkit-animation-duration: calc(var(--animate-duration) * 0.8); - animation-duration: calc(var(--animate-duration) * 0.8); -} -.animate__animated.animate__slow { - -webkit-animation-duration: calc(1s * 2); - animation-duration: calc(1s * 2); - -webkit-animation-duration: calc(var(--animate-duration) * 2); - animation-duration: calc(var(--animate-duration) * 2); -} -.animate__animated.animate__slower { - -webkit-animation-duration: calc(1s * 3); - animation-duration: calc(1s * 3); - -webkit-animation-duration: calc(var(--animate-duration) * 3); - animation-duration: calc(var(--animate-duration) * 3); -} -@media print, (prefers-reduced-motion: reduce) { - .animate__animated { - -webkit-animation-duration: 1ms !important; - animation-duration: 1ms !important; - -webkit-transition-duration: 1ms !important; - transition-duration: 1ms !important; - -webkit-animation-iteration-count: 1 !important; - animation-iteration-count: 1 !important; - } - - .animate__animated[class*='Out'] { - opacity: 0; - } -} -/* Attention seekers */ -@-webkit-keyframes bounce { - from, - 20%, - 53%, - to { - -webkit-animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - 40%, - 43% { - -webkit-animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06); - animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06); - -webkit-transform: translate3d(0, -30px, 0) scaleY(1.1); - transform: translate3d(0, -30px, 0) scaleY(1.1); - } - - 70% { - -webkit-animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06); - animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06); - -webkit-transform: translate3d(0, -15px, 0) scaleY(1.05); - transform: translate3d(0, -15px, 0) scaleY(1.05); - } - - 80% { - -webkit-transition-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - transition-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - -webkit-transform: translate3d(0, 0, 0) scaleY(0.95); - transform: translate3d(0, 0, 0) scaleY(0.95); - } - - 90% { - -webkit-transform: translate3d(0, -4px, 0) scaleY(1.02); - transform: translate3d(0, -4px, 0) scaleY(1.02); - } -} -@keyframes bounce { - from, - 20%, - 53%, - to { - -webkit-animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - animation-timing-function: 
cubic-bezier(0.215, 0.61, 0.355, 1); - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - 40%, - 43% { - -webkit-animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06); - animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06); - -webkit-transform: translate3d(0, -30px, 0) scaleY(1.1); - transform: translate3d(0, -30px, 0) scaleY(1.1); - } - - 70% { - -webkit-animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06); - animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06); - -webkit-transform: translate3d(0, -15px, 0) scaleY(1.05); - transform: translate3d(0, -15px, 0) scaleY(1.05); - } - - 80% { - -webkit-transition-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - transition-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - -webkit-transform: translate3d(0, 0, 0) scaleY(0.95); - transform: translate3d(0, 0, 0) scaleY(0.95); - } - - 90% { - -webkit-transform: translate3d(0, -4px, 0) scaleY(1.02); - transform: translate3d(0, -4px, 0) scaleY(1.02); - } -} -.animate__bounce { - -webkit-animation-name: bounce; - animation-name: bounce; - -webkit-transform-origin: center bottom; - transform-origin: center bottom; -} -@-webkit-keyframes flash { - from, - 50%, - to { - opacity: 1; - } - - 25%, - 75% { - opacity: 0; - } -} -@keyframes flash { - from, - 50%, - to { - opacity: 1; - } - - 25%, - 75% { - opacity: 0; - } -} -.animate__flash { - -webkit-animation-name: flash; - animation-name: flash; -} -/* originally authored by Nick Pettit - https://github.com/nickpettit/glide */ -@-webkit-keyframes pulse { - from { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } - - 50% { - -webkit-transform: scale3d(1.05, 1.05, 1.05); - transform: scale3d(1.05, 1.05, 1.05); - } - - to { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } -} -@keyframes pulse { - from { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } - - 50% { - -webkit-transform: scale3d(1.05, 1.05, 1.05); - transform: scale3d(1.05, 1.05, 1.05); - } - - to { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } -} -.animate__pulse { - -webkit-animation-name: pulse; - animation-name: pulse; - -webkit-animation-timing-function: ease-in-out; - animation-timing-function: ease-in-out; -} -@-webkit-keyframes rubberBand { - from { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } - - 30% { - -webkit-transform: scale3d(1.25, 0.75, 1); - transform: scale3d(1.25, 0.75, 1); - } - - 40% { - -webkit-transform: scale3d(0.75, 1.25, 1); - transform: scale3d(0.75, 1.25, 1); - } - - 50% { - -webkit-transform: scale3d(1.15, 0.85, 1); - transform: scale3d(1.15, 0.85, 1); - } - - 65% { - -webkit-transform: scale3d(0.95, 1.05, 1); - transform: scale3d(0.95, 1.05, 1); - } - - 75% { - -webkit-transform: scale3d(1.05, 0.95, 1); - transform: scale3d(1.05, 0.95, 1); - } - - to { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } -} -@keyframes rubberBand { - from { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } - - 30% { - -webkit-transform: scale3d(1.25, 0.75, 1); - transform: scale3d(1.25, 0.75, 1); - } - - 40% { - -webkit-transform: scale3d(0.75, 1.25, 1); - transform: scale3d(0.75, 1.25, 1); - } - - 50% { - -webkit-transform: scale3d(1.15, 0.85, 1); - transform: scale3d(1.15, 0.85, 1); - } - - 65% { - -webkit-transform: scale3d(0.95, 1.05, 1); - transform: scale3d(0.95, 1.05, 1); - } - - 75% { - -webkit-transform: 
scale3d(1.05, 0.95, 1); - transform: scale3d(1.05, 0.95, 1); - } - - to { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } -} -.animate__rubberBand { - -webkit-animation-name: rubberBand; - animation-name: rubberBand; -} -@-webkit-keyframes shakeX { - from, - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - 10%, - 30%, - 50%, - 70%, - 90% { - -webkit-transform: translate3d(-10px, 0, 0); - transform: translate3d(-10px, 0, 0); - } - - 20%, - 40%, - 60%, - 80% { - -webkit-transform: translate3d(10px, 0, 0); - transform: translate3d(10px, 0, 0); - } -} -@keyframes shakeX { - from, - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - 10%, - 30%, - 50%, - 70%, - 90% { - -webkit-transform: translate3d(-10px, 0, 0); - transform: translate3d(-10px, 0, 0); - } - - 20%, - 40%, - 60%, - 80% { - -webkit-transform: translate3d(10px, 0, 0); - transform: translate3d(10px, 0, 0); - } -} -.animate__shakeX { - -webkit-animation-name: shakeX; - animation-name: shakeX; -} -@-webkit-keyframes shakeY { - from, - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - 10%, - 30%, - 50%, - 70%, - 90% { - -webkit-transform: translate3d(0, -10px, 0); - transform: translate3d(0, -10px, 0); - } - - 20%, - 40%, - 60%, - 80% { - -webkit-transform: translate3d(0, 10px, 0); - transform: translate3d(0, 10px, 0); - } -} -@keyframes shakeY { - from, - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - 10%, - 30%, - 50%, - 70%, - 90% { - -webkit-transform: translate3d(0, -10px, 0); - transform: translate3d(0, -10px, 0); - } - - 20%, - 40%, - 60%, - 80% { - -webkit-transform: translate3d(0, 10px, 0); - transform: translate3d(0, 10px, 0); - } -} -.animate__shakeY { - -webkit-animation-name: shakeY; - animation-name: shakeY; -} -@-webkit-keyframes headShake { - 0% { - -webkit-transform: translateX(0); - transform: translateX(0); - } - - 6.5% { - -webkit-transform: translateX(-6px) rotateY(-9deg); - transform: translateX(-6px) rotateY(-9deg); - } - - 18.5% { - -webkit-transform: translateX(5px) rotateY(7deg); - transform: translateX(5px) rotateY(7deg); - } - - 31.5% { - -webkit-transform: translateX(-3px) rotateY(-5deg); - transform: translateX(-3px) rotateY(-5deg); - } - - 43.5% { - -webkit-transform: translateX(2px) rotateY(3deg); - transform: translateX(2px) rotateY(3deg); - } - - 50% { - -webkit-transform: translateX(0); - transform: translateX(0); - } -} -@keyframes headShake { - 0% { - -webkit-transform: translateX(0); - transform: translateX(0); - } - - 6.5% { - -webkit-transform: translateX(-6px) rotateY(-9deg); - transform: translateX(-6px) rotateY(-9deg); - } - - 18.5% { - -webkit-transform: translateX(5px) rotateY(7deg); - transform: translateX(5px) rotateY(7deg); - } - - 31.5% { - -webkit-transform: translateX(-3px) rotateY(-5deg); - transform: translateX(-3px) rotateY(-5deg); - } - - 43.5% { - -webkit-transform: translateX(2px) rotateY(3deg); - transform: translateX(2px) rotateY(3deg); - } - - 50% { - -webkit-transform: translateX(0); - transform: translateX(0); - } -} -.animate__headShake { - -webkit-animation-timing-function: ease-in-out; - animation-timing-function: ease-in-out; - -webkit-animation-name: headShake; - animation-name: headShake; -} -@-webkit-keyframes swing { - 20% { - -webkit-transform: rotate3d(0, 0, 1, 15deg); - transform: rotate3d(0, 0, 1, 15deg); - } - - 40% { - -webkit-transform: rotate3d(0, 0, 1, -10deg); - 
transform: rotate3d(0, 0, 1, -10deg); - } - - 60% { - -webkit-transform: rotate3d(0, 0, 1, 5deg); - transform: rotate3d(0, 0, 1, 5deg); - } - - 80% { - -webkit-transform: rotate3d(0, 0, 1, -5deg); - transform: rotate3d(0, 0, 1, -5deg); - } - - to { - -webkit-transform: rotate3d(0, 0, 1, 0deg); - transform: rotate3d(0, 0, 1, 0deg); - } -} -@keyframes swing { - 20% { - -webkit-transform: rotate3d(0, 0, 1, 15deg); - transform: rotate3d(0, 0, 1, 15deg); - } - - 40% { - -webkit-transform: rotate3d(0, 0, 1, -10deg); - transform: rotate3d(0, 0, 1, -10deg); - } - - 60% { - -webkit-transform: rotate3d(0, 0, 1, 5deg); - transform: rotate3d(0, 0, 1, 5deg); - } - - 80% { - -webkit-transform: rotate3d(0, 0, 1, -5deg); - transform: rotate3d(0, 0, 1, -5deg); - } - - to { - -webkit-transform: rotate3d(0, 0, 1, 0deg); - transform: rotate3d(0, 0, 1, 0deg); - } -} -.animate__swing { - -webkit-transform-origin: top center; - transform-origin: top center; - -webkit-animation-name: swing; - animation-name: swing; -} -@-webkit-keyframes tada { - from { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } - - 10%, - 20% { - -webkit-transform: scale3d(0.9, 0.9, 0.9) rotate3d(0, 0, 1, -3deg); - transform: scale3d(0.9, 0.9, 0.9) rotate3d(0, 0, 1, -3deg); - } - - 30%, - 50%, - 70%, - 90% { - -webkit-transform: scale3d(1.1, 1.1, 1.1) rotate3d(0, 0, 1, 3deg); - transform: scale3d(1.1, 1.1, 1.1) rotate3d(0, 0, 1, 3deg); - } - - 40%, - 60%, - 80% { - -webkit-transform: scale3d(1.1, 1.1, 1.1) rotate3d(0, 0, 1, -3deg); - transform: scale3d(1.1, 1.1, 1.1) rotate3d(0, 0, 1, -3deg); - } - - to { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } -} -@keyframes tada { - from { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } - - 10%, - 20% { - -webkit-transform: scale3d(0.9, 0.9, 0.9) rotate3d(0, 0, 1, -3deg); - transform: scale3d(0.9, 0.9, 0.9) rotate3d(0, 0, 1, -3deg); - } - - 30%, - 50%, - 70%, - 90% { - -webkit-transform: scale3d(1.1, 1.1, 1.1) rotate3d(0, 0, 1, 3deg); - transform: scale3d(1.1, 1.1, 1.1) rotate3d(0, 0, 1, 3deg); - } - - 40%, - 60%, - 80% { - -webkit-transform: scale3d(1.1, 1.1, 1.1) rotate3d(0, 0, 1, -3deg); - transform: scale3d(1.1, 1.1, 1.1) rotate3d(0, 0, 1, -3deg); - } - - to { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } -} -.animate__tada { - -webkit-animation-name: tada; - animation-name: tada; -} -/* originally authored by Nick Pettit - https://github.com/nickpettit/glide */ -@-webkit-keyframes wobble { - from { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - 15% { - -webkit-transform: translate3d(-25%, 0, 0) rotate3d(0, 0, 1, -5deg); - transform: translate3d(-25%, 0, 0) rotate3d(0, 0, 1, -5deg); - } - - 30% { - -webkit-transform: translate3d(20%, 0, 0) rotate3d(0, 0, 1, 3deg); - transform: translate3d(20%, 0, 0) rotate3d(0, 0, 1, 3deg); - } - - 45% { - -webkit-transform: translate3d(-15%, 0, 0) rotate3d(0, 0, 1, -3deg); - transform: translate3d(-15%, 0, 0) rotate3d(0, 0, 1, -3deg); - } - - 60% { - -webkit-transform: translate3d(10%, 0, 0) rotate3d(0, 0, 1, 2deg); - transform: translate3d(10%, 0, 0) rotate3d(0, 0, 1, 2deg); - } - - 75% { - -webkit-transform: translate3d(-5%, 0, 0) rotate3d(0, 0, 1, -1deg); - transform: translate3d(-5%, 0, 0) rotate3d(0, 0, 1, -1deg); - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes wobble { - from { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 
0); - } - - 15% { - -webkit-transform: translate3d(-25%, 0, 0) rotate3d(0, 0, 1, -5deg); - transform: translate3d(-25%, 0, 0) rotate3d(0, 0, 1, -5deg); - } - - 30% { - -webkit-transform: translate3d(20%, 0, 0) rotate3d(0, 0, 1, 3deg); - transform: translate3d(20%, 0, 0) rotate3d(0, 0, 1, 3deg); - } - - 45% { - -webkit-transform: translate3d(-15%, 0, 0) rotate3d(0, 0, 1, -3deg); - transform: translate3d(-15%, 0, 0) rotate3d(0, 0, 1, -3deg); - } - - 60% { - -webkit-transform: translate3d(10%, 0, 0) rotate3d(0, 0, 1, 2deg); - transform: translate3d(10%, 0, 0) rotate3d(0, 0, 1, 2deg); - } - - 75% { - -webkit-transform: translate3d(-5%, 0, 0) rotate3d(0, 0, 1, -1deg); - transform: translate3d(-5%, 0, 0) rotate3d(0, 0, 1, -1deg); - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__wobble { - -webkit-animation-name: wobble; - animation-name: wobble; -} -@-webkit-keyframes jello { - from, - 11.1%, - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - 22.2% { - -webkit-transform: skewX(-12.5deg) skewY(-12.5deg); - transform: skewX(-12.5deg) skewY(-12.5deg); - } - - 33.3% { - -webkit-transform: skewX(6.25deg) skewY(6.25deg); - transform: skewX(6.25deg) skewY(6.25deg); - } - - 44.4% { - -webkit-transform: skewX(-3.125deg) skewY(-3.125deg); - transform: skewX(-3.125deg) skewY(-3.125deg); - } - - 55.5% { - -webkit-transform: skewX(1.5625deg) skewY(1.5625deg); - transform: skewX(1.5625deg) skewY(1.5625deg); - } - - 66.6% { - -webkit-transform: skewX(-0.78125deg) skewY(-0.78125deg); - transform: skewX(-0.78125deg) skewY(-0.78125deg); - } - - 77.7% { - -webkit-transform: skewX(0.390625deg) skewY(0.390625deg); - transform: skewX(0.390625deg) skewY(0.390625deg); - } - - 88.8% { - -webkit-transform: skewX(-0.1953125deg) skewY(-0.1953125deg); - transform: skewX(-0.1953125deg) skewY(-0.1953125deg); - } -} -@keyframes jello { - from, - 11.1%, - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - 22.2% { - -webkit-transform: skewX(-12.5deg) skewY(-12.5deg); - transform: skewX(-12.5deg) skewY(-12.5deg); - } - - 33.3% { - -webkit-transform: skewX(6.25deg) skewY(6.25deg); - transform: skewX(6.25deg) skewY(6.25deg); - } - - 44.4% { - -webkit-transform: skewX(-3.125deg) skewY(-3.125deg); - transform: skewX(-3.125deg) skewY(-3.125deg); - } - - 55.5% { - -webkit-transform: skewX(1.5625deg) skewY(1.5625deg); - transform: skewX(1.5625deg) skewY(1.5625deg); - } - - 66.6% { - -webkit-transform: skewX(-0.78125deg) skewY(-0.78125deg); - transform: skewX(-0.78125deg) skewY(-0.78125deg); - } - - 77.7% { - -webkit-transform: skewX(0.390625deg) skewY(0.390625deg); - transform: skewX(0.390625deg) skewY(0.390625deg); - } - - 88.8% { - -webkit-transform: skewX(-0.1953125deg) skewY(-0.1953125deg); - transform: skewX(-0.1953125deg) skewY(-0.1953125deg); - } -} -.animate__jello { - -webkit-animation-name: jello; - animation-name: jello; - -webkit-transform-origin: center; - transform-origin: center; -} -@-webkit-keyframes heartBeat { - 0% { - -webkit-transform: scale(1); - transform: scale(1); - } - - 14% { - -webkit-transform: scale(1.3); - transform: scale(1.3); - } - - 28% { - -webkit-transform: scale(1); - transform: scale(1); - } - - 42% { - -webkit-transform: scale(1.3); - transform: scale(1.3); - } - - 70% { - -webkit-transform: scale(1); - transform: scale(1); - } -} -@keyframes heartBeat { - 0% { - -webkit-transform: scale(1); - transform: scale(1); - } - - 14% { - -webkit-transform: 
scale(1.3); - transform: scale(1.3); - } - - 28% { - -webkit-transform: scale(1); - transform: scale(1); - } - - 42% { - -webkit-transform: scale(1.3); - transform: scale(1.3); - } - - 70% { - -webkit-transform: scale(1); - transform: scale(1); - } -} -.animate__heartBeat { - -webkit-animation-name: heartBeat; - animation-name: heartBeat; - -webkit-animation-duration: calc(1s * 1.3); - animation-duration: calc(1s * 1.3); - -webkit-animation-duration: calc(var(--animate-duration) * 1.3); - animation-duration: calc(var(--animate-duration) * 1.3); - -webkit-animation-timing-function: ease-in-out; - animation-timing-function: ease-in-out; -} -/* Back entrances */ -@-webkit-keyframes backInDown { - 0% { - -webkit-transform: translateY(-1200px) scale(0.7); - transform: translateY(-1200px) scale(0.7); - opacity: 0.7; - } - - 80% { - -webkit-transform: translateY(0px) scale(0.7); - transform: translateY(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } -} -@keyframes backInDown { - 0% { - -webkit-transform: translateY(-1200px) scale(0.7); - transform: translateY(-1200px) scale(0.7); - opacity: 0.7; - } - - 80% { - -webkit-transform: translateY(0px) scale(0.7); - transform: translateY(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } -} -.animate__backInDown { - -webkit-animation-name: backInDown; - animation-name: backInDown; -} -@-webkit-keyframes backInLeft { - 0% { - -webkit-transform: translateX(-2000px) scale(0.7); - transform: translateX(-2000px) scale(0.7); - opacity: 0.7; - } - - 80% { - -webkit-transform: translateX(0px) scale(0.7); - transform: translateX(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } -} -@keyframes backInLeft { - 0% { - -webkit-transform: translateX(-2000px) scale(0.7); - transform: translateX(-2000px) scale(0.7); - opacity: 0.7; - } - - 80% { - -webkit-transform: translateX(0px) scale(0.7); - transform: translateX(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } -} -.animate__backInLeft { - -webkit-animation-name: backInLeft; - animation-name: backInLeft; -} -@-webkit-keyframes backInRight { - 0% { - -webkit-transform: translateX(2000px) scale(0.7); - transform: translateX(2000px) scale(0.7); - opacity: 0.7; - } - - 80% { - -webkit-transform: translateX(0px) scale(0.7); - transform: translateX(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } -} -@keyframes backInRight { - 0% { - -webkit-transform: translateX(2000px) scale(0.7); - transform: translateX(2000px) scale(0.7); - opacity: 0.7; - } - - 80% { - -webkit-transform: translateX(0px) scale(0.7); - transform: translateX(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } -} -.animate__backInRight { - -webkit-animation-name: backInRight; - animation-name: backInRight; -} -@-webkit-keyframes backInUp { - 0% { - -webkit-transform: translateY(1200px) scale(0.7); - transform: translateY(1200px) scale(0.7); - opacity: 0.7; - } - - 80% { - -webkit-transform: translateY(0px) scale(0.7); - transform: translateY(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } -} -@keyframes backInUp { - 0% { - -webkit-transform: translateY(1200px) 
scale(0.7); - transform: translateY(1200px) scale(0.7); - opacity: 0.7; - } - - 80% { - -webkit-transform: translateY(0px) scale(0.7); - transform: translateY(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } -} -.animate__backInUp { - -webkit-animation-name: backInUp; - animation-name: backInUp; -} -/* Back exits */ -@-webkit-keyframes backOutDown { - 0% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } - - 20% { - -webkit-transform: translateY(0px) scale(0.7); - transform: translateY(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: translateY(700px) scale(0.7); - transform: translateY(700px) scale(0.7); - opacity: 0.7; - } -} -@keyframes backOutDown { - 0% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } - - 20% { - -webkit-transform: translateY(0px) scale(0.7); - transform: translateY(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: translateY(700px) scale(0.7); - transform: translateY(700px) scale(0.7); - opacity: 0.7; - } -} -.animate__backOutDown { - -webkit-animation-name: backOutDown; - animation-name: backOutDown; -} -@-webkit-keyframes backOutLeft { - 0% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } - - 20% { - -webkit-transform: translateX(0px) scale(0.7); - transform: translateX(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: translateX(-2000px) scale(0.7); - transform: translateX(-2000px) scale(0.7); - opacity: 0.7; - } -} -@keyframes backOutLeft { - 0% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } - - 20% { - -webkit-transform: translateX(0px) scale(0.7); - transform: translateX(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: translateX(-2000px) scale(0.7); - transform: translateX(-2000px) scale(0.7); - opacity: 0.7; - } -} -.animate__backOutLeft { - -webkit-animation-name: backOutLeft; - animation-name: backOutLeft; -} -@-webkit-keyframes backOutRight { - 0% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } - - 20% { - -webkit-transform: translateX(0px) scale(0.7); - transform: translateX(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: translateX(2000px) scale(0.7); - transform: translateX(2000px) scale(0.7); - opacity: 0.7; - } -} -@keyframes backOutRight { - 0% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } - - 20% { - -webkit-transform: translateX(0px) scale(0.7); - transform: translateX(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: translateX(2000px) scale(0.7); - transform: translateX(2000px) scale(0.7); - opacity: 0.7; - } -} -.animate__backOutRight { - -webkit-animation-name: backOutRight; - animation-name: backOutRight; -} -@-webkit-keyframes backOutUp { - 0% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } - - 20% { - -webkit-transform: translateY(0px) scale(0.7); - transform: translateY(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: translateY(-700px) scale(0.7); - transform: translateY(-700px) scale(0.7); - opacity: 0.7; - } -} -@keyframes backOutUp { - 0% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } - - 20% { - -webkit-transform: translateY(0px) scale(0.7); - transform: translateY(0px) scale(0.7); - opacity: 0.7; - } - - 100% { - -webkit-transform: translateY(-700px) scale(0.7); - transform: translateY(-700px) scale(0.7); - opacity: 0.7; - } 
-} -.animate__backOutUp { - -webkit-animation-name: backOutUp; - animation-name: backOutUp; -} -/* Bouncing entrances */ -@-webkit-keyframes bounceIn { - from, - 20%, - 40%, - 60%, - 80%, - to { - -webkit-animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - } - - 0% { - opacity: 0; - -webkit-transform: scale3d(0.3, 0.3, 0.3); - transform: scale3d(0.3, 0.3, 0.3); - } - - 20% { - -webkit-transform: scale3d(1.1, 1.1, 1.1); - transform: scale3d(1.1, 1.1, 1.1); - } - - 40% { - -webkit-transform: scale3d(0.9, 0.9, 0.9); - transform: scale3d(0.9, 0.9, 0.9); - } - - 60% { - opacity: 1; - -webkit-transform: scale3d(1.03, 1.03, 1.03); - transform: scale3d(1.03, 1.03, 1.03); - } - - 80% { - -webkit-transform: scale3d(0.97, 0.97, 0.97); - transform: scale3d(0.97, 0.97, 0.97); - } - - to { - opacity: 1; - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } -} -@keyframes bounceIn { - from, - 20%, - 40%, - 60%, - 80%, - to { - -webkit-animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - } - - 0% { - opacity: 0; - -webkit-transform: scale3d(0.3, 0.3, 0.3); - transform: scale3d(0.3, 0.3, 0.3); - } - - 20% { - -webkit-transform: scale3d(1.1, 1.1, 1.1); - transform: scale3d(1.1, 1.1, 1.1); - } - - 40% { - -webkit-transform: scale3d(0.9, 0.9, 0.9); - transform: scale3d(0.9, 0.9, 0.9); - } - - 60% { - opacity: 1; - -webkit-transform: scale3d(1.03, 1.03, 1.03); - transform: scale3d(1.03, 1.03, 1.03); - } - - 80% { - -webkit-transform: scale3d(0.97, 0.97, 0.97); - transform: scale3d(0.97, 0.97, 0.97); - } - - to { - opacity: 1; - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } -} -.animate__bounceIn { - -webkit-animation-duration: calc(1s * 0.75); - animation-duration: calc(1s * 0.75); - -webkit-animation-duration: calc(var(--animate-duration) * 0.75); - animation-duration: calc(var(--animate-duration) * 0.75); - -webkit-animation-name: bounceIn; - animation-name: bounceIn; -} -@-webkit-keyframes bounceInDown { - from, - 60%, - 75%, - 90%, - to { - -webkit-animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - } - - 0% { - opacity: 0; - -webkit-transform: translate3d(0, -3000px, 0) scaleY(3); - transform: translate3d(0, -3000px, 0) scaleY(3); - } - - 60% { - opacity: 1; - -webkit-transform: translate3d(0, 25px, 0) scaleY(0.9); - transform: translate3d(0, 25px, 0) scaleY(0.9); - } - - 75% { - -webkit-transform: translate3d(0, -10px, 0) scaleY(0.95); - transform: translate3d(0, -10px, 0) scaleY(0.95); - } - - 90% { - -webkit-transform: translate3d(0, 5px, 0) scaleY(0.985); - transform: translate3d(0, 5px, 0) scaleY(0.985); - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes bounceInDown { - from, - 60%, - 75%, - 90%, - to { - -webkit-animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - } - - 0% { - opacity: 0; - -webkit-transform: translate3d(0, -3000px, 0) scaleY(3); - transform: translate3d(0, -3000px, 0) scaleY(3); - } - - 60% { - opacity: 1; - -webkit-transform: translate3d(0, 25px, 0) scaleY(0.9); - transform: translate3d(0, 25px, 0) scaleY(0.9); - } - - 75% { - -webkit-transform: translate3d(0, -10px, 0) scaleY(0.95); - transform: translate3d(0, -10px, 0) scaleY(0.95); - } - - 90% { - 
-webkit-transform: translate3d(0, 5px, 0) scaleY(0.985); - transform: translate3d(0, 5px, 0) scaleY(0.985); - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__bounceInDown { - -webkit-animation-name: bounceInDown; - animation-name: bounceInDown; -} -@-webkit-keyframes bounceInLeft { - from, - 60%, - 75%, - 90%, - to { - -webkit-animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - } - - 0% { - opacity: 0; - -webkit-transform: translate3d(-3000px, 0, 0) scaleX(3); - transform: translate3d(-3000px, 0, 0) scaleX(3); - } - - 60% { - opacity: 1; - -webkit-transform: translate3d(25px, 0, 0) scaleX(1); - transform: translate3d(25px, 0, 0) scaleX(1); - } - - 75% { - -webkit-transform: translate3d(-10px, 0, 0) scaleX(0.98); - transform: translate3d(-10px, 0, 0) scaleX(0.98); - } - - 90% { - -webkit-transform: translate3d(5px, 0, 0) scaleX(0.995); - transform: translate3d(5px, 0, 0) scaleX(0.995); - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes bounceInLeft { - from, - 60%, - 75%, - 90%, - to { - -webkit-animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - } - - 0% { - opacity: 0; - -webkit-transform: translate3d(-3000px, 0, 0) scaleX(3); - transform: translate3d(-3000px, 0, 0) scaleX(3); - } - - 60% { - opacity: 1; - -webkit-transform: translate3d(25px, 0, 0) scaleX(1); - transform: translate3d(25px, 0, 0) scaleX(1); - } - - 75% { - -webkit-transform: translate3d(-10px, 0, 0) scaleX(0.98); - transform: translate3d(-10px, 0, 0) scaleX(0.98); - } - - 90% { - -webkit-transform: translate3d(5px, 0, 0) scaleX(0.995); - transform: translate3d(5px, 0, 0) scaleX(0.995); - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__bounceInLeft { - -webkit-animation-name: bounceInLeft; - animation-name: bounceInLeft; -} -@-webkit-keyframes bounceInRight { - from, - 60%, - 75%, - 90%, - to { - -webkit-animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - } - - from { - opacity: 0; - -webkit-transform: translate3d(3000px, 0, 0) scaleX(3); - transform: translate3d(3000px, 0, 0) scaleX(3); - } - - 60% { - opacity: 1; - -webkit-transform: translate3d(-25px, 0, 0) scaleX(1); - transform: translate3d(-25px, 0, 0) scaleX(1); - } - - 75% { - -webkit-transform: translate3d(10px, 0, 0) scaleX(0.98); - transform: translate3d(10px, 0, 0) scaleX(0.98); - } - - 90% { - -webkit-transform: translate3d(-5px, 0, 0) scaleX(0.995); - transform: translate3d(-5px, 0, 0) scaleX(0.995); - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes bounceInRight { - from, - 60%, - 75%, - 90%, - to { - -webkit-animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - } - - from { - opacity: 0; - -webkit-transform: translate3d(3000px, 0, 0) scaleX(3); - transform: translate3d(3000px, 0, 0) scaleX(3); - } - - 60% { - opacity: 1; - -webkit-transform: translate3d(-25px, 0, 0) scaleX(1); - transform: translate3d(-25px, 0, 0) scaleX(1); - } - - 75% { - -webkit-transform: translate3d(10px, 0, 0) scaleX(0.98); - transform: translate3d(10px, 0, 0) scaleX(0.98); - } - - 90% { - -webkit-transform: translate3d(-5px, 0, 0) 
scaleX(0.995); - transform: translate3d(-5px, 0, 0) scaleX(0.995); - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__bounceInRight { - -webkit-animation-name: bounceInRight; - animation-name: bounceInRight; -} -@-webkit-keyframes bounceInUp { - from, - 60%, - 75%, - 90%, - to { - -webkit-animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - } - - from { - opacity: 0; - -webkit-transform: translate3d(0, 3000px, 0) scaleY(5); - transform: translate3d(0, 3000px, 0) scaleY(5); - } - - 60% { - opacity: 1; - -webkit-transform: translate3d(0, -20px, 0) scaleY(0.9); - transform: translate3d(0, -20px, 0) scaleY(0.9); - } - - 75% { - -webkit-transform: translate3d(0, 10px, 0) scaleY(0.95); - transform: translate3d(0, 10px, 0) scaleY(0.95); - } - - 90% { - -webkit-transform: translate3d(0, -5px, 0) scaleY(0.985); - transform: translate3d(0, -5px, 0) scaleY(0.985); - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes bounceInUp { - from, - 60%, - 75%, - 90%, - to { - -webkit-animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); - } - - from { - opacity: 0; - -webkit-transform: translate3d(0, 3000px, 0) scaleY(5); - transform: translate3d(0, 3000px, 0) scaleY(5); - } - - 60% { - opacity: 1; - -webkit-transform: translate3d(0, -20px, 0) scaleY(0.9); - transform: translate3d(0, -20px, 0) scaleY(0.9); - } - - 75% { - -webkit-transform: translate3d(0, 10px, 0) scaleY(0.95); - transform: translate3d(0, 10px, 0) scaleY(0.95); - } - - 90% { - -webkit-transform: translate3d(0, -5px, 0) scaleY(0.985); - transform: translate3d(0, -5px, 0) scaleY(0.985); - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__bounceInUp { - -webkit-animation-name: bounceInUp; - animation-name: bounceInUp; -} -/* Bouncing exits */ -@-webkit-keyframes bounceOut { - 20% { - -webkit-transform: scale3d(0.9, 0.9, 0.9); - transform: scale3d(0.9, 0.9, 0.9); - } - - 50%, - 55% { - opacity: 1; - -webkit-transform: scale3d(1.1, 1.1, 1.1); - transform: scale3d(1.1, 1.1, 1.1); - } - - to { - opacity: 0; - -webkit-transform: scale3d(0.3, 0.3, 0.3); - transform: scale3d(0.3, 0.3, 0.3); - } -} -@keyframes bounceOut { - 20% { - -webkit-transform: scale3d(0.9, 0.9, 0.9); - transform: scale3d(0.9, 0.9, 0.9); - } - - 50%, - 55% { - opacity: 1; - -webkit-transform: scale3d(1.1, 1.1, 1.1); - transform: scale3d(1.1, 1.1, 1.1); - } - - to { - opacity: 0; - -webkit-transform: scale3d(0.3, 0.3, 0.3); - transform: scale3d(0.3, 0.3, 0.3); - } -} -.animate__bounceOut { - -webkit-animation-duration: calc(1s * 0.75); - animation-duration: calc(1s * 0.75); - -webkit-animation-duration: calc(var(--animate-duration) * 0.75); - animation-duration: calc(var(--animate-duration) * 0.75); - -webkit-animation-name: bounceOut; - animation-name: bounceOut; -} -@-webkit-keyframes bounceOutDown { - 20% { - -webkit-transform: translate3d(0, 10px, 0) scaleY(0.985); - transform: translate3d(0, 10px, 0) scaleY(0.985); - } - - 40%, - 45% { - opacity: 1; - -webkit-transform: translate3d(0, -20px, 0) scaleY(0.9); - transform: translate3d(0, -20px, 0) scaleY(0.9); - } - - to { - opacity: 0; - -webkit-transform: translate3d(0, 2000px, 0) scaleY(3); - transform: translate3d(0, 2000px, 0) scaleY(3); - } -} -@keyframes bounceOutDown { - 20% { - -webkit-transform: 
translate3d(0, 10px, 0) scaleY(0.985); - transform: translate3d(0, 10px, 0) scaleY(0.985); - } - - 40%, - 45% { - opacity: 1; - -webkit-transform: translate3d(0, -20px, 0) scaleY(0.9); - transform: translate3d(0, -20px, 0) scaleY(0.9); - } - - to { - opacity: 0; - -webkit-transform: translate3d(0, 2000px, 0) scaleY(3); - transform: translate3d(0, 2000px, 0) scaleY(3); - } -} -.animate__bounceOutDown { - -webkit-animation-name: bounceOutDown; - animation-name: bounceOutDown; -} -@-webkit-keyframes bounceOutLeft { - 20% { - opacity: 1; - -webkit-transform: translate3d(20px, 0, 0) scaleX(0.9); - transform: translate3d(20px, 0, 0) scaleX(0.9); - } - - to { - opacity: 0; - -webkit-transform: translate3d(-2000px, 0, 0) scaleX(2); - transform: translate3d(-2000px, 0, 0) scaleX(2); - } -} -@keyframes bounceOutLeft { - 20% { - opacity: 1; - -webkit-transform: translate3d(20px, 0, 0) scaleX(0.9); - transform: translate3d(20px, 0, 0) scaleX(0.9); - } - - to { - opacity: 0; - -webkit-transform: translate3d(-2000px, 0, 0) scaleX(2); - transform: translate3d(-2000px, 0, 0) scaleX(2); - } -} -.animate__bounceOutLeft { - -webkit-animation-name: bounceOutLeft; - animation-name: bounceOutLeft; -} -@-webkit-keyframes bounceOutRight { - 20% { - opacity: 1; - -webkit-transform: translate3d(-20px, 0, 0) scaleX(0.9); - transform: translate3d(-20px, 0, 0) scaleX(0.9); - } - - to { - opacity: 0; - -webkit-transform: translate3d(2000px, 0, 0) scaleX(2); - transform: translate3d(2000px, 0, 0) scaleX(2); - } -} -@keyframes bounceOutRight { - 20% { - opacity: 1; - -webkit-transform: translate3d(-20px, 0, 0) scaleX(0.9); - transform: translate3d(-20px, 0, 0) scaleX(0.9); - } - - to { - opacity: 0; - -webkit-transform: translate3d(2000px, 0, 0) scaleX(2); - transform: translate3d(2000px, 0, 0) scaleX(2); - } -} -.animate__bounceOutRight { - -webkit-animation-name: bounceOutRight; - animation-name: bounceOutRight; -} -@-webkit-keyframes bounceOutUp { - 20% { - -webkit-transform: translate3d(0, -10px, 0) scaleY(0.985); - transform: translate3d(0, -10px, 0) scaleY(0.985); - } - - 40%, - 45% { - opacity: 1; - -webkit-transform: translate3d(0, 20px, 0) scaleY(0.9); - transform: translate3d(0, 20px, 0) scaleY(0.9); - } - - to { - opacity: 0; - -webkit-transform: translate3d(0, -2000px, 0) scaleY(3); - transform: translate3d(0, -2000px, 0) scaleY(3); - } -} -@keyframes bounceOutUp { - 20% { - -webkit-transform: translate3d(0, -10px, 0) scaleY(0.985); - transform: translate3d(0, -10px, 0) scaleY(0.985); - } - - 40%, - 45% { - opacity: 1; - -webkit-transform: translate3d(0, 20px, 0) scaleY(0.9); - transform: translate3d(0, 20px, 0) scaleY(0.9); - } - - to { - opacity: 0; - -webkit-transform: translate3d(0, -2000px, 0) scaleY(3); - transform: translate3d(0, -2000px, 0) scaleY(3); - } -} -.animate__bounceOutUp { - -webkit-animation-name: bounceOutUp; - animation-name: bounceOutUp; -} -/* Fading entrances */ -@-webkit-keyframes fadeIn { - from { - opacity: 0; - } - - to { - opacity: 1; - } -} -@keyframes fadeIn { - from { - opacity: 0; - } - - to { - opacity: 1; - } -} -.animate__fadeIn { - -webkit-animation-name: fadeIn; - animation-name: fadeIn; -} -@-webkit-keyframes fadeInDown { - from { - opacity: 0; - -webkit-transform: translateY(0, 300px, 0); - transform: translateY(0, 300px, 0); - } - - to { - opacity: 1; - -webkit-transform: translateY(0, 0, 0); - transform: translateY(0, 0, 0); - } -} -@keyframes fadeInDown { - from { - opacity: 0; - -webkit-transform: translateY(0, 300px, 0); - transform: translateY(0, 300px, 0); - } - 
- to { - opacity: 1; - -webkit-transform: translateY(0, 0, 0); - transform: translateY(0, 0, 0); - } -} -.animate__fadeInDown { - -webkit-animation-name: fadeInDown; - animation-name: fadeInDown; -} -@-webkit-keyframes fadeInDownBig { - from { - opacity: 0; - -webkit-transform: translate3d(0, -2000px, 0); - transform: translate3d(0, -2000px, 0); - } - - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes fadeInDownBig { - from { - opacity: 0; - -webkit-transform: translate3d(0, -2000px, 0); - transform: translate3d(0, -2000px, 0); - } - - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__fadeInDownBig { - -webkit-animation-name: fadeInDownBig; - animation-name: fadeInDownBig; -} -@-webkit-keyframes fadeInLeft { - from { - opacity: 0; - -webkit-transform: translate3d(-100%, 0, 0); - transform: translate3d(-100%, 0, 0); - } - - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes fadeInLeft { - from { - opacity: 0; - -webkit-transform: translate3d(-100%, 0, 0); - transform: translate3d(-100%, 0, 0); - } - - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__fadeInLeft { - -webkit-animation-name: fadeInLeft; - animation-name: fadeInLeft; -} -@-webkit-keyframes fadeInLeftBig { - from { - opacity: 0; - -webkit-transform: translate3d(-2000px, 0, 0); - transform: translate3d(-2000px, 0, 0); - } - - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes fadeInLeftBig { - from { - opacity: 0; - -webkit-transform: translate3d(-2000px, 0, 0); - transform: translate3d(-2000px, 0, 0); - } - - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__fadeInLeftBig { - -webkit-animation-name: fadeInLeftBig; - animation-name: fadeInLeftBig; -} -@-webkit-keyframes fadeInRight { - from { - opacity: 0; - -webkit-transform: translate3d(100%, 0, 0); - transform: translate3d(100%, 0, 0); - } - - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes fadeInRight { - from { - opacity: 0; - -webkit-transform: translate3d(100%, 0, 0); - transform: translate3d(100%, 0, 0); - } - - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__fadeInRight { - -webkit-animation-name: fadeInRight; - animation-name: fadeInRight; -} -@-webkit-keyframes fadeInRightBig { - from { - opacity: 0; - -webkit-transform: translate3d(2000px, 0, 0); - transform: translate3d(2000px, 0, 0); - } - - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes fadeInRightBig { - from { - opacity: 0; - -webkit-transform: translate3d(2000px, 0, 0); - transform: translate3d(2000px, 0, 0); - } - - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__fadeInRightBig { - -webkit-animation-name: fadeInRightBig; - animation-name: fadeInRightBig; -} -@-webkit-keyframes fadeInUp { - from { - opacity: 0; - -webkit-transform: translateY(150px); - transform: translateY(150px); - } - - to { - opacity: 1; - -webkit-transform: translateY(0); - transform: translateY(0); - } -} -@keyframes fadeInUp { - from { - opacity: 0; - -webkit-transform: translateY(150px); 
- transform: translateY(150px); - } - - to { - opacity: 1; - -webkit-transform: translateY(0); - transform: translateY(0); - } -} -.animate__fadeInUp { - -webkit-animation-name: fadeInUp; - animation-name: fadeInUp; -} -@-webkit-keyframes fadeInUpBig { - from { - opacity: 0; - -webkit-transform: translate3d(0, 2000px, 0); - transform: translate3d(0, 2000px, 0); - } - - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes fadeInUpBig { - from { - opacity: 0; - -webkit-transform: translate3d(0, 2000px, 0); - transform: translate3d(0, 2000px, 0); - } - - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__fadeInUpBig { - -webkit-animation-name: fadeInUpBig; - animation-name: fadeInUpBig; -} -@-webkit-keyframes fadeInTopLeft { - from { - opacity: 0; - -webkit-transform: translate3d(-100%, -100%, 0); - transform: translate3d(-100%, -100%, 0); - } - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes fadeInTopLeft { - from { - opacity: 0; - -webkit-transform: translate3d(-100%, -100%, 0); - transform: translate3d(-100%, -100%, 0); - } - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__fadeInTopLeft { - -webkit-animation-name: fadeInTopLeft; - animation-name: fadeInTopLeft; -} -@-webkit-keyframes fadeInTopRight { - from { - opacity: 0; - -webkit-transform: translate3d(100%, -100%, 0); - transform: translate3d(100%, -100%, 0); - } - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes fadeInTopRight { - from { - opacity: 0; - -webkit-transform: translate3d(100%, -100%, 0); - transform: translate3d(100%, -100%, 0); - } - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__fadeInTopRight { - -webkit-animation-name: fadeInTopRight; - animation-name: fadeInTopRight; -} -@-webkit-keyframes fadeInBottomLeft { - from { - opacity: 0; - -webkit-transform: translate3d(-100%, 100%, 0); - transform: translate3d(-100%, 100%, 0); - } - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes fadeInBottomLeft { - from { - opacity: 0; - -webkit-transform: translate3d(-100%, 100%, 0); - transform: translate3d(-100%, 100%, 0); - } - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__fadeInBottomLeft { - -webkit-animation-name: fadeInBottomLeft; - animation-name: fadeInBottomLeft; -} -@-webkit-keyframes fadeInBottomRight { - from { - opacity: 0; - -webkit-transform: translate3d(100%, 100%, 0); - transform: translate3d(100%, 100%, 0); - } - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes fadeInBottomRight { - from { - opacity: 0; - -webkit-transform: translate3d(100%, 100%, 0); - transform: translate3d(100%, 100%, 0); - } - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__fadeInBottomRight { - -webkit-animation-name: fadeInBottomRight; - animation-name: fadeInBottomRight; -} -/* Fading exits */ -@-webkit-keyframes fadeOut { - from { - opacity: 1; - } - - to { - opacity: 0; - } -} -@keyframes fadeOut { - from { - opacity: 1; - } - - to { - opacity: 0; - } -} -.animate__fadeOut { - 
-webkit-animation-name: fadeOut; - animation-name: fadeOut; -} -@-webkit-keyframes fadeOutDown { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(0, 100%, 0); - transform: translate3d(0, 100%, 0); - } -} -@keyframes fadeOutDown { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(0, 100%, 0); - transform: translate3d(0, 100%, 0); - } -} -.animate__fadeOutDown { - -webkit-animation-name: fadeOutDown; - animation-name: fadeOutDown; -} -@-webkit-keyframes fadeOutDownBig { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(0, 2000px, 0); - transform: translate3d(0, 2000px, 0); - } -} -@keyframes fadeOutDownBig { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(0, 2000px, 0); - transform: translate3d(0, 2000px, 0); - } -} -.animate__fadeOutDownBig { - -webkit-animation-name: fadeOutDownBig; - animation-name: fadeOutDownBig; -} -@-webkit-keyframes fadeOutLeft { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(-100%, 0, 0); - transform: translate3d(-100%, 0, 0); - } -} -@keyframes fadeOutLeft { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(-100%, 0, 0); - transform: translate3d(-100%, 0, 0); - } -} -.animate__fadeOutLeft { - -webkit-animation-name: fadeOutLeft; - animation-name: fadeOutLeft; -} -@-webkit-keyframes fadeOutLeftBig { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(-2000px, 0, 0); - transform: translate3d(-2000px, 0, 0); - } -} -@keyframes fadeOutLeftBig { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(-2000px, 0, 0); - transform: translate3d(-2000px, 0, 0); - } -} -.animate__fadeOutLeftBig { - -webkit-animation-name: fadeOutLeftBig; - animation-name: fadeOutLeftBig; -} -@-webkit-keyframes fadeOutRight { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(100%, 0, 0); - transform: translate3d(100%, 0, 0); - } -} -@keyframes fadeOutRight { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(100%, 0, 0); - transform: translate3d(100%, 0, 0); - } -} -.animate__fadeOutRight { - -webkit-animation-name: fadeOutRight; - animation-name: fadeOutRight; -} -@-webkit-keyframes fadeOutRightBig { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(2000px, 0, 0); - transform: translate3d(2000px, 0, 0); - } -} -@keyframes fadeOutRightBig { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(2000px, 0, 0); - transform: translate3d(2000px, 0, 0); - } -} -.animate__fadeOutRightBig { - -webkit-animation-name: fadeOutRightBig; - animation-name: fadeOutRightBig; -} -@-webkit-keyframes fadeOutUp { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(0, -100%, 0); - transform: translate3d(0, -100%, 0); - } -} -@keyframes fadeOutUp { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(0, -100%, 0); - transform: translate3d(0, -100%, 0); - } -} -.animate__fadeOutUp { - -webkit-animation-name: fadeOutUp; - animation-name: fadeOutUp; -} -@-webkit-keyframes fadeOutUpBig { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(0, -2000px, 0); - transform: translate3d(0, -2000px, 0); - } -} -@keyframes fadeOutUpBig { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(0, -2000px, 0); - transform: 
translate3d(0, -2000px, 0); - } -} -.animate__fadeOutUpBig { - -webkit-animation-name: fadeOutUpBig; - animation-name: fadeOutUpBig; -} -@-webkit-keyframes fadeOutTopLeft { - from { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - to { - opacity: 0; - -webkit-transform: translate3d(-100%, -100%, 0); - transform: translate3d(-100%, -100%, 0); - } -} -@keyframes fadeOutTopLeft { - from { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - to { - opacity: 0; - -webkit-transform: translate3d(-100%, -100%, 0); - transform: translate3d(-100%, -100%, 0); - } -} -.animate__fadeOutTopLeft { - -webkit-animation-name: fadeOutTopLeft; - animation-name: fadeOutTopLeft; -} -@-webkit-keyframes fadeOutTopRight { - from { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - to { - opacity: 0; - -webkit-transform: translate3d(100%, -100%, 0); - transform: translate3d(100%, -100%, 0); - } -} -@keyframes fadeOutTopRight { - from { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - to { - opacity: 0; - -webkit-transform: translate3d(100%, -100%, 0); - transform: translate3d(100%, -100%, 0); - } -} -.animate__fadeOutTopRight { - -webkit-animation-name: fadeOutTopRight; - animation-name: fadeOutTopRight; -} -@-webkit-keyframes fadeOutBottomRight { - from { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - to { - opacity: 0; - -webkit-transform: translate3d(100%, 100%, 0); - transform: translate3d(100%, 100%, 0); - } -} -@keyframes fadeOutBottomRight { - from { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - to { - opacity: 0; - -webkit-transform: translate3d(100%, 100%, 0); - transform: translate3d(100%, 100%, 0); - } -} -.animate__fadeOutBottomRight { - -webkit-animation-name: fadeOutBottomRight; - animation-name: fadeOutBottomRight; -} -@-webkit-keyframes fadeOutBottomLeft { - from { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - to { - opacity: 0; - -webkit-transform: translate3d(-100%, 100%, 0); - transform: translate3d(-100%, 100%, 0); - } -} -@keyframes fadeOutBottomLeft { - from { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - to { - opacity: 0; - -webkit-transform: translate3d(-100%, 100%, 0); - transform: translate3d(-100%, 100%, 0); - } -} -.animate__fadeOutBottomLeft { - -webkit-animation-name: fadeOutBottomLeft; - animation-name: fadeOutBottomLeft; -} -/* Flippers */ -@-webkit-keyframes flip { - from { - -webkit-transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 0) rotate3d(0, 1, 0, -360deg); - transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 0) rotate3d(0, 1, 0, -360deg); - -webkit-animation-timing-function: ease-out; - animation-timing-function: ease-out; - } - - 40% { - -webkit-transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 150px) - rotate3d(0, 1, 0, -190deg); - transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 150px) - rotate3d(0, 1, 0, -190deg); - -webkit-animation-timing-function: ease-out; - animation-timing-function: ease-out; - } - - 50% { - -webkit-transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 150px) - rotate3d(0, 1, 0, -170deg); - transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 150px) - rotate3d(0, 1, 0, 
-170deg); - -webkit-animation-timing-function: ease-in; - animation-timing-function: ease-in; - } - - 80% { - -webkit-transform: perspective(400px) scale3d(0.95, 0.95, 0.95) translate3d(0, 0, 0) - rotate3d(0, 1, 0, 0deg); - transform: perspective(400px) scale3d(0.95, 0.95, 0.95) translate3d(0, 0, 0) - rotate3d(0, 1, 0, 0deg); - -webkit-animation-timing-function: ease-in; - animation-timing-function: ease-in; - } - - to { - -webkit-transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 0) rotate3d(0, 1, 0, 0deg); - transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 0) rotate3d(0, 1, 0, 0deg); - -webkit-animation-timing-function: ease-in; - animation-timing-function: ease-in; - } -} -@keyframes flip { - from { - -webkit-transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 0) rotate3d(0, 1, 0, -360deg); - transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 0) rotate3d(0, 1, 0, -360deg); - -webkit-animation-timing-function: ease-out; - animation-timing-function: ease-out; - } - - 40% { - -webkit-transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 150px) - rotate3d(0, 1, 0, -190deg); - transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 150px) - rotate3d(0, 1, 0, -190deg); - -webkit-animation-timing-function: ease-out; - animation-timing-function: ease-out; - } - - 50% { - -webkit-transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 150px) - rotate3d(0, 1, 0, -170deg); - transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 150px) - rotate3d(0, 1, 0, -170deg); - -webkit-animation-timing-function: ease-in; - animation-timing-function: ease-in; - } - - 80% { - -webkit-transform: perspective(400px) scale3d(0.95, 0.95, 0.95) translate3d(0, 0, 0) - rotate3d(0, 1, 0, 0deg); - transform: perspective(400px) scale3d(0.95, 0.95, 0.95) translate3d(0, 0, 0) - rotate3d(0, 1, 0, 0deg); - -webkit-animation-timing-function: ease-in; - animation-timing-function: ease-in; - } - - to { - -webkit-transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 0) rotate3d(0, 1, 0, 0deg); - transform: perspective(400px) scale3d(1, 1, 1) translate3d(0, 0, 0) rotate3d(0, 1, 0, 0deg); - -webkit-animation-timing-function: ease-in; - animation-timing-function: ease-in; - } -} -.animate__animated.animate__flip { - -webkit-backface-visibility: visible; - backface-visibility: visible; - -webkit-animation-name: flip; - animation-name: flip; -} -@-webkit-keyframes flipInX { - from { - -webkit-transform: perspective(400px) rotate3d(1, 0, 0, 90deg); - transform: perspective(400px) rotate3d(1, 0, 0, 90deg); - -webkit-animation-timing-function: ease-in; - animation-timing-function: ease-in; - opacity: 0; - } - - 40% { - -webkit-transform: perspective(400px) rotate3d(1, 0, 0, -20deg); - transform: perspective(400px) rotate3d(1, 0, 0, -20deg); - -webkit-animation-timing-function: ease-in; - animation-timing-function: ease-in; - } - - 60% { - -webkit-transform: perspective(400px) rotate3d(1, 0, 0, 10deg); - transform: perspective(400px) rotate3d(1, 0, 0, 10deg); - opacity: 1; - } - - 80% { - -webkit-transform: perspective(400px) rotate3d(1, 0, 0, -5deg); - transform: perspective(400px) rotate3d(1, 0, 0, -5deg); - } - - to { - -webkit-transform: perspective(400px); - transform: perspective(400px); - } -} -@keyframes flipInX { - from { - -webkit-transform: perspective(400px) rotate3d(1, 0, 0, 90deg); - transform: perspective(400px) rotate3d(1, 0, 0, 90deg); - -webkit-animation-timing-function: ease-in; - animation-timing-function: 
ease-in; - opacity: 0; - } - - 40% { - -webkit-transform: perspective(400px) rotate3d(1, 0, 0, -20deg); - transform: perspective(400px) rotate3d(1, 0, 0, -20deg); - -webkit-animation-timing-function: ease-in; - animation-timing-function: ease-in; - } - - 60% { - -webkit-transform: perspective(400px) rotate3d(1, 0, 0, 10deg); - transform: perspective(400px) rotate3d(1, 0, 0, 10deg); - opacity: 1; - } - - 80% { - -webkit-transform: perspective(400px) rotate3d(1, 0, 0, -5deg); - transform: perspective(400px) rotate3d(1, 0, 0, -5deg); - } - - to { - -webkit-transform: perspective(400px); - transform: perspective(400px); - } -} -.animate__flipInX { - -webkit-backface-visibility: visible !important; - backface-visibility: visible !important; - -webkit-animation-name: flipInX; - animation-name: flipInX; -} -@-webkit-keyframes flipInY { - from { - -webkit-transform: perspective(400px) rotate3d(0, 1, 0, 90deg); - transform: perspective(400px) rotate3d(0, 1, 0, 90deg); - -webkit-animation-timing-function: ease-in; - animation-timing-function: ease-in; - opacity: 0; - } - - 40% { - -webkit-transform: perspective(400px) rotate3d(0, 1, 0, -20deg); - transform: perspective(400px) rotate3d(0, 1, 0, -20deg); - -webkit-animation-timing-function: ease-in; - animation-timing-function: ease-in; - } - - 60% { - -webkit-transform: perspective(400px) rotate3d(0, 1, 0, 10deg); - transform: perspective(400px) rotate3d(0, 1, 0, 10deg); - opacity: 1; - } - - 80% { - -webkit-transform: perspective(400px) rotate3d(0, 1, 0, -5deg); - transform: perspective(400px) rotate3d(0, 1, 0, -5deg); - } - - to { - -webkit-transform: perspective(400px); - transform: perspective(400px); - } -} -@keyframes flipInY { - from { - -webkit-transform: perspective(400px) rotate3d(0, 1, 0, 90deg); - transform: perspective(400px) rotate3d(0, 1, 0, 90deg); - -webkit-animation-timing-function: ease-in; - animation-timing-function: ease-in; - opacity: 0; - } - - 40% { - -webkit-transform: perspective(400px) rotate3d(0, 1, 0, -20deg); - transform: perspective(400px) rotate3d(0, 1, 0, -20deg); - -webkit-animation-timing-function: ease-in; - animation-timing-function: ease-in; - } - - 60% { - -webkit-transform: perspective(400px) rotate3d(0, 1, 0, 10deg); - transform: perspective(400px) rotate3d(0, 1, 0, 10deg); - opacity: 1; - } - - 80% { - -webkit-transform: perspective(400px) rotate3d(0, 1, 0, -5deg); - transform: perspective(400px) rotate3d(0, 1, 0, -5deg); - } - - to { - -webkit-transform: perspective(400px); - transform: perspective(400px); - } -} -.animate__flipInY { - -webkit-backface-visibility: visible !important; - backface-visibility: visible !important; - -webkit-animation-name: flipInY; - animation-name: flipInY; -} -@-webkit-keyframes flipOutX { - from { - -webkit-transform: perspective(400px); - transform: perspective(400px); - } - - 30% { - -webkit-transform: perspective(400px) rotate3d(1, 0, 0, -20deg); - transform: perspective(400px) rotate3d(1, 0, 0, -20deg); - opacity: 1; - } - - to { - -webkit-transform: perspective(400px) rotate3d(1, 0, 0, 90deg); - transform: perspective(400px) rotate3d(1, 0, 0, 90deg); - opacity: 0; - } -} -@keyframes flipOutX { - from { - -webkit-transform: perspective(400px); - transform: perspective(400px); - } - - 30% { - -webkit-transform: perspective(400px) rotate3d(1, 0, 0, -20deg); - transform: perspective(400px) rotate3d(1, 0, 0, -20deg); - opacity: 1; - } - - to { - -webkit-transform: perspective(400px) rotate3d(1, 0, 0, 90deg); - transform: perspective(400px) rotate3d(1, 0, 0, 90deg); - opacity: 
0; - } -} -.animate__flipOutX { - -webkit-animation-duration: calc(1s * 0.75); - animation-duration: calc(1s * 0.75); - -webkit-animation-duration: calc(var(--animate-duration) * 0.75); - animation-duration: calc(var(--animate-duration) * 0.75); - -webkit-animation-name: flipOutX; - animation-name: flipOutX; - -webkit-backface-visibility: visible !important; - backface-visibility: visible !important; -} -@-webkit-keyframes flipOutY { - from { - -webkit-transform: perspective(400px); - transform: perspective(400px); - } - - 30% { - -webkit-transform: perspective(400px) rotate3d(0, 1, 0, -15deg); - transform: perspective(400px) rotate3d(0, 1, 0, -15deg); - opacity: 1; - } - - to { - -webkit-transform: perspective(400px) rotate3d(0, 1, 0, 90deg); - transform: perspective(400px) rotate3d(0, 1, 0, 90deg); - opacity: 0; - } -} -@keyframes flipOutY { - from { - -webkit-transform: perspective(400px); - transform: perspective(400px); - } - - 30% { - -webkit-transform: perspective(400px) rotate3d(0, 1, 0, -15deg); - transform: perspective(400px) rotate3d(0, 1, 0, -15deg); - opacity: 1; - } - - to { - -webkit-transform: perspective(400px) rotate3d(0, 1, 0, 90deg); - transform: perspective(400px) rotate3d(0, 1, 0, 90deg); - opacity: 0; - } -} -.animate__flipOutY { - -webkit-animation-duration: calc(1s * 0.75); - animation-duration: calc(1s * 0.75); - -webkit-animation-duration: calc(var(--animate-duration) * 0.75); - animation-duration: calc(var(--animate-duration) * 0.75); - -webkit-backface-visibility: visible !important; - backface-visibility: visible !important; - -webkit-animation-name: flipOutY; - animation-name: flipOutY; -} -/* Lightspeed */ -@-webkit-keyframes lightSpeedInRight { - from { - -webkit-transform: translate3d(100%, 0, 0) skewX(-30deg); - transform: translate3d(100%, 0, 0) skewX(-30deg); - opacity: 0; - } - - 60% { - -webkit-transform: skewX(20deg); - transform: skewX(20deg); - opacity: 1; - } - - 80% { - -webkit-transform: skewX(-5deg); - transform: skewX(-5deg); - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes lightSpeedInRight { - from { - -webkit-transform: translate3d(100%, 0, 0) skewX(-30deg); - transform: translate3d(100%, 0, 0) skewX(-30deg); - opacity: 0; - } - - 60% { - -webkit-transform: skewX(20deg); - transform: skewX(20deg); - opacity: 1; - } - - 80% { - -webkit-transform: skewX(-5deg); - transform: skewX(-5deg); - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__lightSpeedInRight { - -webkit-animation-name: lightSpeedInRight; - animation-name: lightSpeedInRight; - -webkit-animation-timing-function: ease-out; - animation-timing-function: ease-out; -} -@-webkit-keyframes lightSpeedInLeft { - from { - -webkit-transform: translate3d(-100%, 0, 0) skewX(30deg); - transform: translate3d(-100%, 0, 0) skewX(30deg); - opacity: 0; - } - - 60% { - -webkit-transform: skewX(-20deg); - transform: skewX(-20deg); - opacity: 1; - } - - 80% { - -webkit-transform: skewX(5deg); - transform: skewX(5deg); - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes lightSpeedInLeft { - from { - -webkit-transform: translate3d(-100%, 0, 0) skewX(30deg); - transform: translate3d(-100%, 0, 0) skewX(30deg); - opacity: 0; - } - - 60% { - -webkit-transform: skewX(-20deg); - transform: skewX(-20deg); - opacity: 1; - } - - 80% { - -webkit-transform: skewX(5deg); - transform: skewX(5deg); - } - - to { - -webkit-transform: 
translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__lightSpeedInLeft { - -webkit-animation-name: lightSpeedInLeft; - animation-name: lightSpeedInLeft; - -webkit-animation-timing-function: ease-out; - animation-timing-function: ease-out; -} -@-webkit-keyframes lightSpeedOutRight { - from { - opacity: 1; - } - - to { - -webkit-transform: translate3d(100%, 0, 0) skewX(30deg); - transform: translate3d(100%, 0, 0) skewX(30deg); - opacity: 0; - } -} -@keyframes lightSpeedOutRight { - from { - opacity: 1; - } - - to { - -webkit-transform: translate3d(100%, 0, 0) skewX(30deg); - transform: translate3d(100%, 0, 0) skewX(30deg); - opacity: 0; - } -} -.animate__lightSpeedOutRight { - -webkit-animation-name: lightSpeedOutRight; - animation-name: lightSpeedOutRight; - -webkit-animation-timing-function: ease-in; - animation-timing-function: ease-in; -} -@-webkit-keyframes lightSpeedOutLeft { - from { - opacity: 1; - } - - to { - -webkit-transform: translate3d(-100%, 0, 0) skewX(-30deg); - transform: translate3d(-100%, 0, 0) skewX(-30deg); - opacity: 0; - } -} -@keyframes lightSpeedOutLeft { - from { - opacity: 1; - } - - to { - -webkit-transform: translate3d(-100%, 0, 0) skewX(-30deg); - transform: translate3d(-100%, 0, 0) skewX(-30deg); - opacity: 0; - } -} -.animate__lightSpeedOutLeft { - -webkit-animation-name: lightSpeedOutLeft; - animation-name: lightSpeedOutLeft; - -webkit-animation-timing-function: ease-in; - animation-timing-function: ease-in; -} -/* Rotating entrances */ -@-webkit-keyframes rotateIn { - from { - -webkit-transform: rotate3d(0, 0, 1, -200deg); - transform: rotate3d(0, 0, 1, -200deg); - opacity: 0; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - opacity: 1; - } -} -@keyframes rotateIn { - from { - -webkit-transform: rotate3d(0, 0, 1, -200deg); - transform: rotate3d(0, 0, 1, -200deg); - opacity: 0; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - opacity: 1; - } -} -.animate__rotateIn { - -webkit-animation-name: rotateIn; - animation-name: rotateIn; - -webkit-transform-origin: center; - transform-origin: center; -} -@-webkit-keyframes rotateInDownLeft { - from { - -webkit-transform: rotate3d(0, 0, 1, -45deg); - transform: rotate3d(0, 0, 1, -45deg); - opacity: 0; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - opacity: 1; - } -} -@keyframes rotateInDownLeft { - from { - -webkit-transform: rotate3d(0, 0, 1, -45deg); - transform: rotate3d(0, 0, 1, -45deg); - opacity: 0; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - opacity: 1; - } -} -.animate__rotateInDownLeft { - -webkit-animation-name: rotateInDownLeft; - animation-name: rotateInDownLeft; - -webkit-transform-origin: left bottom; - transform-origin: left bottom; -} -@-webkit-keyframes rotateInDownRight { - from { - -webkit-transform: rotate3d(0, 0, 1, 45deg); - transform: rotate3d(0, 0, 1, 45deg); - opacity: 0; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - opacity: 1; - } -} -@keyframes rotateInDownRight { - from { - -webkit-transform: rotate3d(0, 0, 1, 45deg); - transform: rotate3d(0, 0, 1, 45deg); - opacity: 0; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - opacity: 1; - } -} -.animate__rotateInDownRight { - -webkit-animation-name: rotateInDownRight; - animation-name: rotateInDownRight; - -webkit-transform-origin: right bottom; - 
transform-origin: right bottom; -} -@-webkit-keyframes rotateInUpLeft { - from { - -webkit-transform: rotate3d(0, 0, 1, 45deg); - transform: rotate3d(0, 0, 1, 45deg); - opacity: 0; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - opacity: 1; - } -} -@keyframes rotateInUpLeft { - from { - -webkit-transform: rotate3d(0, 0, 1, 45deg); - transform: rotate3d(0, 0, 1, 45deg); - opacity: 0; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - opacity: 1; - } -} -.animate__rotateInUpLeft { - -webkit-animation-name: rotateInUpLeft; - animation-name: rotateInUpLeft; - -webkit-transform-origin: left bottom; - transform-origin: left bottom; -} -@-webkit-keyframes rotateInUpRight { - from { - -webkit-transform: rotate3d(0, 0, 1, -90deg); - transform: rotate3d(0, 0, 1, -90deg); - opacity: 0; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - opacity: 1; - } -} -@keyframes rotateInUpRight { - from { - -webkit-transform: rotate3d(0, 0, 1, -90deg); - transform: rotate3d(0, 0, 1, -90deg); - opacity: 0; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - opacity: 1; - } -} -.animate__rotateInUpRight { - -webkit-animation-name: rotateInUpRight; - animation-name: rotateInUpRight; - -webkit-transform-origin: right bottom; - transform-origin: right bottom; -} -/* Rotating exits */ -@-webkit-keyframes rotateOut { - from { - opacity: 1; - } - - to { - -webkit-transform: rotate3d(0, 0, 1, 200deg); - transform: rotate3d(0, 0, 1, 200deg); - opacity: 0; - } -} -@keyframes rotateOut { - from { - opacity: 1; - } - - to { - -webkit-transform: rotate3d(0, 0, 1, 200deg); - transform: rotate3d(0, 0, 1, 200deg); - opacity: 0; - } -} -.animate__rotateOut { - -webkit-animation-name: rotateOut; - animation-name: rotateOut; - -webkit-transform-origin: center; - transform-origin: center; -} -@-webkit-keyframes rotateOutDownLeft { - from { - opacity: 1; - } - - to { - -webkit-transform: rotate3d(0, 0, 1, 45deg); - transform: rotate3d(0, 0, 1, 45deg); - opacity: 0; - } -} -@keyframes rotateOutDownLeft { - from { - opacity: 1; - } - - to { - -webkit-transform: rotate3d(0, 0, 1, 45deg); - transform: rotate3d(0, 0, 1, 45deg); - opacity: 0; - } -} -.animate__rotateOutDownLeft { - -webkit-animation-name: rotateOutDownLeft; - animation-name: rotateOutDownLeft; - -webkit-transform-origin: left bottom; - transform-origin: left bottom; -} -@-webkit-keyframes rotateOutDownRight { - from { - opacity: 1; - } - - to { - -webkit-transform: rotate3d(0, 0, 1, -45deg); - transform: rotate3d(0, 0, 1, -45deg); - opacity: 0; - } -} -@keyframes rotateOutDownRight { - from { - opacity: 1; - } - - to { - -webkit-transform: rotate3d(0, 0, 1, -45deg); - transform: rotate3d(0, 0, 1, -45deg); - opacity: 0; - } -} -.animate__rotateOutDownRight { - -webkit-animation-name: rotateOutDownRight; - animation-name: rotateOutDownRight; - -webkit-transform-origin: right bottom; - transform-origin: right bottom; -} -@-webkit-keyframes rotateOutUpLeft { - from { - opacity: 1; - } - - to { - -webkit-transform: rotate3d(0, 0, 1, -45deg); - transform: rotate3d(0, 0, 1, -45deg); - opacity: 0; - } -} -@keyframes rotateOutUpLeft { - from { - opacity: 1; - } - - to { - -webkit-transform: rotate3d(0, 0, 1, -45deg); - transform: rotate3d(0, 0, 1, -45deg); - opacity: 0; - } -} -.animate__rotateOutUpLeft { - -webkit-animation-name: rotateOutUpLeft; - animation-name: rotateOutUpLeft; - -webkit-transform-origin: left 
bottom; - transform-origin: left bottom; -} -@-webkit-keyframes rotateOutUpRight { - from { - opacity: 1; - } - - to { - -webkit-transform: rotate3d(0, 0, 1, 90deg); - transform: rotate3d(0, 0, 1, 90deg); - opacity: 0; - } -} -@keyframes rotateOutUpRight { - from { - opacity: 1; - } - - to { - -webkit-transform: rotate3d(0, 0, 1, 90deg); - transform: rotate3d(0, 0, 1, 90deg); - opacity: 0; - } -} -.animate__rotateOutUpRight { - -webkit-animation-name: rotateOutUpRight; - animation-name: rotateOutUpRight; - -webkit-transform-origin: right bottom; - transform-origin: right bottom; -} -/* Specials */ -@-webkit-keyframes hinge { - 0% { - -webkit-animation-timing-function: ease-in-out; - animation-timing-function: ease-in-out; - } - - 20%, - 60% { - -webkit-transform: rotate3d(0, 0, 1, 80deg); - transform: rotate3d(0, 0, 1, 80deg); - -webkit-animation-timing-function: ease-in-out; - animation-timing-function: ease-in-out; - } - - 40%, - 80% { - -webkit-transform: rotate3d(0, 0, 1, 60deg); - transform: rotate3d(0, 0, 1, 60deg); - -webkit-animation-timing-function: ease-in-out; - animation-timing-function: ease-in-out; - opacity: 1; - } - - to { - -webkit-transform: translate3d(0, 700px, 0); - transform: translate3d(0, 700px, 0); - opacity: 0; - } -} -@keyframes hinge { - 0% { - -webkit-animation-timing-function: ease-in-out; - animation-timing-function: ease-in-out; - } - - 20%, - 60% { - -webkit-transform: rotate3d(0, 0, 1, 80deg); - transform: rotate3d(0, 0, 1, 80deg); - -webkit-animation-timing-function: ease-in-out; - animation-timing-function: ease-in-out; - } - - 40%, - 80% { - -webkit-transform: rotate3d(0, 0, 1, 60deg); - transform: rotate3d(0, 0, 1, 60deg); - -webkit-animation-timing-function: ease-in-out; - animation-timing-function: ease-in-out; - opacity: 1; - } - - to { - -webkit-transform: translate3d(0, 700px, 0); - transform: translate3d(0, 700px, 0); - opacity: 0; - } -} -.animate__hinge { - -webkit-animation-duration: calc(1s * 2); - animation-duration: calc(1s * 2); - -webkit-animation-duration: calc(var(--animate-duration) * 2); - animation-duration: calc(var(--animate-duration) * 2); - -webkit-animation-name: hinge; - animation-name: hinge; - -webkit-transform-origin: top left; - transform-origin: top left; -} -@-webkit-keyframes jackInTheBox { - from { - opacity: 0; - -webkit-transform: scale(0.1) rotate(30deg); - transform: scale(0.1) rotate(30deg); - -webkit-transform-origin: center bottom; - transform-origin: center bottom; - } - - 50% { - -webkit-transform: rotate(-10deg); - transform: rotate(-10deg); - } - - 70% { - -webkit-transform: rotate(3deg); - transform: rotate(3deg); - } - - to { - opacity: 1; - -webkit-transform: scale(1); - transform: scale(1); - } -} -@keyframes jackInTheBox { - from { - opacity: 0; - -webkit-transform: scale(0.1) rotate(30deg); - transform: scale(0.1) rotate(30deg); - -webkit-transform-origin: center bottom; - transform-origin: center bottom; - } - - 50% { - -webkit-transform: rotate(-10deg); - transform: rotate(-10deg); - } - - 70% { - -webkit-transform: rotate(3deg); - transform: rotate(3deg); - } - - to { - opacity: 1; - -webkit-transform: scale(1); - transform: scale(1); - } -} -.animate__jackInTheBox { - -webkit-animation-name: jackInTheBox; - animation-name: jackInTheBox; -} -/* originally authored by Nick Pettit - https://github.com/nickpettit/glide */ -@-webkit-keyframes rollIn { - from { - opacity: 0; - -webkit-transform: translate3d(-100%, 0, 0) rotate3d(0, 0, 1, -120deg); - transform: translate3d(-100%, 0, 0) rotate3d(0, 0, 1, 
-120deg); - } - - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes rollIn { - from { - opacity: 0; - -webkit-transform: translate3d(-100%, 0, 0) rotate3d(0, 0, 1, -120deg); - transform: translate3d(-100%, 0, 0) rotate3d(0, 0, 1, -120deg); - } - - to { - opacity: 1; - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__rollIn { - -webkit-animation-name: rollIn; - animation-name: rollIn; -} -/* originally authored by Nick Pettit - https://github.com/nickpettit/glide */ -@-webkit-keyframes rollOut { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(100%, 0, 0) rotate3d(0, 0, 1, 120deg); - transform: translate3d(100%, 0, 0) rotate3d(0, 0, 1, 120deg); - } -} -@keyframes rollOut { - from { - opacity: 1; - } - - to { - opacity: 0; - -webkit-transform: translate3d(100%, 0, 0) rotate3d(0, 0, 1, 120deg); - transform: translate3d(100%, 0, 0) rotate3d(0, 0, 1, 120deg); - } -} -.animate__rollOut { - -webkit-animation-name: rollOut; - animation-name: rollOut; -} -/* Zooming entrances */ -@-webkit-keyframes zoomIn { - from { - opacity: 0; - -webkit-transform: scale3d(0.3, 0.3, 0.3); - transform: scale3d(0.3, 0.3, 0.3); - } - - 50% { - opacity: 1; - } -} -@keyframes zoomIn { - from { - opacity: 0; - -webkit-transform: scale3d(0.3, 0.3, 0.3); - transform: scale3d(0.3, 0.3, 0.3); - } - - 50% { - opacity: 1; - } -} -.animate__zoomIn { - -webkit-animation-name: zoomIn; - animation-name: zoomIn; -} -@-webkit-keyframes zoomInDown { - from { - opacity: 0; - -webkit-transform: scale3d(0.1, 0.1, 0.1) translate3d(0, -1000px, 0); - transform: scale3d(0.1, 0.1, 0.1) translate3d(0, -1000px, 0); - -webkit-animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - } - - 60% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(0, 60px, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(0, 60px, 0); - -webkit-animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - } -} -@keyframes zoomInDown { - from { - opacity: 0; - -webkit-transform: scale3d(0.1, 0.1, 0.1) translate3d(0, -1000px, 0); - transform: scale3d(0.1, 0.1, 0.1) translate3d(0, -1000px, 0); - -webkit-animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - } - - 60% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(0, 60px, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(0, 60px, 0); - -webkit-animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - } -} -.animate__zoomInDown { - -webkit-animation-name: zoomInDown; - animation-name: zoomInDown; -} -@-webkit-keyframes zoomInLeft { - from { - opacity: 0; - -webkit-transform: scale3d(0.1, 0.1, 0.1) translate3d(-1000px, 0, 0); - transform: scale3d(0.1, 0.1, 0.1) translate3d(-1000px, 0, 0); - -webkit-animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - } - - 60% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(10px, 0, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(10px, 0, 0); - -webkit-animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - animation-timing-function: 
cubic-bezier(0.175, 0.885, 0.32, 1); - } -} -@keyframes zoomInLeft { - from { - opacity: 0; - -webkit-transform: scale3d(0.1, 0.1, 0.1) translate3d(-1000px, 0, 0); - transform: scale3d(0.1, 0.1, 0.1) translate3d(-1000px, 0, 0); - -webkit-animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - } - - 60% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(10px, 0, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(10px, 0, 0); - -webkit-animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - } -} -.animate__zoomInLeft { - -webkit-animation-name: zoomInLeft; - animation-name: zoomInLeft; -} -@-webkit-keyframes zoomInRight { - from { - opacity: 0; - -webkit-transform: scale3d(0.1, 0.1, 0.1) translate3d(1000px, 0, 0); - transform: scale3d(0.1, 0.1, 0.1) translate3d(1000px, 0, 0); - -webkit-animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - } - - 60% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(-10px, 0, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(-10px, 0, 0); - -webkit-animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - } -} -@keyframes zoomInRight { - from { - opacity: 0; - -webkit-transform: scale3d(0.1, 0.1, 0.1) translate3d(1000px, 0, 0); - transform: scale3d(0.1, 0.1, 0.1) translate3d(1000px, 0, 0); - -webkit-animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - } - - 60% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(-10px, 0, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(-10px, 0, 0); - -webkit-animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - } -} -.animate__zoomInRight { - -webkit-animation-name: zoomInRight; - animation-name: zoomInRight; -} -@-webkit-keyframes zoomInUp { - from { - opacity: 0; - -webkit-transform: scale3d(0.1, 0.1, 0.1) translate3d(0, 1000px, 0); - transform: scale3d(0.1, 0.1, 0.1) translate3d(0, 1000px, 0); - -webkit-animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - } - - 60% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(0, -60px, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(0, -60px, 0); - -webkit-animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - } -} -@keyframes zoomInUp { - from { - opacity: 0; - -webkit-transform: scale3d(0.1, 0.1, 0.1) translate3d(0, 1000px, 0); - transform: scale3d(0.1, 0.1, 0.1) translate3d(0, 1000px, 0); - -webkit-animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - } - - 60% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(0, -60px, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(0, -60px, 0); - -webkit-animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - } -} -.animate__zoomInUp { - -webkit-animation-name: zoomInUp; - 
animation-name: zoomInUp; -} -/* Zooming exits */ -@-webkit-keyframes zoomOut { - from { - opacity: 1; - } - - 50% { - opacity: 0; - -webkit-transform: scale3d(0.3, 0.3, 0.3); - transform: scale3d(0.3, 0.3, 0.3); - } - - to { - opacity: 0; - } -} -@keyframes zoomOut { - from { - opacity: 1; - } - - 50% { - opacity: 0; - -webkit-transform: scale3d(0.3, 0.3, 0.3); - transform: scale3d(0.3, 0.3, 0.3); - } - - to { - opacity: 0; - } -} -.animate__zoomOut { - -webkit-animation-name: zoomOut; - animation-name: zoomOut; -} -@-webkit-keyframes zoomOutDown { - 40% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(0, -60px, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(0, -60px, 0); - -webkit-animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - } - - to { - opacity: 0; - -webkit-transform: scale3d(0.1, 0.1, 0.1) translate3d(0, 2000px, 0); - transform: scale3d(0.1, 0.1, 0.1) translate3d(0, 2000px, 0); - -webkit-animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - } -} -@keyframes zoomOutDown { - 40% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(0, -60px, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(0, -60px, 0); - -webkit-animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - } - - to { - opacity: 0; - -webkit-transform: scale3d(0.1, 0.1, 0.1) translate3d(0, 2000px, 0); - transform: scale3d(0.1, 0.1, 0.1) translate3d(0, 2000px, 0); - -webkit-animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - } -} -.animate__zoomOutDown { - -webkit-animation-name: zoomOutDown; - animation-name: zoomOutDown; - -webkit-transform-origin: center bottom; - transform-origin: center bottom; -} -@-webkit-keyframes zoomOutLeft { - 40% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(42px, 0, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(42px, 0, 0); - } - - to { - opacity: 0; - -webkit-transform: scale(0.1) translate3d(-2000px, 0, 0); - transform: scale(0.1) translate3d(-2000px, 0, 0); - } -} -@keyframes zoomOutLeft { - 40% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(42px, 0, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(42px, 0, 0); - } - - to { - opacity: 0; - -webkit-transform: scale(0.1) translate3d(-2000px, 0, 0); - transform: scale(0.1) translate3d(-2000px, 0, 0); - } -} -.animate__zoomOutLeft { - -webkit-animation-name: zoomOutLeft; - animation-name: zoomOutLeft; - -webkit-transform-origin: left center; - transform-origin: left center; -} -@-webkit-keyframes zoomOutRight { - 40% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(-42px, 0, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(-42px, 0, 0); - } - - to { - opacity: 0; - -webkit-transform: scale(0.1) translate3d(2000px, 0, 0); - transform: scale(0.1) translate3d(2000px, 0, 0); - } -} -@keyframes zoomOutRight { - 40% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(-42px, 0, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(-42px, 0, 0); - } - - to { - opacity: 0; - -webkit-transform: scale(0.1) translate3d(2000px, 0, 0); - transform: scale(0.1) translate3d(2000px, 0, 0); - } -} 
-.animate__zoomOutRight { - -webkit-animation-name: zoomOutRight; - animation-name: zoomOutRight; - -webkit-transform-origin: right center; - transform-origin: right center; -} -@-webkit-keyframes zoomOutUp { - 40% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(0, 60px, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(0, 60px, 0); - -webkit-animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - } - - to { - opacity: 0; - -webkit-transform: scale3d(0.1, 0.1, 0.1) translate3d(0, -2000px, 0); - transform: scale3d(0.1, 0.1, 0.1) translate3d(0, -2000px, 0); - -webkit-animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - } -} -@keyframes zoomOutUp { - 40% { - opacity: 1; - -webkit-transform: scale3d(0.475, 0.475, 0.475) translate3d(0, 60px, 0); - transform: scale3d(0.475, 0.475, 0.475) translate3d(0, 60px, 0); - -webkit-animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - animation-timing-function: cubic-bezier(0.55, 0.055, 0.675, 0.19); - } - - to { - opacity: 0; - -webkit-transform: scale3d(0.1, 0.1, 0.1) translate3d(0, -2000px, 0); - transform: scale3d(0.1, 0.1, 0.1) translate3d(0, -2000px, 0); - -webkit-animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - animation-timing-function: cubic-bezier(0.175, 0.885, 0.32, 1); - } -} -.animate__zoomOutUp { - -webkit-animation-name: zoomOutUp; - animation-name: zoomOutUp; - -webkit-transform-origin: center bottom; - transform-origin: center bottom; -} -/* Sliding entrances */ -@-webkit-keyframes slideInDown { - from { - -webkit-transform: translate3d(0, -100%, 0); - transform: translate3d(0, -100%, 0); - visibility: visible; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes slideInDown { - from { - -webkit-transform: translate3d(0, -100%, 0); - transform: translate3d(0, -100%, 0); - visibility: visible; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__slideInDown { - -webkit-animation-name: slideInDown; - animation-name: slideInDown; -} -@-webkit-keyframes slideInLeft { - from { - -webkit-transform: translate3d(-100%, 0, 0); - transform: translate3d(-100%, 0, 0); - visibility: visible; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes slideInLeft { - from { - -webkit-transform: translate3d(-100%, 0, 0); - transform: translate3d(-100%, 0, 0); - visibility: visible; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__slideInLeft { - -webkit-animation-name: slideInLeft; - animation-name: slideInLeft; -} -@-webkit-keyframes slideInRight { - from { - -webkit-transform: translate3d(100%, 0, 0); - transform: translate3d(100%, 0, 0); - visibility: visible; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes slideInRight { - from { - -webkit-transform: translate3d(100%, 0, 0); - transform: translate3d(100%, 0, 0); - visibility: visible; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__slideInRight { - -webkit-animation-name: slideInRight; - animation-name: slideInRight; -} -@-webkit-keyframes slideInUp { - from { - -webkit-transform: translate3d(0, 100%, 0); - transform: translate3d(0, 
100%, 0); - visibility: visible; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -@keyframes slideInUp { - from { - -webkit-transform: translate3d(0, 100%, 0); - transform: translate3d(0, 100%, 0); - visibility: visible; - } - - to { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } -} -.animate__slideInUp { - -webkit-animation-name: slideInUp; - animation-name: slideInUp; -} -/* Sliding exits */ -@-webkit-keyframes slideOutDown { - from { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - to { - visibility: hidden; - -webkit-transform: translate3d(0, 100%, 0); - transform: translate3d(0, 100%, 0); - } -} -@keyframes slideOutDown { - from { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - to { - visibility: hidden; - -webkit-transform: translate3d(0, 100%, 0); - transform: translate3d(0, 100%, 0); - } -} -.animate__slideOutDown { - -webkit-animation-name: slideOutDown; - animation-name: slideOutDown; -} -@-webkit-keyframes slideOutLeft { - from { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - to { - visibility: hidden; - -webkit-transform: translate3d(-100%, 0, 0); - transform: translate3d(-100%, 0, 0); - } -} -@keyframes slideOutLeft { - from { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - to { - visibility: hidden; - -webkit-transform: translate3d(-100%, 0, 0); - transform: translate3d(-100%, 0, 0); - } -} -.animate__slideOutLeft { - -webkit-animation-name: slideOutLeft; - animation-name: slideOutLeft; -} -@-webkit-keyframes slideOutRight { - from { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - to { - visibility: hidden; - -webkit-transform: translate3d(100%, 0, 0); - transform: translate3d(100%, 0, 0); - } -} -@keyframes slideOutRight { - from { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - to { - visibility: hidden; - -webkit-transform: translate3d(100%, 0, 0); - transform: translate3d(100%, 0, 0); - } -} -.animate__slideOutRight { - -webkit-animation-name: slideOutRight; - animation-name: slideOutRight; -} -@-webkit-keyframes slideOutUp { - from { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - to { - visibility: hidden; - -webkit-transform: translate3d(0, -100%, 0); - transform: translate3d(0, -100%, 0); - } -} -@keyframes slideOutUp { - from { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - } - - to { - visibility: hidden; - -webkit-transform: translate3d(0, -100%, 0); - transform: translate3d(0, -100%, 0); - } -} -.animate__slideOutUp { - -webkit-animation-name: slideOutUp; - animation-name: slideOutUp; -} diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/bootstrap/css/bootstrap-grid.min.css b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/bootstrap/css/bootstrap-grid.min.css deleted file mode 100644 index 762fc008..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/bootstrap/css/bootstrap-grid.min.css +++ /dev/null @@ -1,6 +0,0 @@ -/*! - * Bootstrap Grid v5.0.1 (https://getbootstrap.com/) - * Copyright 2011-2021 The Bootstrap Authors - * Copyright 2011-2021 Twitter, Inc. 
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) - */.container,.container-fluid,.container-lg,.container-md,.container-sm,.container-xl,.container-xxl{width:100%;padding-right:var(--bs-gutter-x,.75rem);padding-left:var(--bs-gutter-x,.75rem);margin-right:auto;margin-left:auto}@media (min-width:576px){.container,.container-sm{max-width:540px}}@media (min-width:768px){.container,.container-md,.container-sm{max-width:720px}}@media (min-width:992px){.container,.container-lg,.container-md,.container-sm{max-width:960px}}@media (min-width:1200px){.container,.container-lg,.container-md,.container-sm,.container-xl{max-width:1140px}}@media (min-width:1400px){.container,.container-lg,.container-md,.container-sm,.container-xl,.container-xxl{max-width:1320px}}.row{--bs-gutter-x:1.5rem;--bs-gutter-y:0;display:flex;flex-wrap:wrap;margin-top:calc(var(--bs-gutter-y) * -1);margin-right:calc(var(--bs-gutter-x)/ -2);margin-left:calc(var(--bs-gutter-x)/ -2)}.row>*{box-sizing:border-box;flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--bs-gutter-x)/ 2);padding-left:calc(var(--bs-gutter-x)/ 2);margin-top:var(--bs-gutter-y)}.col{flex:1 0 0%}.row-cols-auto>*{flex:0 0 auto;width:auto}.row-cols-1>*{flex:0 0 auto;width:100%}.row-cols-2>*{flex:0 0 auto;width:50%}.row-cols-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-4>*{flex:0 0 auto;width:25%}.row-cols-5>*{flex:0 0 auto;width:20%}.row-cols-6>*{flex:0 0 auto;width:16.6666666667%}.col-auto{flex:0 0 auto;width:auto}.col-1{flex:0 0 auto;width:8.3333333333%}.col-2{flex:0 0 auto;width:16.6666666667%}.col-3{flex:0 0 auto;width:25%}.col-4{flex:0 0 auto;width:33.3333333333%}.col-5{flex:0 0 auto;width:41.6666666667%}.col-6{flex:0 0 auto;width:50%}.col-7{flex:0 0 auto;width:58.3333333333%}.col-8{flex:0 0 auto;width:66.6666666667%}.col-9{flex:0 0 auto;width:75%}.col-10{flex:0 0 auto;width:83.3333333333%}.col-11{flex:0 0 auto;width:91.6666666667%}.col-12{flex:0 0 auto;width:100%}.offset-1{margin-left:8.3333333333%}.offset-2{margin-left:16.6666666667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.3333333333%}.offset-5{margin-left:41.6666666667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.3333333333%}.offset-8{margin-left:66.6666666667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.3333333333%}.offset-11{margin-left:91.6666666667%}.g-0,.gx-0{--bs-gutter-x:0}.g-0,.gy-0{--bs-gutter-y:0}.g-1,.gx-1{--bs-gutter-x:0.25rem}.g-1,.gy-1{--bs-gutter-y:0.25rem}.g-2,.gx-2{--bs-gutter-x:0.5rem}.g-2,.gy-2{--bs-gutter-y:0.5rem}.g-3,.gx-3{--bs-gutter-x:1rem}.g-3,.gy-3{--bs-gutter-y:1rem}.g-4,.gx-4{--bs-gutter-x:1.5rem}.g-4,.gy-4{--bs-gutter-y:1.5rem}.g-5,.gx-5{--bs-gutter-x:3rem}.g-5,.gy-5{--bs-gutter-y:3rem}@media (min-width:576px){.col-sm{flex:1 0 0%}.row-cols-sm-auto>*{flex:0 0 auto;width:auto}.row-cols-sm-1>*{flex:0 0 auto;width:100%}.row-cols-sm-2>*{flex:0 0 auto;width:50%}.row-cols-sm-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-sm-4>*{flex:0 0 auto;width:25%}.row-cols-sm-5>*{flex:0 0 auto;width:20%}.row-cols-sm-6>*{flex:0 0 auto;width:16.6666666667%}.col-sm-auto{flex:0 0 auto;width:auto}.col-sm-1{flex:0 0 auto;width:8.3333333333%}.col-sm-2{flex:0 0 auto;width:16.6666666667%}.col-sm-3{flex:0 0 auto;width:25%}.col-sm-4{flex:0 0 auto;width:33.3333333333%}.col-sm-5{flex:0 0 auto;width:41.6666666667%}.col-sm-6{flex:0 0 auto;width:50%}.col-sm-7{flex:0 0 auto;width:58.3333333333%}.col-sm-8{flex:0 0 auto;width:66.6666666667%}.col-sm-9{flex:0 0 auto;width:75%}.col-sm-10{flex:0 0 auto;width:83.3333333333%}.col-sm-11{flex:0 0 
auto;width:91.6666666667%}.col-sm-12{flex:0 0 auto;width:100%}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.3333333333%}.offset-sm-2{margin-left:16.6666666667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.3333333333%}.offset-sm-5{margin-left:41.6666666667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.3333333333%}.offset-sm-8{margin-left:66.6666666667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.3333333333%}.offset-sm-11{margin-left:91.6666666667%}.g-sm-0,.gx-sm-0{--bs-gutter-x:0}.g-sm-0,.gy-sm-0{--bs-gutter-y:0}.g-sm-1,.gx-sm-1{--bs-gutter-x:0.25rem}.g-sm-1,.gy-sm-1{--bs-gutter-y:0.25rem}.g-sm-2,.gx-sm-2{--bs-gutter-x:0.5rem}.g-sm-2,.gy-sm-2{--bs-gutter-y:0.5rem}.g-sm-3,.gx-sm-3{--bs-gutter-x:1rem}.g-sm-3,.gy-sm-3{--bs-gutter-y:1rem}.g-sm-4,.gx-sm-4{--bs-gutter-x:1.5rem}.g-sm-4,.gy-sm-4{--bs-gutter-y:1.5rem}.g-sm-5,.gx-sm-5{--bs-gutter-x:3rem}.g-sm-5,.gy-sm-5{--bs-gutter-y:3rem}}@media (min-width:768px){.col-md{flex:1 0 0%}.row-cols-md-auto>*{flex:0 0 auto;width:auto}.row-cols-md-1>*{flex:0 0 auto;width:100%}.row-cols-md-2>*{flex:0 0 auto;width:50%}.row-cols-md-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-md-4>*{flex:0 0 auto;width:25%}.row-cols-md-5>*{flex:0 0 auto;width:20%}.row-cols-md-6>*{flex:0 0 auto;width:16.6666666667%}.col-md-auto{flex:0 0 auto;width:auto}.col-md-1{flex:0 0 auto;width:8.3333333333%}.col-md-2{flex:0 0 auto;width:16.6666666667%}.col-md-3{flex:0 0 auto;width:25%}.col-md-4{flex:0 0 auto;width:33.3333333333%}.col-md-5{flex:0 0 auto;width:41.6666666667%}.col-md-6{flex:0 0 auto;width:50%}.col-md-7{flex:0 0 auto;width:58.3333333333%}.col-md-8{flex:0 0 auto;width:66.6666666667%}.col-md-9{flex:0 0 auto;width:75%}.col-md-10{flex:0 0 auto;width:83.3333333333%}.col-md-11{flex:0 0 auto;width:91.6666666667%}.col-md-12{flex:0 0 auto;width:100%}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.3333333333%}.offset-md-2{margin-left:16.6666666667%}.offset-md-3{margin-left:25%}.offset-md-4{margin-left:33.3333333333%}.offset-md-5{margin-left:41.6666666667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.3333333333%}.offset-md-8{margin-left:66.6666666667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.3333333333%}.offset-md-11{margin-left:91.6666666667%}.g-md-0,.gx-md-0{--bs-gutter-x:0}.g-md-0,.gy-md-0{--bs-gutter-y:0}.g-md-1,.gx-md-1{--bs-gutter-x:0.25rem}.g-md-1,.gy-md-1{--bs-gutter-y:0.25rem}.g-md-2,.gx-md-2{--bs-gutter-x:0.5rem}.g-md-2,.gy-md-2{--bs-gutter-y:0.5rem}.g-md-3,.gx-md-3{--bs-gutter-x:1rem}.g-md-3,.gy-md-3{--bs-gutter-y:1rem}.g-md-4,.gx-md-4{--bs-gutter-x:1.5rem}.g-md-4,.gy-md-4{--bs-gutter-y:1.5rem}.g-md-5,.gx-md-5{--bs-gutter-x:3rem}.g-md-5,.gy-md-5{--bs-gutter-y:3rem}}@media (min-width:992px){.col-lg{flex:1 0 0%}.row-cols-lg-auto>*{flex:0 0 auto;width:auto}.row-cols-lg-1>*{flex:0 0 auto;width:100%}.row-cols-lg-2>*{flex:0 0 auto;width:50%}.row-cols-lg-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-lg-4>*{flex:0 0 auto;width:25%}.row-cols-lg-5>*{flex:0 0 auto;width:20%}.row-cols-lg-6>*{flex:0 0 auto;width:16.6666666667%}.col-lg-auto{flex:0 0 auto;width:auto}.col-lg-1{flex:0 0 auto;width:8.3333333333%}.col-lg-2{flex:0 0 auto;width:16.6666666667%}.col-lg-3{flex:0 0 auto;width:25%}.col-lg-4{flex:0 0 auto;width:33.3333333333%}.col-lg-5{flex:0 0 auto;width:41.6666666667%}.col-lg-6{flex:0 0 auto;width:50%}.col-lg-7{flex:0 0 auto;width:58.3333333333%}.col-lg-8{flex:0 0 auto;width:66.6666666667%}.col-lg-9{flex:0 0 auto;width:75%}.col-lg-10{flex:0 0 auto;width:83.3333333333%}.col-lg-11{flex:0 0 
auto;width:91.6666666667%}.col-lg-12{flex:0 0 auto;width:100%}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.3333333333%}.offset-lg-2{margin-left:16.6666666667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.3333333333%}.offset-lg-5{margin-left:41.6666666667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.3333333333%}.offset-lg-8{margin-left:66.6666666667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.3333333333%}.offset-lg-11{margin-left:91.6666666667%}.g-lg-0,.gx-lg-0{--bs-gutter-x:0}.g-lg-0,.gy-lg-0{--bs-gutter-y:0}.g-lg-1,.gx-lg-1{--bs-gutter-x:0.25rem}.g-lg-1,.gy-lg-1{--bs-gutter-y:0.25rem}.g-lg-2,.gx-lg-2{--bs-gutter-x:0.5rem}.g-lg-2,.gy-lg-2{--bs-gutter-y:0.5rem}.g-lg-3,.gx-lg-3{--bs-gutter-x:1rem}.g-lg-3,.gy-lg-3{--bs-gutter-y:1rem}.g-lg-4,.gx-lg-4{--bs-gutter-x:1.5rem}.g-lg-4,.gy-lg-4{--bs-gutter-y:1.5rem}.g-lg-5,.gx-lg-5{--bs-gutter-x:3rem}.g-lg-5,.gy-lg-5{--bs-gutter-y:3rem}}@media (min-width:1200px){.col-xl{flex:1 0 0%}.row-cols-xl-auto>*{flex:0 0 auto;width:auto}.row-cols-xl-1>*{flex:0 0 auto;width:100%}.row-cols-xl-2>*{flex:0 0 auto;width:50%}.row-cols-xl-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-xl-4>*{flex:0 0 auto;width:25%}.row-cols-xl-5>*{flex:0 0 auto;width:20%}.row-cols-xl-6>*{flex:0 0 auto;width:16.6666666667%}.col-xl-auto{flex:0 0 auto;width:auto}.col-xl-1{flex:0 0 auto;width:8.3333333333%}.col-xl-2{flex:0 0 auto;width:16.6666666667%}.col-xl-3{flex:0 0 auto;width:25%}.col-xl-4{flex:0 0 auto;width:33.3333333333%}.col-xl-5{flex:0 0 auto;width:41.6666666667%}.col-xl-6{flex:0 0 auto;width:50%}.col-xl-7{flex:0 0 auto;width:58.3333333333%}.col-xl-8{flex:0 0 auto;width:66.6666666667%}.col-xl-9{flex:0 0 auto;width:75%}.col-xl-10{flex:0 0 auto;width:83.3333333333%}.col-xl-11{flex:0 0 auto;width:91.6666666667%}.col-xl-12{flex:0 0 auto;width:100%}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.3333333333%}.offset-xl-2{margin-left:16.6666666667%}.offset-xl-3{margin-left:25%}.offset-xl-4{margin-left:33.3333333333%}.offset-xl-5{margin-left:41.6666666667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.3333333333%}.offset-xl-8{margin-left:66.6666666667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.3333333333%}.offset-xl-11{margin-left:91.6666666667%}.g-xl-0,.gx-xl-0{--bs-gutter-x:0}.g-xl-0,.gy-xl-0{--bs-gutter-y:0}.g-xl-1,.gx-xl-1{--bs-gutter-x:0.25rem}.g-xl-1,.gy-xl-1{--bs-gutter-y:0.25rem}.g-xl-2,.gx-xl-2{--bs-gutter-x:0.5rem}.g-xl-2,.gy-xl-2{--bs-gutter-y:0.5rem}.g-xl-3,.gx-xl-3{--bs-gutter-x:1rem}.g-xl-3,.gy-xl-3{--bs-gutter-y:1rem}.g-xl-4,.gx-xl-4{--bs-gutter-x:1.5rem}.g-xl-4,.gy-xl-4{--bs-gutter-y:1.5rem}.g-xl-5,.gx-xl-5{--bs-gutter-x:3rem}.g-xl-5,.gy-xl-5{--bs-gutter-y:3rem}}@media (min-width:1400px){.col-xxl{flex:1 0 0%}.row-cols-xxl-auto>*{flex:0 0 auto;width:auto}.row-cols-xxl-1>*{flex:0 0 auto;width:100%}.row-cols-xxl-2>*{flex:0 0 auto;width:50%}.row-cols-xxl-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-xxl-4>*{flex:0 0 auto;width:25%}.row-cols-xxl-5>*{flex:0 0 auto;width:20%}.row-cols-xxl-6>*{flex:0 0 auto;width:16.6666666667%}.col-xxl-auto{flex:0 0 auto;width:auto}.col-xxl-1{flex:0 0 auto;width:8.3333333333%}.col-xxl-2{flex:0 0 auto;width:16.6666666667%}.col-xxl-3{flex:0 0 auto;width:25%}.col-xxl-4{flex:0 0 auto;width:33.3333333333%}.col-xxl-5{flex:0 0 auto;width:41.6666666667%}.col-xxl-6{flex:0 0 auto;width:50%}.col-xxl-7{flex:0 0 auto;width:58.3333333333%}.col-xxl-8{flex:0 0 auto;width:66.6666666667%}.col-xxl-9{flex:0 0 auto;width:75%}.col-xxl-10{flex:0 0 
auto;width:83.3333333333%}.col-xxl-11{flex:0 0 auto;width:91.6666666667%}.col-xxl-12{flex:0 0 auto;width:100%}.offset-xxl-0{margin-left:0}.offset-xxl-1{margin-left:8.3333333333%}.offset-xxl-2{margin-left:16.6666666667%}.offset-xxl-3{margin-left:25%}.offset-xxl-4{margin-left:33.3333333333%}.offset-xxl-5{margin-left:41.6666666667%}.offset-xxl-6{margin-left:50%}.offset-xxl-7{margin-left:58.3333333333%}.offset-xxl-8{margin-left:66.6666666667%}.offset-xxl-9{margin-left:75%}.offset-xxl-10{margin-left:83.3333333333%}.offset-xxl-11{margin-left:91.6666666667%}.g-xxl-0,.gx-xxl-0{--bs-gutter-x:0}.g-xxl-0,.gy-xxl-0{--bs-gutter-y:0}.g-xxl-1,.gx-xxl-1{--bs-gutter-x:0.25rem}.g-xxl-1,.gy-xxl-1{--bs-gutter-y:0.25rem}.g-xxl-2,.gx-xxl-2{--bs-gutter-x:0.5rem}.g-xxl-2,.gy-xxl-2{--bs-gutter-y:0.5rem}.g-xxl-3,.gx-xxl-3{--bs-gutter-x:1rem}.g-xxl-3,.gy-xxl-3{--bs-gutter-y:1rem}.g-xxl-4,.gx-xxl-4{--bs-gutter-x:1.5rem}.g-xxl-4,.gy-xxl-4{--bs-gutter-y:1.5rem}.g-xxl-5,.gx-xxl-5{--bs-gutter-x:3rem}.g-xxl-5,.gy-xxl-5{--bs-gutter-y:3rem}}.d-inline{display:inline!important}.d-inline-block{display:inline-block!important}.d-block{display:block!important}.d-grid{display:grid!important}.d-table{display:table!important}.d-table-row{display:table-row!important}.d-table-cell{display:table-cell!important}.d-flex{display:flex!important}.d-inline-flex{display:inline-flex!important}.d-none{display:none!important}.flex-fill{flex:1 1 auto!important}.flex-row{flex-direction:row!important}.flex-column{flex-direction:column!important}.flex-row-reverse{flex-direction:row-reverse!important}.flex-column-reverse{flex-direction:column-reverse!important}.flex-grow-0{flex-grow:0!important}.flex-grow-1{flex-grow:1!important}.flex-shrink-0{flex-shrink:0!important}.flex-shrink-1{flex-shrink:1!important}.flex-wrap{flex-wrap:wrap!important}.flex-nowrap{flex-wrap:nowrap!important}.flex-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-start{justify-content:flex-start!important}.justify-content-end{justify-content:flex-end!important}.justify-content-center{justify-content:center!important}.justify-content-between{justify-content:space-between!important}.justify-content-around{justify-content:space-around!important}.justify-content-evenly{justify-content:space-evenly!important}.align-items-start{align-items:flex-start!important}.align-items-end{align-items:flex-end!important}.align-items-center{align-items:center!important}.align-items-baseline{align-items:baseline!important}.align-items-stretch{align-items:stretch!important}.align-content-start{align-content:flex-start!important}.align-content-end{align-content:flex-end!important}.align-content-center{align-content:center!important}.align-content-between{align-content:space-between!important}.align-content-around{align-content:space-around!important}.align-content-stretch{align-content:stretch!important}.align-self-auto{align-self:auto!important}.align-self-start{align-self:flex-start!important}.align-self-end{align-self:flex-end!important}.align-self-center{align-self:center!important}.align-self-baseline{align-self:baseline!important}.align-self-stretch{align-self:stretch!important}.order-first{order:-1!important}.order-0{order:0!important}.order-1{order:1!important}.order-2{order:2!important}.order-3{order:3!important}.order-4{order:4!important}.order-5{order:5!important}.order-last{order:6!important}.m-0{margin:0!important}.m-1{margin:.25rem!important}.m-2{margin:.5rem!important}.m-3{margin:1rem!important}.m-4{margin:1.5rem!important}.m-5{margin:3rem!important}.m-auto{margin:auto!impor
tant}.mx-0{margin-right:0!important;margin-left:0!important}.mx-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-3{margin-right:1rem!important;margin-left:1rem!important}.mx-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-5{margin-right:3rem!important;margin-left:3rem!important}.mx-auto{margin-right:auto!important;margin-left:auto!important}.my-0{margin-top:0!important;margin-bottom:0!important}.my-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-0{margin-top:0!important}.mt-1{margin-top:.25rem!important}.mt-2{margin-top:.5rem!important}.mt-3{margin-top:1rem!important}.mt-4{margin-top:1.5rem!important}.mt-5{margin-top:3rem!important}.mt-auto{margin-top:auto!important}.me-0{margin-right:0!important}.me-1{margin-right:.25rem!important}.me-2{margin-right:.5rem!important}.me-3{margin-right:1rem!important}.me-4{margin-right:1.5rem!important}.me-5{margin-right:3rem!important}.me-auto{margin-right:auto!important}.mb-0{margin-bottom:0!important}.mb-1{margin-bottom:.25rem!important}.mb-2{margin-bottom:.5rem!important}.mb-3{margin-bottom:1rem!important}.mb-4{margin-bottom:1.5rem!important}.mb-5{margin-bottom:3rem!important}.mb-auto{margin-bottom:auto!important}.ms-0{margin-left:0!important}.ms-1{margin-left:.25rem!important}.ms-2{margin-left:.5rem!important}.ms-3{margin-left:1rem!important}.ms-4{margin-left:1.5rem!important}.ms-5{margin-left:3rem!important}.ms-auto{margin-left:auto!important}.p-0{padding:0!important}.p-1{padding:.25rem!important}.p-2{padding:.5rem!important}.p-3{padding:1rem!important}.p-4{padding:1.5rem!important}.p-5{padding:3rem!important}.px-0{padding-right:0!important;padding-left:0!important}.px-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-3{padding-right:1rem!important;padding-left:1rem!important}.px-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-5{padding-right:3rem!important;padding-left:3rem!important}.py-0{padding-top:0!important;padding-bottom:0!important}.py-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-0{padding-top:0!important}.pt-1{padding-top:.25rem!important}.pt-2{padding-top:.5rem!important}.pt-3{padding-top:1rem!important}.pt-4{padding-top:1.5rem!important}.pt-5{padding-top:3rem!important}.pe-0{padding-right:0!important}.pe-1{padding-right:.25rem!important}.pe-2{padding-right:.5rem!important}.pe-3{padding-right:1rem!important}.pe-4{padding-right:1.5rem!important}.pe-5{padding-right:3rem!important}.pb-0{padding-bottom:0!important}.pb-1{padding-bottom:.25rem!important}.pb-2{padding-bottom:.5rem!important}.pb-3{padding-bottom:1rem!important}.pb-4{padding-bottom:1.5rem!important}.pb-5{padding-bottom:3rem!important}.ps-0{padding-left:0!important}.ps-1{padding-left:.25rem!important}.ps-2{padding-left:.5rem!important}.ps-3{padding-left:1rem!important}.ps-4{padding-left:1.5re
m!important}.ps-5{padding-left:3rem!important}@media (min-width:576px){.d-sm-inline{display:inline!important}.d-sm-inline-block{display:inline-block!important}.d-sm-block{display:block!important}.d-sm-grid{display:grid!important}.d-sm-table{display:table!important}.d-sm-table-row{display:table-row!important}.d-sm-table-cell{display:table-cell!important}.d-sm-flex{display:flex!important}.d-sm-inline-flex{display:inline-flex!important}.d-sm-none{display:none!important}.flex-sm-fill{flex:1 1 auto!important}.flex-sm-row{flex-direction:row!important}.flex-sm-column{flex-direction:column!important}.flex-sm-row-reverse{flex-direction:row-reverse!important}.flex-sm-column-reverse{flex-direction:column-reverse!important}.flex-sm-grow-0{flex-grow:0!important}.flex-sm-grow-1{flex-grow:1!important}.flex-sm-shrink-0{flex-shrink:0!important}.flex-sm-shrink-1{flex-shrink:1!important}.flex-sm-wrap{flex-wrap:wrap!important}.flex-sm-nowrap{flex-wrap:nowrap!important}.flex-sm-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-sm-start{justify-content:flex-start!important}.justify-content-sm-end{justify-content:flex-end!important}.justify-content-sm-center{justify-content:center!important}.justify-content-sm-between{justify-content:space-between!important}.justify-content-sm-around{justify-content:space-around!important}.justify-content-sm-evenly{justify-content:space-evenly!important}.align-items-sm-start{align-items:flex-start!important}.align-items-sm-end{align-items:flex-end!important}.align-items-sm-center{align-items:center!important}.align-items-sm-baseline{align-items:baseline!important}.align-items-sm-stretch{align-items:stretch!important}.align-content-sm-start{align-content:flex-start!important}.align-content-sm-end{align-content:flex-end!important}.align-content-sm-center{align-content:center!important}.align-content-sm-between{align-content:space-between!important}.align-content-sm-around{align-content:space-around!important}.align-content-sm-stretch{align-content:stretch!important}.align-self-sm-auto{align-self:auto!important}.align-self-sm-start{align-self:flex-start!important}.align-self-sm-end{align-self:flex-end!important}.align-self-sm-center{align-self:center!important}.align-self-sm-baseline{align-self:baseline!important}.align-self-sm-stretch{align-self:stretch!important}.order-sm-first{order:-1!important}.order-sm-0{order:0!important}.order-sm-1{order:1!important}.order-sm-2{order:2!important}.order-sm-3{order:3!important}.order-sm-4{order:4!important}.order-sm-5{order:5!important}.order-sm-last{order:6!important}.m-sm-0{margin:0!important}.m-sm-1{margin:.25rem!important}.m-sm-2{margin:.5rem!important}.m-sm-3{margin:1rem!important}.m-sm-4{margin:1.5rem!important}.m-sm-5{margin:3rem!important}.m-sm-auto{margin:auto!important}.mx-sm-0{margin-right:0!important;margin-left:0!important}.mx-sm-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-sm-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-sm-3{margin-right:1rem!important;margin-left:1rem!important}.mx-sm-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-sm-5{margin-right:3rem!important;margin-left:3rem!important}.mx-sm-auto{margin-right:auto!important;margin-left:auto!important}.my-sm-0{margin-top:0!important;margin-bottom:0!important}.my-sm-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-sm-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-sm-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-sm-4{margin-top:1.5rem!important;margin-bottom:1.5r
em!important}.my-sm-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-sm-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-sm-0{margin-top:0!important}.mt-sm-1{margin-top:.25rem!important}.mt-sm-2{margin-top:.5rem!important}.mt-sm-3{margin-top:1rem!important}.mt-sm-4{margin-top:1.5rem!important}.mt-sm-5{margin-top:3rem!important}.mt-sm-auto{margin-top:auto!important}.me-sm-0{margin-right:0!important}.me-sm-1{margin-right:.25rem!important}.me-sm-2{margin-right:.5rem!important}.me-sm-3{margin-right:1rem!important}.me-sm-4{margin-right:1.5rem!important}.me-sm-5{margin-right:3rem!important}.me-sm-auto{margin-right:auto!important}.mb-sm-0{margin-bottom:0!important}.mb-sm-1{margin-bottom:.25rem!important}.mb-sm-2{margin-bottom:.5rem!important}.mb-sm-3{margin-bottom:1rem!important}.mb-sm-4{margin-bottom:1.5rem!important}.mb-sm-5{margin-bottom:3rem!important}.mb-sm-auto{margin-bottom:auto!important}.ms-sm-0{margin-left:0!important}.ms-sm-1{margin-left:.25rem!important}.ms-sm-2{margin-left:.5rem!important}.ms-sm-3{margin-left:1rem!important}.ms-sm-4{margin-left:1.5rem!important}.ms-sm-5{margin-left:3rem!important}.ms-sm-auto{margin-left:auto!important}.p-sm-0{padding:0!important}.p-sm-1{padding:.25rem!important}.p-sm-2{padding:.5rem!important}.p-sm-3{padding:1rem!important}.p-sm-4{padding:1.5rem!important}.p-sm-5{padding:3rem!important}.px-sm-0{padding-right:0!important;padding-left:0!important}.px-sm-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-sm-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-sm-3{padding-right:1rem!important;padding-left:1rem!important}.px-sm-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-sm-5{padding-right:3rem!important;padding-left:3rem!important}.py-sm-0{padding-top:0!important;padding-bottom:0!important}.py-sm-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-sm-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-sm-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-sm-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-sm-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-sm-0{padding-top:0!important}.pt-sm-1{padding-top:.25rem!important}.pt-sm-2{padding-top:.5rem!important}.pt-sm-3{padding-top:1rem!important}.pt-sm-4{padding-top:1.5rem!important}.pt-sm-5{padding-top:3rem!important}.pe-sm-0{padding-right:0!important}.pe-sm-1{padding-right:.25rem!important}.pe-sm-2{padding-right:.5rem!important}.pe-sm-3{padding-right:1rem!important}.pe-sm-4{padding-right:1.5rem!important}.pe-sm-5{padding-right:3rem!important}.pb-sm-0{padding-bottom:0!important}.pb-sm-1{padding-bottom:.25rem!important}.pb-sm-2{padding-bottom:.5rem!important}.pb-sm-3{padding-bottom:1rem!important}.pb-sm-4{padding-bottom:1.5rem!important}.pb-sm-5{padding-bottom:3rem!important}.ps-sm-0{padding-left:0!important}.ps-sm-1{padding-left:.25rem!important}.ps-sm-2{padding-left:.5rem!important}.ps-sm-3{padding-left:1rem!important}.ps-sm-4{padding-left:1.5rem!important}.ps-sm-5{padding-left:3rem!important}}@media (min-width:768px){.d-md-inline{display:inline!important}.d-md-inline-block{display:inline-block!important}.d-md-block{display:block!important}.d-md-grid{display:grid!important}.d-md-table{display:table!important}.d-md-table-row{display:table-row!important}.d-md-table-cell{display:table-cell!important}.d-md-flex{display:flex!important}.d-md-inline-flex{display:inline-flex!important}.d-md-none{display:none!important}.flex-md-fill{flex:1 1 
auto!important}.flex-md-row{flex-direction:row!important}.flex-md-column{flex-direction:column!important}.flex-md-row-reverse{flex-direction:row-reverse!important}.flex-md-column-reverse{flex-direction:column-reverse!important}.flex-md-grow-0{flex-grow:0!important}.flex-md-grow-1{flex-grow:1!important}.flex-md-shrink-0{flex-shrink:0!important}.flex-md-shrink-1{flex-shrink:1!important}.flex-md-wrap{flex-wrap:wrap!important}.flex-md-nowrap{flex-wrap:nowrap!important}.flex-md-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-md-start{justify-content:flex-start!important}.justify-content-md-end{justify-content:flex-end!important}.justify-content-md-center{justify-content:center!important}.justify-content-md-between{justify-content:space-between!important}.justify-content-md-around{justify-content:space-around!important}.justify-content-md-evenly{justify-content:space-evenly!important}.align-items-md-start{align-items:flex-start!important}.align-items-md-end{align-items:flex-end!important}.align-items-md-center{align-items:center!important}.align-items-md-baseline{align-items:baseline!important}.align-items-md-stretch{align-items:stretch!important}.align-content-md-start{align-content:flex-start!important}.align-content-md-end{align-content:flex-end!important}.align-content-md-center{align-content:center!important}.align-content-md-between{align-content:space-between!important}.align-content-md-around{align-content:space-around!important}.align-content-md-stretch{align-content:stretch!important}.align-self-md-auto{align-self:auto!important}.align-self-md-start{align-self:flex-start!important}.align-self-md-end{align-self:flex-end!important}.align-self-md-center{align-self:center!important}.align-self-md-baseline{align-self:baseline!important}.align-self-md-stretch{align-self:stretch!important}.order-md-first{order:-1!important}.order-md-0{order:0!important}.order-md-1{order:1!important}.order-md-2{order:2!important}.order-md-3{order:3!important}.order-md-4{order:4!important}.order-md-5{order:5!important}.order-md-last{order:6!important}.m-md-0{margin:0!important}.m-md-1{margin:.25rem!important}.m-md-2{margin:.5rem!important}.m-md-3{margin:1rem!important}.m-md-4{margin:1.5rem!important}.m-md-5{margin:3rem!important}.m-md-auto{margin:auto!important}.mx-md-0{margin-right:0!important;margin-left:0!important}.mx-md-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-md-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-md-3{margin-right:1rem!important;margin-left:1rem!important}.mx-md-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-md-5{margin-right:3rem!important;margin-left:3rem!important}.mx-md-auto{margin-right:auto!important;margin-left:auto!important}.my-md-0{margin-top:0!important;margin-bottom:0!important}.my-md-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-md-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-md-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-md-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-md-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-md-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-md-0{margin-top:0!important}.mt-md-1{margin-top:.25rem!important}.mt-md-2{margin-top:.5rem!important}.mt-md-3{margin-top:1rem!important}.mt-md-4{margin-top:1.5rem!important}.mt-md-5{margin-top:3rem!important}.mt-md-auto{margin-top:auto!important}.me-md-0{margin-right:0!important}.me-md-1{margin-right:.25rem!important}.me-md-2{margin-right:.5rem
!important}.me-md-3{margin-right:1rem!important}.me-md-4{margin-right:1.5rem!important}.me-md-5{margin-right:3rem!important}.me-md-auto{margin-right:auto!important}.mb-md-0{margin-bottom:0!important}.mb-md-1{margin-bottom:.25rem!important}.mb-md-2{margin-bottom:.5rem!important}.mb-md-3{margin-bottom:1rem!important}.mb-md-4{margin-bottom:1.5rem!important}.mb-md-5{margin-bottom:3rem!important}.mb-md-auto{margin-bottom:auto!important}.ms-md-0{margin-left:0!important}.ms-md-1{margin-left:.25rem!important}.ms-md-2{margin-left:.5rem!important}.ms-md-3{margin-left:1rem!important}.ms-md-4{margin-left:1.5rem!important}.ms-md-5{margin-left:3rem!important}.ms-md-auto{margin-left:auto!important}.p-md-0{padding:0!important}.p-md-1{padding:.25rem!important}.p-md-2{padding:.5rem!important}.p-md-3{padding:1rem!important}.p-md-4{padding:1.5rem!important}.p-md-5{padding:3rem!important}.px-md-0{padding-right:0!important;padding-left:0!important}.px-md-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-md-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-md-3{padding-right:1rem!important;padding-left:1rem!important}.px-md-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-md-5{padding-right:3rem!important;padding-left:3rem!important}.py-md-0{padding-top:0!important;padding-bottom:0!important}.py-md-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-md-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-md-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-md-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-md-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-md-0{padding-top:0!important}.pt-md-1{padding-top:.25rem!important}.pt-md-2{padding-top:.5rem!important}.pt-md-3{padding-top:1rem!important}.pt-md-4{padding-top:1.5rem!important}.pt-md-5{padding-top:3rem!important}.pe-md-0{padding-right:0!important}.pe-md-1{padding-right:.25rem!important}.pe-md-2{padding-right:.5rem!important}.pe-md-3{padding-right:1rem!important}.pe-md-4{padding-right:1.5rem!important}.pe-md-5{padding-right:3rem!important}.pb-md-0{padding-bottom:0!important}.pb-md-1{padding-bottom:.25rem!important}.pb-md-2{padding-bottom:.5rem!important}.pb-md-3{padding-bottom:1rem!important}.pb-md-4{padding-bottom:1.5rem!important}.pb-md-5{padding-bottom:3rem!important}.ps-md-0{padding-left:0!important}.ps-md-1{padding-left:.25rem!important}.ps-md-2{padding-left:.5rem!important}.ps-md-3{padding-left:1rem!important}.ps-md-4{padding-left:1.5rem!important}.ps-md-5{padding-left:3rem!important}}@media (min-width:992px){.d-lg-inline{display:inline!important}.d-lg-inline-block{display:inline-block!important}.d-lg-block{display:block!important}.d-lg-grid{display:grid!important}.d-lg-table{display:table!important}.d-lg-table-row{display:table-row!important}.d-lg-table-cell{display:table-cell!important}.d-lg-flex{display:flex!important}.d-lg-inline-flex{display:inline-flex!important}.d-lg-none{display:none!important}.flex-lg-fill{flex:1 1 
auto!important}.flex-lg-row{flex-direction:row!important}.flex-lg-column{flex-direction:column!important}.flex-lg-row-reverse{flex-direction:row-reverse!important}.flex-lg-column-reverse{flex-direction:column-reverse!important}.flex-lg-grow-0{flex-grow:0!important}.flex-lg-grow-1{flex-grow:1!important}.flex-lg-shrink-0{flex-shrink:0!important}.flex-lg-shrink-1{flex-shrink:1!important}.flex-lg-wrap{flex-wrap:wrap!important}.flex-lg-nowrap{flex-wrap:nowrap!important}.flex-lg-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-lg-start{justify-content:flex-start!important}.justify-content-lg-end{justify-content:flex-end!important}.justify-content-lg-center{justify-content:center!important}.justify-content-lg-between{justify-content:space-between!important}.justify-content-lg-around{justify-content:space-around!important}.justify-content-lg-evenly{justify-content:space-evenly!important}.align-items-lg-start{align-items:flex-start!important}.align-items-lg-end{align-items:flex-end!important}.align-items-lg-center{align-items:center!important}.align-items-lg-baseline{align-items:baseline!important}.align-items-lg-stretch{align-items:stretch!important}.align-content-lg-start{align-content:flex-start!important}.align-content-lg-end{align-content:flex-end!important}.align-content-lg-center{align-content:center!important}.align-content-lg-between{align-content:space-between!important}.align-content-lg-around{align-content:space-around!important}.align-content-lg-stretch{align-content:stretch!important}.align-self-lg-auto{align-self:auto!important}.align-self-lg-start{align-self:flex-start!important}.align-self-lg-end{align-self:flex-end!important}.align-self-lg-center{align-self:center!important}.align-self-lg-baseline{align-self:baseline!important}.align-self-lg-stretch{align-self:stretch!important}.order-lg-first{order:-1!important}.order-lg-0{order:0!important}.order-lg-1{order:1!important}.order-lg-2{order:2!important}.order-lg-3{order:3!important}.order-lg-4{order:4!important}.order-lg-5{order:5!important}.order-lg-last{order:6!important}.m-lg-0{margin:0!important}.m-lg-1{margin:.25rem!important}.m-lg-2{margin:.5rem!important}.m-lg-3{margin:1rem!important}.m-lg-4{margin:1.5rem!important}.m-lg-5{margin:3rem!important}.m-lg-auto{margin:auto!important}.mx-lg-0{margin-right:0!important;margin-left:0!important}.mx-lg-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-lg-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-lg-3{margin-right:1rem!important;margin-left:1rem!important}.mx-lg-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-lg-5{margin-right:3rem!important;margin-left:3rem!important}.mx-lg-auto{margin-right:auto!important;margin-left:auto!important}.my-lg-0{margin-top:0!important;margin-bottom:0!important}.my-lg-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-lg-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-lg-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-lg-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-lg-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-lg-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-lg-0{margin-top:0!important}.mt-lg-1{margin-top:.25rem!important}.mt-lg-2{margin-top:.5rem!important}.mt-lg-3{margin-top:1rem!important}.mt-lg-4{margin-top:1.5rem!important}.mt-lg-5{margin-top:3rem!important}.mt-lg-auto{margin-top:auto!important}.me-lg-0{margin-right:0!important}.me-lg-1{margin-right:.25rem!important}.me-lg-2{margin-right:.5rem
!important}.me-lg-3{margin-right:1rem!important}.me-lg-4{margin-right:1.5rem!important}.me-lg-5{margin-right:3rem!important}.me-lg-auto{margin-right:auto!important}.mb-lg-0{margin-bottom:0!important}.mb-lg-1{margin-bottom:.25rem!important}.mb-lg-2{margin-bottom:.5rem!important}.mb-lg-3{margin-bottom:1rem!important}.mb-lg-4{margin-bottom:1.5rem!important}.mb-lg-5{margin-bottom:3rem!important}.mb-lg-auto{margin-bottom:auto!important}.ms-lg-0{margin-left:0!important}.ms-lg-1{margin-left:.25rem!important}.ms-lg-2{margin-left:.5rem!important}.ms-lg-3{margin-left:1rem!important}.ms-lg-4{margin-left:1.5rem!important}.ms-lg-5{margin-left:3rem!important}.ms-lg-auto{margin-left:auto!important}.p-lg-0{padding:0!important}.p-lg-1{padding:.25rem!important}.p-lg-2{padding:.5rem!important}.p-lg-3{padding:1rem!important}.p-lg-4{padding:1.5rem!important}.p-lg-5{padding:3rem!important}.px-lg-0{padding-right:0!important;padding-left:0!important}.px-lg-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-lg-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-lg-3{padding-right:1rem!important;padding-left:1rem!important}.px-lg-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-lg-5{padding-right:3rem!important;padding-left:3rem!important}.py-lg-0{padding-top:0!important;padding-bottom:0!important}.py-lg-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-lg-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-lg-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-lg-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-lg-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-lg-0{padding-top:0!important}.pt-lg-1{padding-top:.25rem!important}.pt-lg-2{padding-top:.5rem!important}.pt-lg-3{padding-top:1rem!important}.pt-lg-4{padding-top:1.5rem!important}.pt-lg-5{padding-top:3rem!important}.pe-lg-0{padding-right:0!important}.pe-lg-1{padding-right:.25rem!important}.pe-lg-2{padding-right:.5rem!important}.pe-lg-3{padding-right:1rem!important}.pe-lg-4{padding-right:1.5rem!important}.pe-lg-5{padding-right:3rem!important}.pb-lg-0{padding-bottom:0!important}.pb-lg-1{padding-bottom:.25rem!important}.pb-lg-2{padding-bottom:.5rem!important}.pb-lg-3{padding-bottom:1rem!important}.pb-lg-4{padding-bottom:1.5rem!important}.pb-lg-5{padding-bottom:3rem!important}.ps-lg-0{padding-left:0!important}.ps-lg-1{padding-left:.25rem!important}.ps-lg-2{padding-left:.5rem!important}.ps-lg-3{padding-left:1rem!important}.ps-lg-4{padding-left:1.5rem!important}.ps-lg-5{padding-left:3rem!important}}@media (min-width:1200px){.d-xl-inline{display:inline!important}.d-xl-inline-block{display:inline-block!important}.d-xl-block{display:block!important}.d-xl-grid{display:grid!important}.d-xl-table{display:table!important}.d-xl-table-row{display:table-row!important}.d-xl-table-cell{display:table-cell!important}.d-xl-flex{display:flex!important}.d-xl-inline-flex{display:inline-flex!important}.d-xl-none{display:none!important}.flex-xl-fill{flex:1 1 
auto!important}.flex-xl-row{flex-direction:row!important}.flex-xl-column{flex-direction:column!important}.flex-xl-row-reverse{flex-direction:row-reverse!important}.flex-xl-column-reverse{flex-direction:column-reverse!important}.flex-xl-grow-0{flex-grow:0!important}.flex-xl-grow-1{flex-grow:1!important}.flex-xl-shrink-0{flex-shrink:0!important}.flex-xl-shrink-1{flex-shrink:1!important}.flex-xl-wrap{flex-wrap:wrap!important}.flex-xl-nowrap{flex-wrap:nowrap!important}.flex-xl-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-xl-start{justify-content:flex-start!important}.justify-content-xl-end{justify-content:flex-end!important}.justify-content-xl-center{justify-content:center!important}.justify-content-xl-between{justify-content:space-between!important}.justify-content-xl-around{justify-content:space-around!important}.justify-content-xl-evenly{justify-content:space-evenly!important}.align-items-xl-start{align-items:flex-start!important}.align-items-xl-end{align-items:flex-end!important}.align-items-xl-center{align-items:center!important}.align-items-xl-baseline{align-items:baseline!important}.align-items-xl-stretch{align-items:stretch!important}.align-content-xl-start{align-content:flex-start!important}.align-content-xl-end{align-content:flex-end!important}.align-content-xl-center{align-content:center!important}.align-content-xl-between{align-content:space-between!important}.align-content-xl-around{align-content:space-around!important}.align-content-xl-stretch{align-content:stretch!important}.align-self-xl-auto{align-self:auto!important}.align-self-xl-start{align-self:flex-start!important}.align-self-xl-end{align-self:flex-end!important}.align-self-xl-center{align-self:center!important}.align-self-xl-baseline{align-self:baseline!important}.align-self-xl-stretch{align-self:stretch!important}.order-xl-first{order:-1!important}.order-xl-0{order:0!important}.order-xl-1{order:1!important}.order-xl-2{order:2!important}.order-xl-3{order:3!important}.order-xl-4{order:4!important}.order-xl-5{order:5!important}.order-xl-last{order:6!important}.m-xl-0{margin:0!important}.m-xl-1{margin:.25rem!important}.m-xl-2{margin:.5rem!important}.m-xl-3{margin:1rem!important}.m-xl-4{margin:1.5rem!important}.m-xl-5{margin:3rem!important}.m-xl-auto{margin:auto!important}.mx-xl-0{margin-right:0!important;margin-left:0!important}.mx-xl-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-xl-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-xl-3{margin-right:1rem!important;margin-left:1rem!important}.mx-xl-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-xl-5{margin-right:3rem!important;margin-left:3rem!important}.mx-xl-auto{margin-right:auto!important;margin-left:auto!important}.my-xl-0{margin-top:0!important;margin-bottom:0!important}.my-xl-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-xl-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-xl-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-xl-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-xl-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-xl-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-xl-0{margin-top:0!important}.mt-xl-1{margin-top:.25rem!important}.mt-xl-2{margin-top:.5rem!important}.mt-xl-3{margin-top:1rem!important}.mt-xl-4{margin-top:1.5rem!important}.mt-xl-5{margin-top:3rem!important}.mt-xl-auto{margin-top:auto!important}.me-xl-0{margin-right:0!important}.me-xl-1{margin-right:.25rem!important}.me-xl-2{margin-right:.5rem
!important}.me-xl-3{margin-right:1rem!important}.me-xl-4{margin-right:1.5rem!important}.me-xl-5{margin-right:3rem!important}.me-xl-auto{margin-right:auto!important}.mb-xl-0{margin-bottom:0!important}.mb-xl-1{margin-bottom:.25rem!important}.mb-xl-2{margin-bottom:.5rem!important}.mb-xl-3{margin-bottom:1rem!important}.mb-xl-4{margin-bottom:1.5rem!important}.mb-xl-5{margin-bottom:3rem!important}.mb-xl-auto{margin-bottom:auto!important}.ms-xl-0{margin-left:0!important}.ms-xl-1{margin-left:.25rem!important}.ms-xl-2{margin-left:.5rem!important}.ms-xl-3{margin-left:1rem!important}.ms-xl-4{margin-left:1.5rem!important}.ms-xl-5{margin-left:3rem!important}.ms-xl-auto{margin-left:auto!important}.p-xl-0{padding:0!important}.p-xl-1{padding:.25rem!important}.p-xl-2{padding:.5rem!important}.p-xl-3{padding:1rem!important}.p-xl-4{padding:1.5rem!important}.p-xl-5{padding:3rem!important}.px-xl-0{padding-right:0!important;padding-left:0!important}.px-xl-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-xl-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-xl-3{padding-right:1rem!important;padding-left:1rem!important}.px-xl-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-xl-5{padding-right:3rem!important;padding-left:3rem!important}.py-xl-0{padding-top:0!important;padding-bottom:0!important}.py-xl-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-xl-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-xl-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-xl-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-xl-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-xl-0{padding-top:0!important}.pt-xl-1{padding-top:.25rem!important}.pt-xl-2{padding-top:.5rem!important}.pt-xl-3{padding-top:1rem!important}.pt-xl-4{padding-top:1.5rem!important}.pt-xl-5{padding-top:3rem!important}.pe-xl-0{padding-right:0!important}.pe-xl-1{padding-right:.25rem!important}.pe-xl-2{padding-right:.5rem!important}.pe-xl-3{padding-right:1rem!important}.pe-xl-4{padding-right:1.5rem!important}.pe-xl-5{padding-right:3rem!important}.pb-xl-0{padding-bottom:0!important}.pb-xl-1{padding-bottom:.25rem!important}.pb-xl-2{padding-bottom:.5rem!important}.pb-xl-3{padding-bottom:1rem!important}.pb-xl-4{padding-bottom:1.5rem!important}.pb-xl-5{padding-bottom:3rem!important}.ps-xl-0{padding-left:0!important}.ps-xl-1{padding-left:.25rem!important}.ps-xl-2{padding-left:.5rem!important}.ps-xl-3{padding-left:1rem!important}.ps-xl-4{padding-left:1.5rem!important}.ps-xl-5{padding-left:3rem!important}}@media (min-width:1400px){.d-xxl-inline{display:inline!important}.d-xxl-inline-block{display:inline-block!important}.d-xxl-block{display:block!important}.d-xxl-grid{display:grid!important}.d-xxl-table{display:table!important}.d-xxl-table-row{display:table-row!important}.d-xxl-table-cell{display:table-cell!important}.d-xxl-flex{display:flex!important}.d-xxl-inline-flex{display:inline-flex!important}.d-xxl-none{display:none!important}.flex-xxl-fill{flex:1 1 
auto!important}.flex-xxl-row{flex-direction:row!important}.flex-xxl-column{flex-direction:column!important}.flex-xxl-row-reverse{flex-direction:row-reverse!important}.flex-xxl-column-reverse{flex-direction:column-reverse!important}.flex-xxl-grow-0{flex-grow:0!important}.flex-xxl-grow-1{flex-grow:1!important}.flex-xxl-shrink-0{flex-shrink:0!important}.flex-xxl-shrink-1{flex-shrink:1!important}.flex-xxl-wrap{flex-wrap:wrap!important}.flex-xxl-nowrap{flex-wrap:nowrap!important}.flex-xxl-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-xxl-start{justify-content:flex-start!important}.justify-content-xxl-end{justify-content:flex-end!important}.justify-content-xxl-center{justify-content:center!important}.justify-content-xxl-between{justify-content:space-between!important}.justify-content-xxl-around{justify-content:space-around!important}.justify-content-xxl-evenly{justify-content:space-evenly!important}.align-items-xxl-start{align-items:flex-start!important}.align-items-xxl-end{align-items:flex-end!important}.align-items-xxl-center{align-items:center!important}.align-items-xxl-baseline{align-items:baseline!important}.align-items-xxl-stretch{align-items:stretch!important}.align-content-xxl-start{align-content:flex-start!important}.align-content-xxl-end{align-content:flex-end!important}.align-content-xxl-center{align-content:center!important}.align-content-xxl-between{align-content:space-between!important}.align-content-xxl-around{align-content:space-around!important}.align-content-xxl-stretch{align-content:stretch!important}.align-self-xxl-auto{align-self:auto!important}.align-self-xxl-start{align-self:flex-start!important}.align-self-xxl-end{align-self:flex-end!important}.align-self-xxl-center{align-self:center!important}.align-self-xxl-baseline{align-self:baseline!important}.align-self-xxl-stretch{align-self:stretch!important}.order-xxl-first{order:-1!important}.order-xxl-0{order:0!important}.order-xxl-1{order:1!important}.order-xxl-2{order:2!important}.order-xxl-3{order:3!important}.order-xxl-4{order:4!important}.order-xxl-5{order:5!important}.order-xxl-last{order:6!important}.m-xxl-0{margin:0!important}.m-xxl-1{margin:.25rem!important}.m-xxl-2{margin:.5rem!important}.m-xxl-3{margin:1rem!important}.m-xxl-4{margin:1.5rem!important}.m-xxl-5{margin:3rem!important}.m-xxl-auto{margin:auto!important}.mx-xxl-0{margin-right:0!important;margin-left:0!important}.mx-xxl-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-xxl-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-xxl-3{margin-right:1rem!important;margin-left:1rem!important}.mx-xxl-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-xxl-5{margin-right:3rem!important;margin-left:3rem!important}.mx-xxl-auto{margin-right:auto!important;margin-left:auto!important}.my-xxl-0{margin-top:0!important;margin-bottom:0!important}.my-xxl-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-xxl-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-xxl-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-xxl-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-xxl-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-xxl-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-xxl-0{margin-top:0!important}.mt-xxl-1{margin-top:.25rem!important}.mt-xxl-2{margin-top:.5rem!important}.mt-xxl-3{margin-top:1rem!important}.mt-xxl-4{margin-top:1.5rem!important}.mt-xxl-5{margin-top:3rem!important}.mt-xxl-auto{margin-top:auto!important}.me-xxl-0{margin-right:0!impor
tant}.me-xxl-1{margin-right:.25rem!important}.me-xxl-2{margin-right:.5rem!important}.me-xxl-3{margin-right:1rem!important}.me-xxl-4{margin-right:1.5rem!important}.me-xxl-5{margin-right:3rem!important}.me-xxl-auto{margin-right:auto!important}.mb-xxl-0{margin-bottom:0!important}.mb-xxl-1{margin-bottom:.25rem!important}.mb-xxl-2{margin-bottom:.5rem!important}.mb-xxl-3{margin-bottom:1rem!important}.mb-xxl-4{margin-bottom:1.5rem!important}.mb-xxl-5{margin-bottom:3rem!important}.mb-xxl-auto{margin-bottom:auto!important}.ms-xxl-0{margin-left:0!important}.ms-xxl-1{margin-left:.25rem!important}.ms-xxl-2{margin-left:.5rem!important}.ms-xxl-3{margin-left:1rem!important}.ms-xxl-4{margin-left:1.5rem!important}.ms-xxl-5{margin-left:3rem!important}.ms-xxl-auto{margin-left:auto!important}.p-xxl-0{padding:0!important}.p-xxl-1{padding:.25rem!important}.p-xxl-2{padding:.5rem!important}.p-xxl-3{padding:1rem!important}.p-xxl-4{padding:1.5rem!important}.p-xxl-5{padding:3rem!important}.px-xxl-0{padding-right:0!important;padding-left:0!important}.px-xxl-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-xxl-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-xxl-3{padding-right:1rem!important;padding-left:1rem!important}.px-xxl-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-xxl-5{padding-right:3rem!important;padding-left:3rem!important}.py-xxl-0{padding-top:0!important;padding-bottom:0!important}.py-xxl-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-xxl-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-xxl-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-xxl-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-xxl-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-xxl-0{padding-top:0!important}.pt-xxl-1{padding-top:.25rem!important}.pt-xxl-2{padding-top:.5rem!important}.pt-xxl-3{padding-top:1rem!important}.pt-xxl-4{padding-top:1.5rem!important}.pt-xxl-5{padding-top:3rem!important}.pe-xxl-0{padding-right:0!important}.pe-xxl-1{padding-right:.25rem!important}.pe-xxl-2{padding-right:.5rem!important}.pe-xxl-3{padding-right:1rem!important}.pe-xxl-4{padding-right:1.5rem!important}.pe-xxl-5{padding-right:3rem!important}.pb-xxl-0{padding-bottom:0!important}.pb-xxl-1{padding-bottom:.25rem!important}.pb-xxl-2{padding-bottom:.5rem!important}.pb-xxl-3{padding-bottom:1rem!important}.pb-xxl-4{padding-bottom:1.5rem!important}.pb-xxl-5{padding-bottom:3rem!important}.ps-xxl-0{padding-left:0!important}.ps-xxl-1{padding-left:.25rem!important}.ps-xxl-2{padding-left:.5rem!important}.ps-xxl-3{padding-left:1rem!important}.ps-xxl-4{padding-left:1.5rem!important}.ps-xxl-5{padding-left:3rem!important}}@media print{.d-print-inline{display:inline!important}.d-print-inline-block{display:inline-block!important}.d-print-block{display:block!important}.d-print-grid{display:grid!important}.d-print-table{display:table!important}.d-print-table-row{display:table-row!important}.d-print-table-cell{display:table-cell!important}.d-print-flex{display:flex!important}.d-print-inline-flex{display:inline-flex!important}.d-print-none{display:none!important}} \ No newline at end of file diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/bootstrap/css/bootstrap-reboot.min.css b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/bootstrap/css/bootstrap-reboot.min.css deleted file mode 100644 index a370bb60..00000000 --- 
a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/bootstrap/css/bootstrap-reboot.min.css +++ /dev/null @@ -1,7 +0,0 @@ -/*! - * Bootstrap Reboot v5.0.1 (https://getbootstrap.com/) - * Copyright 2011-2021 The Bootstrap Authors - * Copyright 2011-2021 Twitter, Inc. - * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) - * Forked from Normalize.css, licensed MIT (https://github.com/necolas/normalize.css/blob/master/LICENSE.md) - */*,::after,::before{box-sizing:border-box}@media (prefers-reduced-motion:no-preference){:root{scroll-behavior:smooth}}body{margin:0;font-family:system-ui,-apple-system,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans","Liberation Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-size:1rem;font-weight:400;line-height:1.5;color:#212529;background-color:#fff;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent}hr{margin:1rem 0;color:inherit;background-color:currentColor;border:0;opacity:.25}hr:not([size]){height:1px}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:.5rem;font-weight:500;line-height:1.2}h1{font-size:calc(1.375rem + 1.5vw)}@media (min-width:1200px){h1{font-size:2.5rem}}h2{font-size:calc(1.325rem + .9vw)}@media (min-width:1200px){h2{font-size:2rem}}h3{font-size:calc(1.3rem + .6vw)}@media (min-width:1200px){h3{font-size:1.75rem}}h4{font-size:calc(1.275rem + .3vw)}@media (min-width:1200px){h4{font-size:1.5rem}}h5{font-size:1.25rem}h6{font-size:1rem}p{margin-top:0;margin-bottom:1rem}abbr[data-bs-original-title],abbr[title]{-webkit-text-decoration:underline dotted;text-decoration:underline dotted;cursor:help;-webkit-text-decoration-skip-ink:none;text-decoration-skip-ink:none}address{margin-bottom:1rem;font-style:normal;line-height:inherit}ol,ul{padding-left:2rem}dl,ol,ul{margin-top:0;margin-bottom:1rem}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}b,strong{font-weight:bolder}small{font-size:.875em}mark{padding:.2em;background-color:#fcf8e3}sub,sup{position:relative;font-size:.75em;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:#0d6efd;text-decoration:underline}a:hover{color:#0a58ca}a:not([href]):not([class]),a:not([href]):not([class]):hover{color:inherit;text-decoration:none}code,kbd,pre,samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;font-size:1em;direction:ltr;unicode-bidi:bidi-override}pre{display:block;margin-top:0;margin-bottom:1rem;overflow:auto;font-size:.875em}pre code{font-size:inherit;color:inherit;word-break:normal}code{font-size:.875em;color:#d63384;word-wrap:break-word}a>code{color:inherit}kbd{padding:.2rem .4rem;font-size:.875em;color:#fff;background-color:#212529;border-radius:.2rem}kbd kbd{padding:0;font-size:1em;font-weight:700}figure{margin:0 0 
1rem}img,svg{vertical-align:middle}table{caption-side:bottom;border-collapse:collapse}caption{padding-top:.5rem;padding-bottom:.5rem;color:#6c757d;text-align:left}th{text-align:inherit;text-align:-webkit-match-parent}tbody,td,tfoot,th,thead,tr{border-color:inherit;border-style:solid;border-width:0}label{display:inline-block}button{border-radius:0}button:focus:not(:focus-visible){outline:0}button,input,optgroup,select,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,select{text-transform:none}[role=button]{cursor:pointer}select{word-wrap:normal}select:disabled{opacity:1}[list]::-webkit-calendar-picker-indicator{display:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]:not(:disabled),[type=reset]:not(:disabled),[type=submit]:not(:disabled),button:not(:disabled){cursor:pointer}::-moz-focus-inner{padding:0;border-style:none}textarea{resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{float:left;width:100%;padding:0;margin-bottom:.5rem;font-size:calc(1.275rem + .3vw);line-height:inherit}@media (min-width:1200px){legend{font-size:1.5rem}}legend+*{clear:left}::-webkit-datetime-edit-day-field,::-webkit-datetime-edit-fields-wrapper,::-webkit-datetime-edit-hour-field,::-webkit-datetime-edit-minute,::-webkit-datetime-edit-month-field,::-webkit-datetime-edit-text,::-webkit-datetime-edit-year-field{padding:0}::-webkit-inner-spin-button{height:auto}[type=search]{outline-offset:-2px;-webkit-appearance:textfield}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-color-swatch-wrapper{padding:0}::file-selector-button{font:inherit}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}iframe{border:0}summary{display:list-item;cursor:pointer}progress{vertical-align:baseline}[hidden]{display:none!important} \ No newline at end of file diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/bootstrap/css/bootstrap.min.css b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/bootstrap/css/bootstrap.min.css deleted file mode 100644 index 43d6ac30..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/bootstrap/css/bootstrap.min.css +++ /dev/null @@ -1,6 +0,0 @@ -@charset "UTF-8";/*! - * Bootstrap v5.0.1 (https://getbootstrap.com/) - * Copyright 2011-2021 The Bootstrap Authors - * Copyright 2011-2021 Twitter, Inc. 
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) - */:root{--bs-blue:#0d6efd;--bs-indigo:#6610f2;--bs-purple:#6f42c1;--bs-pink:#d63384;--bs-red:#dc3545;--bs-orange:#fd7e14;--bs-yellow:#ffc107;--bs-green:#198754;--bs-teal:#20c997;--bs-cyan:#0dcaf0;--bs-white:#fff;--bs-gray:#6c757d;--bs-gray-dark:#343a40;--bs-primary:#0d6efd;--bs-secondary:#6c757d;--bs-success:#198754;--bs-info:#0dcaf0;--bs-warning:#ffc107;--bs-danger:#dc3545;--bs-light:#f8f9fa;--bs-dark:#212529;--bs-font-sans-serif:system-ui,-apple-system,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans","Liberation Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--bs-font-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--bs-gradient:linear-gradient(180deg, rgba(255, 255, 255, 0.15), rgba(255, 255, 255, 0))}*,::after,::before{box-sizing:border-box}@media (prefers-reduced-motion:no-preference){:root{scroll-behavior:smooth}}body{margin:0;font-family:var(--bs-font-sans-serif);font-size:1rem;font-weight:400;line-height:1.5;color:#212529;background-color:#fff;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent}hr{margin:1rem 0;color:inherit;background-color:currentColor;border:0;opacity:.25}hr:not([size]){height:1px}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:.5rem;font-weight:500;line-height:1.2}.h1,h1{font-size:calc(1.375rem + 1.5vw)}@media (min-width:1200px){.h1,h1{font-size:2.5rem}}.h2,h2{font-size:calc(1.325rem + .9vw)}@media (min-width:1200px){.h2,h2{font-size:2rem}}.h3,h3{font-size:calc(1.3rem + .6vw)}@media (min-width:1200px){.h3,h3{font-size:1.75rem}}.h4,h4{font-size:calc(1.275rem + .3vw)}@media (min-width:1200px){.h4,h4{font-size:1.5rem}}.h5,h5{font-size:1.25rem}.h6,h6{font-size:1rem}p{margin-top:0;margin-bottom:1rem}abbr[data-bs-original-title],abbr[title]{-webkit-text-decoration:underline dotted;text-decoration:underline dotted;cursor:help;-webkit-text-decoration-skip-ink:none;text-decoration-skip-ink:none}address{margin-bottom:1rem;font-style:normal;line-height:inherit}ol,ul{padding-left:2rem}dl,ol,ul{margin-top:0;margin-bottom:1rem}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}b,strong{font-weight:bolder}.small,small{font-size:.875em}.mark,mark{padding:.2em;background-color:#fcf8e3}sub,sup{position:relative;font-size:.75em;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:#0d6efd;text-decoration:underline}a:hover{color:#0a58ca}a:not([href]):not([class]),a:not([href]):not([class]):hover{color:inherit;text-decoration:none}code,kbd,pre,samp{font-family:var(--bs-font-monospace);font-size:1em;direction:ltr;unicode-bidi:bidi-override}pre{display:block;margin-top:0;margin-bottom:1rem;overflow:auto;font-size:.875em}pre code{font-size:inherit;color:inherit;word-break:normal}code{font-size:.875em;color:#d63384;word-wrap:break-word}a>code{color:inherit}kbd{padding:.2rem .4rem;font-size:.875em;color:#fff;background-color:#212529;border-radius:.2rem}kbd kbd{padding:0;font-size:1em;font-weight:700}figure{margin:0 0 
1rem}img,svg{vertical-align:middle}table{caption-side:bottom;border-collapse:collapse}caption{padding-top:.5rem;padding-bottom:.5rem;color:#6c757d;text-align:left}th{text-align:inherit;text-align:-webkit-match-parent}tbody,td,tfoot,th,thead,tr{border-color:inherit;border-style:solid;border-width:0}label{display:inline-block}button{border-radius:0}button:focus:not(:focus-visible){outline:0}button,input,optgroup,select,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,select{text-transform:none}[role=button]{cursor:pointer}select{word-wrap:normal}select:disabled{opacity:1}[list]::-webkit-calendar-picker-indicator{display:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]:not(:disabled),[type=reset]:not(:disabled),[type=submit]:not(:disabled),button:not(:disabled){cursor:pointer}::-moz-focus-inner{padding:0;border-style:none}textarea{resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{float:left;width:100%;padding:0;margin-bottom:.5rem;font-size:calc(1.275rem + .3vw);line-height:inherit}@media (min-width:1200px){legend{font-size:1.5rem}}legend+*{clear:left}::-webkit-datetime-edit-day-field,::-webkit-datetime-edit-fields-wrapper,::-webkit-datetime-edit-hour-field,::-webkit-datetime-edit-minute,::-webkit-datetime-edit-month-field,::-webkit-datetime-edit-text,::-webkit-datetime-edit-year-field{padding:0}::-webkit-inner-spin-button{height:auto}[type=search]{outline-offset:-2px;-webkit-appearance:textfield}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-color-swatch-wrapper{padding:0}::file-selector-button{font:inherit}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}iframe{border:0}summary{display:list-item;cursor:pointer}progress{vertical-align:baseline}[hidden]{display:none!important}.lead{font-size:1.25rem;font-weight:300}.display-1{font-size:calc(1.625rem + 4.5vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-1{font-size:5rem}}.display-2{font-size:calc(1.575rem + 3.9vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-2{font-size:4.5rem}}.display-3{font-size:calc(1.525rem + 3.3vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-3{font-size:4rem}}.display-4{font-size:calc(1.475rem + 2.7vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-4{font-size:3.5rem}}.display-5{font-size:calc(1.425rem + 2.1vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-5{font-size:3rem}}.display-6{font-size:calc(1.375rem + 1.5vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-6{font-size:2.5rem}}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:.5rem}.initialism{font-size:.875em;text-transform:uppercase}.blockquote{margin-bottom:1rem;font-size:1.25rem}.blockquote>:last-child{margin-bottom:0}.blockquote-footer{margin-top:-1rem;margin-bottom:1rem;font-size:.875em;color:#6c757d}.blockquote-footer::before{content:"— "}.img-fluid{max-width:100%;height:auto}.img-thumbnail{padding:.25rem;background-color:#fff;border:1px solid 
#dee2e6;border-radius:.25rem;max-width:100%;height:auto}.figure{display:inline-block}.figure-img{margin-bottom:.5rem;line-height:1}.figure-caption{font-size:.875em;color:#6c757d}.container,.container-fluid,.container-lg,.container-md,.container-sm,.container-xl,.container-xxl{width:100%;padding-right:var(--bs-gutter-x,.75rem);padding-left:var(--bs-gutter-x,.75rem);margin-right:auto;margin-left:auto}@media (min-width:576px){.container,.container-sm{max-width:540px}}@media (min-width:768px){.container,.container-md,.container-sm{max-width:720px}}@media (min-width:992px){.container,.container-lg,.container-md,.container-sm{max-width:960px}}@media (min-width:1200px){.container,.container-lg,.container-md,.container-sm,.container-xl{max-width:1140px}}@media (min-width:1400px){.container,.container-lg,.container-md,.container-sm,.container-xl,.container-xxl{max-width:1320px}}.row{--bs-gutter-x:1.5rem;--bs-gutter-y:0;display:flex;flex-wrap:wrap;margin-top:calc(var(--bs-gutter-y) * -1);margin-right:calc(var(--bs-gutter-x)/ -2);margin-left:calc(var(--bs-gutter-x)/ -2)}.row>*{flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--bs-gutter-x)/ 2);padding-left:calc(var(--bs-gutter-x)/ 2);margin-top:var(--bs-gutter-y)}.col{flex:1 0 0%}.row-cols-auto>*{flex:0 0 auto;width:auto}.row-cols-1>*{flex:0 0 auto;width:100%}.row-cols-2>*{flex:0 0 auto;width:50%}.row-cols-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-4>*{flex:0 0 auto;width:25%}.row-cols-5>*{flex:0 0 auto;width:20%}.row-cols-6>*{flex:0 0 auto;width:16.6666666667%}.col-auto{flex:0 0 auto;width:auto}.col-1{flex:0 0 auto;width:8.3333333333%}.col-2{flex:0 0 auto;width:16.6666666667%}.col-3{flex:0 0 auto;width:25%}.col-4{flex:0 0 auto;width:33.3333333333%}.col-5{flex:0 0 auto;width:41.6666666667%}.col-6{flex:0 0 auto;width:50%}.col-7{flex:0 0 auto;width:58.3333333333%}.col-8{flex:0 0 auto;width:66.6666666667%}.col-9{flex:0 0 auto;width:75%}.col-10{flex:0 0 auto;width:83.3333333333%}.col-11{flex:0 0 auto;width:91.6666666667%}.col-12{flex:0 0 auto;width:100%}.offset-1{margin-left:8.3333333333%}.offset-2{margin-left:16.6666666667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.3333333333%}.offset-5{margin-left:41.6666666667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.3333333333%}.offset-8{margin-left:66.6666666667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.3333333333%}.offset-11{margin-left:91.6666666667%}.g-0,.gx-0{--bs-gutter-x:0}.g-0,.gy-0{--bs-gutter-y:0}.g-1,.gx-1{--bs-gutter-x:0.25rem}.g-1,.gy-1{--bs-gutter-y:0.25rem}.g-2,.gx-2{--bs-gutter-x:0.5rem}.g-2,.gy-2{--bs-gutter-y:0.5rem}.g-3,.gx-3{--bs-gutter-x:1rem}.g-3,.gy-3{--bs-gutter-y:1rem}.g-4,.gx-4{--bs-gutter-x:1.5rem}.g-4,.gy-4{--bs-gutter-y:1.5rem}.g-5,.gx-5{--bs-gutter-x:3rem}.g-5,.gy-5{--bs-gutter-y:3rem}@media (min-width:576px){.col-sm{flex:1 0 0%}.row-cols-sm-auto>*{flex:0 0 auto;width:auto}.row-cols-sm-1>*{flex:0 0 auto;width:100%}.row-cols-sm-2>*{flex:0 0 auto;width:50%}.row-cols-sm-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-sm-4>*{flex:0 0 auto;width:25%}.row-cols-sm-5>*{flex:0 0 auto;width:20%}.row-cols-sm-6>*{flex:0 0 auto;width:16.6666666667%}.col-sm-auto{flex:0 0 auto;width:auto}.col-sm-1{flex:0 0 auto;width:8.3333333333%}.col-sm-2{flex:0 0 auto;width:16.6666666667%}.col-sm-3{flex:0 0 auto;width:25%}.col-sm-4{flex:0 0 auto;width:33.3333333333%}.col-sm-5{flex:0 0 auto;width:41.6666666667%}.col-sm-6{flex:0 0 auto;width:50%}.col-sm-7{flex:0 0 auto;width:58.3333333333%}.col-sm-8{flex:0 0 auto;width:66.6666666667%}.col-sm-9{flex:0 0 
auto;width:75%}.col-sm-10{flex:0 0 auto;width:83.3333333333%}.col-sm-11{flex:0 0 auto;width:91.6666666667%}.col-sm-12{flex:0 0 auto;width:100%}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.3333333333%}.offset-sm-2{margin-left:16.6666666667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.3333333333%}.offset-sm-5{margin-left:41.6666666667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.3333333333%}.offset-sm-8{margin-left:66.6666666667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.3333333333%}.offset-sm-11{margin-left:91.6666666667%}.g-sm-0,.gx-sm-0{--bs-gutter-x:0}.g-sm-0,.gy-sm-0{--bs-gutter-y:0}.g-sm-1,.gx-sm-1{--bs-gutter-x:0.25rem}.g-sm-1,.gy-sm-1{--bs-gutter-y:0.25rem}.g-sm-2,.gx-sm-2{--bs-gutter-x:0.5rem}.g-sm-2,.gy-sm-2{--bs-gutter-y:0.5rem}.g-sm-3,.gx-sm-3{--bs-gutter-x:1rem}.g-sm-3,.gy-sm-3{--bs-gutter-y:1rem}.g-sm-4,.gx-sm-4{--bs-gutter-x:1.5rem}.g-sm-4,.gy-sm-4{--bs-gutter-y:1.5rem}.g-sm-5,.gx-sm-5{--bs-gutter-x:3rem}.g-sm-5,.gy-sm-5{--bs-gutter-y:3rem}}@media (min-width:768px){.col-md{flex:1 0 0%}.row-cols-md-auto>*{flex:0 0 auto;width:auto}.row-cols-md-1>*{flex:0 0 auto;width:100%}.row-cols-md-2>*{flex:0 0 auto;width:50%}.row-cols-md-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-md-4>*{flex:0 0 auto;width:25%}.row-cols-md-5>*{flex:0 0 auto;width:20%}.row-cols-md-6>*{flex:0 0 auto;width:16.6666666667%}.col-md-auto{flex:0 0 auto;width:auto}.col-md-1{flex:0 0 auto;width:8.3333333333%}.col-md-2{flex:0 0 auto;width:16.6666666667%}.col-md-3{flex:0 0 auto;width:25%}.col-md-4{flex:0 0 auto;width:33.3333333333%}.col-md-5{flex:0 0 auto;width:41.6666666667%}.col-md-6{flex:0 0 auto;width:50%}.col-md-7{flex:0 0 auto;width:58.3333333333%}.col-md-8{flex:0 0 auto;width:66.6666666667%}.col-md-9{flex:0 0 auto;width:75%}.col-md-10{flex:0 0 auto;width:83.3333333333%}.col-md-11{flex:0 0 auto;width:91.6666666667%}.col-md-12{flex:0 0 auto;width:100%}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.3333333333%}.offset-md-2{margin-left:16.6666666667%}.offset-md-3{margin-left:25%}.offset-md-4{margin-left:33.3333333333%}.offset-md-5{margin-left:41.6666666667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.3333333333%}.offset-md-8{margin-left:66.6666666667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.3333333333%}.offset-md-11{margin-left:91.6666666667%}.g-md-0,.gx-md-0{--bs-gutter-x:0}.g-md-0,.gy-md-0{--bs-gutter-y:0}.g-md-1,.gx-md-1{--bs-gutter-x:0.25rem}.g-md-1,.gy-md-1{--bs-gutter-y:0.25rem}.g-md-2,.gx-md-2{--bs-gutter-x:0.5rem}.g-md-2,.gy-md-2{--bs-gutter-y:0.5rem}.g-md-3,.gx-md-3{--bs-gutter-x:1rem}.g-md-3,.gy-md-3{--bs-gutter-y:1rem}.g-md-4,.gx-md-4{--bs-gutter-x:1.5rem}.g-md-4,.gy-md-4{--bs-gutter-y:1.5rem}.g-md-5,.gx-md-5{--bs-gutter-x:3rem}.g-md-5,.gy-md-5{--bs-gutter-y:3rem}}@media (min-width:992px){.col-lg{flex:1 0 0%}.row-cols-lg-auto>*{flex:0 0 auto;width:auto}.row-cols-lg-1>*{flex:0 0 auto;width:100%}.row-cols-lg-2>*{flex:0 0 auto;width:50%}.row-cols-lg-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-lg-4>*{flex:0 0 auto;width:25%}.row-cols-lg-5>*{flex:0 0 auto;width:20%}.row-cols-lg-6>*{flex:0 0 auto;width:16.6666666667%}.col-lg-auto{flex:0 0 auto;width:auto}.col-lg-1{flex:0 0 auto;width:8.3333333333%}.col-lg-2{flex:0 0 auto;width:16.6666666667%}.col-lg-3{flex:0 0 auto;width:25%}.col-lg-4{flex:0 0 auto;width:33.3333333333%}.col-lg-5{flex:0 0 auto;width:41.6666666667%}.col-lg-6{flex:0 0 auto;width:50%}.col-lg-7{flex:0 0 auto;width:58.3333333333%}.col-lg-8{flex:0 0 auto;width:66.6666666667%}.col-lg-9{flex:0 0 
auto;width:75%}.col-lg-10{flex:0 0 auto;width:83.3333333333%}.col-lg-11{flex:0 0 auto;width:91.6666666667%}.col-lg-12{flex:0 0 auto;width:100%}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.3333333333%}.offset-lg-2{margin-left:16.6666666667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.3333333333%}.offset-lg-5{margin-left:41.6666666667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.3333333333%}.offset-lg-8{margin-left:66.6666666667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.3333333333%}.offset-lg-11{margin-left:91.6666666667%}.g-lg-0,.gx-lg-0{--bs-gutter-x:0}.g-lg-0,.gy-lg-0{--bs-gutter-y:0}.g-lg-1,.gx-lg-1{--bs-gutter-x:0.25rem}.g-lg-1,.gy-lg-1{--bs-gutter-y:0.25rem}.g-lg-2,.gx-lg-2{--bs-gutter-x:0.5rem}.g-lg-2,.gy-lg-2{--bs-gutter-y:0.5rem}.g-lg-3,.gx-lg-3{--bs-gutter-x:1rem}.g-lg-3,.gy-lg-3{--bs-gutter-y:1rem}.g-lg-4,.gx-lg-4{--bs-gutter-x:1.5rem}.g-lg-4,.gy-lg-4{--bs-gutter-y:1.5rem}.g-lg-5,.gx-lg-5{--bs-gutter-x:3rem}.g-lg-5,.gy-lg-5{--bs-gutter-y:3rem}}@media (min-width:1200px){.col-xl{flex:1 0 0%}.row-cols-xl-auto>*{flex:0 0 auto;width:auto}.row-cols-xl-1>*{flex:0 0 auto;width:100%}.row-cols-xl-2>*{flex:0 0 auto;width:50%}.row-cols-xl-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-xl-4>*{flex:0 0 auto;width:25%}.row-cols-xl-5>*{flex:0 0 auto;width:20%}.row-cols-xl-6>*{flex:0 0 auto;width:16.6666666667%}.col-xl-auto{flex:0 0 auto;width:auto}.col-xl-1{flex:0 0 auto;width:8.3333333333%}.col-xl-2{flex:0 0 auto;width:16.6666666667%}.col-xl-3{flex:0 0 auto;width:25%}.col-xl-4{flex:0 0 auto;width:33.3333333333%}.col-xl-5{flex:0 0 auto;width:41.6666666667%}.col-xl-6{flex:0 0 auto;width:50%}.col-xl-7{flex:0 0 auto;width:58.3333333333%}.col-xl-8{flex:0 0 auto;width:66.6666666667%}.col-xl-9{flex:0 0 auto;width:75%}.col-xl-10{flex:0 0 auto;width:83.3333333333%}.col-xl-11{flex:0 0 auto;width:91.6666666667%}.col-xl-12{flex:0 0 auto;width:100%}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.3333333333%}.offset-xl-2{margin-left:16.6666666667%}.offset-xl-3{margin-left:25%}.offset-xl-4{margin-left:33.3333333333%}.offset-xl-5{margin-left:41.6666666667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.3333333333%}.offset-xl-8{margin-left:66.6666666667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.3333333333%}.offset-xl-11{margin-left:91.6666666667%}.g-xl-0,.gx-xl-0{--bs-gutter-x:0}.g-xl-0,.gy-xl-0{--bs-gutter-y:0}.g-xl-1,.gx-xl-1{--bs-gutter-x:0.25rem}.g-xl-1,.gy-xl-1{--bs-gutter-y:0.25rem}.g-xl-2,.gx-xl-2{--bs-gutter-x:0.5rem}.g-xl-2,.gy-xl-2{--bs-gutter-y:0.5rem}.g-xl-3,.gx-xl-3{--bs-gutter-x:1rem}.g-xl-3,.gy-xl-3{--bs-gutter-y:1rem}.g-xl-4,.gx-xl-4{--bs-gutter-x:1.5rem}.g-xl-4,.gy-xl-4{--bs-gutter-y:1.5rem}.g-xl-5,.gx-xl-5{--bs-gutter-x:3rem}.g-xl-5,.gy-xl-5{--bs-gutter-y:3rem}}@media (min-width:1400px){.col-xxl{flex:1 0 0%}.row-cols-xxl-auto>*{flex:0 0 auto;width:auto}.row-cols-xxl-1>*{flex:0 0 auto;width:100%}.row-cols-xxl-2>*{flex:0 0 auto;width:50%}.row-cols-xxl-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-xxl-4>*{flex:0 0 auto;width:25%}.row-cols-xxl-5>*{flex:0 0 auto;width:20%}.row-cols-xxl-6>*{flex:0 0 auto;width:16.6666666667%}.col-xxl-auto{flex:0 0 auto;width:auto}.col-xxl-1{flex:0 0 auto;width:8.3333333333%}.col-xxl-2{flex:0 0 auto;width:16.6666666667%}.col-xxl-3{flex:0 0 auto;width:25%}.col-xxl-4{flex:0 0 auto;width:33.3333333333%}.col-xxl-5{flex:0 0 auto;width:41.6666666667%}.col-xxl-6{flex:0 0 auto;width:50%}.col-xxl-7{flex:0 0 auto;width:58.3333333333%}.col-xxl-8{flex:0 0 
auto;width:66.6666666667%}.col-xxl-9{flex:0 0 auto;width:75%}.col-xxl-10{flex:0 0 auto;width:83.3333333333%}.col-xxl-11{flex:0 0 auto;width:91.6666666667%}.col-xxl-12{flex:0 0 auto;width:100%}.offset-xxl-0{margin-left:0}.offset-xxl-1{margin-left:8.3333333333%}.offset-xxl-2{margin-left:16.6666666667%}.offset-xxl-3{margin-left:25%}.offset-xxl-4{margin-left:33.3333333333%}.offset-xxl-5{margin-left:41.6666666667%}.offset-xxl-6{margin-left:50%}.offset-xxl-7{margin-left:58.3333333333%}.offset-xxl-8{margin-left:66.6666666667%}.offset-xxl-9{margin-left:75%}.offset-xxl-10{margin-left:83.3333333333%}.offset-xxl-11{margin-left:91.6666666667%}.g-xxl-0,.gx-xxl-0{--bs-gutter-x:0}.g-xxl-0,.gy-xxl-0{--bs-gutter-y:0}.g-xxl-1,.gx-xxl-1{--bs-gutter-x:0.25rem}.g-xxl-1,.gy-xxl-1{--bs-gutter-y:0.25rem}.g-xxl-2,.gx-xxl-2{--bs-gutter-x:0.5rem}.g-xxl-2,.gy-xxl-2{--bs-gutter-y:0.5rem}.g-xxl-3,.gx-xxl-3{--bs-gutter-x:1rem}.g-xxl-3,.gy-xxl-3{--bs-gutter-y:1rem}.g-xxl-4,.gx-xxl-4{--bs-gutter-x:1.5rem}.g-xxl-4,.gy-xxl-4{--bs-gutter-y:1.5rem}.g-xxl-5,.gx-xxl-5{--bs-gutter-x:3rem}.g-xxl-5,.gy-xxl-5{--bs-gutter-y:3rem}}.table{--bs-table-bg:transparent;--bs-table-accent-bg:transparent;--bs-table-striped-color:#212529;--bs-table-striped-bg:rgba(0, 0, 0, 0.05);--bs-table-active-color:#212529;--bs-table-active-bg:rgba(0, 0, 0, 0.1);--bs-table-hover-color:#212529;--bs-table-hover-bg:rgba(0, 0, 0, 0.075);width:100%;margin-bottom:1rem;color:#212529;vertical-align:top;border-color:#dee2e6}.table>:not(caption)>*>*{padding:.5rem .5rem;background-color:var(--bs-table-bg);border-bottom-width:1px;box-shadow:inset 0 0 0 9999px var(--bs-table-accent-bg)}.table>tbody{vertical-align:inherit}.table>thead{vertical-align:bottom}.table>:not(:last-child)>:last-child>*{border-bottom-color:currentColor}.caption-top{caption-side:top}.table-sm>:not(caption)>*>*{padding:.25rem .25rem}.table-bordered>:not(caption)>*{border-width:1px 0}.table-bordered>:not(caption)>*>*{border-width:0 
1px}.table-borderless>:not(caption)>*>*{border-bottom-width:0}.table-striped>tbody>tr:nth-of-type(odd){--bs-table-accent-bg:var(--bs-table-striped-bg);color:var(--bs-table-striped-color)}.table-active{--bs-table-accent-bg:var(--bs-table-active-bg);color:var(--bs-table-active-color)}.table-hover>tbody>tr:hover{--bs-table-accent-bg:var(--bs-table-hover-bg);color:var(--bs-table-hover-color)}.table-primary{--bs-table-bg:#cfe2ff;--bs-table-striped-bg:#c5d7f2;--bs-table-striped-color:#000;--bs-table-active-bg:#bacbe6;--bs-table-active-color:#000;--bs-table-hover-bg:#bfd1ec;--bs-table-hover-color:#000;color:#000;border-color:#bacbe6}.table-secondary{--bs-table-bg:#e2e3e5;--bs-table-striped-bg:#d7d8da;--bs-table-striped-color:#000;--bs-table-active-bg:#cbccce;--bs-table-active-color:#000;--bs-table-hover-bg:#d1d2d4;--bs-table-hover-color:#000;color:#000;border-color:#cbccce}.table-success{--bs-table-bg:#d1e7dd;--bs-table-striped-bg:#c7dbd2;--bs-table-striped-color:#000;--bs-table-active-bg:#bcd0c7;--bs-table-active-color:#000;--bs-table-hover-bg:#c1d6cc;--bs-table-hover-color:#000;color:#000;border-color:#bcd0c7}.table-info{--bs-table-bg:#cff4fc;--bs-table-striped-bg:#c5e8ef;--bs-table-striped-color:#000;--bs-table-active-bg:#badce3;--bs-table-active-color:#000;--bs-table-hover-bg:#bfe2e9;--bs-table-hover-color:#000;color:#000;border-color:#badce3}.table-warning{--bs-table-bg:#fff3cd;--bs-table-striped-bg:#f2e7c3;--bs-table-striped-color:#000;--bs-table-active-bg:#e6dbb9;--bs-table-active-color:#000;--bs-table-hover-bg:#ece1be;--bs-table-hover-color:#000;color:#000;border-color:#e6dbb9}.table-danger{--bs-table-bg:#f8d7da;--bs-table-striped-bg:#eccccf;--bs-table-striped-color:#000;--bs-table-active-bg:#dfc2c4;--bs-table-active-color:#000;--bs-table-hover-bg:#e5c7ca;--bs-table-hover-color:#000;color:#000;border-color:#dfc2c4}.table-light{--bs-table-bg:#f8f9fa;--bs-table-striped-bg:#ecedee;--bs-table-striped-color:#000;--bs-table-active-bg:#dfe0e1;--bs-table-active-color:#000;--bs-table-hover-bg:#e5e6e7;--bs-table-hover-color:#000;color:#000;border-color:#dfe0e1}.table-dark{--bs-table-bg:#212529;--bs-table-striped-bg:#2c3034;--bs-table-striped-color:#fff;--bs-table-active-bg:#373b3e;--bs-table-active-color:#fff;--bs-table-hover-bg:#323539;--bs-table-hover-color:#fff;color:#fff;border-color:#373b3e}.table-responsive{overflow-x:auto;-webkit-overflow-scrolling:touch}@media (max-width:575.98px){.table-responsive-sm{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:767.98px){.table-responsive-md{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:991.98px){.table-responsive-lg{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:1199.98px){.table-responsive-xl{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:1399.98px){.table-responsive-xxl{overflow-x:auto;-webkit-overflow-scrolling:touch}}.form-label{margin-bottom:.5rem}.col-form-label{padding-top:calc(.375rem + 1px);padding-bottom:calc(.375rem + 1px);margin-bottom:0;font-size:inherit;line-height:1.5}.col-form-label-lg{padding-top:calc(.5rem + 1px);padding-bottom:calc(.5rem + 1px);font-size:1.25rem}.col-form-label-sm{padding-top:calc(.25rem + 1px);padding-bottom:calc(.25rem + 1px);font-size:.875rem}.form-text{margin-top:.25rem;font-size:.875em;color:#6c757d}.form-control{display:block;width:100%;padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#212529;background-color:#fff;background-clip:padding-box;border:1px solid 
#ced4da;-webkit-appearance:none;-moz-appearance:none;appearance:none;border-radius:.25rem;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-control{transition:none}}.form-control[type=file]{overflow:hidden}.form-control[type=file]:not(:disabled):not([readonly]){cursor:pointer}.form-control:focus{color:#212529;background-color:#fff;border-color:#86b7fe;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.form-control::-webkit-date-and-time-value{height:1.5em}.form-control::-moz-placeholder{color:#6c757d;opacity:1}.form-control::placeholder{color:#6c757d;opacity:1}.form-control:disabled,.form-control[readonly]{background-color:#e9ecef;opacity:1}.form-control::file-selector-button{padding:.375rem .75rem;margin:-.375rem -.75rem;-webkit-margin-end:.75rem;margin-inline-end:.75rem;color:#212529;background-color:#e9ecef;pointer-events:none;border-color:inherit;border-style:solid;border-width:0;border-inline-end-width:1px;border-radius:0;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-control::file-selector-button{transition:none}}.form-control:hover:not(:disabled):not([readonly])::file-selector-button{background-color:#dde0e3}.form-control::-webkit-file-upload-button{padding:.375rem .75rem;margin:-.375rem -.75rem;-webkit-margin-end:.75rem;margin-inline-end:.75rem;color:#212529;background-color:#e9ecef;pointer-events:none;border-color:inherit;border-style:solid;border-width:0;border-inline-end-width:1px;border-radius:0;-webkit-transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-control::-webkit-file-upload-button{-webkit-transition:none;transition:none}}.form-control:hover:not(:disabled):not([readonly])::-webkit-file-upload-button{background-color:#dde0e3}.form-control-plaintext{display:block;width:100%;padding:.375rem 0;margin-bottom:0;line-height:1.5;color:#212529;background-color:transparent;border:solid transparent;border-width:1px 0}.form-control-plaintext.form-control-lg,.form-control-plaintext.form-control-sm{padding-right:0;padding-left:0}.form-control-sm{min-height:calc(1.5em + .5rem + 2px);padding:.25rem .5rem;font-size:.875rem;border-radius:.2rem}.form-control-sm::file-selector-button{padding:.25rem .5rem;margin:-.25rem -.5rem;-webkit-margin-end:.5rem;margin-inline-end:.5rem}.form-control-sm::-webkit-file-upload-button{padding:.25rem .5rem;margin:-.25rem -.5rem;-webkit-margin-end:.5rem;margin-inline-end:.5rem}.form-control-lg{min-height:calc(1.5em + 1rem + 2px);padding:.5rem 1rem;font-size:1.25rem;border-radius:.3rem}.form-control-lg::file-selector-button{padding:.5rem 1rem;margin:-.5rem -1rem;-webkit-margin-end:1rem;margin-inline-end:1rem}.form-control-lg::-webkit-file-upload-button{padding:.5rem 1rem;margin:-.5rem -1rem;-webkit-margin-end:1rem;margin-inline-end:1rem}textarea.form-control{min-height:calc(1.5em + .75rem + 2px)}textarea.form-control-sm{min-height:calc(1.5em + .5rem + 2px)}textarea.form-control-lg{min-height:calc(1.5em + 1rem + 
2px)}.form-control-color{max-width:3rem;height:auto;padding:.375rem}.form-control-color:not(:disabled):not([readonly]){cursor:pointer}.form-control-color::-moz-color-swatch{height:1.5em;border-radius:.25rem}.form-control-color::-webkit-color-swatch{height:1.5em;border-radius:.25rem}.form-select{display:block;width:100%;padding:.375rem 2.25rem .375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#212529;background-color:#fff;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M2 5l6 6 6-6'/%3e%3c/svg%3e");background-repeat:no-repeat;background-position:right .75rem center;background-size:16px 12px;border:1px solid #ced4da;border-radius:.25rem;-webkit-appearance:none;-moz-appearance:none;appearance:none}.form-select:focus{border-color:#86b7fe;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.form-select[multiple],.form-select[size]:not([size="1"]){padding-right:.75rem;background-image:none}.form-select:disabled{background-color:#e9ecef}.form-select:-moz-focusring{color:transparent;text-shadow:0 0 0 #212529}.form-select-sm{padding-top:.25rem;padding-bottom:.25rem;padding-left:.5rem;font-size:.875rem}.form-select-lg{padding-top:.5rem;padding-bottom:.5rem;padding-left:1rem;font-size:1.25rem}.form-check{display:block;min-height:1.5rem;padding-left:1.5em;margin-bottom:.125rem}.form-check .form-check-input{float:left;margin-left:-1.5em}.form-check-input{width:1em;height:1em;margin-top:.25em;vertical-align:top;background-color:#fff;background-repeat:no-repeat;background-position:center;background-size:contain;border:1px solid rgba(0,0,0,.25);-webkit-appearance:none;-moz-appearance:none;appearance:none;-webkit-print-color-adjust:exact;color-adjust:exact}.form-check-input[type=checkbox]{border-radius:.25em}.form-check-input[type=radio]{border-radius:50%}.form-check-input:active{filter:brightness(90%)}.form-check-input:focus{border-color:#86b7fe;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.form-check-input:checked{background-color:#0d6efd;border-color:#0d6efd}.form-check-input:checked[type=checkbox]{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3e%3cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M6 10l3 3l6-6'/%3e%3c/svg%3e")}.form-check-input:checked[type=radio]{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='2' fill='%23fff'/%3e%3c/svg%3e")}.form-check-input[type=checkbox]:indeterminate{background-color:#0d6efd;border-color:#0d6efd;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3e%3cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M6 10h8'/%3e%3c/svg%3e")}.form-check-input:disabled{pointer-events:none;filter:none;opacity:.5}.form-check-input:disabled~.form-check-label,.form-check-input[disabled]~.form-check-label{opacity:.5}.form-switch{padding-left:2.5em}.form-switch .form-check-input{width:2em;margin-left:-2.5em;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='rgba%280, 0, 0, 0.25%29'/%3e%3c/svg%3e");background-position:left center;border-radius:2em;transition:background-position .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-switch 
.form-check-input{transition:none}}.form-switch .form-check-input:focus{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%2386b7fe'/%3e%3c/svg%3e")}.form-switch .form-check-input:checked{background-position:right center;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%23fff'/%3e%3c/svg%3e")}.form-check-inline{display:inline-block;margin-right:1rem}.btn-check{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.btn-check:disabled+.btn,.btn-check[disabled]+.btn{pointer-events:none;filter:none;opacity:.65}.form-range{width:100%;height:1.5rem;padding:0;background-color:transparent;-webkit-appearance:none;-moz-appearance:none;appearance:none}.form-range:focus{outline:0}.form-range:focus::-webkit-slider-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .25rem rgba(13,110,253,.25)}.form-range:focus::-moz-range-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .25rem rgba(13,110,253,.25)}.form-range::-moz-focus-outer{border:0}.form-range::-webkit-slider-thumb{width:1rem;height:1rem;margin-top:-.25rem;background-color:#0d6efd;border:0;border-radius:1rem;-webkit-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;-webkit-appearance:none;appearance:none}@media (prefers-reduced-motion:reduce){.form-range::-webkit-slider-thumb{-webkit-transition:none;transition:none}}.form-range::-webkit-slider-thumb:active{background-color:#b6d4fe}.form-range::-webkit-slider-runnable-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.form-range::-moz-range-thumb{width:1rem;height:1rem;background-color:#0d6efd;border:0;border-radius:1rem;-moz-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;-moz-appearance:none;appearance:none}@media (prefers-reduced-motion:reduce){.form-range::-moz-range-thumb{-moz-transition:none;transition:none}}.form-range::-moz-range-thumb:active{background-color:#b6d4fe}.form-range::-moz-range-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.form-range:disabled{pointer-events:none}.form-range:disabled::-webkit-slider-thumb{background-color:#adb5bd}.form-range:disabled::-moz-range-thumb{background-color:#adb5bd}.form-floating{position:relative}.form-floating>.form-control,.form-floating>.form-select{height:calc(3.5rem + 2px);padding:1rem .75rem}.form-floating>label{position:absolute;top:0;left:0;height:100%;padding:1rem .75rem;pointer-events:none;border:1px solid transparent;transform-origin:0 0;transition:opacity .1s ease-in-out,transform .1s ease-in-out}@media 
(prefers-reduced-motion:reduce){.form-floating>label{transition:none}}.form-floating>.form-control::-moz-placeholder{color:transparent}.form-floating>.form-control::placeholder{color:transparent}.form-floating>.form-control:not(:-moz-placeholder-shown){padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:focus,.form-floating>.form-control:not(:placeholder-shown){padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:-webkit-autofill{padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-select{padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:not(:-moz-placeholder-shown)~label{opacity:.65;transform:scale(.85) translateY(-.5rem) translateX(.15rem)}.form-floating>.form-control:focus~label,.form-floating>.form-control:not(:placeholder-shown)~label,.form-floating>.form-select~label{opacity:.65;transform:scale(.85) translateY(-.5rem) translateX(.15rem)}.form-floating>.form-control:-webkit-autofill~label{opacity:.65;transform:scale(.85) translateY(-.5rem) translateX(.15rem)}.input-group{position:relative;display:flex;flex-wrap:wrap;align-items:stretch;width:100%}.input-group>.form-control,.input-group>.form-select{position:relative;flex:1 1 auto;width:1%;min-width:0}.input-group>.form-control:focus,.input-group>.form-select:focus{z-index:3}.input-group .btn{position:relative;z-index:2}.input-group .btn:focus{z-index:3}.input-group-text{display:flex;align-items:center;padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:center;white-space:nowrap;background-color:#e9ecef;border:1px solid #ced4da;border-radius:.25rem}.input-group-lg>.btn,.input-group-lg>.form-control,.input-group-lg>.form-select,.input-group-lg>.input-group-text{padding:.5rem 1rem;font-size:1.25rem;border-radius:.3rem}.input-group-sm>.btn,.input-group-sm>.form-control,.input-group-sm>.form-select,.input-group-sm>.input-group-text{padding:.25rem .5rem;font-size:.875rem;border-radius:.2rem}.input-group-lg>.form-select,.input-group-sm>.form-select{padding-right:3rem}.input-group:not(.has-validation)>.dropdown-toggle:nth-last-child(n+3),.input-group:not(.has-validation)>:not(:last-child):not(.dropdown-toggle):not(.dropdown-menu){border-top-right-radius:0;border-bottom-right-radius:0}.input-group.has-validation>.dropdown-toggle:nth-last-child(n+4),.input-group.has-validation>:nth-last-child(n+3):not(.dropdown-toggle):not(.dropdown-menu){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>:not(:first-child):not(.dropdown-menu):not(.valid-tooltip):not(.valid-feedback):not(.invalid-tooltip):not(.invalid-feedback){margin-left:-1px;border-top-left-radius:0;border-bottom-left-radius:0}.valid-feedback{display:none;width:100%;margin-top:.25rem;font-size:.875em;color:#198754}.valid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;color:#fff;background-color:rgba(25,135,84,.9);border-radius:.25rem}.is-valid~.valid-feedback,.is-valid~.valid-tooltip,.was-validated :valid~.valid-feedback,.was-validated :valid~.valid-tooltip{display:block}.form-control.is-valid,.was-validated .form-control:valid{border-color:#198754;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23198754' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 
4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.form-control.is-valid:focus,.was-validated .form-control:valid:focus{border-color:#198754;box-shadow:0 0 0 .25rem rgba(25,135,84,.25)}.was-validated textarea.form-control:valid,textarea.form-control.is-valid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.form-select.is-valid,.was-validated .form-select:valid{border-color:#198754}.form-select.is-valid:not([multiple]):not([size]),.form-select.is-valid:not([multiple])[size="1"],.was-validated .form-select:valid:not([multiple]):not([size]),.was-validated .form-select:valid:not([multiple])[size="1"]{padding-right:4.125rem;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M2 5l6 6 6-6'/%3e%3c/svg%3e"),url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23198754' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e");background-position:right .75rem center,center right 2.25rem;background-size:16px 12px,calc(.75em + .375rem) calc(.75em + .375rem)}.form-select.is-valid:focus,.was-validated .form-select:valid:focus{border-color:#198754;box-shadow:0 0 0 .25rem rgba(25,135,84,.25)}.form-check-input.is-valid,.was-validated .form-check-input:valid{border-color:#198754}.form-check-input.is-valid:checked,.was-validated .form-check-input:valid:checked{background-color:#198754}.form-check-input.is-valid:focus,.was-validated .form-check-input:valid:focus{box-shadow:0 0 0 .25rem rgba(25,135,84,.25)}.form-check-input.is-valid~.form-check-label,.was-validated .form-check-input:valid~.form-check-label{color:#198754}.form-check-inline .form-check-input~.valid-feedback{margin-left:.5em}.input-group .form-control.is-valid,.input-group .form-select.is-valid,.was-validated .input-group .form-control:valid,.was-validated .input-group .form-select:valid{z-index:1}.input-group .form-control.is-valid:focus,.input-group .form-select.is-valid:focus,.was-validated .input-group .form-control:valid:focus,.was-validated .input-group .form-select:valid:focus{z-index:3}.invalid-feedback{display:none;width:100%;margin-top:.25rem;font-size:.875em;color:#dc3545}.invalid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;color:#fff;background-color:rgba(220,53,69,.9);border-radius:.25rem}.is-invalid~.invalid-feedback,.is-invalid~.invalid-tooltip,.was-validated :invalid~.invalid-feedback,.was-validated :invalid~.invalid-tooltip{display:block}.form-control.is-invalid,.was-validated .form-control:invalid{border-color:#dc3545;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 12 12' width='12' height='12' fill='none' stroke='%23dc3545'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3e%3c/svg%3e");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.form-control.is-invalid:focus,.was-validated 
.form-control:invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .25rem rgba(220,53,69,.25)}.was-validated textarea.form-control:invalid,textarea.form-control.is-invalid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.form-select.is-invalid,.was-validated .form-select:invalid{border-color:#dc3545}.form-select.is-invalid:not([multiple]):not([size]),.form-select.is-invalid:not([multiple])[size="1"],.was-validated .form-select:invalid:not([multiple]):not([size]),.was-validated .form-select:invalid:not([multiple])[size="1"]{padding-right:4.125rem;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M2 5l6 6 6-6'/%3e%3c/svg%3e"),url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 12 12' width='12' height='12' fill='none' stroke='%23dc3545'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3e%3c/svg%3e");background-position:right .75rem center,center right 2.25rem;background-size:16px 12px,calc(.75em + .375rem) calc(.75em + .375rem)}.form-select.is-invalid:focus,.was-validated .form-select:invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .25rem rgba(220,53,69,.25)}.form-check-input.is-invalid,.was-validated .form-check-input:invalid{border-color:#dc3545}.form-check-input.is-invalid:checked,.was-validated .form-check-input:invalid:checked{background-color:#dc3545}.form-check-input.is-invalid:focus,.was-validated .form-check-input:invalid:focus{box-shadow:0 0 0 .25rem rgba(220,53,69,.25)}.form-check-input.is-invalid~.form-check-label,.was-validated .form-check-input:invalid~.form-check-label{color:#dc3545}.form-check-inline .form-check-input~.invalid-feedback{margin-left:.5em}.input-group .form-control.is-invalid,.input-group .form-select.is-invalid,.was-validated .input-group .form-control:invalid,.was-validated .input-group .form-select:invalid{z-index:2}.input-group .form-control.is-invalid:focus,.input-group .form-select.is-invalid:focus,.was-validated .input-group .form-control:invalid:focus,.was-validated .input-group .form-select:invalid:focus{z-index:3}.btn{display:inline-block;font-weight:400;line-height:1.5;color:#212529;text-align:center;text-decoration:none;vertical-align:middle;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;user-select:none;background-color:transparent;border:1px solid transparent;padding:.375rem .75rem;font-size:1rem;border-radius:.25rem;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.btn{transition:none}}.btn:hover{color:#212529}.btn-check:focus+.btn,.btn:focus{outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.btn.disabled,.btn:disabled,fieldset:disabled .btn{pointer-events:none;opacity:.65}.btn-primary{color:#fff;background-color:#0d6efd;border-color:#0d6efd}.btn-primary:hover{color:#fff;background-color:#0b5ed7;border-color:#0a58ca}.btn-check:focus+.btn-primary,.btn-primary:focus{color:#fff;background-color:#0b5ed7;border-color:#0a58ca;box-shadow:0 0 0 .25rem 
rgba(49,132,253,.5)}.btn-check:active+.btn-primary,.btn-check:checked+.btn-primary,.btn-primary.active,.btn-primary:active,.show>.btn-primary.dropdown-toggle{color:#fff;background-color:#0a58ca;border-color:#0a53be}.btn-check:active+.btn-primary:focus,.btn-check:checked+.btn-primary:focus,.btn-primary.active:focus,.btn-primary:active:focus,.show>.btn-primary.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(49,132,253,.5)}.btn-primary.disabled,.btn-primary:disabled{color:#fff;background-color:#0d6efd;border-color:#0d6efd}.btn-secondary{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary:hover{color:#fff;background-color:#5c636a;border-color:#565e64}.btn-check:focus+.btn-secondary,.btn-secondary:focus{color:#fff;background-color:#5c636a;border-color:#565e64;box-shadow:0 0 0 .25rem rgba(130,138,145,.5)}.btn-check:active+.btn-secondary,.btn-check:checked+.btn-secondary,.btn-secondary.active,.btn-secondary:active,.show>.btn-secondary.dropdown-toggle{color:#fff;background-color:#565e64;border-color:#51585e}.btn-check:active+.btn-secondary:focus,.btn-check:checked+.btn-secondary:focus,.btn-secondary.active:focus,.btn-secondary:active:focus,.show>.btn-secondary.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(130,138,145,.5)}.btn-secondary.disabled,.btn-secondary:disabled{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-success{color:#fff;background-color:#198754;border-color:#198754}.btn-success:hover{color:#fff;background-color:#157347;border-color:#146c43}.btn-check:focus+.btn-success,.btn-success:focus{color:#fff;background-color:#157347;border-color:#146c43;box-shadow:0 0 0 .25rem rgba(60,153,110,.5)}.btn-check:active+.btn-success,.btn-check:checked+.btn-success,.btn-success.active,.btn-success:active,.show>.btn-success.dropdown-toggle{color:#fff;background-color:#146c43;border-color:#13653f}.btn-check:active+.btn-success:focus,.btn-check:checked+.btn-success:focus,.btn-success.active:focus,.btn-success:active:focus,.show>.btn-success.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(60,153,110,.5)}.btn-success.disabled,.btn-success:disabled{color:#fff;background-color:#198754;border-color:#198754}.btn-info{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-info:hover{color:#000;background-color:#31d2f2;border-color:#25cff2}.btn-check:focus+.btn-info,.btn-info:focus{color:#000;background-color:#31d2f2;border-color:#25cff2;box-shadow:0 0 0 .25rem rgba(11,172,204,.5)}.btn-check:active+.btn-info,.btn-check:checked+.btn-info,.btn-info.active,.btn-info:active,.show>.btn-info.dropdown-toggle{color:#000;background-color:#3dd5f3;border-color:#25cff2}.btn-check:active+.btn-info:focus,.btn-check:checked+.btn-info:focus,.btn-info.active:focus,.btn-info:active:focus,.show>.btn-info.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(11,172,204,.5)}.btn-info.disabled,.btn-info:disabled{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-warning{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-warning:hover{color:#000;background-color:#ffca2c;border-color:#ffc720}.btn-check:focus+.btn-warning,.btn-warning:focus{color:#000;background-color:#ffca2c;border-color:#ffc720;box-shadow:0 0 0 .25rem 
rgba(217,164,6,.5)}.btn-check:active+.btn-warning,.btn-check:checked+.btn-warning,.btn-warning.active,.btn-warning:active,.show>.btn-warning.dropdown-toggle{color:#000;background-color:#ffcd39;border-color:#ffc720}.btn-check:active+.btn-warning:focus,.btn-check:checked+.btn-warning:focus,.btn-warning.active:focus,.btn-warning:active:focus,.show>.btn-warning.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(217,164,6,.5)}.btn-warning.disabled,.btn-warning:disabled{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-danger{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:hover{color:#fff;background-color:#bb2d3b;border-color:#b02a37}.btn-check:focus+.btn-danger,.btn-danger:focus{color:#fff;background-color:#bb2d3b;border-color:#b02a37;box-shadow:0 0 0 .25rem rgba(225,83,97,.5)}.btn-check:active+.btn-danger,.btn-check:checked+.btn-danger,.btn-danger.active,.btn-danger:active,.show>.btn-danger.dropdown-toggle{color:#fff;background-color:#b02a37;border-color:#a52834}.btn-check:active+.btn-danger:focus,.btn-check:checked+.btn-danger:focus,.btn-danger.active:focus,.btn-danger:active:focus,.show>.btn-danger.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(225,83,97,.5)}.btn-danger.disabled,.btn-danger:disabled{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-light{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:hover{color:#000;background-color:#f9fafb;border-color:#f9fafb}.btn-check:focus+.btn-light,.btn-light:focus{color:#000;background-color:#f9fafb;border-color:#f9fafb;box-shadow:0 0 0 .25rem rgba(211,212,213,.5)}.btn-check:active+.btn-light,.btn-check:checked+.btn-light,.btn-light.active,.btn-light:active,.show>.btn-light.dropdown-toggle{color:#000;background-color:#f9fafb;border-color:#f9fafb}.btn-check:active+.btn-light:focus,.btn-check:checked+.btn-light:focus,.btn-light.active:focus,.btn-light:active:focus,.show>.btn-light.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(211,212,213,.5)}.btn-light.disabled,.btn-light:disabled{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-dark{color:#fff;background-color:#212529;border-color:#212529}.btn-dark:hover{color:#fff;background-color:#1c1f23;border-color:#1a1e21}.btn-check:focus+.btn-dark,.btn-dark:focus{color:#fff;background-color:#1c1f23;border-color:#1a1e21;box-shadow:0 0 0 .25rem rgba(66,70,73,.5)}.btn-check:active+.btn-dark,.btn-check:checked+.btn-dark,.btn-dark.active,.btn-dark:active,.show>.btn-dark.dropdown-toggle{color:#fff;background-color:#1a1e21;border-color:#191c1f}.btn-check:active+.btn-dark:focus,.btn-check:checked+.btn-dark:focus,.btn-dark.active:focus,.btn-dark:active:focus,.show>.btn-dark.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(66,70,73,.5)}.btn-dark.disabled,.btn-dark:disabled{color:#fff;background-color:#212529;border-color:#212529}.btn-outline-primary{color:#0d6efd;border-color:#0d6efd}.btn-outline-primary:hover{color:#fff;background-color:#0d6efd;border-color:#0d6efd}.btn-check:focus+.btn-outline-primary,.btn-outline-primary:focus{box-shadow:0 0 0 .25rem rgba(13,110,253,.5)}.btn-check:active+.btn-outline-primary,.btn-check:checked+.btn-outline-primary,.btn-outline-primary.active,.btn-outline-primary.dropdown-toggle.show,.btn-outline-primary:active{color:#fff;background-color:#0d6efd;border-color:#0d6efd}.btn-check:active+.btn-outline-primary:focus,.btn-check:checked+.btn-outline-primary:focus,.btn-outline-primary.active:focus,.btn-outline-primary.dropdown-toggle.show:focus,.btn-outline-primary:active:focus{box-shadow:0 0 0 .25rem 
rgba(13,110,253,.5)}.btn-outline-primary.disabled,.btn-outline-primary:disabled{color:#0d6efd;background-color:transparent}.btn-outline-secondary{color:#6c757d;border-color:#6c757d}.btn-outline-secondary:hover{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-check:focus+.btn-outline-secondary,.btn-outline-secondary:focus{box-shadow:0 0 0 .25rem rgba(108,117,125,.5)}.btn-check:active+.btn-outline-secondary,.btn-check:checked+.btn-outline-secondary,.btn-outline-secondary.active,.btn-outline-secondary.dropdown-toggle.show,.btn-outline-secondary:active{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-check:active+.btn-outline-secondary:focus,.btn-check:checked+.btn-outline-secondary:focus,.btn-outline-secondary.active:focus,.btn-outline-secondary.dropdown-toggle.show:focus,.btn-outline-secondary:active:focus{box-shadow:0 0 0 .25rem rgba(108,117,125,.5)}.btn-outline-secondary.disabled,.btn-outline-secondary:disabled{color:#6c757d;background-color:transparent}.btn-outline-success{color:#198754;border-color:#198754}.btn-outline-success:hover{color:#fff;background-color:#198754;border-color:#198754}.btn-check:focus+.btn-outline-success,.btn-outline-success:focus{box-shadow:0 0 0 .25rem rgba(25,135,84,.5)}.btn-check:active+.btn-outline-success,.btn-check:checked+.btn-outline-success,.btn-outline-success.active,.btn-outline-success.dropdown-toggle.show,.btn-outline-success:active{color:#fff;background-color:#198754;border-color:#198754}.btn-check:active+.btn-outline-success:focus,.btn-check:checked+.btn-outline-success:focus,.btn-outline-success.active:focus,.btn-outline-success.dropdown-toggle.show:focus,.btn-outline-success:active:focus{box-shadow:0 0 0 .25rem rgba(25,135,84,.5)}.btn-outline-success.disabled,.btn-outline-success:disabled{color:#198754;background-color:transparent}.btn-outline-info{color:#0dcaf0;border-color:#0dcaf0}.btn-outline-info:hover{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-check:focus+.btn-outline-info,.btn-outline-info:focus{box-shadow:0 0 0 .25rem rgba(13,202,240,.5)}.btn-check:active+.btn-outline-info,.btn-check:checked+.btn-outline-info,.btn-outline-info.active,.btn-outline-info.dropdown-toggle.show,.btn-outline-info:active{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-check:active+.btn-outline-info:focus,.btn-check:checked+.btn-outline-info:focus,.btn-outline-info.active:focus,.btn-outline-info.dropdown-toggle.show:focus,.btn-outline-info:active:focus{box-shadow:0 0 0 .25rem rgba(13,202,240,.5)}.btn-outline-info.disabled,.btn-outline-info:disabled{color:#0dcaf0;background-color:transparent}.btn-outline-warning{color:#ffc107;border-color:#ffc107}.btn-outline-warning:hover{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-check:focus+.btn-outline-warning,.btn-outline-warning:focus{box-shadow:0 0 0 .25rem rgba(255,193,7,.5)}.btn-check:active+.btn-outline-warning,.btn-check:checked+.btn-outline-warning,.btn-outline-warning.active,.btn-outline-warning.dropdown-toggle.show,.btn-outline-warning:active{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-check:active+.btn-outline-warning:focus,.btn-check:checked+.btn-outline-warning:focus,.btn-outline-warning.active:focus,.btn-outline-warning.dropdown-toggle.show:focus,.btn-outline-warning:active:focus{box-shadow:0 0 0 .25rem 
rgba(255,193,7,.5)}.btn-outline-warning.disabled,.btn-outline-warning:disabled{color:#ffc107;background-color:transparent}.btn-outline-danger{color:#dc3545;border-color:#dc3545}.btn-outline-danger:hover{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-check:focus+.btn-outline-danger,.btn-outline-danger:focus{box-shadow:0 0 0 .25rem rgba(220,53,69,.5)}.btn-check:active+.btn-outline-danger,.btn-check:checked+.btn-outline-danger,.btn-outline-danger.active,.btn-outline-danger.dropdown-toggle.show,.btn-outline-danger:active{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-check:active+.btn-outline-danger:focus,.btn-check:checked+.btn-outline-danger:focus,.btn-outline-danger.active:focus,.btn-outline-danger.dropdown-toggle.show:focus,.btn-outline-danger:active:focus{box-shadow:0 0 0 .25rem rgba(220,53,69,.5)}.btn-outline-danger.disabled,.btn-outline-danger:disabled{color:#dc3545;background-color:transparent}.btn-outline-light{color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:hover{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-check:focus+.btn-outline-light,.btn-outline-light:focus{box-shadow:0 0 0 .25rem rgba(248,249,250,.5)}.btn-check:active+.btn-outline-light,.btn-check:checked+.btn-outline-light,.btn-outline-light.active,.btn-outline-light.dropdown-toggle.show,.btn-outline-light:active{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-check:active+.btn-outline-light:focus,.btn-check:checked+.btn-outline-light:focus,.btn-outline-light.active:focus,.btn-outline-light.dropdown-toggle.show:focus,.btn-outline-light:active:focus{box-shadow:0 0 0 .25rem rgba(248,249,250,.5)}.btn-outline-light.disabled,.btn-outline-light:disabled{color:#f8f9fa;background-color:transparent}.btn-outline-dark{color:#212529;border-color:#212529}.btn-outline-dark:hover{color:#fff;background-color:#212529;border-color:#212529}.btn-check:focus+.btn-outline-dark,.btn-outline-dark:focus{box-shadow:0 0 0 .25rem rgba(33,37,41,.5)}.btn-check:active+.btn-outline-dark,.btn-check:checked+.btn-outline-dark,.btn-outline-dark.active,.btn-outline-dark.dropdown-toggle.show,.btn-outline-dark:active{color:#fff;background-color:#212529;border-color:#212529}.btn-check:active+.btn-outline-dark:focus,.btn-check:checked+.btn-outline-dark:focus,.btn-outline-dark.active:focus,.btn-outline-dark.dropdown-toggle.show:focus,.btn-outline-dark:active:focus{box-shadow:0 0 0 .25rem rgba(33,37,41,.5)}.btn-outline-dark.disabled,.btn-outline-dark:disabled{color:#212529;background-color:transparent}.btn-link{font-weight:400;color:#0d6efd;text-decoration:underline}.btn-link:hover{color:#0a58ca}.btn-link.disabled,.btn-link:disabled{color:#6c757d}.btn-group-lg>.btn,.btn-lg{padding:.5rem 1rem;font-size:1.25rem;border-radius:.3rem}.btn-group-sm>.btn,.btn-sm{padding:.25rem .5rem;font-size:.875rem;border-radius:.2rem}.fade{transition:opacity .15s linear}@media (prefers-reduced-motion:reduce){.fade{transition:none}}.fade:not(.show){opacity:0}.collapse:not(.show){display:none}.collapsing{height:0;overflow:hidden;transition:height .35s ease}@media (prefers-reduced-motion:reduce){.collapsing{transition:none}}.dropdown,.dropend,.dropstart,.dropup{position:relative}.dropdown-toggle{white-space:nowrap}.dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid;border-right:.3em solid transparent;border-bottom:0;border-left:.3em solid 
transparent}.dropdown-toggle:empty::after{margin-left:0}.dropdown-menu{position:absolute;z-index:1000;display:none;min-width:10rem;padding:.5rem 0;margin:0;font-size:1rem;color:#212529;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.15);border-radius:.25rem}.dropdown-menu[data-bs-popper]{top:100%;left:0;margin-top:.125rem}.dropdown-menu-start{--bs-position:start}.dropdown-menu-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-end{--bs-position:end}.dropdown-menu-end[data-bs-popper]{right:0;left:auto}@media (min-width:576px){.dropdown-menu-sm-start{--bs-position:start}.dropdown-menu-sm-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-sm-end{--bs-position:end}.dropdown-menu-sm-end[data-bs-popper]{right:0;left:auto}}@media (min-width:768px){.dropdown-menu-md-start{--bs-position:start}.dropdown-menu-md-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-md-end{--bs-position:end}.dropdown-menu-md-end[data-bs-popper]{right:0;left:auto}}@media (min-width:992px){.dropdown-menu-lg-start{--bs-position:start}.dropdown-menu-lg-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-lg-end{--bs-position:end}.dropdown-menu-lg-end[data-bs-popper]{right:0;left:auto}}@media (min-width:1200px){.dropdown-menu-xl-start{--bs-position:start}.dropdown-menu-xl-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-xl-end{--bs-position:end}.dropdown-menu-xl-end[data-bs-popper]{right:0;left:auto}}@media (min-width:1400px){.dropdown-menu-xxl-start{--bs-position:start}.dropdown-menu-xxl-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-xxl-end{--bs-position:end}.dropdown-menu-xxl-end[data-bs-popper]{right:0;left:auto}}.dropup .dropdown-menu[data-bs-popper]{top:auto;bottom:100%;margin-top:0;margin-bottom:.125rem}.dropup .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:0;border-right:.3em solid transparent;border-bottom:.3em solid;border-left:.3em solid transparent}.dropup .dropdown-toggle:empty::after{margin-left:0}.dropend .dropdown-menu[data-bs-popper]{top:0;right:auto;left:100%;margin-top:0;margin-left:.125rem}.dropend .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:0;border-bottom:.3em solid transparent;border-left:.3em solid}.dropend .dropdown-toggle:empty::after{margin-left:0}.dropend .dropdown-toggle::after{vertical-align:0}.dropstart .dropdown-menu[data-bs-popper]{top:0;right:100%;left:auto;margin-top:0;margin-right:.125rem}.dropstart .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:""}.dropstart .dropdown-toggle::after{display:none}.dropstart .dropdown-toggle::before{display:inline-block;margin-right:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:.3em solid;border-bottom:.3em solid transparent}.dropstart .dropdown-toggle:empty::after{margin-left:0}.dropstart .dropdown-toggle::before{vertical-align:0}.dropdown-divider{height:0;margin:.5rem 0;overflow:hidden;border-top:1px solid rgba(0,0,0,.15)}.dropdown-item{display:block;width:100%;padding:.25rem 
1rem;clear:both;font-weight:400;color:#212529;text-align:inherit;text-decoration:none;white-space:nowrap;background-color:transparent;border:0}.dropdown-item:focus,.dropdown-item:hover{color:#1e2125;background-color:#e9ecef}.dropdown-item.active,.dropdown-item:active{color:#fff;text-decoration:none;background-color:#0d6efd}.dropdown-item.disabled,.dropdown-item:disabled{color:#adb5bd;pointer-events:none;background-color:transparent}.dropdown-menu.show{display:block}.dropdown-header{display:block;padding:.5rem 1rem;margin-bottom:0;font-size:.875rem;color:#6c757d;white-space:nowrap}.dropdown-item-text{display:block;padding:.25rem 1rem;color:#212529}.dropdown-menu-dark{color:#dee2e6;background-color:#343a40;border-color:rgba(0,0,0,.15)}.dropdown-menu-dark .dropdown-item{color:#dee2e6}.dropdown-menu-dark .dropdown-item:focus,.dropdown-menu-dark .dropdown-item:hover{color:#fff;background-color:rgba(255,255,255,.15)}.dropdown-menu-dark .dropdown-item.active,.dropdown-menu-dark .dropdown-item:active{color:#fff;background-color:#0d6efd}.dropdown-menu-dark .dropdown-item.disabled,.dropdown-menu-dark .dropdown-item:disabled{color:#adb5bd}.dropdown-menu-dark .dropdown-divider{border-color:rgba(0,0,0,.15)}.dropdown-menu-dark .dropdown-item-text{color:#dee2e6}.dropdown-menu-dark .dropdown-header{color:#adb5bd}.btn-group,.btn-group-vertical{position:relative;display:inline-flex;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;flex:1 1 auto}.btn-group-vertical>.btn-check:checked+.btn,.btn-group-vertical>.btn-check:focus+.btn,.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn-check:checked+.btn,.btn-group>.btn-check:focus+.btn,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:1}.btn-toolbar{display:flex;flex-wrap:wrap;justify-content:flex-start}.btn-toolbar .input-group{width:auto}.btn-group>.btn-group:not(:first-child),.btn-group>.btn:not(:first-child){margin-left:-1px}.btn-group>.btn-group:not(:last-child)>.btn,.btn-group>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:not(:first-child)>.btn,.btn-group>.btn:nth-child(n+3),.btn-group>:not(.btn-check)+.btn{border-top-left-radius:0;border-bottom-left-radius:0}.dropdown-toggle-split{padding-right:.5625rem;padding-left:.5625rem}.dropdown-toggle-split::after,.dropend .dropdown-toggle-split::after,.dropup .dropdown-toggle-split::after{margin-left:0}.dropstart .dropdown-toggle-split::before{margin-right:0}.btn-group-sm>.btn+.dropdown-toggle-split,.btn-sm+.dropdown-toggle-split{padding-right:.375rem;padding-left:.375rem}.btn-group-lg>.btn+.dropdown-toggle-split,.btn-lg+.dropdown-toggle-split{padding-right:.75rem;padding-left:.75rem}.btn-group-vertical{flex-direction:column;align-items:flex-start;justify-content:center}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group{width:100%}.btn-group-vertical>.btn-group:not(:first-child),.btn-group-vertical>.btn:not(:first-child){margin-top:-1px}.btn-group-vertical>.btn-group:not(:last-child)>.btn,.btn-group-vertical>.btn:not(:last-child):not(.dropdown-toggle){border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:not(:first-child)>.btn,.btn-group-vertical>.btn~.btn{border-top-left-radius:0;border-top-right-radius:0}.nav{display:flex;flex-wrap:wrap;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:.5rem 
1rem;color:#0d6efd;text-decoration:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out}@media (prefers-reduced-motion:reduce){.nav-link{transition:none}}.nav-link:focus,.nav-link:hover{color:#0a58ca}.nav-link.disabled{color:#6c757d;pointer-events:none;cursor:default}.nav-tabs{border-bottom:1px solid #dee2e6}.nav-tabs .nav-link{margin-bottom:-1px;background:0 0;border:1px solid transparent;border-top-left-radius:.25rem;border-top-right-radius:.25rem}.nav-tabs .nav-link:focus,.nav-tabs .nav-link:hover{border-color:#e9ecef #e9ecef #dee2e6;isolation:isolate}.nav-tabs .nav-link.disabled{color:#6c757d;background-color:transparent;border-color:transparent}.nav-tabs .nav-item.show .nav-link,.nav-tabs .nav-link.active{color:#495057;background-color:#fff;border-color:#dee2e6 #dee2e6 #fff}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.nav-pills .nav-link{background:0 0;border:0;border-radius:.25rem}.nav-pills .nav-link.active,.nav-pills .show>.nav-link{color:#fff;background-color:#0d6efd}.nav-fill .nav-item,.nav-fill>.nav-link{flex:1 1 auto;text-align:center}.nav-justified .nav-item,.nav-justified>.nav-link{flex-basis:0;flex-grow:1;text-align:center}.nav-fill .nav-item .nav-link,.nav-justified .nav-item .nav-link{width:100%}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{position:relative;display:flex;flex-wrap:wrap;align-items:center;justify-content:space-between;padding-top:.5rem;padding-bottom:.5rem}.navbar>.container,.navbar>.container-fluid,.navbar>.container-lg,.navbar>.container-md,.navbar>.container-sm,.navbar>.container-xl,.navbar>.container-xxl{display:flex;flex-wrap:inherit;align-items:center;justify-content:space-between}.navbar-brand{padding-top:.3125rem;padding-bottom:.3125rem;margin-right:1rem;font-size:1.25rem;text-decoration:none;white-space:nowrap}.navbar-nav{display:flex;flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav .nav-link{padding-right:0;padding-left:0}.navbar-nav .dropdown-menu{position:static}.navbar-text{padding-top:.5rem;padding-bottom:.5rem}.navbar-collapse{flex-basis:100%;flex-grow:1;align-items:center}.navbar-toggler{padding:.25rem .75rem;font-size:1.25rem;line-height:1;background-color:transparent;border:1px solid transparent;border-radius:.25rem;transition:box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.navbar-toggler{transition:none}}.navbar-toggler:hover{text-decoration:none}.navbar-toggler:focus{text-decoration:none;outline:0;box-shadow:0 0 0 .25rem}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;background-repeat:no-repeat;background-position:center;background-size:100%}.navbar-nav-scroll{max-height:var(--bs-scroll-height,75vh);overflow-y:auto}@media (min-width:576px){.navbar-expand-sm{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-sm .navbar-nav{flex-direction:row}.navbar-expand-sm .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-sm .navbar-nav-scroll{overflow:visible}.navbar-expand-sm .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-sm .navbar-toggler{display:none}}@media (min-width:768px){.navbar-expand-md{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-md .navbar-nav{flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav 
.nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-md .navbar-nav-scroll{overflow:visible}.navbar-expand-md .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-md .navbar-toggler{display:none}}@media (min-width:992px){.navbar-expand-lg{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-lg .navbar-nav{flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-lg .navbar-nav-scroll{overflow:visible}.navbar-expand-lg .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-lg .navbar-toggler{display:none}}@media (min-width:1200px){.navbar-expand-xl{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-xl .navbar-nav{flex-direction:row}.navbar-expand-xl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xl .navbar-nav-scroll{overflow:visible}.navbar-expand-xl .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-xl .navbar-toggler{display:none}}@media (min-width:1400px){.navbar-expand-xxl{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-xxl .navbar-nav{flex-direction:row}.navbar-expand-xxl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xxl .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xxl .navbar-nav-scroll{overflow:visible}.navbar-expand-xxl .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-xxl .navbar-toggler{display:none}}.navbar-expand{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand .navbar-nav{flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand .navbar-nav-scroll{overflow:visible}.navbar-expand .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand .navbar-toggler{display:none}.navbar-light .navbar-brand{color:rgba(0,0,0,.9)}.navbar-light .navbar-brand:focus,.navbar-light .navbar-brand:hover{color:rgba(0,0,0,.9)}.navbar-light .navbar-nav .nav-link{color:rgba(0,0,0,.55)}.navbar-light .navbar-nav .nav-link:focus,.navbar-light .navbar-nav .nav-link:hover{color:rgba(0,0,0,.7)}.navbar-light .navbar-nav .nav-link.disabled{color:rgba(0,0,0,.3)}.navbar-light .navbar-nav .nav-link.active,.navbar-light .navbar-nav .show>.nav-link{color:rgba(0,0,0,.9)}.navbar-light .navbar-toggler{color:rgba(0,0,0,.55);border-color:rgba(0,0,0,.1)}.navbar-light .navbar-toggler-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%280, 0, 0, 0.55%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e")}.navbar-light .navbar-text{color:rgba(0,0,0,.55)}.navbar-light .navbar-text a,.navbar-light .navbar-text a:focus,.navbar-light .navbar-text a:hover{color:rgba(0,0,0,.9)}.navbar-dark .navbar-brand{color:#fff}.navbar-dark .navbar-brand:focus,.navbar-dark .navbar-brand:hover{color:#fff}.navbar-dark .navbar-nav .nav-link{color:rgba(255,255,255,.55)}.navbar-dark .navbar-nav .nav-link:focus,.navbar-dark .navbar-nav .nav-link:hover{color:rgba(255,255,255,.75)}.navbar-dark .navbar-nav .nav-link.disabled{color:rgba(255,255,255,.25)}.navbar-dark .navbar-nav .nav-link.active,.navbar-dark .navbar-nav .show>.nav-link{color:#fff}.navbar-dark 
.navbar-toggler{color:rgba(255,255,255,.55);border-color:rgba(255,255,255,.1)}.navbar-dark .navbar-toggler-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%28255, 255, 255, 0.55%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e")}.navbar-dark .navbar-text{color:rgba(255,255,255,.55)}.navbar-dark .navbar-text a,.navbar-dark .navbar-text a:focus,.navbar-dark .navbar-text a:hover{color:#fff}.card{position:relative;display:flex;flex-direction:column;min-width:0;word-wrap:break-word;background-color:#fff;background-clip:border-box;border:1px solid rgba(0,0,0,.125);border-radius:.25rem}.card>hr{margin-right:0;margin-left:0}.card>.list-group{border-top:inherit;border-bottom:inherit}.card>.list-group:first-child{border-top-width:0;border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card>.list-group:last-child{border-bottom-width:0;border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.card>.card-header+.list-group,.card>.list-group+.card-footer{border-top:0}.card-body{flex:1 1 auto;padding:1rem 1rem}.card-title{margin-bottom:.5rem}.card-subtitle{margin-top:-.25rem;margin-bottom:0}.card-text:last-child{margin-bottom:0}.card-link:hover{text-decoration:none}.card-link+.card-link{margin-left:1rem}.card-header{padding:.5rem 1rem;margin-bottom:0;background-color:rgba(0,0,0,.03);border-bottom:1px solid rgba(0,0,0,.125)}.card-header:first-child{border-radius:calc(.25rem - 1px) calc(.25rem - 1px) 0 0}.card-footer{padding:.5rem 1rem;background-color:rgba(0,0,0,.03);border-top:1px solid rgba(0,0,0,.125)}.card-footer:last-child{border-radius:0 0 calc(.25rem - 1px) calc(.25rem - 1px)}.card-header-tabs{margin-right:-.5rem;margin-bottom:-.5rem;margin-left:-.5rem;border-bottom:0}.card-header-pills{margin-right:-.5rem;margin-left:-.5rem}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1rem;border-radius:calc(.25rem - 1px)}.card-img,.card-img-bottom,.card-img-top{width:100%}.card-img,.card-img-top{border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card-img,.card-img-bottom{border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.card-group>.card{margin-bottom:.75rem}@media (min-width:576px){.card-group{display:flex;flex-flow:row wrap}.card-group>.card{flex:1 0 0%;margin-bottom:0}.card-group>.card+.card{margin-left:0;border-left:0}.card-group>.card:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.card-group>.card:not(:last-child) .card-header,.card-group>.card:not(:last-child) .card-img-top{border-top-right-radius:0}.card-group>.card:not(:last-child) .card-footer,.card-group>.card:not(:last-child) .card-img-bottom{border-bottom-right-radius:0}.card-group>.card:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.card-group>.card:not(:first-child) .card-header,.card-group>.card:not(:first-child) .card-img-top{border-top-left-radius:0}.card-group>.card:not(:first-child) .card-footer,.card-group>.card:not(:first-child) .card-img-bottom{border-bottom-left-radius:0}}.accordion-button{position:relative;display:flex;align-items:center;width:100%;padding:1rem 1.25rem;font-size:1rem;color:#212529;text-align:left;background-color:#fff;border:0;border-radius:0;overflow-anchor:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s 
ease-in-out,box-shadow .15s ease-in-out,border-radius .15s ease}@media (prefers-reduced-motion:reduce){.accordion-button{transition:none}}.accordion-button:not(.collapsed){color:#0c63e4;background-color:#e7f1ff;box-shadow:inset 0 -1px 0 rgba(0,0,0,.125)}.accordion-button:not(.collapsed)::after{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%230c63e4'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");transform:rotate(-180deg)}.accordion-button::after{flex-shrink:0;width:1.25rem;height:1.25rem;margin-left:auto;content:"";background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23212529'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");background-repeat:no-repeat;background-size:1.25rem;transition:transform .2s ease-in-out}@media (prefers-reduced-motion:reduce){.accordion-button::after{transition:none}}.accordion-button:hover{z-index:2}.accordion-button:focus{z-index:3;border-color:#86b7fe;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.accordion-header{margin-bottom:0}.accordion-item{background-color:#fff;border:1px solid rgba(0,0,0,.125)}.accordion-item:first-of-type{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.accordion-item:first-of-type .accordion-button{border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.accordion-item:not(:first-of-type){border-top:0}.accordion-item:last-of-type{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.accordion-item:last-of-type .accordion-button.collapsed{border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.accordion-item:last-of-type .accordion-collapse{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.accordion-body{padding:1rem 1.25rem}.accordion-flush .accordion-collapse{border-width:0}.accordion-flush .accordion-item{border-right:0;border-left:0;border-radius:0}.accordion-flush .accordion-item:first-child{border-top:0}.accordion-flush .accordion-item:last-child{border-bottom:0}.accordion-flush .accordion-item .accordion-button{border-radius:0}.breadcrumb{display:flex;flex-wrap:wrap;padding:0 0;margin-bottom:1rem;list-style:none}.breadcrumb-item+.breadcrumb-item{padding-left:.5rem}.breadcrumb-item+.breadcrumb-item::before{float:left;padding-right:.5rem;color:#6c757d;content:var(--bs-breadcrumb-divider, "/")}.breadcrumb-item.active{color:#6c757d}.pagination{display:flex;padding-left:0;list-style:none}.page-link{position:relative;display:block;color:#0d6efd;text-decoration:none;background-color:#fff;border:1px solid #dee2e6;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.page-link{transition:none}}.page-link:hover{z-index:2;color:#0a58ca;background-color:#e9ecef;border-color:#dee2e6}.page-link:focus{z-index:3;color:#0a58ca;background-color:#e9ecef;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.page-item:not(:first-child) .page-link{margin-left:-1px}.page-item.active .page-link{z-index:3;color:#fff;background-color:#0d6efd;border-color:#0d6efd}.page-item.disabled 
.page-link{color:#6c757d;pointer-events:none;background-color:#fff;border-color:#dee2e6}.page-link{padding:.375rem .75rem}.page-item:first-child .page-link{border-top-left-radius:.25rem;border-bottom-left-radius:.25rem}.page-item:last-child .page-link{border-top-right-radius:.25rem;border-bottom-right-radius:.25rem}.pagination-lg .page-link{padding:.75rem 1.5rem;font-size:1.25rem}.pagination-lg .page-item:first-child .page-link{border-top-left-radius:.3rem;border-bottom-left-radius:.3rem}.pagination-lg .page-item:last-child .page-link{border-top-right-radius:.3rem;border-bottom-right-radius:.3rem}.pagination-sm .page-link{padding:.25rem .5rem;font-size:.875rem}.pagination-sm .page-item:first-child .page-link{border-top-left-radius:.2rem;border-bottom-left-radius:.2rem}.pagination-sm .page-item:last-child .page-link{border-top-right-radius:.2rem;border-bottom-right-radius:.2rem}.badge{display:inline-block;padding:.35em .65em;font-size:.75em;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.alert{position:relative;padding:1rem 1rem;margin-bottom:1rem;border:1px solid transparent;border-radius:.25rem}.alert-heading{color:inherit}.alert-link{font-weight:700}.alert-dismissible{padding-right:3rem}.alert-dismissible .btn-close{position:absolute;top:0;right:0;z-index:2;padding:1.25rem 1rem}.alert-primary{color:#084298;background-color:#cfe2ff;border-color:#b6d4fe}.alert-primary .alert-link{color:#06357a}.alert-secondary{color:#41464b;background-color:#e2e3e5;border-color:#d3d6d8}.alert-secondary .alert-link{color:#34383c}.alert-success{color:#0f5132;background-color:#d1e7dd;border-color:#badbcc}.alert-success .alert-link{color:#0c4128}.alert-info{color:#055160;background-color:#cff4fc;border-color:#b6effb}.alert-info .alert-link{color:#04414d}.alert-warning{color:#664d03;background-color:#fff3cd;border-color:#ffecb5}.alert-warning .alert-link{color:#523e02}.alert-danger{color:#842029;background-color:#f8d7da;border-color:#f5c2c7}.alert-danger .alert-link{color:#6a1a21}.alert-light{color:#636464;background-color:#fefefe;border-color:#fdfdfe}.alert-light .alert-link{color:#4f5050}.alert-dark{color:#141619;background-color:#d3d3d4;border-color:#bcbebf}.alert-dark .alert-link{color:#101214}@-webkit-keyframes progress-bar-stripes{0%{background-position-x:1rem}}@keyframes progress-bar-stripes{0%{background-position-x:1rem}}.progress{display:flex;height:1rem;overflow:hidden;font-size:.75rem;background-color:#e9ecef;border-radius:.25rem}.progress-bar{display:flex;flex-direction:column;justify-content:center;overflow:hidden;color:#fff;text-align:center;white-space:nowrap;background-color:#0d6efd;transition:width .6s ease}@media (prefers-reduced-motion:reduce){.progress-bar{transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-size:1rem 1rem}.progress-bar-animated{-webkit-animation:1s linear infinite progress-bar-stripes;animation:1s linear infinite progress-bar-stripes}@media (prefers-reduced-motion:reduce){.progress-bar-animated{-webkit-animation:none;animation:none}}.list-group{display:flex;flex-direction:column;padding-left:0;margin-bottom:0;border-radius:.25rem}.list-group-numbered{list-style-type:none;counter-reset:section}.list-group-numbered>li::before{content:counters(section, ".") ". 
";counter-increment:section}.list-group-item-action{width:100%;color:#495057;text-align:inherit}.list-group-item-action:focus,.list-group-item-action:hover{z-index:1;color:#495057;text-decoration:none;background-color:#f8f9fa}.list-group-item-action:active{color:#212529;background-color:#e9ecef}.list-group-item{position:relative;display:block;padding:.5rem 1rem;color:#212529;text-decoration:none;background-color:#fff;border:1px solid rgba(0,0,0,.125)}.list-group-item:first-child{border-top-left-radius:inherit;border-top-right-radius:inherit}.list-group-item:last-child{border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.list-group-item.disabled,.list-group-item:disabled{color:#6c757d;pointer-events:none;background-color:#fff}.list-group-item.active{z-index:2;color:#fff;background-color:#0d6efd;border-color:#0d6efd}.list-group-item+.list-group-item{border-top-width:0}.list-group-item+.list-group-item.active{margin-top:-1px;border-top-width:1px}.list-group-horizontal{flex-direction:row}.list-group-horizontal>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal>.list-group-item.active{margin-top:0}.list-group-horizontal>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}@media (min-width:576px){.list-group-horizontal-sm{flex-direction:row}.list-group-horizontal-sm>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-sm>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-sm>.list-group-item.active{margin-top:0}.list-group-horizontal-sm>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-sm>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media (min-width:768px){.list-group-horizontal-md{flex-direction:row}.list-group-horizontal-md>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-md>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-md>.list-group-item.active{margin-top:0}.list-group-horizontal-md>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-md>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media (min-width:992px){.list-group-horizontal-lg{flex-direction:row}.list-group-horizontal-lg>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-lg>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-lg>.list-group-item.active{margin-top:0}.list-group-horizontal-lg>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-lg>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media 
(min-width:1200px){.list-group-horizontal-xl{flex-direction:row}.list-group-horizontal-xl>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-xl>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-xl>.list-group-item.active{margin-top:0}.list-group-horizontal-xl>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-xl>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media (min-width:1400px){.list-group-horizontal-xxl{flex-direction:row}.list-group-horizontal-xxl>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-xxl>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-xxl>.list-group-item.active{margin-top:0}.list-group-horizontal-xxl>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-xxl>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}.list-group-flush{border-radius:0}.list-group-flush>.list-group-item{border-width:0 0 1px}.list-group-flush>.list-group-item:last-child{border-bottom-width:0}.list-group-item-primary{color:#084298;background-color:#cfe2ff}.list-group-item-primary.list-group-item-action:focus,.list-group-item-primary.list-group-item-action:hover{color:#084298;background-color:#bacbe6}.list-group-item-primary.list-group-item-action.active{color:#fff;background-color:#084298;border-color:#084298}.list-group-item-secondary{color:#41464b;background-color:#e2e3e5}.list-group-item-secondary.list-group-item-action:focus,.list-group-item-secondary.list-group-item-action:hover{color:#41464b;background-color:#cbccce}.list-group-item-secondary.list-group-item-action.active{color:#fff;background-color:#41464b;border-color:#41464b}.list-group-item-success{color:#0f5132;background-color:#d1e7dd}.list-group-item-success.list-group-item-action:focus,.list-group-item-success.list-group-item-action:hover{color:#0f5132;background-color:#bcd0c7}.list-group-item-success.list-group-item-action.active{color:#fff;background-color:#0f5132;border-color:#0f5132}.list-group-item-info{color:#055160;background-color:#cff4fc}.list-group-item-info.list-group-item-action:focus,.list-group-item-info.list-group-item-action:hover{color:#055160;background-color:#badce3}.list-group-item-info.list-group-item-action.active{color:#fff;background-color:#055160;border-color:#055160}.list-group-item-warning{color:#664d03;background-color:#fff3cd}.list-group-item-warning.list-group-item-action:focus,.list-group-item-warning.list-group-item-action:hover{color:#664d03;background-color:#e6dbb9}.list-group-item-warning.list-group-item-action.active{color:#fff;background-color:#664d03;border-color:#664d03}.list-group-item-danger{color:#842029;background-color:#f8d7da}.list-group-item-danger.list-group-item-action:focus,.list-group-item-danger.list-group-item-action:hover{color:#842029;background-color:#dfc2c4}.list-group-item-danger.list-group-item-action.active{color:#fff;background-color:#842029;border-color:#842029}.list-group-item-light{color:#636464;background-color:#fefefe}.list-group-item-light.list-group-item-action:focus,.list-group-item-light.list-group-item-action:hover{color:#636464;background-color:#e5e5e5}.list-group-item-light.list-group-item-action.active{color:#fff;background-color:#636464;border-color:#636464}.list-group-ite
m-dark{color:#141619;background-color:#d3d3d4}.list-group-item-dark.list-group-item-action:focus,.list-group-item-dark.list-group-item-action:hover{color:#141619;background-color:#bebebf}.list-group-item-dark.list-group-item-action.active{color:#fff;background-color:#141619;border-color:#141619}.btn-close{box-sizing:content-box;width:1em;height:1em;padding:.25em .25em;color:#000;background:transparent url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23000'%3e%3cpath d='M.293.293a1 1 0 011.414 0L8 6.586 14.293.293a1 1 0 111.414 1.414L9.414 8l6.293 6.293a1 1 0 01-1.414 1.414L8 9.414l-6.293 6.293a1 1 0 01-1.414-1.414L6.586 8 .293 1.707a1 1 0 010-1.414z'/%3e%3c/svg%3e") center/1em auto no-repeat;border:0;border-radius:.25rem;opacity:.5}.btn-close:hover{color:#000;text-decoration:none;opacity:.75}.btn-close:focus{outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25);opacity:1}.btn-close.disabled,.btn-close:disabled{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none;opacity:.25}.btn-close-white{filter:invert(1) grayscale(100%) brightness(200%)}.toast{width:350px;max-width:100%;font-size:.875rem;pointer-events:auto;background-color:rgba(255,255,255,.85);background-clip:padding-box;border:1px solid rgba(0,0,0,.1);box-shadow:0 .5rem 1rem rgba(0,0,0,.15);border-radius:.25rem}.toast:not(.showing):not(.show){opacity:0}.toast.hide{display:none}.toast-container{width:-webkit-max-content;width:-moz-max-content;width:max-content;max-width:100%;pointer-events:none}.toast-container>:not(:last-child){margin-bottom:.75rem}.toast-header{display:flex;align-items:center;padding:.5rem .75rem;color:#6c757d;background-color:rgba(255,255,255,.85);background-clip:padding-box;border-bottom:1px solid rgba(0,0,0,.05);border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.toast-header .btn-close{margin-right:-.375rem;margin-left:.75rem}.toast-body{padding:.75rem;word-wrap:break-word}.modal{position:fixed;top:0;left:0;z-index:1060;display:none;width:100%;height:100%;overflow-x:hidden;overflow-y:auto;outline:0}.modal-dialog{position:relative;width:auto;margin:.5rem;pointer-events:none}.modal.fade .modal-dialog{transition:transform .3s ease-out;transform:translate(0,-50px)}@media (prefers-reduced-motion:reduce){.modal.fade .modal-dialog{transition:none}}.modal.show .modal-dialog{transform:none}.modal.modal-static .modal-dialog{transform:scale(1.02)}.modal-dialog-scrollable{height:calc(100% - 1rem)}.modal-dialog-scrollable .modal-content{max-height:100%;overflow:hidden}.modal-dialog-scrollable .modal-body{overflow-y:auto}.modal-dialog-centered{display:flex;align-items:center;min-height:calc(100% - 1rem)}.modal-content{position:relative;display:flex;flex-direction:column;width:100%;pointer-events:auto;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.2);border-radius:.3rem;outline:0}.modal-backdrop{position:fixed;top:0;left:0;z-index:1040;width:100vw;height:100vh;background-color:#000}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:.5}.modal-header{display:flex;flex-shrink:0;align-items:center;justify-content:space-between;padding:1rem 1rem;border-bottom:1px solid #dee2e6;border-top-left-radius:calc(.3rem - 1px);border-top-right-radius:calc(.3rem - 1px)}.modal-header .btn-close{padding:.5rem .5rem;margin:-.5rem -.5rem -.5rem auto}.modal-title{margin-bottom:0;line-height:1.5}.modal-body{position:relative;flex:1 1 
auto;padding:1rem}.modal-footer{display:flex;flex-wrap:wrap;flex-shrink:0;align-items:center;justify-content:flex-end;padding:.75rem;border-top:1px solid #dee2e6;border-bottom-right-radius:calc(.3rem - 1px);border-bottom-left-radius:calc(.3rem - 1px)}.modal-footer>*{margin:.25rem}@media (min-width:576px){.modal-dialog{max-width:500px;margin:1.75rem auto}.modal-dialog-scrollable{height:calc(100% - 3.5rem)}.modal-dialog-centered{min-height:calc(100% - 3.5rem)}.modal-sm{max-width:300px}}@media (min-width:992px){.modal-lg,.modal-xl{max-width:800px}}@media (min-width:1200px){.modal-xl{max-width:1140px}}.modal-fullscreen{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen .modal-header{border-radius:0}.modal-fullscreen .modal-body{overflow-y:auto}.modal-fullscreen .modal-footer{border-radius:0}@media (max-width:575.98px){.modal-fullscreen-sm-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-sm-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-sm-down .modal-header{border-radius:0}.modal-fullscreen-sm-down .modal-body{overflow-y:auto}.modal-fullscreen-sm-down .modal-footer{border-radius:0}}@media (max-width:767.98px){.modal-fullscreen-md-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-md-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-md-down .modal-header{border-radius:0}.modal-fullscreen-md-down .modal-body{overflow-y:auto}.modal-fullscreen-md-down .modal-footer{border-radius:0}}@media (max-width:991.98px){.modal-fullscreen-lg-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-lg-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-lg-down .modal-header{border-radius:0}.modal-fullscreen-lg-down .modal-body{overflow-y:auto}.modal-fullscreen-lg-down .modal-footer{border-radius:0}}@media (max-width:1199.98px){.modal-fullscreen-xl-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-xl-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-xl-down .modal-header{border-radius:0}.modal-fullscreen-xl-down .modal-body{overflow-y:auto}.modal-fullscreen-xl-down .modal-footer{border-radius:0}}@media (max-width:1399.98px){.modal-fullscreen-xxl-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-xxl-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-xxl-down .modal-header{border-radius:0}.modal-fullscreen-xxl-down .modal-body{overflow-y:auto}.modal-fullscreen-xxl-down .modal-footer{border-radius:0}}.tooltip{position:absolute;z-index:1080;display:block;margin:0;font-family:var(--bs-font-sans-serif);font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;opacity:0}.tooltip.show{opacity:.9}.tooltip .tooltip-arrow{position:absolute;display:block;width:.8rem;height:.4rem}.tooltip .tooltip-arrow::before{position:absolute;content:"";border-color:transparent;border-style:solid}.bs-tooltip-auto[data-popper-placement^=top],.bs-tooltip-top{padding:.4rem 0}.bs-tooltip-auto[data-popper-placement^=top] .tooltip-arrow,.bs-tooltip-top .tooltip-arrow{bottom:0}.bs-tooltip-auto[data-popper-placement^=top] .tooltip-arrow::before,.bs-tooltip-top .tooltip-arrow::before{top:-1px;border-width:.4rem .4rem 
0;border-top-color:#000}.bs-tooltip-auto[data-popper-placement^=right],.bs-tooltip-end{padding:0 .4rem}.bs-tooltip-auto[data-popper-placement^=right] .tooltip-arrow,.bs-tooltip-end .tooltip-arrow{left:0;width:.4rem;height:.8rem}.bs-tooltip-auto[data-popper-placement^=right] .tooltip-arrow::before,.bs-tooltip-end .tooltip-arrow::before{right:-1px;border-width:.4rem .4rem .4rem 0;border-right-color:#000}.bs-tooltip-auto[data-popper-placement^=bottom],.bs-tooltip-bottom{padding:.4rem 0}.bs-tooltip-auto[data-popper-placement^=bottom] .tooltip-arrow,.bs-tooltip-bottom .tooltip-arrow{top:0}.bs-tooltip-auto[data-popper-placement^=bottom] .tooltip-arrow::before,.bs-tooltip-bottom .tooltip-arrow::before{bottom:-1px;border-width:0 .4rem .4rem;border-bottom-color:#000}.bs-tooltip-auto[data-popper-placement^=left],.bs-tooltip-start{padding:0 .4rem}.bs-tooltip-auto[data-popper-placement^=left] .tooltip-arrow,.bs-tooltip-start .tooltip-arrow{right:0;width:.4rem;height:.8rem}.bs-tooltip-auto[data-popper-placement^=left] .tooltip-arrow::before,.bs-tooltip-start .tooltip-arrow::before{left:-1px;border-width:.4rem 0 .4rem .4rem;border-left-color:#000}.tooltip-inner{max-width:200px;padding:.25rem .5rem;color:#fff;text-align:center;background-color:#000;border-radius:.25rem}.popover{position:absolute;top:0;left:0;z-index:1070;display:block;max-width:276px;font-family:var(--bs-font-sans-serif);font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.2);border-radius:.3rem}.popover .popover-arrow{position:absolute;display:block;width:1rem;height:.5rem}.popover .popover-arrow::after,.popover .popover-arrow::before{position:absolute;display:block;content:"";border-color:transparent;border-style:solid}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow,.bs-popover-top>.popover-arrow{bottom:calc(-.5rem - 1px)}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::before,.bs-popover-top>.popover-arrow::before{bottom:0;border-width:.5rem .5rem 0;border-top-color:rgba(0,0,0,.25)}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::after,.bs-popover-top>.popover-arrow::after{bottom:1px;border-width:.5rem .5rem 0;border-top-color:#fff}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow,.bs-popover-end>.popover-arrow{left:calc(-.5rem - 1px);width:.5rem;height:1rem}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::before,.bs-popover-end>.popover-arrow::before{left:0;border-width:.5rem .5rem .5rem 0;border-right-color:rgba(0,0,0,.25)}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::after,.bs-popover-end>.popover-arrow::after{left:1px;border-width:.5rem .5rem .5rem 0;border-right-color:#fff}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow,.bs-popover-bottom>.popover-arrow{top:calc(-.5rem - 1px)}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::before,.bs-popover-bottom>.popover-arrow::before{top:0;border-width:0 .5rem .5rem .5rem;border-bottom-color:rgba(0,0,0,.25)}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::after,.bs-popover-bottom>.popover-arrow::after{top:1px;border-width:0 .5rem .5rem .5rem;border-bottom-color:#fff}.bs-popover-auto[data-popper-placement^=bottom] .popover-header::before,.bs-popover-bottom 
.popover-header::before{position:absolute;top:0;left:50%;display:block;width:1rem;margin-left:-.5rem;content:"";border-bottom:1px solid #f0f0f0}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow,.bs-popover-start>.popover-arrow{right:calc(-.5rem - 1px);width:.5rem;height:1rem}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::before,.bs-popover-start>.popover-arrow::before{right:0;border-width:.5rem 0 .5rem .5rem;border-left-color:rgba(0,0,0,.25)}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::after,.bs-popover-start>.popover-arrow::after{right:1px;border-width:.5rem 0 .5rem .5rem;border-left-color:#fff}.popover-header{padding:.5rem 1rem;margin-bottom:0;font-size:1rem;background-color:#f0f0f0;border-bottom:1px solid #d8d8d8;border-top-left-radius:calc(.3rem - 1px);border-top-right-radius:calc(.3rem - 1px)}.popover-header:empty{display:none}.popover-body{padding:1rem 1rem;color:#212529}.carousel{position:relative}.carousel.pointer-event{touch-action:pan-y}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner::after{display:block;clear:both;content:""}.carousel-item{position:relative;display:none;float:left;width:100%;margin-right:-100%;-webkit-backface-visibility:hidden;backface-visibility:hidden;transition:transform .6s ease-in-out}@media (prefers-reduced-motion:reduce){.carousel-item{transition:none}}.carousel-item-next,.carousel-item-prev,.carousel-item.active{display:block}.active.carousel-item-end,.carousel-item-next:not(.carousel-item-start){transform:translateX(100%)}.active.carousel-item-start,.carousel-item-prev:not(.carousel-item-end){transform:translateX(-100%)}.carousel-fade .carousel-item{opacity:0;transition-property:opacity;transform:none}.carousel-fade .carousel-item-next.carousel-item-start,.carousel-fade .carousel-item-prev.carousel-item-end,.carousel-fade .carousel-item.active{z-index:1;opacity:1}.carousel-fade .active.carousel-item-end,.carousel-fade .active.carousel-item-start{z-index:0;opacity:0;transition:opacity 0s .6s}@media (prefers-reduced-motion:reduce){.carousel-fade .active.carousel-item-end,.carousel-fade .active.carousel-item-start{transition:none}}.carousel-control-next,.carousel-control-prev{position:absolute;top:0;bottom:0;z-index:1;display:flex;align-items:center;justify-content:center;width:15%;padding:0;color:#fff;text-align:center;background:0 0;border:0;opacity:.5;transition:opacity .15s ease}@media (prefers-reduced-motion:reduce){.carousel-control-next,.carousel-control-prev{transition:none}}.carousel-control-next:focus,.carousel-control-next:hover,.carousel-control-prev:focus,.carousel-control-prev:hover{color:#fff;text-decoration:none;outline:0;opacity:.9}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-next-icon,.carousel-control-prev-icon{display:inline-block;width:2rem;height:2rem;background-repeat:no-repeat;background-position:50%;background-size:100% 100%}.carousel-control-prev-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e")}.carousel-control-next-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 
0-.708z'/%3e%3c/svg%3e")}.carousel-indicators{position:absolute;right:0;bottom:0;left:0;z-index:2;display:flex;justify-content:center;padding:0;margin-right:15%;margin-bottom:1rem;margin-left:15%;list-style:none}.carousel-indicators [data-bs-target]{box-sizing:content-box;flex:0 1 auto;width:30px;height:3px;padding:0;margin-right:3px;margin-left:3px;text-indent:-999px;cursor:pointer;background-color:#fff;background-clip:padding-box;border:0;border-top:10px solid transparent;border-bottom:10px solid transparent;opacity:.5;transition:opacity .6s ease}@media (prefers-reduced-motion:reduce){.carousel-indicators [data-bs-target]{transition:none}}.carousel-indicators .active{opacity:1}.carousel-caption{position:absolute;right:15%;bottom:1.25rem;left:15%;padding-top:1.25rem;padding-bottom:1.25rem;color:#fff;text-align:center}.carousel-dark .carousel-control-next-icon,.carousel-dark .carousel-control-prev-icon{filter:invert(1) grayscale(100)}.carousel-dark .carousel-indicators [data-bs-target]{background-color:#000}.carousel-dark .carousel-caption{color:#000}@-webkit-keyframes spinner-border{to{transform:rotate(360deg)}}@keyframes spinner-border{to{transform:rotate(360deg)}}.spinner-border{display:inline-block;width:2rem;height:2rem;vertical-align:-.125em;border:.25em solid currentColor;border-right-color:transparent;border-radius:50%;-webkit-animation:.75s linear infinite spinner-border;animation:.75s linear infinite spinner-border}.spinner-border-sm{width:1rem;height:1rem;border-width:.2em}@-webkit-keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1;transform:none}}@keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1;transform:none}}.spinner-grow{display:inline-block;width:2rem;height:2rem;vertical-align:-.125em;background-color:currentColor;border-radius:50%;opacity:0;-webkit-animation:.75s linear infinite spinner-grow;animation:.75s linear infinite spinner-grow}.spinner-grow-sm{width:1rem;height:1rem}@media (prefers-reduced-motion:reduce){.spinner-border,.spinner-grow{-webkit-animation-duration:1.5s;animation-duration:1.5s}}.offcanvas{position:fixed;bottom:0;z-index:1050;display:flex;flex-direction:column;max-width:100%;visibility:hidden;background-color:#fff;background-clip:padding-box;outline:0;transition:transform .3s ease-in-out}@media (prefers-reduced-motion:reduce){.offcanvas{transition:none}}.offcanvas-header{display:flex;align-items:center;justify-content:space-between;padding:1rem 1rem}.offcanvas-header .btn-close{padding:.5rem .5rem;margin:-.5rem -.5rem -.5rem auto}.offcanvas-title{margin-bottom:0;line-height:1.5}.offcanvas-body{flex-grow:1;padding:1rem 1rem;overflow-y:auto}.offcanvas-start{top:0;left:0;width:400px;border-right:1px solid rgba(0,0,0,.2);transform:translateX(-100%)}.offcanvas-end{top:0;right:0;width:400px;border-left:1px solid rgba(0,0,0,.2);transform:translateX(100%)}.offcanvas-top{top:0;right:0;left:0;height:30vh;max-height:100%;border-bottom:1px solid rgba(0,0,0,.2);transform:translateY(-100%)}.offcanvas-bottom{right:0;left:0;height:30vh;max-height:100%;border-top:1px solid 
rgba(0,0,0,.2);transform:translateY(100%)}.offcanvas.show{transform:none}.clearfix::after{display:block;clear:both;content:""}.link-primary{color:#0d6efd}.link-primary:focus,.link-primary:hover{color:#0a58ca}.link-secondary{color:#6c757d}.link-secondary:focus,.link-secondary:hover{color:#565e64}.link-success{color:#198754}.link-success:focus,.link-success:hover{color:#146c43}.link-info{color:#0dcaf0}.link-info:focus,.link-info:hover{color:#3dd5f3}.link-warning{color:#ffc107}.link-warning:focus,.link-warning:hover{color:#ffcd39}.link-danger{color:#dc3545}.link-danger:focus,.link-danger:hover{color:#b02a37}.link-light{color:#f8f9fa}.link-light:focus,.link-light:hover{color:#f9fafb}.link-dark{color:#212529}.link-dark:focus,.link-dark:hover{color:#1a1e21}.ratio{position:relative;width:100%}.ratio::before{display:block;padding-top:var(--bs-aspect-ratio);content:""}.ratio>*{position:absolute;top:0;left:0;width:100%;height:100%}.ratio-1x1{--bs-aspect-ratio:100%}.ratio-4x3{--bs-aspect-ratio:calc(3 / 4 * 100%)}.ratio-16x9{--bs-aspect-ratio:calc(9 / 16 * 100%)}.ratio-21x9{--bs-aspect-ratio:calc(9 / 21 * 100%)}.fixed-top{position:fixed;top:0;right:0;left:0;z-index:1030}.fixed-bottom{position:fixed;right:0;bottom:0;left:0;z-index:1030}.sticky-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}@media (min-width:576px){.sticky-sm-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}}@media (min-width:768px){.sticky-md-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}}@media (min-width:992px){.sticky-lg-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}}@media (min-width:1200px){.sticky-xl-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}}@media (min-width:1400px){.sticky-xxl-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}}.visually-hidden,.visually-hidden-focusable:not(:focus):not(:focus-within){position:absolute!important;width:1px!important;height:1px!important;padding:0!important;margin:-1px!important;overflow:hidden!important;clip:rect(0,0,0,0)!important;white-space:nowrap!important;border:0!important}.stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.align-baseline{vertical-align:baseline!important}.align-top{vertical-align:top!important}.align-middle{vertical-align:middle!important}.align-bottom{vertical-align:bottom!important}.align-text-bottom{vertical-align:text-bottom!important}.align-text-top{vertical-align:text-top!important}.float-start{float:left!important}.float-end{float:right!important}.float-none{float:none!important}.overflow-auto{overflow:auto!important}.overflow-hidden{overflow:hidden!important}.overflow-visible{overflow:visible!important}.overflow-scroll{overflow:scroll!important}.d-inline{display:inline!important}.d-inline-block{display:inline-block!important}.d-block{display:block!important}.d-grid{display:grid!important}.d-table{display:table!important}.d-table-row{display:table-row!important}.d-table-cell{display:table-cell!important}.d-flex{display:flex!important}.d-inline-flex{display:inline-flex!important}.d-none{display:none!important}.shadow{box-shadow:0 .5rem 1rem rgba(0,0,0,.15)!important}.shadow-sm{box-shadow:0 .125rem .25rem rgba(0,0,0,.075)!important}.shadow-lg{box-shadow:0 1rem 3rem 
rgba(0,0,0,.175)!important}.shadow-none{box-shadow:none!important}.position-static{position:static!important}.position-relative{position:relative!important}.position-absolute{position:absolute!important}.position-fixed{position:fixed!important}.position-sticky{position:-webkit-sticky!important;position:sticky!important}.top-0{top:0!important}.top-50{top:50%!important}.top-100{top:100%!important}.bottom-0{bottom:0!important}.bottom-50{bottom:50%!important}.bottom-100{bottom:100%!important}.start-0{left:0!important}.start-50{left:50%!important}.start-100{left:100%!important}.end-0{right:0!important}.end-50{right:50%!important}.end-100{right:100%!important}.translate-middle{transform:translate(-50%,-50%)!important}.translate-middle-x{transform:translateX(-50%)!important}.translate-middle-y{transform:translateY(-50%)!important}.border{border:1px solid #dee2e6!important}.border-0{border:0!important}.border-top{border-top:1px solid #dee2e6!important}.border-top-0{border-top:0!important}.border-end{border-right:1px solid #dee2e6!important}.border-end-0{border-right:0!important}.border-bottom{border-bottom:1px solid #dee2e6!important}.border-bottom-0{border-bottom:0!important}.border-start{border-left:1px solid #dee2e6!important}.border-start-0{border-left:0!important}.border-primary{border-color:#0d6efd!important}.border-secondary{border-color:#6c757d!important}.border-success{border-color:#198754!important}.border-info{border-color:#0dcaf0!important}.border-warning{border-color:#ffc107!important}.border-danger{border-color:#dc3545!important}.border-light{border-color:#f8f9fa!important}.border-dark{border-color:#212529!important}.border-white{border-color:#fff!important}.border-1{border-width:1px!important}.border-2{border-width:2px!important}.border-3{border-width:3px!important}.border-4{border-width:4px!important}.border-5{border-width:5px!important}.w-25{width:25%!important}.w-50{width:50%!important}.w-75{width:75%!important}.w-100{width:100%!important}.w-auto{width:auto!important}.mw-100{max-width:100%!important}.vw-100{width:100vw!important}.min-vw-100{min-width:100vw!important}.h-25{height:25%!important}.h-50{height:50%!important}.h-75{height:75%!important}.h-100{height:100%!important}.h-auto{height:auto!important}.mh-100{max-height:100%!important}.vh-100{height:100vh!important}.min-vh-100{min-height:100vh!important}.flex-fill{flex:1 1 
auto!important}.flex-row{flex-direction:row!important}.flex-column{flex-direction:column!important}.flex-row-reverse{flex-direction:row-reverse!important}.flex-column-reverse{flex-direction:column-reverse!important}.flex-grow-0{flex-grow:0!important}.flex-grow-1{flex-grow:1!important}.flex-shrink-0{flex-shrink:0!important}.flex-shrink-1{flex-shrink:1!important}.flex-wrap{flex-wrap:wrap!important}.flex-nowrap{flex-wrap:nowrap!important}.flex-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-0{gap:0!important}.gap-1{gap:.25rem!important}.gap-2{gap:.5rem!important}.gap-3{gap:1rem!important}.gap-4{gap:1.5rem!important}.gap-5{gap:3rem!important}.justify-content-start{justify-content:flex-start!important}.justify-content-end{justify-content:flex-end!important}.justify-content-center{justify-content:center!important}.justify-content-between{justify-content:space-between!important}.justify-content-around{justify-content:space-around!important}.justify-content-evenly{justify-content:space-evenly!important}.align-items-start{align-items:flex-start!important}.align-items-end{align-items:flex-end!important}.align-items-center{align-items:center!important}.align-items-baseline{align-items:baseline!important}.align-items-stretch{align-items:stretch!important}.align-content-start{align-content:flex-start!important}.align-content-end{align-content:flex-end!important}.align-content-center{align-content:center!important}.align-content-between{align-content:space-between!important}.align-content-around{align-content:space-around!important}.align-content-stretch{align-content:stretch!important}.align-self-auto{align-self:auto!important}.align-self-start{align-self:flex-start!important}.align-self-end{align-self:flex-end!important}.align-self-center{align-self:center!important}.align-self-baseline{align-self:baseline!important}.align-self-stretch{align-self:stretch!important}.order-first{order:-1!important}.order-0{order:0!important}.order-1{order:1!important}.order-2{order:2!important}.order-3{order:3!important}.order-4{order:4!important}.order-5{order:5!important}.order-last{order:6!important}.m-0{margin:0!important}.m-1{margin:.25rem!important}.m-2{margin:.5rem!important}.m-3{margin:1rem!important}.m-4{margin:1.5rem!important}.m-5{margin:3rem!important}.m-auto{margin:auto!important}.mx-0{margin-right:0!important;margin-left:0!important}.mx-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-3{margin-right:1rem!important;margin-left:1rem!important}.mx-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-5{margin-right:3rem!important;margin-left:3rem!important}.mx-auto{margin-right:auto!important;margin-left:auto!important}.my-0{margin-top:0!important;margin-bottom:0!important}.my-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-0{margin-top:0!important}.mt-1{margin-top:.25rem!important}.mt-2{margin-top:.5rem!important}.mt-3{margin-top:1rem!important}.mt-4{margin-top:1.5rem!important}.mt-5{margin-top:3rem!important}.mt-auto{margin-top:auto!important}.me-0{margin-right:0!important}.me-1{margin-right:.25rem!important}.me-2{margin-right:.5rem!important}.me-3{margin-right:1rem!important}.me-4{margin-rig
ht:1.5rem!important}.me-5{margin-right:3rem!important}.me-auto{margin-right:auto!important}.mb-0{margin-bottom:0!important}.mb-1{margin-bottom:.25rem!important}.mb-2{margin-bottom:.5rem!important}.mb-3{margin-bottom:1rem!important}.mb-4{margin-bottom:1.5rem!important}.mb-5{margin-bottom:3rem!important}.mb-auto{margin-bottom:auto!important}.ms-0{margin-left:0!important}.ms-1{margin-left:.25rem!important}.ms-2{margin-left:.5rem!important}.ms-3{margin-left:1rem!important}.ms-4{margin-left:1.5rem!important}.ms-5{margin-left:3rem!important}.ms-auto{margin-left:auto!important}.p-0{padding:0!important}.p-1{padding:.25rem!important}.p-2{padding:.5rem!important}.p-3{padding:1rem!important}.p-4{padding:1.5rem!important}.p-5{padding:3rem!important}.px-0{padding-right:0!important;padding-left:0!important}.px-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-3{padding-right:1rem!important;padding-left:1rem!important}.px-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-5{padding-right:3rem!important;padding-left:3rem!important}.py-0{padding-top:0!important;padding-bottom:0!important}.py-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-0{padding-top:0!important}.pt-1{padding-top:.25rem!important}.pt-2{padding-top:.5rem!important}.pt-3{padding-top:1rem!important}.pt-4{padding-top:1.5rem!important}.pt-5{padding-top:3rem!important}.pe-0{padding-right:0!important}.pe-1{padding-right:.25rem!important}.pe-2{padding-right:.5rem!important}.pe-3{padding-right:1rem!important}.pe-4{padding-right:1.5rem!important}.pe-5{padding-right:3rem!important}.pb-0{padding-bottom:0!important}.pb-1{padding-bottom:.25rem!important}.pb-2{padding-bottom:.5rem!important}.pb-3{padding-bottom:1rem!important}.pb-4{padding-bottom:1.5rem!important}.pb-5{padding-bottom:3rem!important}.ps-0{padding-left:0!important}.ps-1{padding-left:.25rem!important}.ps-2{padding-left:.5rem!important}.ps-3{padding-left:1rem!important}.ps-4{padding-left:1.5rem!important}.ps-5{padding-left:3rem!important}.font-monospace{font-family:var(--bs-font-monospace)!important}.fs-1{font-size:calc(1.375rem + 1.5vw)!important}.fs-2{font-size:calc(1.325rem + .9vw)!important}.fs-3{font-size:calc(1.3rem + .6vw)!important}.fs-4{font-size:calc(1.275rem + 
.3vw)!important}.fs-5{font-size:1.25rem!important}.fs-6{font-size:1rem!important}.fst-italic{font-style:italic!important}.fst-normal{font-style:normal!important}.fw-light{font-weight:300!important}.fw-lighter{font-weight:lighter!important}.fw-normal{font-weight:400!important}.fw-bold{font-weight:700!important}.fw-bolder{font-weight:bolder!important}.lh-1{line-height:1!important}.lh-sm{line-height:1.25!important}.lh-base{line-height:1.5!important}.lh-lg{line-height:2!important}.text-start{text-align:left!important}.text-end{text-align:right!important}.text-center{text-align:center!important}.text-decoration-none{text-decoration:none!important}.text-decoration-underline{text-decoration:underline!important}.text-decoration-line-through{text-decoration:line-through!important}.text-lowercase{text-transform:lowercase!important}.text-uppercase{text-transform:uppercase!important}.text-capitalize{text-transform:capitalize!important}.text-wrap{white-space:normal!important}.text-nowrap{white-space:nowrap!important}.text-break{word-wrap:break-word!important;word-break:break-word!important}.text-primary{color:#0d6efd!important}.text-secondary{color:#6c757d!important}.text-success{color:#198754!important}.text-info{color:#0dcaf0!important}.text-warning{color:#ffc107!important}.text-danger{color:#dc3545!important}.text-light{color:#f8f9fa!important}.text-dark{color:#212529!important}.text-white{color:#fff!important}.text-body{color:#212529!important}.text-muted{color:#6c757d!important}.text-black-50{color:rgba(0,0,0,.5)!important}.text-white-50{color:rgba(255,255,255,.5)!important}.text-reset{color:inherit!important}.bg-primary{background-color:#0d6efd!important}.bg-secondary{background-color:#6c757d!important}.bg-success{background-color:#198754!important}.bg-info{background-color:#0dcaf0!important}.bg-warning{background-color:#ffc107!important}.bg-danger{background-color:#dc3545!important}.bg-light{background-color:#f8f9fa!important}.bg-dark{background-color:#212529!important}.bg-body{background-color:#fff!important}.bg-white{background-color:#fff!important}.bg-transparent{background-color:transparent!important}.bg-gradient{background-image:var(--bs-gradient)!important}.user-select-all{-webkit-user-select:all!important;-moz-user-select:all!important;user-select:all!important}.user-select-auto{-webkit-user-select:auto!important;-moz-user-select:auto!important;user-select:auto!important}.user-select-none{-webkit-user-select:none!important;-moz-user-select:none!important;user-select:none!important}.pe-none{pointer-events:none!important}.pe-auto{pointer-events:auto!important}.rounded{border-radius:.25rem!important}.rounded-0{border-radius:0!important}.rounded-1{border-radius:.2rem!important}.rounded-2{border-radius:.25rem!important}.rounded-3{border-radius:.3rem!important}.rounded-circle{border-radius:50%!important}.rounded-pill{border-radius:50rem!important}.rounded-top{border-top-left-radius:.25rem!important;border-top-right-radius:.25rem!important}.rounded-end{border-top-right-radius:.25rem!important;border-bottom-right-radius:.25rem!important}.rounded-bottom{border-bottom-right-radius:.25rem!important;border-bottom-left-radius:.25rem!important}.rounded-start{border-bottom-left-radius:.25rem!important;border-top-left-radius:.25rem!important}.visible{visibility:visible!important}.invisible{visibility:hidden!important}@media 
(min-width:576px){.float-sm-start{float:left!important}.float-sm-end{float:right!important}.float-sm-none{float:none!important}.d-sm-inline{display:inline!important}.d-sm-inline-block{display:inline-block!important}.d-sm-block{display:block!important}.d-sm-grid{display:grid!important}.d-sm-table{display:table!important}.d-sm-table-row{display:table-row!important}.d-sm-table-cell{display:table-cell!important}.d-sm-flex{display:flex!important}.d-sm-inline-flex{display:inline-flex!important}.d-sm-none{display:none!important}.flex-sm-fill{flex:1 1 auto!important}.flex-sm-row{flex-direction:row!important}.flex-sm-column{flex-direction:column!important}.flex-sm-row-reverse{flex-direction:row-reverse!important}.flex-sm-column-reverse{flex-direction:column-reverse!important}.flex-sm-grow-0{flex-grow:0!important}.flex-sm-grow-1{flex-grow:1!important}.flex-sm-shrink-0{flex-shrink:0!important}.flex-sm-shrink-1{flex-shrink:1!important}.flex-sm-wrap{flex-wrap:wrap!important}.flex-sm-nowrap{flex-wrap:nowrap!important}.flex-sm-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-sm-0{gap:0!important}.gap-sm-1{gap:.25rem!important}.gap-sm-2{gap:.5rem!important}.gap-sm-3{gap:1rem!important}.gap-sm-4{gap:1.5rem!important}.gap-sm-5{gap:3rem!important}.justify-content-sm-start{justify-content:flex-start!important}.justify-content-sm-end{justify-content:flex-end!important}.justify-content-sm-center{justify-content:center!important}.justify-content-sm-between{justify-content:space-between!important}.justify-content-sm-around{justify-content:space-around!important}.justify-content-sm-evenly{justify-content:space-evenly!important}.align-items-sm-start{align-items:flex-start!important}.align-items-sm-end{align-items:flex-end!important}.align-items-sm-center{align-items:center!important}.align-items-sm-baseline{align-items:baseline!important}.align-items-sm-stretch{align-items:stretch!important}.align-content-sm-start{align-content:flex-start!important}.align-content-sm-end{align-content:flex-end!important}.align-content-sm-center{align-content:center!important}.align-content-sm-between{align-content:space-between!important}.align-content-sm-around{align-content:space-around!important}.align-content-sm-stretch{align-content:stretch!important}.align-self-sm-auto{align-self:auto!important}.align-self-sm-start{align-self:flex-start!important}.align-self-sm-end{align-self:flex-end!important}.align-self-sm-center{align-self:center!important}.align-self-sm-baseline{align-self:baseline!important}.align-self-sm-stretch{align-self:stretch!important}.order-sm-first{order:-1!important}.order-sm-0{order:0!important}.order-sm-1{order:1!important}.order-sm-2{order:2!important}.order-sm-3{order:3!important}.order-sm-4{order:4!important}.order-sm-5{order:5!important}.order-sm-last{order:6!important}.m-sm-0{margin:0!important}.m-sm-1{margin:.25rem!important}.m-sm-2{margin:.5rem!important}.m-sm-3{margin:1rem!important}.m-sm-4{margin:1.5rem!important}.m-sm-5{margin:3rem!important}.m-sm-auto{margin:auto!important}.mx-sm-0{margin-right:0!important;margin-left:0!important}.mx-sm-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-sm-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-sm-3{margin-right:1rem!important;margin-left:1rem!important}.mx-sm-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-sm-5{margin-right:3rem!important;margin-left:3rem!important}.mx-sm-auto{margin-right:auto!important;margin-left:auto!important}.my-sm-0{margin-top:0!important;margin-bottom:0!important}.my-sm-1{margin-top:.
25rem!important;margin-bottom:.25rem!important}.my-sm-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-sm-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-sm-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-sm-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-sm-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-sm-0{margin-top:0!important}.mt-sm-1{margin-top:.25rem!important}.mt-sm-2{margin-top:.5rem!important}.mt-sm-3{margin-top:1rem!important}.mt-sm-4{margin-top:1.5rem!important}.mt-sm-5{margin-top:3rem!important}.mt-sm-auto{margin-top:auto!important}.me-sm-0{margin-right:0!important}.me-sm-1{margin-right:.25rem!important}.me-sm-2{margin-right:.5rem!important}.me-sm-3{margin-right:1rem!important}.me-sm-4{margin-right:1.5rem!important}.me-sm-5{margin-right:3rem!important}.me-sm-auto{margin-right:auto!important}.mb-sm-0{margin-bottom:0!important}.mb-sm-1{margin-bottom:.25rem!important}.mb-sm-2{margin-bottom:.5rem!important}.mb-sm-3{margin-bottom:1rem!important}.mb-sm-4{margin-bottom:1.5rem!important}.mb-sm-5{margin-bottom:3rem!important}.mb-sm-auto{margin-bottom:auto!important}.ms-sm-0{margin-left:0!important}.ms-sm-1{margin-left:.25rem!important}.ms-sm-2{margin-left:.5rem!important}.ms-sm-3{margin-left:1rem!important}.ms-sm-4{margin-left:1.5rem!important}.ms-sm-5{margin-left:3rem!important}.ms-sm-auto{margin-left:auto!important}.p-sm-0{padding:0!important}.p-sm-1{padding:.25rem!important}.p-sm-2{padding:.5rem!important}.p-sm-3{padding:1rem!important}.p-sm-4{padding:1.5rem!important}.p-sm-5{padding:3rem!important}.px-sm-0{padding-right:0!important;padding-left:0!important}.px-sm-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-sm-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-sm-3{padding-right:1rem!important;padding-left:1rem!important}.px-sm-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-sm-5{padding-right:3rem!important;padding-left:3rem!important}.py-sm-0{padding-top:0!important;padding-bottom:0!important}.py-sm-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-sm-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-sm-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-sm-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-sm-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-sm-0{padding-top:0!important}.pt-sm-1{padding-top:.25rem!important}.pt-sm-2{padding-top:.5rem!important}.pt-sm-3{padding-top:1rem!important}.pt-sm-4{padding-top:1.5rem!important}.pt-sm-5{padding-top:3rem!important}.pe-sm-0{padding-right:0!important}.pe-sm-1{padding-right:.25rem!important}.pe-sm-2{padding-right:.5rem!important}.pe-sm-3{padding-right:1rem!important}.pe-sm-4{padding-right:1.5rem!important}.pe-sm-5{padding-right:3rem!important}.pb-sm-0{padding-bottom:0!important}.pb-sm-1{padding-bottom:.25rem!important}.pb-sm-2{padding-bottom:.5rem!important}.pb-sm-3{padding-bottom:1rem!important}.pb-sm-4{padding-bottom:1.5rem!important}.pb-sm-5{padding-bottom:3rem!important}.ps-sm-0{padding-left:0!important}.ps-sm-1{padding-left:.25rem!important}.ps-sm-2{padding-left:.5rem!important}.ps-sm-3{padding-left:1rem!important}.ps-sm-4{padding-left:1.5rem!important}.ps-sm-5{padding-left:3rem!important}.text-sm-start{text-align:left!important}.text-sm-end{text-align:right!important}.text-sm-center{text-align:center!important}}@media 
(min-width:768px){.float-md-start{float:left!important}.float-md-end{float:right!important}.float-md-none{float:none!important}.d-md-inline{display:inline!important}.d-md-inline-block{display:inline-block!important}.d-md-block{display:block!important}.d-md-grid{display:grid!important}.d-md-table{display:table!important}.d-md-table-row{display:table-row!important}.d-md-table-cell{display:table-cell!important}.d-md-flex{display:flex!important}.d-md-inline-flex{display:inline-flex!important}.d-md-none{display:none!important}.flex-md-fill{flex:1 1 auto!important}.flex-md-row{flex-direction:row!important}.flex-md-column{flex-direction:column!important}.flex-md-row-reverse{flex-direction:row-reverse!important}.flex-md-column-reverse{flex-direction:column-reverse!important}.flex-md-grow-0{flex-grow:0!important}.flex-md-grow-1{flex-grow:1!important}.flex-md-shrink-0{flex-shrink:0!important}.flex-md-shrink-1{flex-shrink:1!important}.flex-md-wrap{flex-wrap:wrap!important}.flex-md-nowrap{flex-wrap:nowrap!important}.flex-md-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-md-0{gap:0!important}.gap-md-1{gap:.25rem!important}.gap-md-2{gap:.5rem!important}.gap-md-3{gap:1rem!important}.gap-md-4{gap:1.5rem!important}.gap-md-5{gap:3rem!important}.justify-content-md-start{justify-content:flex-start!important}.justify-content-md-end{justify-content:flex-end!important}.justify-content-md-center{justify-content:center!important}.justify-content-md-between{justify-content:space-between!important}.justify-content-md-around{justify-content:space-around!important}.justify-content-md-evenly{justify-content:space-evenly!important}.align-items-md-start{align-items:flex-start!important}.align-items-md-end{align-items:flex-end!important}.align-items-md-center{align-items:center!important}.align-items-md-baseline{align-items:baseline!important}.align-items-md-stretch{align-items:stretch!important}.align-content-md-start{align-content:flex-start!important}.align-content-md-end{align-content:flex-end!important}.align-content-md-center{align-content:center!important}.align-content-md-between{align-content:space-between!important}.align-content-md-around{align-content:space-around!important}.align-content-md-stretch{align-content:stretch!important}.align-self-md-auto{align-self:auto!important}.align-self-md-start{align-self:flex-start!important}.align-self-md-end{align-self:flex-end!important}.align-self-md-center{align-self:center!important}.align-self-md-baseline{align-self:baseline!important}.align-self-md-stretch{align-self:stretch!important}.order-md-first{order:-1!important}.order-md-0{order:0!important}.order-md-1{order:1!important}.order-md-2{order:2!important}.order-md-3{order:3!important}.order-md-4{order:4!important}.order-md-5{order:5!important}.order-md-last{order:6!important}.m-md-0{margin:0!important}.m-md-1{margin:.25rem!important}.m-md-2{margin:.5rem!important}.m-md-3{margin:1rem!important}.m-md-4{margin:1.5rem!important}.m-md-5{margin:3rem!important}.m-md-auto{margin:auto!important}.mx-md-0{margin-right:0!important;margin-left:0!important}.mx-md-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-md-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-md-3{margin-right:1rem!important;margin-left:1rem!important}.mx-md-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-md-5{margin-right:3rem!important;margin-left:3rem!important}.mx-md-auto{margin-right:auto!important;margin-left:auto!important}.my-md-0{margin-top:0!important;margin-bottom:0!important}.my-md-1{margin-top:.
25rem!important;margin-bottom:.25rem!important}.my-md-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-md-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-md-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-md-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-md-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-md-0{margin-top:0!important}.mt-md-1{margin-top:.25rem!important}.mt-md-2{margin-top:.5rem!important}.mt-md-3{margin-top:1rem!important}.mt-md-4{margin-top:1.5rem!important}.mt-md-5{margin-top:3rem!important}.mt-md-auto{margin-top:auto!important}.me-md-0{margin-right:0!important}.me-md-1{margin-right:.25rem!important}.me-md-2{margin-right:.5rem!important}.me-md-3{margin-right:1rem!important}.me-md-4{margin-right:1.5rem!important}.me-md-5{margin-right:3rem!important}.me-md-auto{margin-right:auto!important}.mb-md-0{margin-bottom:0!important}.mb-md-1{margin-bottom:.25rem!important}.mb-md-2{margin-bottom:.5rem!important}.mb-md-3{margin-bottom:1rem!important}.mb-md-4{margin-bottom:1.5rem!important}.mb-md-5{margin-bottom:3rem!important}.mb-md-auto{margin-bottom:auto!important}.ms-md-0{margin-left:0!important}.ms-md-1{margin-left:.25rem!important}.ms-md-2{margin-left:.5rem!important}.ms-md-3{margin-left:1rem!important}.ms-md-4{margin-left:1.5rem!important}.ms-md-5{margin-left:3rem!important}.ms-md-auto{margin-left:auto!important}.p-md-0{padding:0!important}.p-md-1{padding:.25rem!important}.p-md-2{padding:.5rem!important}.p-md-3{padding:1rem!important}.p-md-4{padding:1.5rem!important}.p-md-5{padding:3rem!important}.px-md-0{padding-right:0!important;padding-left:0!important}.px-md-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-md-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-md-3{padding-right:1rem!important;padding-left:1rem!important}.px-md-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-md-5{padding-right:3rem!important;padding-left:3rem!important}.py-md-0{padding-top:0!important;padding-bottom:0!important}.py-md-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-md-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-md-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-md-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-md-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-md-0{padding-top:0!important}.pt-md-1{padding-top:.25rem!important}.pt-md-2{padding-top:.5rem!important}.pt-md-3{padding-top:1rem!important}.pt-md-4{padding-top:1.5rem!important}.pt-md-5{padding-top:3rem!important}.pe-md-0{padding-right:0!important}.pe-md-1{padding-right:.25rem!important}.pe-md-2{padding-right:.5rem!important}.pe-md-3{padding-right:1rem!important}.pe-md-4{padding-right:1.5rem!important}.pe-md-5{padding-right:3rem!important}.pb-md-0{padding-bottom:0!important}.pb-md-1{padding-bottom:.25rem!important}.pb-md-2{padding-bottom:.5rem!important}.pb-md-3{padding-bottom:1rem!important}.pb-md-4{padding-bottom:1.5rem!important}.pb-md-5{padding-bottom:3rem!important}.ps-md-0{padding-left:0!important}.ps-md-1{padding-left:.25rem!important}.ps-md-2{padding-left:.5rem!important}.ps-md-3{padding-left:1rem!important}.ps-md-4{padding-left:1.5rem!important}.ps-md-5{padding-left:3rem!important}.text-md-start{text-align:left!important}.text-md-end{text-align:right!important}.text-md-center{text-align:center!important}}@media 
(min-width:992px){.float-lg-start{float:left!important}.float-lg-end{float:right!important}.float-lg-none{float:none!important}.d-lg-inline{display:inline!important}.d-lg-inline-block{display:inline-block!important}.d-lg-block{display:block!important}.d-lg-grid{display:grid!important}.d-lg-table{display:table!important}.d-lg-table-row{display:table-row!important}.d-lg-table-cell{display:table-cell!important}.d-lg-flex{display:flex!important}.d-lg-inline-flex{display:inline-flex!important}.d-lg-none{display:none!important}.flex-lg-fill{flex:1 1 auto!important}.flex-lg-row{flex-direction:row!important}.flex-lg-column{flex-direction:column!important}.flex-lg-row-reverse{flex-direction:row-reverse!important}.flex-lg-column-reverse{flex-direction:column-reverse!important}.flex-lg-grow-0{flex-grow:0!important}.flex-lg-grow-1{flex-grow:1!important}.flex-lg-shrink-0{flex-shrink:0!important}.flex-lg-shrink-1{flex-shrink:1!important}.flex-lg-wrap{flex-wrap:wrap!important}.flex-lg-nowrap{flex-wrap:nowrap!important}.flex-lg-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-lg-0{gap:0!important}.gap-lg-1{gap:.25rem!important}.gap-lg-2{gap:.5rem!important}.gap-lg-3{gap:1rem!important}.gap-lg-4{gap:1.5rem!important}.gap-lg-5{gap:3rem!important}.justify-content-lg-start{justify-content:flex-start!important}.justify-content-lg-end{justify-content:flex-end!important}.justify-content-lg-center{justify-content:center!important}.justify-content-lg-between{justify-content:space-between!important}.justify-content-lg-around{justify-content:space-around!important}.justify-content-lg-evenly{justify-content:space-evenly!important}.align-items-lg-start{align-items:flex-start!important}.align-items-lg-end{align-items:flex-end!important}.align-items-lg-center{align-items:center!important}.align-items-lg-baseline{align-items:baseline!important}.align-items-lg-stretch{align-items:stretch!important}.align-content-lg-start{align-content:flex-start!important}.align-content-lg-end{align-content:flex-end!important}.align-content-lg-center{align-content:center!important}.align-content-lg-between{align-content:space-between!important}.align-content-lg-around{align-content:space-around!important}.align-content-lg-stretch{align-content:stretch!important}.align-self-lg-auto{align-self:auto!important}.align-self-lg-start{align-self:flex-start!important}.align-self-lg-end{align-self:flex-end!important}.align-self-lg-center{align-self:center!important}.align-self-lg-baseline{align-self:baseline!important}.align-self-lg-stretch{align-self:stretch!important}.order-lg-first{order:-1!important}.order-lg-0{order:0!important}.order-lg-1{order:1!important}.order-lg-2{order:2!important}.order-lg-3{order:3!important}.order-lg-4{order:4!important}.order-lg-5{order:5!important}.order-lg-last{order:6!important}.m-lg-0{margin:0!important}.m-lg-1{margin:.25rem!important}.m-lg-2{margin:.5rem!important}.m-lg-3{margin:1rem!important}.m-lg-4{margin:1.5rem!important}.m-lg-5{margin:3rem!important}.m-lg-auto{margin:auto!important}.mx-lg-0{margin-right:0!important;margin-left:0!important}.mx-lg-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-lg-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-lg-3{margin-right:1rem!important;margin-left:1rem!important}.mx-lg-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-lg-5{margin-right:3rem!important;margin-left:3rem!important}.mx-lg-auto{margin-right:auto!important;margin-left:auto!important}.my-lg-0{margin-top:0!important;margin-bottom:0!important}.my-lg-1{margin-top:.
25rem!important;margin-bottom:.25rem!important}.my-lg-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-lg-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-lg-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-lg-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-lg-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-lg-0{margin-top:0!important}.mt-lg-1{margin-top:.25rem!important}.mt-lg-2{margin-top:.5rem!important}.mt-lg-3{margin-top:1rem!important}.mt-lg-4{margin-top:1.5rem!important}.mt-lg-5{margin-top:3rem!important}.mt-lg-auto{margin-top:auto!important}.me-lg-0{margin-right:0!important}.me-lg-1{margin-right:.25rem!important}.me-lg-2{margin-right:.5rem!important}.me-lg-3{margin-right:1rem!important}.me-lg-4{margin-right:1.5rem!important}.me-lg-5{margin-right:3rem!important}.me-lg-auto{margin-right:auto!important}.mb-lg-0{margin-bottom:0!important}.mb-lg-1{margin-bottom:.25rem!important}.mb-lg-2{margin-bottom:.5rem!important}.mb-lg-3{margin-bottom:1rem!important}.mb-lg-4{margin-bottom:1.5rem!important}.mb-lg-5{margin-bottom:3rem!important}.mb-lg-auto{margin-bottom:auto!important}.ms-lg-0{margin-left:0!important}.ms-lg-1{margin-left:.25rem!important}.ms-lg-2{margin-left:.5rem!important}.ms-lg-3{margin-left:1rem!important}.ms-lg-4{margin-left:1.5rem!important}.ms-lg-5{margin-left:3rem!important}.ms-lg-auto{margin-left:auto!important}.p-lg-0{padding:0!important}.p-lg-1{padding:.25rem!important}.p-lg-2{padding:.5rem!important}.p-lg-3{padding:1rem!important}.p-lg-4{padding:1.5rem!important}.p-lg-5{padding:3rem!important}.px-lg-0{padding-right:0!important;padding-left:0!important}.px-lg-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-lg-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-lg-3{padding-right:1rem!important;padding-left:1rem!important}.px-lg-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-lg-5{padding-right:3rem!important;padding-left:3rem!important}.py-lg-0{padding-top:0!important;padding-bottom:0!important}.py-lg-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-lg-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-lg-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-lg-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-lg-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-lg-0{padding-top:0!important}.pt-lg-1{padding-top:.25rem!important}.pt-lg-2{padding-top:.5rem!important}.pt-lg-3{padding-top:1rem!important}.pt-lg-4{padding-top:1.5rem!important}.pt-lg-5{padding-top:3rem!important}.pe-lg-0{padding-right:0!important}.pe-lg-1{padding-right:.25rem!important}.pe-lg-2{padding-right:.5rem!important}.pe-lg-3{padding-right:1rem!important}.pe-lg-4{padding-right:1.5rem!important}.pe-lg-5{padding-right:3rem!important}.pb-lg-0{padding-bottom:0!important}.pb-lg-1{padding-bottom:.25rem!important}.pb-lg-2{padding-bottom:.5rem!important}.pb-lg-3{padding-bottom:1rem!important}.pb-lg-4{padding-bottom:1.5rem!important}.pb-lg-5{padding-bottom:3rem!important}.ps-lg-0{padding-left:0!important}.ps-lg-1{padding-left:.25rem!important}.ps-lg-2{padding-left:.5rem!important}.ps-lg-3{padding-left:1rem!important}.ps-lg-4{padding-left:1.5rem!important}.ps-lg-5{padding-left:3rem!important}.text-lg-start{text-align:left!important}.text-lg-end{text-align:right!important}.text-lg-center{text-align:center!important}}@media 
(min-width:1200px){.float-xl-start{float:left!important}.float-xl-end{float:right!important}.float-xl-none{float:none!important}.d-xl-inline{display:inline!important}.d-xl-inline-block{display:inline-block!important}.d-xl-block{display:block!important}.d-xl-grid{display:grid!important}.d-xl-table{display:table!important}.d-xl-table-row{display:table-row!important}.d-xl-table-cell{display:table-cell!important}.d-xl-flex{display:flex!important}.d-xl-inline-flex{display:inline-flex!important}.d-xl-none{display:none!important}.flex-xl-fill{flex:1 1 auto!important}.flex-xl-row{flex-direction:row!important}.flex-xl-column{flex-direction:column!important}.flex-xl-row-reverse{flex-direction:row-reverse!important}.flex-xl-column-reverse{flex-direction:column-reverse!important}.flex-xl-grow-0{flex-grow:0!important}.flex-xl-grow-1{flex-grow:1!important}.flex-xl-shrink-0{flex-shrink:0!important}.flex-xl-shrink-1{flex-shrink:1!important}.flex-xl-wrap{flex-wrap:wrap!important}.flex-xl-nowrap{flex-wrap:nowrap!important}.flex-xl-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-xl-0{gap:0!important}.gap-xl-1{gap:.25rem!important}.gap-xl-2{gap:.5rem!important}.gap-xl-3{gap:1rem!important}.gap-xl-4{gap:1.5rem!important}.gap-xl-5{gap:3rem!important}.justify-content-xl-start{justify-content:flex-start!important}.justify-content-xl-end{justify-content:flex-end!important}.justify-content-xl-center{justify-content:center!important}.justify-content-xl-between{justify-content:space-between!important}.justify-content-xl-around{justify-content:space-around!important}.justify-content-xl-evenly{justify-content:space-evenly!important}.align-items-xl-start{align-items:flex-start!important}.align-items-xl-end{align-items:flex-end!important}.align-items-xl-center{align-items:center!important}.align-items-xl-baseline{align-items:baseline!important}.align-items-xl-stretch{align-items:stretch!important}.align-content-xl-start{align-content:flex-start!important}.align-content-xl-end{align-content:flex-end!important}.align-content-xl-center{align-content:center!important}.align-content-xl-between{align-content:space-between!important}.align-content-xl-around{align-content:space-around!important}.align-content-xl-stretch{align-content:stretch!important}.align-self-xl-auto{align-self:auto!important}.align-self-xl-start{align-self:flex-start!important}.align-self-xl-end{align-self:flex-end!important}.align-self-xl-center{align-self:center!important}.align-self-xl-baseline{align-self:baseline!important}.align-self-xl-stretch{align-self:stretch!important}.order-xl-first{order:-1!important}.order-xl-0{order:0!important}.order-xl-1{order:1!important}.order-xl-2{order:2!important}.order-xl-3{order:3!important}.order-xl-4{order:4!important}.order-xl-5{order:5!important}.order-xl-last{order:6!important}.m-xl-0{margin:0!important}.m-xl-1{margin:.25rem!important}.m-xl-2{margin:.5rem!important}.m-xl-3{margin:1rem!important}.m-xl-4{margin:1.5rem!important}.m-xl-5{margin:3rem!important}.m-xl-auto{margin:auto!important}.mx-xl-0{margin-right:0!important;margin-left:0!important}.mx-xl-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-xl-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-xl-3{margin-right:1rem!important;margin-left:1rem!important}.mx-xl-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-xl-5{margin-right:3rem!important;margin-left:3rem!important}.mx-xl-auto{margin-right:auto!important;margin-left:auto!important}.my-xl-0{margin-top:0!important;margin-bottom:0!important}.my-xl-1{margin-top:
.25rem!important;margin-bottom:.25rem!important}.my-xl-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-xl-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-xl-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-xl-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-xl-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-xl-0{margin-top:0!important}.mt-xl-1{margin-top:.25rem!important}.mt-xl-2{margin-top:.5rem!important}.mt-xl-3{margin-top:1rem!important}.mt-xl-4{margin-top:1.5rem!important}.mt-xl-5{margin-top:3rem!important}.mt-xl-auto{margin-top:auto!important}.me-xl-0{margin-right:0!important}.me-xl-1{margin-right:.25rem!important}.me-xl-2{margin-right:.5rem!important}.me-xl-3{margin-right:1rem!important}.me-xl-4{margin-right:1.5rem!important}.me-xl-5{margin-right:3rem!important}.me-xl-auto{margin-right:auto!important}.mb-xl-0{margin-bottom:0!important}.mb-xl-1{margin-bottom:.25rem!important}.mb-xl-2{margin-bottom:.5rem!important}.mb-xl-3{margin-bottom:1rem!important}.mb-xl-4{margin-bottom:1.5rem!important}.mb-xl-5{margin-bottom:3rem!important}.mb-xl-auto{margin-bottom:auto!important}.ms-xl-0{margin-left:0!important}.ms-xl-1{margin-left:.25rem!important}.ms-xl-2{margin-left:.5rem!important}.ms-xl-3{margin-left:1rem!important}.ms-xl-4{margin-left:1.5rem!important}.ms-xl-5{margin-left:3rem!important}.ms-xl-auto{margin-left:auto!important}.p-xl-0{padding:0!important}.p-xl-1{padding:.25rem!important}.p-xl-2{padding:.5rem!important}.p-xl-3{padding:1rem!important}.p-xl-4{padding:1.5rem!important}.p-xl-5{padding:3rem!important}.px-xl-0{padding-right:0!important;padding-left:0!important}.px-xl-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-xl-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-xl-3{padding-right:1rem!important;padding-left:1rem!important}.px-xl-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-xl-5{padding-right:3rem!important;padding-left:3rem!important}.py-xl-0{padding-top:0!important;padding-bottom:0!important}.py-xl-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-xl-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-xl-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-xl-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-xl-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-xl-0{padding-top:0!important}.pt-xl-1{padding-top:.25rem!important}.pt-xl-2{padding-top:.5rem!important}.pt-xl-3{padding-top:1rem!important}.pt-xl-4{padding-top:1.5rem!important}.pt-xl-5{padding-top:3rem!important}.pe-xl-0{padding-right:0!important}.pe-xl-1{padding-right:.25rem!important}.pe-xl-2{padding-right:.5rem!important}.pe-xl-3{padding-right:1rem!important}.pe-xl-4{padding-right:1.5rem!important}.pe-xl-5{padding-right:3rem!important}.pb-xl-0{padding-bottom:0!important}.pb-xl-1{padding-bottom:.25rem!important}.pb-xl-2{padding-bottom:.5rem!important}.pb-xl-3{padding-bottom:1rem!important}.pb-xl-4{padding-bottom:1.5rem!important}.pb-xl-5{padding-bottom:3rem!important}.ps-xl-0{padding-left:0!important}.ps-xl-1{padding-left:.25rem!important}.ps-xl-2{padding-left:.5rem!important}.ps-xl-3{padding-left:1rem!important}.ps-xl-4{padding-left:1.5rem!important}.ps-xl-5{padding-left:3rem!important}.text-xl-start{text-align:left!important}.text-xl-end{text-align:right!important}.text-xl-center{text-align:center!important}}@media 
(min-width:1400px){.float-xxl-start{float:left!important}.float-xxl-end{float:right!important}.float-xxl-none{float:none!important}.d-xxl-inline{display:inline!important}.d-xxl-inline-block{display:inline-block!important}.d-xxl-block{display:block!important}.d-xxl-grid{display:grid!important}.d-xxl-table{display:table!important}.d-xxl-table-row{display:table-row!important}.d-xxl-table-cell{display:table-cell!important}.d-xxl-flex{display:flex!important}.d-xxl-inline-flex{display:inline-flex!important}.d-xxl-none{display:none!important}.flex-xxl-fill{flex:1 1 auto!important}.flex-xxl-row{flex-direction:row!important}.flex-xxl-column{flex-direction:column!important}.flex-xxl-row-reverse{flex-direction:row-reverse!important}.flex-xxl-column-reverse{flex-direction:column-reverse!important}.flex-xxl-grow-0{flex-grow:0!important}.flex-xxl-grow-1{flex-grow:1!important}.flex-xxl-shrink-0{flex-shrink:0!important}.flex-xxl-shrink-1{flex-shrink:1!important}.flex-xxl-wrap{flex-wrap:wrap!important}.flex-xxl-nowrap{flex-wrap:nowrap!important}.flex-xxl-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-xxl-0{gap:0!important}.gap-xxl-1{gap:.25rem!important}.gap-xxl-2{gap:.5rem!important}.gap-xxl-3{gap:1rem!important}.gap-xxl-4{gap:1.5rem!important}.gap-xxl-5{gap:3rem!important}.justify-content-xxl-start{justify-content:flex-start!important}.justify-content-xxl-end{justify-content:flex-end!important}.justify-content-xxl-center{justify-content:center!important}.justify-content-xxl-between{justify-content:space-between!important}.justify-content-xxl-around{justify-content:space-around!important}.justify-content-xxl-evenly{justify-content:space-evenly!important}.align-items-xxl-start{align-items:flex-start!important}.align-items-xxl-end{align-items:flex-end!important}.align-items-xxl-center{align-items:center!important}.align-items-xxl-baseline{align-items:baseline!important}.align-items-xxl-stretch{align-items:stretch!important}.align-content-xxl-start{align-content:flex-start!important}.align-content-xxl-end{align-content:flex-end!important}.align-content-xxl-center{align-content:center!important}.align-content-xxl-between{align-content:space-between!important}.align-content-xxl-around{align-content:space-around!important}.align-content-xxl-stretch{align-content:stretch!important}.align-self-xxl-auto{align-self:auto!important}.align-self-xxl-start{align-self:flex-start!important}.align-self-xxl-end{align-self:flex-end!important}.align-self-xxl-center{align-self:center!important}.align-self-xxl-baseline{align-self:baseline!important}.align-self-xxl-stretch{align-self:stretch!important}.order-xxl-first{order:-1!important}.order-xxl-0{order:0!important}.order-xxl-1{order:1!important}.order-xxl-2{order:2!important}.order-xxl-3{order:3!important}.order-xxl-4{order:4!important}.order-xxl-5{order:5!important}.order-xxl-last{order:6!important}.m-xxl-0{margin:0!important}.m-xxl-1{margin:.25rem!important}.m-xxl-2{margin:.5rem!important}.m-xxl-3{margin:1rem!important}.m-xxl-4{margin:1.5rem!important}.m-xxl-5{margin:3rem!important}.m-xxl-auto{margin:auto!important}.mx-xxl-0{margin-right:0!important;margin-left:0!important}.mx-xxl-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-xxl-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-xxl-3{margin-right:1rem!important;margin-left:1rem!important}.mx-xxl-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-xxl-5{margin-right:3rem!important;margin-left:3rem!important}.mx-xxl-auto{margin-right:auto!important;margin-left:auto!important}.m
y-xxl-0{margin-top:0!important;margin-bottom:0!important}.my-xxl-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-xxl-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-xxl-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-xxl-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-xxl-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-xxl-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-xxl-0{margin-top:0!important}.mt-xxl-1{margin-top:.25rem!important}.mt-xxl-2{margin-top:.5rem!important}.mt-xxl-3{margin-top:1rem!important}.mt-xxl-4{margin-top:1.5rem!important}.mt-xxl-5{margin-top:3rem!important}.mt-xxl-auto{margin-top:auto!important}.me-xxl-0{margin-right:0!important}.me-xxl-1{margin-right:.25rem!important}.me-xxl-2{margin-right:.5rem!important}.me-xxl-3{margin-right:1rem!important}.me-xxl-4{margin-right:1.5rem!important}.me-xxl-5{margin-right:3rem!important}.me-xxl-auto{margin-right:auto!important}.mb-xxl-0{margin-bottom:0!important}.mb-xxl-1{margin-bottom:.25rem!important}.mb-xxl-2{margin-bottom:.5rem!important}.mb-xxl-3{margin-bottom:1rem!important}.mb-xxl-4{margin-bottom:1.5rem!important}.mb-xxl-5{margin-bottom:3rem!important}.mb-xxl-auto{margin-bottom:auto!important}.ms-xxl-0{margin-left:0!important}.ms-xxl-1{margin-left:.25rem!important}.ms-xxl-2{margin-left:.5rem!important}.ms-xxl-3{margin-left:1rem!important}.ms-xxl-4{margin-left:1.5rem!important}.ms-xxl-5{margin-left:3rem!important}.ms-xxl-auto{margin-left:auto!important}.p-xxl-0{padding:0!important}.p-xxl-1{padding:.25rem!important}.p-xxl-2{padding:.5rem!important}.p-xxl-3{padding:1rem!important}.p-xxl-4{padding:1.5rem!important}.p-xxl-5{padding:3rem!important}.px-xxl-0{padding-right:0!important;padding-left:0!important}.px-xxl-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-xxl-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-xxl-3{padding-right:1rem!important;padding-left:1rem!important}.px-xxl-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-xxl-5{padding-right:3rem!important;padding-left:3rem!important}.py-xxl-0{padding-top:0!important;padding-bottom:0!important}.py-xxl-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-xxl-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-xxl-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-xxl-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-xxl-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-xxl-0{padding-top:0!important}.pt-xxl-1{padding-top:.25rem!important}.pt-xxl-2{padding-top:.5rem!important}.pt-xxl-3{padding-top:1rem!important}.pt-xxl-4{padding-top:1.5rem!important}.pt-xxl-5{padding-top:3rem!important}.pe-xxl-0{padding-right:0!important}.pe-xxl-1{padding-right:.25rem!important}.pe-xxl-2{padding-right:.5rem!important}.pe-xxl-3{padding-right:1rem!important}.pe-xxl-4{padding-right:1.5rem!important}.pe-xxl-5{padding-right:3rem!important}.pb-xxl-0{padding-bottom:0!important}.pb-xxl-1{padding-bottom:.25rem!important}.pb-xxl-2{padding-bottom:.5rem!important}.pb-xxl-3{padding-bottom:1rem!important}.pb-xxl-4{padding-bottom:1.5rem!important}.pb-xxl-5{padding-bottom:3rem!important}.ps-xxl-0{padding-left:0!important}.ps-xxl-1{padding-left:.25rem!important}.ps-xxl-2{padding-left:.5rem!important}.ps-xxl-3{padding-left:1rem!important}.ps-xxl-4{padding-left:1.5rem!important}.ps-xxl-5{padding-left:3rem!important}.text-xxl-start{text-align:left!important}.text-xxl-end{text-align:right!important}.tex
t-xxl-center{text-align:center!important}}@media (min-width:1200px){.fs-1{font-size:2.5rem!important}.fs-2{font-size:2rem!important}.fs-3{font-size:1.75rem!important}.fs-4{font-size:1.5rem!important}}@media print{.d-print-inline{display:inline!important}.d-print-inline-block{display:inline-block!important}.d-print-block{display:block!important}.d-print-grid{display:grid!important}.d-print-table{display:table!important}.d-print-table-row{display:table-row!important}.d-print-table-cell{display:table-cell!important}.d-print-flex{display:flex!important}.d-print-inline-flex{display:inline-flex!important}.d-print-none{display:none!important}}
\ No newline at end of file
diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/bootstrap/js/bootstrap.bundle.min.js b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/bootstrap/js/bootstrap.bundle.min.js
deleted file mode 100644
index 52e2ddde..00000000
--- a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/bootstrap/js/bootstrap.bundle.min.js
+++ /dev/null
@@ -1,15 +0,0 @@
-// -*- mode: js -*-
-// =============================================================================
-// @@-COPYRIGHT-START-@@
-//
-// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved.
-// SPDX-License-Identifier: BSD-3-Clause
-//
-// @@-COPYRIGHT-END-@@
-// =============================================================================
-/*!
- * Bootstrap v5.0.1 (https://getbootstrap.com/)
- * Copyright 2011-2021 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
- */
-!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e():"function"==typeof define&&define.amd?define(e):(t="undefined"!=typeof globalThis?globalThis:t||self).bootstrap=e()}(this,(function(){"use strict";const t={find:(t,e=document.documentElement)=>[].concat(...Element.prototype.querySelectorAll.call(e,t)),findOne:(t,e=document.documentElement)=>Element.prototype.querySelector.call(e,t),children:(t,e)=>[].concat(...t.children).filter(t=>t.matches(e)),parents(t,e){const i=[];let n=t.parentNode;for(;n&&n.nodeType===Node.ELEMENT_NODE&&3!==n.nodeType;)n.matches(e)&&i.push(n),n=n.parentNode;return i},prev(t,e){let i=t.previousElementSibling;for(;i;){if(i.matches(e))return[i];i=i.previousElementSibling}return[]},next(t,e){let i=t.nextElementSibling;for(;i;){if(i.matches(e))return[i];i=i.nextElementSibling}return[]}},e=t=>{do{t+=Math.floor(1e6*Math.random())}while(document.getElementById(t));return t},i=t=>{let e=t.getAttribute("data-bs-target");if(!e||"#"===e){let i=t.getAttribute("href");if(!i||!i.includes("#")&&!i.startsWith("."))return null;i.includes("#")&&!i.startsWith("#")&&(i="#"+i.split("#")[1]),e=i&&"#"!==i?i.trim():null}return e},n=t=>{const e=i(t);return e&&document.querySelector(e)?e:null},s=t=>{const e=i(t);return e?document.querySelector(e):null},o=t=>{if(!t)return 0;let{transitionDuration:e,transitionDelay:i}=window.getComputedStyle(t);const n=Number.parseFloat(e),s=Number.parseFloat(i);return n||s?(e=e.split(",")[0],i=i.split(",")[0],1e3*(Number.parseFloat(e)+Number.parseFloat(i))):0},r=t=>{t.dispatchEvent(new Event("transitionend"))},a=t=>!(!t||"object"!=typeof t)&&(void 0!==t.jquery&&(t=t[0]),void 0!==t.nodeType),l=e=>a(e)?e.jquery?e[0]:e:"string"==typeof e&&e.length>0?t.findOne(e):null,c=(t,e)=>{let i=!1;const n=e+5;t.addEventListener("transitionend",(function 
e(){i=!0,t.removeEventListener("transitionend",e)})),setTimeout(()=>{i||r(t)},n)},d=(t,e,i)=>{Object.keys(i).forEach(n=>{const s=i[n],o=e[n],r=o&&a(o)?"element":null==(l=o)?""+l:{}.toString.call(l).match(/\s([a-z]+)/i)[1].toLowerCase();var l;if(!new RegExp(s).test(r))throw new TypeError(`${t.toUpperCase()}: Option "${n}" provided type "${r}" but expected type "${s}".`)})},h=t=>{if(!t)return!1;if(t.style&&t.parentNode&&t.parentNode.style){const e=getComputedStyle(t),i=getComputedStyle(t.parentNode);return"none"!==e.display&&"none"!==i.display&&"hidden"!==e.visibility}return!1},u=t=>!t||t.nodeType!==Node.ELEMENT_NODE||!!t.classList.contains("disabled")||(void 0!==t.disabled?t.disabled:t.hasAttribute("disabled")&&"false"!==t.getAttribute("disabled")),f=t=>{if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){const e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?f(t.parentNode):null},p=()=>{},m=t=>t.offsetHeight,g=()=>{const{jQuery:t}=window;return t&&!document.body.hasAttribute("data-bs-no-jquery")?t:null},_=()=>"rtl"===document.documentElement.dir,b=t=>{var e;e=()=>{const e=g();if(e){const i=t.NAME,n=e.fn[i];e.fn[i]=t.jQueryInterface,e.fn[i].Constructor=t,e.fn[i].noConflict=()=>(e.fn[i]=n,t.jQueryInterface)}},"loading"===document.readyState?document.addEventListener("DOMContentLoaded",e):e()},v=t=>{"function"==typeof t&&t()},y=new Map;var w={set(t,e,i){y.has(t)||y.set(t,new Map);const n=y.get(t);n.has(e)||0===n.size?n.set(e,i):console.error(`Bootstrap doesn't allow more than one instance per element. Bound instance: ${Array.from(n.keys())[0]}.`)},get:(t,e)=>y.has(t)&&y.get(t).get(e)||null,remove(t,e){if(!y.has(t))return;const i=y.get(t);i.delete(e),0===i.size&&y.delete(t)}};const E=/[^.]*(?=\..*)\.|.*/,T=/\..*/,A=/::\d+$/,L={};let O=1;const k={mouseenter:"mouseover",mouseleave:"mouseout"},C=/^(mouseenter|mouseleave)/i,x=new Set(["click","dblclick","mouseup","mousedown","contextmenu","mousewheel","DOMMouseScroll","mouseover","mouseout","mousemove","selectstart","selectend","keydown","keypress","keyup","orientationchange","touchstart","touchmove","touchend","touchcancel","pointerdown","pointermove","pointerup","pointerleave","pointercancel","gesturestart","gesturechange","gestureend","focus","blur","change","reset","select","submit","focusin","focusout","load","unload","beforeunload","resize","move","DOMContentLoaded","readystatechange","error","abort","scroll"]);function D(t,e){return e&&`${e}::${O++}`||t.uidEvent||O++}function N(t){const e=D(t);return t.uidEvent=e,L[e]=L[e]||{},L[e]}function S(t,e,i=null){const n=Object.keys(t);for(let s=0,o=n.length;sfunction(e){if(!e.relatedTarget||e.relatedTarget!==e.delegateTarget&&!e.delegateTarget.contains(e.relatedTarget))return t.call(this,e)};n?n=t(n):i=t(i)}const[o,r,a]=I(e,i,n),l=N(t),c=l[a]||(l[a]={}),d=S(c,r,o?i:null);if(d)return void(d.oneOff=d.oneOff&&s);const h=D(r,e.replace(E,"")),u=o?function(t,e,i){return function n(s){const o=t.querySelectorAll(e);for(let{target:r}=s;r&&r!==this;r=r.parentNode)for(let a=o.length;a--;)if(o[a]===r)return s.delegateTarget=r,n.oneOff&&H.off(t,s.type,e,i),i.apply(r,[s]);return null}}(t,i,n):function(t,e){return function i(n){return n.delegateTarget=t,i.oneOff&&H.off(t,n.type,e),e.apply(t,[n])}}(t,i);u.delegationSelector=o?i:null,u.originalHandler=r,u.oneOff=s,u.uidEvent=h,c[h]=u,t.addEventListener(a,u,o)}function P(t,e,i,n,s){const o=S(e[i],n,s);o&&(t.removeEventListener(i,o,Boolean(s)),delete e[i][o.uidEvent])}function 
M(t){return t=t.replace(T,""),k[t]||t}const H={on(t,e,i,n){j(t,e,i,n,!1)},one(t,e,i,n){j(t,e,i,n,!0)},off(t,e,i,n){if("string"!=typeof e||!t)return;const[s,o,r]=I(e,i,n),a=r!==e,l=N(t),c=e.startsWith(".");if(void 0!==o){if(!l||!l[r])return;return void P(t,l,r,o,s?i:null)}c&&Object.keys(l).forEach(i=>{!function(t,e,i,n){const s=e[i]||{};Object.keys(s).forEach(o=>{if(o.includes(n)){const n=s[o];P(t,e,i,n.originalHandler,n.delegationSelector)}})}(t,l,i,e.slice(1))});const d=l[r]||{};Object.keys(d).forEach(i=>{const n=i.replace(A,"");if(!a||e.includes(n)){const e=d[i];P(t,l,r,e.originalHandler,e.delegationSelector)}})},trigger(t,e,i){if("string"!=typeof e||!t)return null;const n=g(),s=M(e),o=e!==s,r=x.has(s);let a,l=!0,c=!0,d=!1,h=null;return o&&n&&(a=n.Event(e,i),n(t).trigger(a),l=!a.isPropagationStopped(),c=!a.isImmediatePropagationStopped(),d=a.isDefaultPrevented()),r?(h=document.createEvent("HTMLEvents"),h.initEvent(s,l,!0)):h=new CustomEvent(e,{bubbles:l,cancelable:!0}),void 0!==i&&Object.keys(i).forEach(t=>{Object.defineProperty(h,t,{get:()=>i[t]})}),d&&h.preventDefault(),c&&t.dispatchEvent(h),h.defaultPrevented&&void 0!==a&&a.preventDefault(),h}};class R{constructor(t){(t=l(t))&&(this._element=t,w.set(this._element,this.constructor.DATA_KEY,this))}dispose(){w.remove(this._element,this.constructor.DATA_KEY),H.off(this._element,this.constructor.EVENT_KEY),Object.getOwnPropertyNames(this).forEach(t=>{this[t]=null})}_queueCallback(t,e,i=!0){if(!i)return void v(t);const n=o(e);H.one(e,"transitionend",()=>v(t)),c(e,n)}static getInstance(t){return w.get(t,this.DATA_KEY)}static get VERSION(){return"5.0.1"}static get NAME(){throw new Error('You have to implement the static method "NAME", for each component!')}static get DATA_KEY(){return"bs."+this.NAME}static get EVENT_KEY(){return"."+this.DATA_KEY}}class B extends R{static get NAME(){return"alert"}close(t){const e=t?this._getRootElement(t):this._element,i=this._triggerCloseEvent(e);null===i||i.defaultPrevented||this._removeElement(e)}_getRootElement(t){return s(t)||t.closest(".alert")}_triggerCloseEvent(t){return H.trigger(t,"close.bs.alert")}_removeElement(t){t.classList.remove("show");const e=t.classList.contains("fade");this._queueCallback(()=>this._destroyElement(t),t,e)}_destroyElement(t){t.parentNode&&t.parentNode.removeChild(t),H.trigger(t,"closed.bs.alert")}static jQueryInterface(t){return this.each((function(){let e=w.get(this,"bs.alert");e||(e=new B(this)),"close"===t&&e[t](this)}))}static handleDismiss(t){return function(e){e&&e.preventDefault(),t.close(this)}}}H.on(document,"click.bs.alert.data-api",'[data-bs-dismiss="alert"]',B.handleDismiss(new B)),b(B);class W extends R{static get NAME(){return"button"}toggle(){this._element.setAttribute("aria-pressed",this._element.classList.toggle("active"))}static jQueryInterface(t){return this.each((function(){let e=w.get(this,"bs.button");e||(e=new W(this)),"toggle"===t&&e[t]()}))}}function q(t){return"true"===t||"false"!==t&&(t===Number(t).toString()?Number(t):""===t||"null"===t?null:t)}function z(t){return t.replace(/[A-Z]/g,t=>"-"+t.toLowerCase())}H.on(document,"click.bs.button.data-api",'[data-bs-toggle="button"]',t=>{t.preventDefault();const e=t.target.closest('[data-bs-toggle="button"]');let i=w.get(e,"bs.button");i||(i=new W(e)),i.toggle()}),b(W);const U={setDataAttribute(t,e,i){t.setAttribute("data-bs-"+z(e),i)},removeDataAttribute(t,e){t.removeAttribute("data-bs-"+z(e))},getDataAttributes(t){if(!t)return{};const e={};return 
Object.keys(t.dataset).filter(t=>t.startsWith("bs")).forEach(i=>{let n=i.replace(/^bs/,"");n=n.charAt(0).toLowerCase()+n.slice(1,n.length),e[n]=q(t.dataset[i])}),e},getDataAttribute:(t,e)=>q(t.getAttribute("data-bs-"+z(e))),offset(t){const e=t.getBoundingClientRect();return{top:e.top+document.body.scrollTop,left:e.left+document.body.scrollLeft}},position:t=>({top:t.offsetTop,left:t.offsetLeft})},$={interval:5e3,keyboard:!0,slide:!1,pause:"hover",wrap:!0,touch:!0},F={interval:"(number|boolean)",keyboard:"boolean",slide:"(boolean|string)",pause:"(string|boolean)",wrap:"boolean",touch:"boolean"},V="next",K="prev",X="left",Y="right";class Q extends R{constructor(e,i){super(e),this._items=null,this._interval=null,this._activeElement=null,this._isPaused=!1,this._isSliding=!1,this.touchTimeout=null,this.touchStartX=0,this.touchDeltaX=0,this._config=this._getConfig(i),this._indicatorsElement=t.findOne(".carousel-indicators",this._element),this._touchSupported="ontouchstart"in document.documentElement||navigator.maxTouchPoints>0,this._pointerEvent=Boolean(window.PointerEvent),this._addEventListeners()}static get Default(){return $}static get NAME(){return"carousel"}next(){this._isSliding||this._slide(V)}nextWhenVisible(){!document.hidden&&h(this._element)&&this.next()}prev(){this._isSliding||this._slide(K)}pause(e){e||(this._isPaused=!0),t.findOne(".carousel-item-next, .carousel-item-prev",this._element)&&(r(this._element),this.cycle(!0)),clearInterval(this._interval),this._interval=null}cycle(t){t||(this._isPaused=!1),this._interval&&(clearInterval(this._interval),this._interval=null),this._config&&this._config.interval&&!this._isPaused&&(this._updateInterval(),this._interval=setInterval((document.visibilityState?this.nextWhenVisible:this.next).bind(this),this._config.interval))}to(e){this._activeElement=t.findOne(".active.carousel-item",this._element);const i=this._getItemIndex(this._activeElement);if(e>this._items.length-1||e<0)return;if(this._isSliding)return void H.one(this._element,"slid.bs.carousel",()=>this.to(e));if(i===e)return this.pause(),void this.cycle();const n=e>i?V:K;this._slide(n,this._items[e])}_getConfig(t){return t={...$,...t},d("carousel",t,F),t}_handleSwipe(){const t=Math.abs(this.touchDeltaX);if(t<=40)return;const e=t/this.touchDeltaX;this.touchDeltaX=0,e&&this._slide(e>0?Y:X)}_addEventListeners(){this._config.keyboard&&H.on(this._element,"keydown.bs.carousel",t=>this._keydown(t)),"hover"===this._config.pause&&(H.on(this._element,"mouseenter.bs.carousel",t=>this.pause(t)),H.on(this._element,"mouseleave.bs.carousel",t=>this.cycle(t))),this._config.touch&&this._touchSupported&&this._addTouchEventListeners()}_addTouchEventListeners(){const e=t=>{!this._pointerEvent||"pen"!==t.pointerType&&"touch"!==t.pointerType?this._pointerEvent||(this.touchStartX=t.touches[0].clientX):this.touchStartX=t.clientX},i=t=>{this.touchDeltaX=t.touches&&t.touches.length>1?0:t.touches[0].clientX-this.touchStartX},n=t=>{!this._pointerEvent||"pen"!==t.pointerType&&"touch"!==t.pointerType||(this.touchDeltaX=t.clientX-this.touchStartX),this._handleSwipe(),"hover"===this._config.pause&&(this.pause(),this.touchTimeout&&clearTimeout(this.touchTimeout),this.touchTimeout=setTimeout(t=>this.cycle(t),500+this._config.interval))};t.find(".carousel-item 
img",this._element).forEach(t=>{H.on(t,"dragstart.bs.carousel",t=>t.preventDefault())}),this._pointerEvent?(H.on(this._element,"pointerdown.bs.carousel",t=>e(t)),H.on(this._element,"pointerup.bs.carousel",t=>n(t)),this._element.classList.add("pointer-event")):(H.on(this._element,"touchstart.bs.carousel",t=>e(t)),H.on(this._element,"touchmove.bs.carousel",t=>i(t)),H.on(this._element,"touchend.bs.carousel",t=>n(t)))}_keydown(t){/input|textarea/i.test(t.target.tagName)||("ArrowLeft"===t.key?(t.preventDefault(),this._slide(Y)):"ArrowRight"===t.key&&(t.preventDefault(),this._slide(X)))}_getItemIndex(e){return this._items=e&&e.parentNode?t.find(".carousel-item",e.parentNode):[],this._items.indexOf(e)}_getItemByOrder(t,e){const i=t===V,n=t===K,s=this._getItemIndex(e),o=this._items.length-1;if((n&&0===s||i&&s===o)&&!this._config.wrap)return e;const r=(s+(n?-1:1))%this._items.length;return-1===r?this._items[this._items.length-1]:this._items[r]}_triggerSlideEvent(e,i){const n=this._getItemIndex(e),s=this._getItemIndex(t.findOne(".active.carousel-item",this._element));return H.trigger(this._element,"slide.bs.carousel",{relatedTarget:e,direction:i,from:s,to:n})}_setActiveIndicatorElement(e){if(this._indicatorsElement){const i=t.findOne(".active",this._indicatorsElement);i.classList.remove("active"),i.removeAttribute("aria-current");const n=t.find("[data-bs-target]",this._indicatorsElement);for(let t=0;t{H.trigger(this._element,"slid.bs.carousel",{relatedTarget:r,direction:u,from:o,to:a})};if(this._element.classList.contains("slide")){r.classList.add(h),m(r),s.classList.add(d),r.classList.add(d);const t=()=>{r.classList.remove(d,h),r.classList.add("active"),s.classList.remove("active",h,d),this._isSliding=!1,setTimeout(f,0)};this._queueCallback(t,s,!0)}else s.classList.remove("active"),r.classList.add("active"),this._isSliding=!1,f();l&&this.cycle()}_directionToOrder(t){return[Y,X].includes(t)?_()?t===X?K:V:t===X?V:K:t}_orderToDirection(t){return[V,K].includes(t)?_()?t===K?X:Y:t===K?Y:X:t}static carouselInterface(t,e){let i=w.get(t,"bs.carousel"),n={...$,...U.getDataAttributes(t)};"object"==typeof e&&(n={...n,...e});const s="string"==typeof e?e:n.slide;if(i||(i=new Q(t,n)),"number"==typeof e)i.to(e);else if("string"==typeof s){if(void 0===i[s])throw new TypeError(`No method named "${s}"`);i[s]()}else n.interval&&n.ride&&(i.pause(),i.cycle())}static jQueryInterface(t){return this.each((function(){Q.carouselInterface(this,t)}))}static dataApiClickHandler(t){const e=s(this);if(!e||!e.classList.contains("carousel"))return;const i={...U.getDataAttributes(e),...U.getDataAttributes(this)},n=this.getAttribute("data-bs-slide-to");n&&(i.interval=!1),Q.carouselInterface(e,i),n&&w.get(e,"bs.carousel").to(n),t.preventDefault()}}H.on(document,"click.bs.carousel.data-api","[data-bs-slide], [data-bs-slide-to]",Q.dataApiClickHandler),H.on(window,"load.bs.carousel.data-api",()=>{const e=t.find('[data-bs-ride="carousel"]');for(let t=0,i=e.length;tt===this._element);null!==o&&r.length&&(this._selector=o,this._triggerArray.push(i))}this._parent=this._config.parent?this._getParent():null,this._config.parent||this._addAriaAndCollapsedClass(this._element,this._triggerArray),this._config.toggle&&this.toggle()}static get Default(){return G}static get NAME(){return"collapse"}toggle(){this._element.classList.contains("show")?this.hide():this.show()}show(){if(this._isTransitioning||this._element.classList.contains("show"))return;let e,i;this._parent&&(e=t.find(".show, .collapsing",this._parent).filter(t=>"string"==typeof 
this._config.parent?t.getAttribute("data-bs-parent")===this._config.parent:t.classList.contains("collapse")),0===e.length&&(e=null));const n=t.findOne(this._selector);if(e){const t=e.find(t=>n!==t);if(i=t?w.get(t,"bs.collapse"):null,i&&i._isTransitioning)return}if(H.trigger(this._element,"show.bs.collapse").defaultPrevented)return;e&&e.forEach(t=>{n!==t&&J.collapseInterface(t,"hide"),i||w.set(t,"bs.collapse",null)});const s=this._getDimension();this._element.classList.remove("collapse"),this._element.classList.add("collapsing"),this._element.style[s]=0,this._triggerArray.length&&this._triggerArray.forEach(t=>{t.classList.remove("collapsed"),t.setAttribute("aria-expanded",!0)}),this.setTransitioning(!0);const o="scroll"+(s[0].toUpperCase()+s.slice(1));this._queueCallback(()=>{this._element.classList.remove("collapsing"),this._element.classList.add("collapse","show"),this._element.style[s]="",this.setTransitioning(!1),H.trigger(this._element,"shown.bs.collapse")},this._element,!0),this._element.style[s]=this._element[o]+"px"}hide(){if(this._isTransitioning||!this._element.classList.contains("show"))return;if(H.trigger(this._element,"hide.bs.collapse").defaultPrevented)return;const t=this._getDimension();this._element.style[t]=this._element.getBoundingClientRect()[t]+"px",m(this._element),this._element.classList.add("collapsing"),this._element.classList.remove("collapse","show");const e=this._triggerArray.length;if(e>0)for(let t=0;t{this.setTransitioning(!1),this._element.classList.remove("collapsing"),this._element.classList.add("collapse"),H.trigger(this._element,"hidden.bs.collapse")},this._element,!0)}setTransitioning(t){this._isTransitioning=t}_getConfig(t){return(t={...G,...t}).toggle=Boolean(t.toggle),d("collapse",t,Z),t}_getDimension(){return this._element.classList.contains("width")?"width":"height"}_getParent(){let{parent:e}=this._config;e=l(e);const i=`[data-bs-toggle="collapse"][data-bs-parent="${e}"]`;return t.find(i,e).forEach(t=>{const e=s(t);this._addAriaAndCollapsedClass(e,[t])}),e}_addAriaAndCollapsedClass(t,e){if(!t||!e.length)return;const i=t.classList.contains("show");e.forEach(t=>{i?t.classList.remove("collapsed"):t.classList.add("collapsed"),t.setAttribute("aria-expanded",i)})}static collapseInterface(t,e){let i=w.get(t,"bs.collapse");const n={...G,...U.getDataAttributes(t),..."object"==typeof e&&e?e:{}};if(!i&&n.toggle&&"string"==typeof e&&/show|hide/.test(e)&&(n.toggle=!1),i||(i=new J(t,n)),"string"==typeof e){if(void 0===i[e])throw new TypeError(`No method named "${e}"`);i[e]()}}static jQueryInterface(t){return this.each((function(){J.collapseInterface(this,t)}))}}H.on(document,"click.bs.collapse.data-api",'[data-bs-toggle="collapse"]',(function(e){("A"===e.target.tagName||e.delegateTarget&&"A"===e.delegateTarget.tagName)&&e.preventDefault();const i=U.getDataAttributes(this),s=n(this);t.find(s).forEach(t=>{const e=w.get(t,"bs.collapse");let n;e?(null===e._parent&&"string"==typeof i.parent&&(e._config.parent=i.parent,e._parent=e._getParent()),n="toggle"):n=i,J.collapseInterface(t,n)})})),b(J);var tt="top",et="bottom",it="right",nt="left",st=[tt,et,it,nt],ot=st.reduce((function(t,e){return t.concat([e+"-start",e+"-end"])}),[]),rt=[].concat(st,["auto"]).reduce((function(t,e){return t.concat([e,e+"-start",e+"-end"])}),[]),at=["beforeRead","read","afterRead","beforeMain","main","afterMain","beforeWrite","write","afterWrite"];function lt(t){return t?(t.nodeName||"").toLowerCase():null}function ct(t){if(null==t)return window;if("[object Window]"!==t.toString()){var 
e=t.ownerDocument;return e&&e.defaultView||window}return t}function dt(t){return t instanceof ct(t).Element||t instanceof Element}function ht(t){return t instanceof ct(t).HTMLElement||t instanceof HTMLElement}function ut(t){return"undefined"!=typeof ShadowRoot&&(t instanceof ct(t).ShadowRoot||t instanceof ShadowRoot)}var ft={name:"applyStyles",enabled:!0,phase:"write",fn:function(t){var e=t.state;Object.keys(e.elements).forEach((function(t){var i=e.styles[t]||{},n=e.attributes[t]||{},s=e.elements[t];ht(s)&<(s)&&(Object.assign(s.style,i),Object.keys(n).forEach((function(t){var e=n[t];!1===e?s.removeAttribute(t):s.setAttribute(t,!0===e?"":e)})))}))},effect:function(t){var e=t.state,i={popper:{position:e.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return Object.assign(e.elements.popper.style,i.popper),e.styles=i,e.elements.arrow&&Object.assign(e.elements.arrow.style,i.arrow),function(){Object.keys(e.elements).forEach((function(t){var n=e.elements[t],s=e.attributes[t]||{},o=Object.keys(e.styles.hasOwnProperty(t)?e.styles[t]:i[t]).reduce((function(t,e){return t[e]="",t}),{});ht(n)&<(n)&&(Object.assign(n.style,o),Object.keys(s).forEach((function(t){n.removeAttribute(t)})))}))}},requires:["computeStyles"]};function pt(t){return t.split("-")[0]}function mt(t){var e=t.getBoundingClientRect();return{width:e.width,height:e.height,top:e.top,right:e.right,bottom:e.bottom,left:e.left,x:e.left,y:e.top}}function gt(t){var e=mt(t),i=t.offsetWidth,n=t.offsetHeight;return Math.abs(e.width-i)<=1&&(i=e.width),Math.abs(e.height-n)<=1&&(n=e.height),{x:t.offsetLeft,y:t.offsetTop,width:i,height:n}}function _t(t,e){var i=e.getRootNode&&e.getRootNode();if(t.contains(e))return!0;if(i&&ut(i)){var n=e;do{if(n&&t.isSameNode(n))return!0;n=n.parentNode||n.host}while(n)}return!1}function bt(t){return ct(t).getComputedStyle(t)}function vt(t){return["table","td","th"].indexOf(lt(t))>=0}function yt(t){return((dt(t)?t.ownerDocument:t.document)||window.document).documentElement}function wt(t){return"html"===lt(t)?t:t.assignedSlot||t.parentNode||(ut(t)?t.host:null)||yt(t)}function Et(t){return ht(t)&&"fixed"!==bt(t).position?t.offsetParent:null}function Tt(t){for(var e=ct(t),i=Et(t);i&&vt(i)&&"static"===bt(i).position;)i=Et(i);return i&&("html"===lt(i)||"body"===lt(i)&&"static"===bt(i).position)?e:i||function(t){var e=-1!==navigator.userAgent.toLowerCase().indexOf("firefox");if(-1!==navigator.userAgent.indexOf("Trident")&&ht(t)&&"fixed"===bt(t).position)return null;for(var i=wt(t);ht(i)&&["html","body"].indexOf(lt(i))<0;){var n=bt(i);if("none"!==n.transform||"none"!==n.perspective||"paint"===n.contain||-1!==["transform","perspective"].indexOf(n.willChange)||e&&"filter"===n.willChange||e&&n.filter&&"none"!==n.filter)return i;i=i.parentNode}return null}(t)||e}function At(t){return["top","bottom"].indexOf(t)>=0?"x":"y"}var Lt=Math.max,Ot=Math.min,kt=Math.round;function Ct(t,e,i){return Lt(t,Ot(e,i))}function xt(t){return Object.assign({},{top:0,right:0,bottom:0,left:0},t)}function Dt(t,e){return e.reduce((function(e,i){return e[i]=t,e}),{})}var Nt={name:"arrow",enabled:!0,phase:"main",fn:function(t){var e,i=t.state,n=t.name,s=t.options,o=i.elements.arrow,r=i.modifiersData.popperOffsets,a=pt(i.placement),l=At(a),c=[nt,it].indexOf(a)>=0?"height":"width";if(o&&r){var d=function(t,e){return xt("number"!=typeof(t="function"==typeof 
t?t(Object.assign({},e.rects,{placement:e.placement})):t)?t:Dt(t,st))}(s.padding,i),h=gt(o),u="y"===l?tt:nt,f="y"===l?et:it,p=i.rects.reference[c]+i.rects.reference[l]-r[l]-i.rects.popper[c],m=r[l]-i.rects.reference[l],g=Tt(o),_=g?"y"===l?g.clientHeight||0:g.clientWidth||0:0,b=p/2-m/2,v=d[u],y=_-h[c]-d[f],w=_/2-h[c]/2+b,E=Ct(v,w,y),T=l;i.modifiersData[n]=((e={})[T]=E,e.centerOffset=E-w,e)}},effect:function(t){var e=t.state,i=t.options.element,n=void 0===i?"[data-popper-arrow]":i;null!=n&&("string"!=typeof n||(n=e.elements.popper.querySelector(n)))&&_t(e.elements.popper,n)&&(e.elements.arrow=n)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]},St={top:"auto",right:"auto",bottom:"auto",left:"auto"};function It(t){var e,i=t.popper,n=t.popperRect,s=t.placement,o=t.offsets,r=t.position,a=t.gpuAcceleration,l=t.adaptive,c=t.roundOffsets,d=!0===c?function(t){var e=t.x,i=t.y,n=window.devicePixelRatio||1;return{x:kt(kt(e*n)/n)||0,y:kt(kt(i*n)/n)||0}}(o):"function"==typeof c?c(o):o,h=d.x,u=void 0===h?0:h,f=d.y,p=void 0===f?0:f,m=o.hasOwnProperty("x"),g=o.hasOwnProperty("y"),_=nt,b=tt,v=window;if(l){var y=Tt(i),w="clientHeight",E="clientWidth";y===ct(i)&&"static"!==bt(y=yt(i)).position&&(w="scrollHeight",E="scrollWidth"),y=y,s===tt&&(b=et,p-=y[w]-n.height,p*=a?1:-1),s===nt&&(_=it,u-=y[E]-n.width,u*=a?1:-1)}var T,A=Object.assign({position:r},l&&St);return a?Object.assign({},A,((T={})[b]=g?"0":"",T[_]=m?"0":"",T.transform=(v.devicePixelRatio||1)<2?"translate("+u+"px, "+p+"px)":"translate3d("+u+"px, "+p+"px, 0)",T)):Object.assign({},A,((e={})[b]=g?p+"px":"",e[_]=m?u+"px":"",e.transform="",e))}var jt={name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(t){var e=t.state,i=t.options,n=i.gpuAcceleration,s=void 0===n||n,o=i.adaptive,r=void 0===o||o,a=i.roundOffsets,l=void 0===a||a,c={placement:pt(e.placement),popper:e.elements.popper,popperRect:e.rects.popper,gpuAcceleration:s};null!=e.modifiersData.popperOffsets&&(e.styles.popper=Object.assign({},e.styles.popper,It(Object.assign({},c,{offsets:e.modifiersData.popperOffsets,position:e.options.strategy,adaptive:r,roundOffsets:l})))),null!=e.modifiersData.arrow&&(e.styles.arrow=Object.assign({},e.styles.arrow,It(Object.assign({},c,{offsets:e.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-placement":e.placement})},data:{}},Pt={passive:!0},Mt={name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(t){var e=t.state,i=t.instance,n=t.options,s=n.scroll,o=void 0===s||s,r=n.resize,a=void 0===r||r,l=ct(e.elements.popper),c=[].concat(e.scrollParents.reference,e.scrollParents.popper);return o&&c.forEach((function(t){t.addEventListener("scroll",i.update,Pt)})),a&&l.addEventListener("resize",i.update,Pt),function(){o&&c.forEach((function(t){t.removeEventListener("scroll",i.update,Pt)})),a&&l.removeEventListener("resize",i.update,Pt)}},data:{}},Ht={left:"right",right:"left",bottom:"top",top:"bottom"};function Rt(t){return t.replace(/left|right|bottom|top/g,(function(t){return Ht[t]}))}var Bt={start:"end",end:"start"};function Wt(t){return t.replace(/start|end/g,(function(t){return Bt[t]}))}function qt(t){var e=ct(t);return{scrollLeft:e.pageXOffset,scrollTop:e.pageYOffset}}function zt(t){return mt(yt(t)).left+qt(t).scrollLeft}function Ut(t){var e=bt(t),i=e.overflow,n=e.overflowX,s=e.overflowY;return/auto|scroll|overlay|hidden/.test(i+s+n)}function $t(t,e){var i;void 0===e&&(e=[]);var n=function 
t(e){return["html","body","#document"].indexOf(lt(e))>=0?e.ownerDocument.body:ht(e)&&Ut(e)?e:t(wt(e))}(t),s=n===(null==(i=t.ownerDocument)?void 0:i.body),o=ct(n),r=s?[o].concat(o.visualViewport||[],Ut(n)?n:[]):n,a=e.concat(r);return s?a:a.concat($t(wt(r)))}function Ft(t){return Object.assign({},t,{left:t.x,top:t.y,right:t.x+t.width,bottom:t.y+t.height})}function Vt(t,e){return"viewport"===e?Ft(function(t){var e=ct(t),i=yt(t),n=e.visualViewport,s=i.clientWidth,o=i.clientHeight,r=0,a=0;return n&&(s=n.width,o=n.height,/^((?!chrome|android).)*safari/i.test(navigator.userAgent)||(r=n.offsetLeft,a=n.offsetTop)),{width:s,height:o,x:r+zt(t),y:a}}(t)):ht(e)?function(t){var e=mt(t);return e.top=e.top+t.clientTop,e.left=e.left+t.clientLeft,e.bottom=e.top+t.clientHeight,e.right=e.left+t.clientWidth,e.width=t.clientWidth,e.height=t.clientHeight,e.x=e.left,e.y=e.top,e}(e):Ft(function(t){var e,i=yt(t),n=qt(t),s=null==(e=t.ownerDocument)?void 0:e.body,o=Lt(i.scrollWidth,i.clientWidth,s?s.scrollWidth:0,s?s.clientWidth:0),r=Lt(i.scrollHeight,i.clientHeight,s?s.scrollHeight:0,s?s.clientHeight:0),a=-n.scrollLeft+zt(t),l=-n.scrollTop;return"rtl"===bt(s||i).direction&&(a+=Lt(i.clientWidth,s?s.clientWidth:0)-o),{width:o,height:r,x:a,y:l}}(yt(t)))}function Kt(t){return t.split("-")[1]}function Xt(t){var e,i=t.reference,n=t.element,s=t.placement,o=s?pt(s):null,r=s?Kt(s):null,a=i.x+i.width/2-n.width/2,l=i.y+i.height/2-n.height/2;switch(o){case tt:e={x:a,y:i.y-n.height};break;case et:e={x:a,y:i.y+i.height};break;case it:e={x:i.x+i.width,y:l};break;case nt:e={x:i.x-n.width,y:l};break;default:e={x:i.x,y:i.y}}var c=o?At(o):null;if(null!=c){var d="y"===c?"height":"width";switch(r){case"start":e[c]=e[c]-(i[d]/2-n[d]/2);break;case"end":e[c]=e[c]+(i[d]/2-n[d]/2)}}return e}function Yt(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=void 0===n?t.placement:n,o=i.boundary,r=void 0===o?"clippingParents":o,a=i.rootBoundary,l=void 0===a?"viewport":a,c=i.elementContext,d=void 0===c?"popper":c,h=i.altBoundary,u=void 0!==h&&h,f=i.padding,p=void 0===f?0:f,m=xt("number"!=typeof p?p:Dt(p,st)),g="popper"===d?"reference":"popper",_=t.elements.reference,b=t.rects.popper,v=t.elements[u?g:d],y=function(t,e,i){var n="clippingParents"===e?function(t){var e=$t(wt(t)),i=["absolute","fixed"].indexOf(bt(t).position)>=0&&ht(t)?Tt(t):t;return dt(i)?e.filter((function(t){return dt(t)&&_t(t,i)&&"body"!==lt(t)})):[]}(t):[].concat(e),s=[].concat(n,[i]),o=s[0],r=s.reduce((function(e,i){var n=Vt(t,i);return e.top=Lt(n.top,e.top),e.right=Ot(n.right,e.right),e.bottom=Ot(n.bottom,e.bottom),e.left=Lt(n.left,e.left),e}),Vt(t,o));return r.width=r.right-r.left,r.height=r.bottom-r.top,r.x=r.left,r.y=r.top,r}(dt(v)?v:v.contextElement||yt(t.elements.popper),r,l),w=mt(_),E=Xt({reference:w,element:b,strategy:"absolute",placement:s}),T=Ft(Object.assign({},b,E)),A="popper"===d?T:w,L={top:y.top-A.top+m.top,bottom:A.bottom-y.bottom+m.bottom,left:y.left-A.left+m.left,right:A.right-y.right+m.right},O=t.modifiersData.offset;if("popper"===d&&O){var k=O[s];Object.keys(L).forEach((function(t){var e=[it,et].indexOf(t)>=0?1:-1,i=[tt,et].indexOf(t)>=0?"y":"x";L[t]+=k[i]*e}))}return L}function Qt(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=i.boundary,o=i.rootBoundary,r=i.padding,a=i.flipVariations,l=i.allowedAutoPlacements,c=void 0===l?rt:l,d=Kt(n),h=d?a?ot:ot.filter((function(t){return Kt(t)===d})):st,u=h.filter((function(t){return c.indexOf(t)>=0}));0===u.length&&(u=h);var f=u.reduce((function(e,i){return 
e[i]=Yt(t,{placement:i,boundary:s,rootBoundary:o,padding:r})[pt(i)],e}),{});return Object.keys(f).sort((function(t,e){return f[t]-f[e]}))}var Gt={name:"flip",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,n=t.name;if(!e.modifiersData[n]._skip){for(var s=i.mainAxis,o=void 0===s||s,r=i.altAxis,a=void 0===r||r,l=i.fallbackPlacements,c=i.padding,d=i.boundary,h=i.rootBoundary,u=i.altBoundary,f=i.flipVariations,p=void 0===f||f,m=i.allowedAutoPlacements,g=e.options.placement,_=pt(g),b=l||(_!==g&&p?function(t){if("auto"===pt(t))return[];var e=Rt(t);return[Wt(t),e,Wt(e)]}(g):[Rt(g)]),v=[g].concat(b).reduce((function(t,i){return t.concat("auto"===pt(i)?Qt(e,{placement:i,boundary:d,rootBoundary:h,padding:c,flipVariations:p,allowedAutoPlacements:m}):i)}),[]),y=e.rects.reference,w=e.rects.popper,E=new Map,T=!0,A=v[0],L=0;L=0,D=x?"width":"height",N=Yt(e,{placement:O,boundary:d,rootBoundary:h,altBoundary:u,padding:c}),S=x?C?it:nt:C?et:tt;y[D]>w[D]&&(S=Rt(S));var I=Rt(S),j=[];if(o&&j.push(N[k]<=0),a&&j.push(N[S]<=0,N[I]<=0),j.every((function(t){return t}))){A=O,T=!1;break}E.set(O,j)}if(T)for(var P=function(t){var e=v.find((function(e){var i=E.get(e);if(i)return i.slice(0,t).every((function(t){return t}))}));if(e)return A=e,"break"},M=p?3:1;M>0&&"break"!==P(M);M--);e.placement!==A&&(e.modifiersData[n]._skip=!0,e.placement=A,e.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}};function Zt(t,e,i){return void 0===i&&(i={x:0,y:0}),{top:t.top-e.height-i.y,right:t.right-e.width+i.x,bottom:t.bottom-e.height+i.y,left:t.left-e.width-i.x}}function Jt(t){return[tt,it,et,nt].some((function(e){return t[e]>=0}))}var te={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(t){var e=t.state,i=t.name,n=e.rects.reference,s=e.rects.popper,o=e.modifiersData.preventOverflow,r=Yt(e,{elementContext:"reference"}),a=Yt(e,{altBoundary:!0}),l=Zt(r,n),c=Zt(a,s,o),d=Jt(l),h=Jt(c);e.modifiersData[i]={referenceClippingOffsets:l,popperEscapeOffsets:c,isReferenceHidden:d,hasPopperEscaped:h},e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-reference-hidden":d,"data-popper-escaped":h})}},ee={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(t){var e=t.state,i=t.options,n=t.name,s=i.offset,o=void 0===s?[0,0]:s,r=rt.reduce((function(t,i){return t[i]=function(t,e,i){var n=pt(t),s=[nt,tt].indexOf(n)>=0?-1:1,o="function"==typeof i?i(Object.assign({},e,{placement:t})):i,r=o[0],a=o[1];return r=r||0,a=(a||0)*s,[nt,it].indexOf(n)>=0?{x:a,y:r}:{x:r,y:a}}(i,e.rects,o),t}),{}),a=r[e.placement],l=a.x,c=a.y;null!=e.modifiersData.popperOffsets&&(e.modifiersData.popperOffsets.x+=l,e.modifiersData.popperOffsets.y+=c),e.modifiersData[n]=r}},ie={name:"popperOffsets",enabled:!0,phase:"read",fn:function(t){var e=t.state,i=t.name;e.modifiersData[i]=Xt({reference:e.rects.reference,element:e.rects.popper,strategy:"absolute",placement:e.placement})},data:{}},ne={name:"preventOverflow",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,n=t.name,s=i.mainAxis,o=void 0===s||s,r=i.altAxis,a=void 0!==r&&r,l=i.boundary,c=i.rootBoundary,d=i.altBoundary,h=i.padding,u=i.tether,f=void 0===u||u,p=i.tetherOffset,m=void 0===p?0:p,g=Yt(e,{boundary:l,rootBoundary:c,padding:h,altBoundary:d}),_=pt(e.placement),b=Kt(e.placement),v=!b,y=At(_),w="x"===y?"y":"x",E=e.modifiersData.popperOffsets,T=e.rects.reference,A=e.rects.popper,L="function"==typeof m?m(Object.assign({},e.rects,{placement:e.placement})):m,O={x:0,y:0};if(E){if(o||a){var 
k="y"===y?tt:nt,C="y"===y?et:it,x="y"===y?"height":"width",D=E[y],N=E[y]+g[k],S=E[y]-g[C],I=f?-A[x]/2:0,j="start"===b?T[x]:A[x],P="start"===b?-A[x]:-T[x],M=e.elements.arrow,H=f&&M?gt(M):{width:0,height:0},R=e.modifiersData["arrow#persistent"]?e.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},B=R[k],W=R[C],q=Ct(0,T[x],H[x]),z=v?T[x]/2-I-q-B-L:j-q-B-L,U=v?-T[x]/2+I+q+W+L:P+q+W+L,$=e.elements.arrow&&Tt(e.elements.arrow),F=$?"y"===y?$.clientTop||0:$.clientLeft||0:0,V=e.modifiersData.offset?e.modifiersData.offset[e.placement][y]:0,K=E[y]+z-V-F,X=E[y]+U-V;if(o){var Y=Ct(f?Ot(N,K):N,D,f?Lt(S,X):S);E[y]=Y,O[y]=Y-D}if(a){var Q="x"===y?tt:nt,G="x"===y?et:it,Z=E[w],J=Z+g[Q],st=Z-g[G],ot=Ct(f?Ot(J,K):J,Z,f?Lt(st,X):st);E[w]=ot,O[w]=ot-Z}}e.modifiersData[n]=O}},requiresIfExists:["offset"]};function se(t,e,i){void 0===i&&(i=!1);var n,s,o=yt(e),r=mt(t),a=ht(e),l={scrollLeft:0,scrollTop:0},c={x:0,y:0};return(a||!a&&!i)&&(("body"!==lt(e)||Ut(o))&&(l=(n=e)!==ct(n)&&ht(n)?{scrollLeft:(s=n).scrollLeft,scrollTop:s.scrollTop}:qt(n)),ht(e)?((c=mt(e)).x+=e.clientLeft,c.y+=e.clientTop):o&&(c.x=zt(o))),{x:r.left+l.scrollLeft-c.x,y:r.top+l.scrollTop-c.y,width:r.width,height:r.height}}var oe={placement:"bottom",modifiers:[],strategy:"absolute"};function re(){for(var t=arguments.length,e=new Array(t),i=0;i"applyStyles"===t.name&&!1===t.enabled);this._popper=de(e,this._menu,i),n&&U.setDataAttribute(this._menu,"popper","static")}"ontouchstart"in document.documentElement&&!t.closest(".navbar-nav")&&[].concat(...document.body.children).forEach(t=>H.on(t,"mouseover",p)),this._element.focus(),this._element.setAttribute("aria-expanded",!0),this._menu.classList.toggle("show"),this._element.classList.toggle("show"),H.trigger(this._element,"shown.bs.dropdown",e)}}hide(){if(u(this._element)||!this._menu.classList.contains("show"))return;const t={relatedTarget:this._element};this._completeHide(t)}dispose(){this._popper&&this._popper.destroy(),super.dispose()}update(){this._inNavbar=this._detectNavbar(),this._popper&&this._popper.update()}_addEventListeners(){H.on(this._element,"click.bs.dropdown",t=>{t.preventDefault(),this.toggle()})}_completeHide(t){H.trigger(this._element,"hide.bs.dropdown",t).defaultPrevented||("ontouchstart"in document.documentElement&&[].concat(...document.body.children).forEach(t=>H.off(t,"mouseover",p)),this._popper&&this._popper.destroy(),this._menu.classList.remove("show"),this._element.classList.remove("show"),this._element.setAttribute("aria-expanded","false"),U.removeDataAttribute(this._menu,"popper"),H.trigger(this._element,"hidden.bs.dropdown",t))}_getConfig(t){if(t={...this.constructor.Default,...U.getDataAttributes(this._element),...t},d("dropdown",t,this.constructor.DefaultType),"object"==typeof t.reference&&!a(t.reference)&&"function"!=typeof t.reference.getBoundingClientRect)throw new TypeError("dropdown".toUpperCase()+': Option "reference" provided type "object" without a required "getBoundingClientRect" method.');return t}_getMenuElement(){return t.next(this._element,".dropdown-menu")[0]}_getPlacement(){const t=this._element.parentNode;if(t.classList.contains("dropend"))return _e;if(t.classList.contains("dropstart"))return be;const e="end"===getComputedStyle(this._menu).getPropertyValue("--bs-position").trim();return t.classList.contains("dropup")?e?pe:fe:e?ge:me}_detectNavbar(){return null!==this._element.closest(".navbar")}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map(t=>Number.parseInt(t,10)):"function"==typeof 
t?e=>t(e,this._element):t}_getPopperConfig(){const t={placement:this._getPlacement(),modifiers:[{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"offset",options:{offset:this._getOffset()}}]};return"static"===this._config.display&&(t.modifiers=[{name:"applyStyles",enabled:!1}]),{...t,..."function"==typeof this._config.popperConfig?this._config.popperConfig(t):this._config.popperConfig}}_selectMenuItem(e){const i=t.find(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",this._menu).filter(h);if(!i.length)return;let n=i.indexOf(e.target);"ArrowUp"===e.key&&n>0&&n--,"ArrowDown"===e.key&&nthis.matches('[data-bs-toggle="dropdown"]')?this:t.prev(this,'[data-bs-toggle="dropdown"]')[0];if("Escape"===e.key)return n().focus(),void we.clearMenus();i||"ArrowUp"!==e.key&&"ArrowDown"!==e.key?i&&"Space"!==e.key?we.getInstance(n())._selectMenuItem(e):we.clearMenus():n().click()}}H.on(document,"keydown.bs.dropdown.data-api",'[data-bs-toggle="dropdown"]',we.dataApiKeydownHandler),H.on(document,"keydown.bs.dropdown.data-api",".dropdown-menu",we.dataApiKeydownHandler),H.on(document,"click.bs.dropdown.data-api",we.clearMenus),H.on(document,"keyup.bs.dropdown.data-api",we.clearMenus),H.on(document,"click.bs.dropdown.data-api",'[data-bs-toggle="dropdown"]',(function(t){t.preventDefault(),we.dropdownInterface(this)})),b(we);const Ee=()=>{const t=document.documentElement.clientWidth;return Math.abs(window.innerWidth-t)},Te=(t=Ee())=>{Ae(),Le("body","paddingRight",e=>e+t),Le(".fixed-top, .fixed-bottom, .is-fixed, .sticky-top","paddingRight",e=>e+t),Le(".sticky-top","marginRight",e=>e-t)},Ae=()=>{const t=document.body.style.overflow;t&&U.setDataAttribute(document.body,"overflow",t),document.body.style.overflow="hidden"},Le=(e,i,n)=>{const s=Ee();t.find(e).forEach(t=>{if(t!==document.body&&window.innerWidth>t.clientWidth+s)return;const e=t.style[i],o=window.getComputedStyle(t)[i];U.setDataAttribute(t,i,e),t.style[i]=n(Number.parseFloat(o))+"px"})},Oe=()=>{ke("body","overflow"),ke("body","paddingRight"),ke(".fixed-top, .fixed-bottom, .is-fixed, .sticky-top","paddingRight"),ke(".sticky-top","marginRight")},ke=(e,i)=>{t.find(e).forEach(t=>{const e=U.getDataAttribute(t,i);void 0===e?t.style.removeProperty(i):(U.removeDataAttribute(t,i),t.style[i]=e)})},Ce={isVisible:!0,isAnimated:!1,rootElement:document.body,clickCallback:null},xe={isVisible:"boolean",isAnimated:"boolean",rootElement:"element",clickCallback:"(function|null)"};class De{constructor(t){this._config=this._getConfig(t),this._isAppended=!1,this._element=null}show(t){this._config.isVisible?(this._append(),this._config.isAnimated&&m(this._getElement()),this._getElement().classList.add("show"),this._emulateAnimation(()=>{v(t)})):v(t)}hide(t){this._config.isVisible?(this._getElement().classList.remove("show"),this._emulateAnimation(()=>{this.dispose(),v(t)})):v(t)}_getElement(){if(!this._element){const t=document.createElement("div");t.className="modal-backdrop",this._config.isAnimated&&t.classList.add("fade"),this._element=t}return this._element}_getConfig(t){return(t={...Ce,..."object"==typeof 
t?t:{}}).rootElement=t.rootElement||document.body,d("backdrop",t,xe),t}_append(){this._isAppended||(this._config.rootElement.appendChild(this._getElement()),H.on(this._getElement(),"mousedown.bs.backdrop",()=>{v(this._config.clickCallback)}),this._isAppended=!0)}dispose(){this._isAppended&&(H.off(this._element,"mousedown.bs.backdrop"),this._getElement().parentNode.removeChild(this._element),this._isAppended=!1)}_emulateAnimation(t){if(!this._config.isAnimated)return void v(t);const e=o(this._getElement());H.one(this._getElement(),"transitionend",()=>v(t)),c(this._getElement(),e)}}const Ne={backdrop:!0,keyboard:!0,focus:!0},Se={backdrop:"(boolean|string)",keyboard:"boolean",focus:"boolean"};class Ie extends R{constructor(e,i){super(e),this._config=this._getConfig(i),this._dialog=t.findOne(".modal-dialog",this._element),this._backdrop=this._initializeBackDrop(),this._isShown=!1,this._ignoreBackdropClick=!1,this._isTransitioning=!1}static get Default(){return Ne}static get NAME(){return"modal"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){if(this._isShown||this._isTransitioning)return;this._isAnimated()&&(this._isTransitioning=!0);const e=H.trigger(this._element,"show.bs.modal",{relatedTarget:t});this._isShown||e.defaultPrevented||(this._isShown=!0,Te(),document.body.classList.add("modal-open"),this._adjustDialog(),this._setEscapeEvent(),this._setResizeEvent(),H.on(this._element,"click.dismiss.bs.modal",'[data-bs-dismiss="modal"]',t=>this.hide(t)),H.on(this._dialog,"mousedown.dismiss.bs.modal",()=>{H.one(this._element,"mouseup.dismiss.bs.modal",t=>{t.target===this._element&&(this._ignoreBackdropClick=!0)})}),this._showBackdrop(()=>this._showElement(t)))}hide(t){if(t&&t.preventDefault(),!this._isShown||this._isTransitioning)return;if(H.trigger(this._element,"hide.bs.modal").defaultPrevented)return;this._isShown=!1;const e=this._isAnimated();e&&(this._isTransitioning=!0),this._setEscapeEvent(),this._setResizeEvent(),H.off(document,"focusin.bs.modal"),this._element.classList.remove("show"),H.off(this._element,"click.dismiss.bs.modal"),H.off(this._dialog,"mousedown.dismiss.bs.modal"),this._queueCallback(()=>this._hideModal(),this._element,e)}dispose(){[window,this._dialog].forEach(t=>H.off(t,".bs.modal")),this._backdrop.dispose(),super.dispose(),H.off(document,"focusin.bs.modal")}handleUpdate(){this._adjustDialog()}_initializeBackDrop(){return new De({isVisible:Boolean(this._config.backdrop),isAnimated:this._isAnimated()})}_getConfig(t){return t={...Ne,...U.getDataAttributes(this._element),...t},d("modal",t,Se),t}_showElement(e){const 
i=this._isAnimated(),n=t.findOne(".modal-body",this._dialog);this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE||document.body.appendChild(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.scrollTop=0,n&&(n.scrollTop=0),i&&m(this._element),this._element.classList.add("show"),this._config.focus&&this._enforceFocus(),this._queueCallback(()=>{this._config.focus&&this._element.focus(),this._isTransitioning=!1,H.trigger(this._element,"shown.bs.modal",{relatedTarget:e})},this._dialog,i)}_enforceFocus(){H.off(document,"focusin.bs.modal"),H.on(document,"focusin.bs.modal",t=>{document===t.target||this._element===t.target||this._element.contains(t.target)||this._element.focus()})}_setEscapeEvent(){this._isShown?H.on(this._element,"keydown.dismiss.bs.modal",t=>{this._config.keyboard&&"Escape"===t.key?(t.preventDefault(),this.hide()):this._config.keyboard||"Escape"!==t.key||this._triggerBackdropTransition()}):H.off(this._element,"keydown.dismiss.bs.modal")}_setResizeEvent(){this._isShown?H.on(window,"resize.bs.modal",()=>this._adjustDialog()):H.off(window,"resize.bs.modal")}_hideModal(){this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._backdrop.hide(()=>{document.body.classList.remove("modal-open"),this._resetAdjustments(),Oe(),H.trigger(this._element,"hidden.bs.modal")})}_showBackdrop(t){H.on(this._element,"click.dismiss.bs.modal",t=>{this._ignoreBackdropClick?this._ignoreBackdropClick=!1:t.target===t.currentTarget&&(!0===this._config.backdrop?this.hide():"static"===this._config.backdrop&&this._triggerBackdropTransition())}),this._backdrop.show(t)}_isAnimated(){return this._element.classList.contains("fade")}_triggerBackdropTransition(){if(H.trigger(this._element,"hidePrevented.bs.modal").defaultPrevented)return;const t=this._element.scrollHeight>document.documentElement.clientHeight;t||(this._element.style.overflowY="hidden"),this._element.classList.add("modal-static");const e=o(this._dialog);H.off(this._element,"transitionend"),H.one(this._element,"transitionend",()=>{this._element.classList.remove("modal-static"),t||(H.one(this._element,"transitionend",()=>{this._element.style.overflowY=""}),c(this._element,e))}),c(this._element,e),this._element.focus()}_adjustDialog(){const t=this._element.scrollHeight>document.documentElement.clientHeight,e=Ee(),i=e>0;(!i&&t&&!_()||i&&!t&&_())&&(this._element.style.paddingLeft=e+"px"),(i&&!t&&!_()||!i&&t&&_())&&(this._element.style.paddingRight=e+"px")}_resetAdjustments(){this._element.style.paddingLeft="",this._element.style.paddingRight=""}static jQueryInterface(t,e){return this.each((function(){const i=Ie.getInstance(this)||new Ie(this,"object"==typeof t?t:{});if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t](e)}}))}}H.on(document,"click.bs.modal.data-api",'[data-bs-toggle="modal"]',(function(t){const e=s(this);["A","AREA"].includes(this.tagName)&&t.preventDefault(),H.one(e,"show.bs.modal",t=>{t.defaultPrevented||H.one(e,"hidden.bs.modal",()=>{h(this)&&this.focus()})}),(Ie.getInstance(e)||new Ie(e)).toggle(this)})),b(Ie);const je={backdrop:!0,keyboard:!0,scroll:!1},Pe={backdrop:"boolean",keyboard:"boolean",scroll:"boolean"};class Me extends 
R{constructor(t,e){super(t),this._config=this._getConfig(e),this._isShown=!1,this._backdrop=this._initializeBackDrop(),this._addEventListeners()}static get NAME(){return"offcanvas"}static get Default(){return je}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||H.trigger(this._element,"show.bs.offcanvas",{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._element.style.visibility="visible",this._backdrop.show(),this._config.scroll||(Te(),this._enforceFocusOnElement(this._element)),this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.classList.add("show"),this._queueCallback(()=>{H.trigger(this._element,"shown.bs.offcanvas",{relatedTarget:t})},this._element,!0))}hide(){this._isShown&&(H.trigger(this._element,"hide.bs.offcanvas").defaultPrevented||(H.off(document,"focusin.bs.offcanvas"),this._element.blur(),this._isShown=!1,this._element.classList.remove("show"),this._backdrop.hide(),this._queueCallback(()=>{this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._element.style.visibility="hidden",this._config.scroll||Oe(),H.trigger(this._element,"hidden.bs.offcanvas")},this._element,!0)))}dispose(){this._backdrop.dispose(),super.dispose(),H.off(document,"focusin.bs.offcanvas")}_getConfig(t){return t={...je,...U.getDataAttributes(this._element),..."object"==typeof t?t:{}},d("offcanvas",t,Pe),t}_initializeBackDrop(){return new De({isVisible:this._config.backdrop,isAnimated:!0,rootElement:this._element.parentNode,clickCallback:()=>this.hide()})}_enforceFocusOnElement(t){H.off(document,"focusin.bs.offcanvas"),H.on(document,"focusin.bs.offcanvas",e=>{document===e.target||t===e.target||t.contains(e.target)||t.focus()}),t.focus()}_addEventListeners(){H.on(this._element,"click.dismiss.bs.offcanvas",'[data-bs-dismiss="offcanvas"]',()=>this.hide()),H.on(this._element,"keydown.dismiss.bs.offcanvas",t=>{this._config.keyboard&&"Escape"===t.key&&this.hide()})}static jQueryInterface(t){return this.each((function(){const e=w.get(this,"bs.offcanvas")||new Me(this,"object"==typeof t?t:{});if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}H.on(document,"click.bs.offcanvas.data-api",'[data-bs-toggle="offcanvas"]',(function(e){const i=s(this);if(["A","AREA"].includes(this.tagName)&&e.preventDefault(),u(this))return;H.one(i,"hidden.bs.offcanvas",()=>{h(this)&&this.focus()});const n=t.findOne(".offcanvas.show");n&&n!==i&&Me.getInstance(n).hide(),(w.get(i,"bs.offcanvas")||new Me(i)).toggle(this)})),H.on(window,"load.bs.offcanvas.data-api",()=>{t.find(".offcanvas.show").forEach(t=>(w.get(t,"bs.offcanvas")||new Me(t)).show())}),b(Me);const He=new Set(["background","cite","href","itemtype","longdesc","poster","src","xlink:href"]),Re=/^(?:(?:https?|mailto|ftp|tel|file):|[^#&/:?]*(?:[#/?]|$))/i,Be=/^data:(?:image\/(?:bmp|gif|jpeg|jpg|png|tiff|webp)|video\/(?:mpeg|mp4|ogg|webm)|audio\/(?:mp3|oga|ogg|opus));base64,[\d+/a-z]+=*$/i,We=(t,e)=>{const i=t.nodeName.toLowerCase();if(e.includes(i))return!He.has(i)||Boolean(Re.test(t.nodeValue)||Be.test(t.nodeValue));const n=e.filter(t=>t instanceof RegExp);for(let t=0,e=n.length;t{We(t,a)||i.removeAttribute(t.nodeName)})}return n.body.innerHTML}const ze=new RegExp("(^|\\s)bs-tooltip\\S+","g"),Ue=new 
Set(["sanitize","allowList","sanitizeFn"]),$e={animation:"boolean",template:"string",title:"(string|element|function)",trigger:"string",delay:"(number|object)",html:"boolean",selector:"(string|boolean)",placement:"(string|function)",offset:"(array|string|function)",container:"(string|element|boolean)",fallbackPlacements:"array",boundary:"(string|element)",customClass:"(string|function)",sanitize:"boolean",sanitizeFn:"(null|function)",allowList:"object",popperConfig:"(null|object|function)"},Fe={AUTO:"auto",TOP:"top",RIGHT:_()?"left":"right",BOTTOM:"bottom",LEFT:_()?"right":"left"},Ve={animation:!0,template:'',trigger:"hover focus",title:"",delay:0,html:!1,selector:!1,placement:"top",offset:[0,0],container:!1,fallbackPlacements:["top","right","bottom","left"],boundary:"clippingParents",customClass:"",sanitize:!0,sanitizeFn:null,allowList:{"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},popperConfig:null},Ke={HIDE:"hide.bs.tooltip",HIDDEN:"hidden.bs.tooltip",SHOW:"show.bs.tooltip",SHOWN:"shown.bs.tooltip",INSERTED:"inserted.bs.tooltip",CLICK:"click.bs.tooltip",FOCUSIN:"focusin.bs.tooltip",FOCUSOUT:"focusout.bs.tooltip",MOUSEENTER:"mouseenter.bs.tooltip",MOUSELEAVE:"mouseleave.bs.tooltip"};class Xe extends R{constructor(t,e){if(void 0===he)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");super(t),this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this._config=this._getConfig(e),this.tip=null,this._setListeners()}static get Default(){return Ve}static get NAME(){return"tooltip"}static get Event(){return Ke}static get DefaultType(){return $e}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}toggleEnabled(){this._isEnabled=!this._isEnabled}toggle(t){if(this._isEnabled)if(t){const e=this._initializeOnDelegatedTarget(t);e._activeTrigger.click=!e._activeTrigger.click,e._isWithActiveTrigger()?e._enter(null,e):e._leave(null,e)}else{if(this.getTipElement().classList.contains("show"))return void this._leave(null,this);this._enter(null,this)}}dispose(){clearTimeout(this._timeout),H.off(this._element.closest(".modal"),"hide.bs.modal",this._hideModalHandler),this.tip&&this.tip.parentNode&&this.tip.parentNode.removeChild(this.tip),this._popper&&this._popper.destroy(),super.dispose()}show(){if("none"===this._element.style.display)throw new Error("Please use show on visible elements");if(!this.isWithContent()||!this._isEnabled)return;const t=H.trigger(this._element,this.constructor.Event.SHOW),i=f(this._element),n=null===i?this._element.ownerDocument.documentElement.contains(this._element):i.contains(this._element);if(t.defaultPrevented||!n)return;const s=this.getTipElement(),o=e(this.constructor.NAME);s.setAttribute("id",o),this._element.setAttribute("aria-describedby",o),this.setContent(),this._config.animation&&s.classList.add("fade");const r="function"==typeof 
this._config.placement?this._config.placement.call(this,s,this._element):this._config.placement,a=this._getAttachment(r);this._addAttachmentClass(a);const{container:l}=this._config;w.set(s,this.constructor.DATA_KEY,this),this._element.ownerDocument.documentElement.contains(this.tip)||(l.appendChild(s),H.trigger(this._element,this.constructor.Event.INSERTED)),this._popper?this._popper.update():this._popper=de(this._element,s,this._getPopperConfig(a)),s.classList.add("show");const c="function"==typeof this._config.customClass?this._config.customClass():this._config.customClass;c&&s.classList.add(...c.split(" ")),"ontouchstart"in document.documentElement&&[].concat(...document.body.children).forEach(t=>{H.on(t,"mouseover",p)});const d=this.tip.classList.contains("fade");this._queueCallback(()=>{const t=this._hoverState;this._hoverState=null,H.trigger(this._element,this.constructor.Event.SHOWN),"out"===t&&this._leave(null,this)},this.tip,d)}hide(){if(!this._popper)return;const t=this.getTipElement();if(H.trigger(this._element,this.constructor.Event.HIDE).defaultPrevented)return;t.classList.remove("show"),"ontouchstart"in document.documentElement&&[].concat(...document.body.children).forEach(t=>H.off(t,"mouseover",p)),this._activeTrigger.click=!1,this._activeTrigger.focus=!1,this._activeTrigger.hover=!1;const e=this.tip.classList.contains("fade");this._queueCallback(()=>{this._isWithActiveTrigger()||("show"!==this._hoverState&&t.parentNode&&t.parentNode.removeChild(t),this._cleanTipClass(),this._element.removeAttribute("aria-describedby"),H.trigger(this._element,this.constructor.Event.HIDDEN),this._popper&&(this._popper.destroy(),this._popper=null))},this.tip,e),this._hoverState=""}update(){null!==this._popper&&this._popper.update()}isWithContent(){return Boolean(this.getTitle())}getTipElement(){if(this.tip)return this.tip;const t=document.createElement("div");return t.innerHTML=this._config.template,this.tip=t.children[0],this.tip}setContent(){const e=this.getTipElement();this.setElementContent(t.findOne(".tooltip-inner",e),this.getTitle()),e.classList.remove("fade","show")}setElementContent(t,e){if(null!==t)return a(e)?(e=l(e),void(this._config.html?e.parentNode!==t&&(t.innerHTML="",t.appendChild(e)):t.textContent=e.textContent)):void(this._config.html?(this._config.sanitize&&(e=qe(e,this._config.allowList,this._config.sanitizeFn)),t.innerHTML=e):t.textContent=e)}getTitle(){let t=this._element.getAttribute("data-bs-original-title");return t||(t="function"==typeof this._config.title?this._config.title.call(this._element):this._config.title),t}updateAttachment(t){return"right"===t?"end":"left"===t?"start":t}_initializeOnDelegatedTarget(t,e){const i=this.constructor.DATA_KEY;return(e=e||w.get(t.delegateTarget,i))||(e=new this.constructor(t.delegateTarget,this._getDelegateConfig()),w.set(t.delegateTarget,i,e)),e}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map(t=>Number.parseInt(t,10)):"function"==typeof t?e=>t(e,this._element):t}_getPopperConfig(t){const e={placement:t,modifiers:[{name:"flip",options:{fallbackPlacements:this._config.fallbackPlacements}},{name:"offset",options:{offset:this._getOffset()}},{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"arrow",options:{element:`.${this.constructor.NAME}-arrow`}},{name:"onChange",enabled:!0,phase:"afterWrite",fn:t=>this._handlePopperPlacementChange(t)}],onFirstUpdate:t=>{t.options.placement!==t.placement&&this._handlePopperPlacementChange(t)}};return{...e,..."function"==typeof 
this._config.popperConfig?this._config.popperConfig(e):this._config.popperConfig}}_addAttachmentClass(t){this.getTipElement().classList.add("bs-tooltip-"+this.updateAttachment(t))}_getAttachment(t){return Fe[t.toUpperCase()]}_setListeners(){this._config.trigger.split(" ").forEach(t=>{if("click"===t)H.on(this._element,this.constructor.Event.CLICK,this._config.selector,t=>this.toggle(t));else if("manual"!==t){const e="hover"===t?this.constructor.Event.MOUSEENTER:this.constructor.Event.FOCUSIN,i="hover"===t?this.constructor.Event.MOUSELEAVE:this.constructor.Event.FOCUSOUT;H.on(this._element,e,this._config.selector,t=>this._enter(t)),H.on(this._element,i,this._config.selector,t=>this._leave(t))}}),this._hideModalHandler=()=>{this._element&&this.hide()},H.on(this._element.closest(".modal"),"hide.bs.modal",this._hideModalHandler),this._config.selector?this._config={...this._config,trigger:"manual",selector:""}:this._fixTitle()}_fixTitle(){const t=this._element.getAttribute("title"),e=typeof this._element.getAttribute("data-bs-original-title");(t||"string"!==e)&&(this._element.setAttribute("data-bs-original-title",t||""),!t||this._element.getAttribute("aria-label")||this._element.textContent||this._element.setAttribute("aria-label",t),this._element.setAttribute("title",""))}_enter(t,e){e=this._initializeOnDelegatedTarget(t,e),t&&(e._activeTrigger["focusin"===t.type?"focus":"hover"]=!0),e.getTipElement().classList.contains("show")||"show"===e._hoverState?e._hoverState="show":(clearTimeout(e._timeout),e._hoverState="show",e._config.delay&&e._config.delay.show?e._timeout=setTimeout(()=>{"show"===e._hoverState&&e.show()},e._config.delay.show):e.show())}_leave(t,e){e=this._initializeOnDelegatedTarget(t,e),t&&(e._activeTrigger["focusout"===t.type?"focus":"hover"]=e._element.contains(t.relatedTarget)),e._isWithActiveTrigger()||(clearTimeout(e._timeout),e._hoverState="out",e._config.delay&&e._config.delay.hide?e._timeout=setTimeout(()=>{"out"===e._hoverState&&e.hide()},e._config.delay.hide):e.hide())}_isWithActiveTrigger(){for(const t in this._activeTrigger)if(this._activeTrigger[t])return!0;return!1}_getConfig(t){const e=U.getDataAttributes(this._element);return Object.keys(e).forEach(t=>{Ue.has(t)&&delete e[t]}),(t={...this.constructor.Default,...e,..."object"==typeof t&&t?t:{}}).container=!1===t.container?document.body:l(t.container),"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),d("tooltip",t,this.constructor.DefaultType),t.sanitize&&(t.template=qe(t.template,t.allowList,t.sanitizeFn)),t}_getDelegateConfig(){const t={};if(this._config)for(const e in this._config)this.constructor.Default[e]!==this._config[e]&&(t[e]=this._config[e]);return t}_cleanTipClass(){const t=this.getTipElement(),e=t.getAttribute("class").match(ze);null!==e&&e.length>0&&e.map(t=>t.trim()).forEach(e=>t.classList.remove(e))}_handlePopperPlacementChange(t){const{state:e}=t;e&&(this.tip=e.elements.popper,this._cleanTipClass(),this._addAttachmentClass(this._getAttachment(e.placement)))}static jQueryInterface(t){return this.each((function(){let e=w.get(this,"bs.tooltip");const i="object"==typeof t&&t;if((e||!/dispose|hide/.test(t))&&(e||(e=new Xe(this,i)),"string"==typeof t)){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}b(Xe);const Ye=new 
RegExp("(^|\\s)bs-popover\\S+","g"),Qe={...Xe.Default,placement:"right",offset:[0,8],trigger:"click",content:"",template:''},Ge={...Xe.DefaultType,content:"(string|element|function)"},Ze={HIDE:"hide.bs.popover",HIDDEN:"hidden.bs.popover",SHOW:"show.bs.popover",SHOWN:"shown.bs.popover",INSERTED:"inserted.bs.popover",CLICK:"click.bs.popover",FOCUSIN:"focusin.bs.popover",FOCUSOUT:"focusout.bs.popover",MOUSEENTER:"mouseenter.bs.popover",MOUSELEAVE:"mouseleave.bs.popover"};class Je extends Xe{static get Default(){return Qe}static get NAME(){return"popover"}static get Event(){return Ze}static get DefaultType(){return Ge}isWithContent(){return this.getTitle()||this._getContent()}setContent(){const e=this.getTipElement();this.setElementContent(t.findOne(".popover-header",e),this.getTitle());let i=this._getContent();"function"==typeof i&&(i=i.call(this._element)),this.setElementContent(t.findOne(".popover-body",e),i),e.classList.remove("fade","show")}_addAttachmentClass(t){this.getTipElement().classList.add("bs-popover-"+this.updateAttachment(t))}_getContent(){return this._element.getAttribute("data-bs-content")||this._config.content}_cleanTipClass(){const t=this.getTipElement(),e=t.getAttribute("class").match(Ye);null!==e&&e.length>0&&e.map(t=>t.trim()).forEach(e=>t.classList.remove(e))}static jQueryInterface(t){return this.each((function(){let e=w.get(this,"bs.popover");const i="object"==typeof t?t:null;if((e||!/dispose|hide/.test(t))&&(e||(e=new Je(this,i),w.set(this,"bs.popover",e)),"string"==typeof t)){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}b(Je);const ti={offset:10,method:"auto",target:""},ei={offset:"number",method:"string",target:"(string|element)"};class ii extends R{constructor(t,e){super(t),this._scrollElement="BODY"===this._element.tagName?window:this._element,this._config=this._getConfig(e),this._selector=`${this._config.target} .nav-link, ${this._config.target} .list-group-item, ${this._config.target} .dropdown-item`,this._offsets=[],this._targets=[],this._activeTarget=null,this._scrollHeight=0,H.on(this._scrollElement,"scroll.bs.scrollspy",()=>this._process()),this.refresh(),this._process()}static get Default(){return ti}static get NAME(){return"scrollspy"}refresh(){const e=this._scrollElement===this._scrollElement.window?"offset":"position",i="auto"===this._config.method?e:this._config.method,s="position"===i?this._getScrollTop():0;this._offsets=[],this._targets=[],this._scrollHeight=this._getScrollHeight(),t.find(this._selector).map(e=>{const o=n(e),r=o?t.findOne(o):null;if(r){const t=r.getBoundingClientRect();if(t.width||t.height)return[U[i](r).top+s,o]}return null}).filter(t=>t).sort((t,e)=>t[0]-e[0]).forEach(t=>{this._offsets.push(t[0]),this._targets.push(t[1])})}dispose(){H.off(this._scrollElement,".bs.scrollspy"),super.dispose()}_getConfig(t){if("string"!=typeof(t={...ti,...U.getDataAttributes(this._element),..."object"==typeof t&&t?t:{}}).target&&a(t.target)){let{id:i}=t.target;i||(i=e("scrollspy"),t.target.id=i),t.target="#"+i}return d("scrollspy",t,ei),t}_getScrollTop(){return this._scrollElement===window?this._scrollElement.pageYOffset:this._scrollElement.scrollTop}_getScrollHeight(){return this._scrollElement.scrollHeight||Math.max(document.body.scrollHeight,document.documentElement.scrollHeight)}_getOffsetHeight(){return this._scrollElement===window?window.innerHeight:this._scrollElement.getBoundingClientRect().height}_process(){const 
t=this._getScrollTop()+this._config.offset,e=this._getScrollHeight(),i=this._config.offset+e-this._getOffsetHeight();if(this._scrollHeight!==e&&this.refresh(),t>=i){const t=this._targets[this._targets.length-1];this._activeTarget!==t&&this._activate(t)}else{if(this._activeTarget&&t0)return this._activeTarget=null,void this._clear();for(let e=this._offsets.length;e--;)this._activeTarget!==this._targets[e]&&t>=this._offsets[e]&&(void 0===this._offsets[e+1]||t`${t}[data-bs-target="${e}"],${t}[href="${e}"]`),n=t.findOne(i.join(","));n.classList.contains("dropdown-item")?(t.findOne(".dropdown-toggle",n.closest(".dropdown")).classList.add("active"),n.classList.add("active")):(n.classList.add("active"),t.parents(n,".nav, .list-group").forEach(e=>{t.prev(e,".nav-link, .list-group-item").forEach(t=>t.classList.add("active")),t.prev(e,".nav-item").forEach(e=>{t.children(e,".nav-link").forEach(t=>t.classList.add("active"))})})),H.trigger(this._scrollElement,"activate.bs.scrollspy",{relatedTarget:e})}_clear(){t.find(this._selector).filter(t=>t.classList.contains("active")).forEach(t=>t.classList.remove("active"))}static jQueryInterface(t){return this.each((function(){const e=ii.getInstance(this)||new ii(this,"object"==typeof t?t:{});if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}H.on(window,"load.bs.scrollspy.data-api",()=>{t.find('[data-bs-spy="scroll"]').forEach(t=>new ii(t))}),b(ii);class ni extends R{static get NAME(){return"tab"}show(){if(this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE&&this._element.classList.contains("active"))return;let e;const i=s(this._element),n=this._element.closest(".nav, .list-group");if(n){const i="UL"===n.nodeName||"OL"===n.nodeName?":scope > li > .active":".active";e=t.find(i,n),e=e[e.length-1]}const o=e?H.trigger(e,"hide.bs.tab",{relatedTarget:this._element}):null;if(H.trigger(this._element,"show.bs.tab",{relatedTarget:e}).defaultPrevented||null!==o&&o.defaultPrevented)return;this._activate(this._element,n);const r=()=>{H.trigger(e,"hidden.bs.tab",{relatedTarget:this._element}),H.trigger(this._element,"shown.bs.tab",{relatedTarget:e})};i?this._activate(i,i.parentNode,r):r()}_activate(e,i,n){const s=(!i||"UL"!==i.nodeName&&"OL"!==i.nodeName?t.children(i,".active"):t.find(":scope > li > .active",i))[0],o=n&&s&&s.classList.contains("fade"),r=()=>this._transitionComplete(e,s,n);s&&o?(s.classList.remove("show"),this._queueCallback(r,e,!0)):r()}_transitionComplete(e,i,n){if(i){i.classList.remove("active");const e=t.findOne(":scope > .dropdown-menu .active",i.parentNode);e&&e.classList.remove("active"),"tab"===i.getAttribute("role")&&i.setAttribute("aria-selected",!1)}e.classList.add("active"),"tab"===e.getAttribute("role")&&e.setAttribute("aria-selected",!0),m(e),e.classList.contains("fade")&&e.classList.add("show");let s=e.parentNode;if(s&&"LI"===s.nodeName&&(s=s.parentNode),s&&s.classList.contains("dropdown-menu")){const i=e.closest(".dropdown");i&&t.find(".dropdown-toggle",i).forEach(t=>t.classList.add("active")),e.setAttribute("aria-expanded",!0)}n&&n()}static jQueryInterface(t){return this.each((function(){const e=w.get(this,"bs.tab")||new ni(this);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}H.on(document,"click.bs.tab.data-api",'[data-bs-toggle="tab"], [data-bs-toggle="pill"], [data-bs-toggle="list"]',(function(t){["A","AREA"].includes(this.tagName)&&t.preventDefault(),u(this)||(w.get(this,"bs.tab")||new 
ni(this)).show()})),b(ni);const si={animation:"boolean",autohide:"boolean",delay:"number"},oi={animation:!0,autohide:!0,delay:5e3};class ri extends R{constructor(t,e){super(t),this._config=this._getConfig(e),this._timeout=null,this._hasMouseInteraction=!1,this._hasKeyboardInteraction=!1,this._setListeners()}static get DefaultType(){return si}static get Default(){return oi}static get NAME(){return"toast"}show(){H.trigger(this._element,"show.bs.toast").defaultPrevented||(this._clearTimeout(),this._config.animation&&this._element.classList.add("fade"),this._element.classList.remove("hide"),m(this._element),this._element.classList.add("showing"),this._queueCallback(()=>{this._element.classList.remove("showing"),this._element.classList.add("show"),H.trigger(this._element,"shown.bs.toast"),this._maybeScheduleHide()},this._element,this._config.animation))}hide(){this._element.classList.contains("show")&&(H.trigger(this._element,"hide.bs.toast").defaultPrevented||(this._element.classList.remove("show"),this._queueCallback(()=>{this._element.classList.add("hide"),H.trigger(this._element,"hidden.bs.toast")},this._element,this._config.animation)))}dispose(){this._clearTimeout(),this._element.classList.contains("show")&&this._element.classList.remove("show"),super.dispose()}_getConfig(t){return t={...oi,...U.getDataAttributes(this._element),..."object"==typeof t&&t?t:{}},d("toast",t,this.constructor.DefaultType),t}_maybeScheduleHide(){this._config.autohide&&(this._hasMouseInteraction||this._hasKeyboardInteraction||(this._timeout=setTimeout(()=>{this.hide()},this._config.delay)))}_onInteraction(t,e){switch(t.type){case"mouseover":case"mouseout":this._hasMouseInteraction=e;break;case"focusin":case"focusout":this._hasKeyboardInteraction=e}if(e)return void this._clearTimeout();const i=t.relatedTarget;this._element===i||this._element.contains(i)||this._maybeScheduleHide()}_setListeners(){H.on(this._element,"click.dismiss.bs.toast",'[data-bs-dismiss="toast"]',()=>this.hide()),H.on(this._element,"mouseover.bs.toast",t=>this._onInteraction(t,!0)),H.on(this._element,"mouseout.bs.toast",t=>this._onInteraction(t,!1)),H.on(this._element,"focusin.bs.toast",t=>this._onInteraction(t,!0)),H.on(this._element,"focusout.bs.toast",t=>this._onInteraction(t,!1))}_clearTimeout(){clearTimeout(this._timeout),this._timeout=null}static jQueryInterface(t){return this.each((function(){let e=w.get(this,"bs.toast");if(e||(e=new ri(this,"object"==typeof t&&t)),"string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}return b(ri),{Alert:B,Button:W,Carousel:Q,Collapse:J,Dropdown:we,Modal:Ie,Offcanvas:Me,Popover:Je,ScrollSpy:ii,Tab:ni,Toast:ri,Tooltip:Xe}})); \ No newline at end of file diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/dropdown/css/style.css b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/dropdown/css/style.css deleted file mode 100644 index f6c0945b..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/dropdown/css/style.css +++ /dev/null @@ -1,265 +0,0 @@ -.navbar-dropdown { - left: 0; - padding: 0; - position: absolute; - right: 0; - top: 0; - transition: all 0.45s ease; - z-index: 1030; - background: #282828; } - .navbar-dropdown .navbar-logo { - margin-right: 0.8rem; - transition: margin 0.3s ease-in-out; - vertical-align: middle; } - .navbar-dropdown .navbar-logo img { - height: 3.125rem; - transition: all 0.3s ease-in-out; } - .navbar-dropdown .navbar-logo.mbr-iconfont { - font-size: 3.125rem; - line-height: 
3.125rem; } - .navbar-dropdown .navbar-caption { - font-weight: 700; - white-space: normal; - vertical-align: -4px; - line-height: 3.125rem !important; } - .navbar-dropdown .navbar-caption, .navbar-dropdown .navbar-caption:hover { - color: inherit; - text-decoration: none; } - .navbar-dropdown .mbr-iconfont + .navbar-caption { - vertical-align: -1px; } - .navbar-dropdown.navbar-fixed-top { - position: fixed; } - .navbar-dropdown .navbar-brand span { - vertical-align: -4px; } - .navbar-dropdown.bg-color.transparent { - background: none; } - .navbar-dropdown.navbar-short .navbar-brand { - padding: 0.625rem 0; } - .navbar-dropdown.navbar-short .navbar-brand span { - vertical-align: -1px; } - .navbar-dropdown.navbar-short .navbar-caption { - line-height: 2.375rem !important; - vertical-align: -2px; } - .navbar-dropdown.navbar-short .navbar-logo { - margin-right: 0.5rem; } - .navbar-dropdown.navbar-short .navbar-logo img { - height: 2.375rem; } - .navbar-dropdown.navbar-short .navbar-logo.mbr-iconfont { - font-size: 2.375rem; - line-height: 2.375rem; } - .navbar-dropdown.navbar-short .mbr-table-cell { - height: 3.625rem; } - .navbar-dropdown .navbar-close { - left: 0.6875rem; - position: fixed; - top: 0.75rem; - z-index: 1000; } - .navbar-dropdown .hamburger-icon { - content: ""; - display: inline-block; - vertical-align: middle; - width: 16px; - -webkit-box-shadow: 0 -6px 0 1px #282828,0 0 0 1px #282828,0 6px 0 1px #282828; - -moz-box-shadow: 0 -6px 0 1px #282828,0 0 0 1px #282828,0 6px 0 1px #282828; - box-shadow: 0 -6px 0 1px #282828,0 0 0 1px #282828,0 6px 0 1px #282828; } - -.dropdown-menu .dropdown-toggle[data-toggle="dropdown-submenu"]::after { - border-bottom: 0.35em solid transparent; - border-left: 0.35em solid; - border-right: 0; - border-top: 0.35em solid transparent; - margin-left: 0.3rem; } - -.dropdown-menu .dropdown-item:focus { - outline: 0; } - -.nav-dropdown { - font-size: 0.75rem; - font-weight: 500; - height: auto !important; } - .nav-dropdown .nav-btn { - padding-left: 1rem; } - .nav-dropdown .link { - margin: .667em 1.667em; - font-weight: 500; - padding: 0; - transition: color .2s ease-in-out; } - .nav-dropdown .link.dropdown-toggle { - margin-right: 2.583em; } - .nav-dropdown .link.dropdown-toggle::after { - margin-left: .25rem; - border-top: 0.35em solid; - border-right: 0.35em solid transparent; - border-left: 0.35em solid transparent; - border-bottom: 0; } - .nav-dropdown .link.dropdown-toggle[aria-expanded="true"] { - margin: 0; - padding: 0.667em 3.263em 0.667em 1.667em; } - .nav-dropdown .link::after, - .nav-dropdown .dropdown-item::after { - color: inherit; } - .nav-dropdown .btn { - font-size: 0.75rem; - font-weight: 700; - letter-spacing: 0; - margin-bottom: 0; - padding-left: 1.25rem; - padding-right: 1.25rem; } - .nav-dropdown .dropdown-menu { - border-radius: 0; - border: 0; - left: 0; - margin: 0; - padding-bottom: 1.25rem; - padding-top: 1.25rem; - position: relative; } - .nav-dropdown .dropdown-submenu { - margin-left: 0.125rem; - top: 0; } - .nav-dropdown .dropdown-item { - font-weight: 500; - line-height: 2; - padding: 0.3846em 4.615em 0.3846em 1.5385em; - position: relative; - transition: color .2s ease-in-out, background-color .2s ease-in-out; } - .nav-dropdown .dropdown-item::after { - margin-top: -0.3077em; - position: absolute; - right: 1.1538em; - top: 50%; } - .nav-dropdown .dropdown-item:focus, .nav-dropdown .dropdown-item:hover { - background: none; } - -@media (max-width: 767px) { - .nav-dropdown.navbar-toggleable-sm { - bottom: 0; - display: 
none; - left: 0; - overflow-x: hidden; - position: fixed; - top: 0; - transform: translateX(-100%); - -ms-transform: translateX(-100%); - -webkit-transform: translateX(-100%); - width: 18.75rem; - z-index: 999; } } -.nav-dropdown.navbar-toggleable-xl { - bottom: 0; - display: none; - left: 0; - overflow-x: hidden; - position: fixed; - top: 0; - transform: translateX(-100%); - -ms-transform: translateX(-100%); - -webkit-transform: translateX(-100%); - width: 18.75rem; - z-index: 999; } - -.nav-dropdown-sm { - display: block !important; - overflow-x: hidden; - overflow: auto; - padding-top: 3.875rem; } - .nav-dropdown-sm::after { - content: ""; - display: block; - height: 3rem; - width: 100%; } - .nav-dropdown-sm.collapse.in ~ .navbar-close { - display: block !important; } - .nav-dropdown-sm.collapsing, .nav-dropdown-sm.collapse.in { - transform: translateX(0); - -ms-transform: translateX(0); - -webkit-transform: translateX(0); - transition: all 0.25s ease-out; - -webkit-transition: all 0.25s ease-out; - background: #282828; } - .nav-dropdown-sm.collapsing[aria-expanded="false"] { - transform: translateX(-100%); - -ms-transform: translateX(-100%); - -webkit-transform: translateX(-100%); } - .nav-dropdown-sm .nav-item { - display: block; - margin-left: 0 !important; - padding-left: 0; } - .nav-dropdown-sm .link, - .nav-dropdown-sm .dropdown-item { - border-top: 1px dotted rgba(255, 255, 255, 0.1); - font-size: 0.8125rem; - line-height: 1.6; - margin: 0 !important; - padding: 0.875rem 2.4rem 0.875rem 1.5625rem !important; - position: relative; - white-space: normal; } - .nav-dropdown-sm .link:focus, .nav-dropdown-sm .link:hover, - .nav-dropdown-sm .dropdown-item:focus, - .nav-dropdown-sm .dropdown-item:hover { - background: rgba(0, 0, 0, 0.2) !important; - color: #c0a375; } - .nav-dropdown-sm .nav-btn { - position: relative; - padding: 1.5625rem 1.5625rem 0 1.5625rem; } - .nav-dropdown-sm .nav-btn::before { - border-top: 1px dotted rgba(255, 255, 255, 0.1); - content: ""; - left: 0; - position: absolute; - top: 0; - width: 100%; } - .nav-dropdown-sm .nav-btn + .nav-btn { - padding-top: 0.625rem; } - .nav-dropdown-sm .nav-btn + .nav-btn::before { - display: none; } - .nav-dropdown-sm .btn { - padding: 0.625rem 0; } - .nav-dropdown-sm .dropdown-toggle[data-toggle="dropdown-submenu"]::after { - margin-left: .25rem; - border-top: 0.35em solid; - border-right: 0.35em solid transparent; - border-left: 0.35em solid transparent; - border-bottom: 0; } - .nav-dropdown-sm .dropdown-toggle[data-toggle="dropdown-submenu"][aria-expanded="true"]::after { - border-top: 0; - border-right: 0.35em solid transparent; - border-left: 0.35em solid transparent; - border-bottom: 0.35em solid; } - .nav-dropdown-sm .dropdown-menu { - margin: 0; - padding: 0; - position: relative; - top: 0; - left: 0; - width: 100%; - border: 0; - float: none; - border-radius: 0; - background: none; } - .nav-dropdown-sm .dropdown-submenu { - left: 100%; - margin-left: 0.125rem; - margin-top: -1.25rem; - top: 0; } - -.navbar-toggleable-sm .nav-dropdown .dropdown-menu { - position: absolute; } - -.navbar-toggleable-sm .nav-dropdown .dropdown-submenu { - left: 100%; - margin-left: 0.125rem; - margin-top: -1.25rem; - top: 0; } - -.navbar-toggleable-sm.opened .nav-dropdown .dropdown-menu { - position: relative; } - -.navbar-toggleable-sm.opened .nav-dropdown .dropdown-submenu { - left: 0; - margin-left: 00rem; - margin-top: 0rem; - top: 0; } - -.is-builder .nav-dropdown.collapsing { - transition: none !important; } - - diff --git 
a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/dropdown/js/navbar-dropdown.js b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/dropdown/js/navbar-dropdown.js deleted file mode 100644 index c5f66dc0..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/dropdown/js/navbar-dropdown.js +++ /dev/null @@ -1,17 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -var $jscomp=$jscomp||{};$jscomp.scope={};$jscomp.ASSUME_ES5=!1;$jscomp.ASSUME_NO_NATIVE_MAP=!1;$jscomp.ASSUME_NO_NATIVE_SET=!1;$jscomp.SIMPLE_FROUND_POLYFILL=!1;$jscomp.defineProperty=$jscomp.ASSUME_ES5||"function"==typeof Object.defineProperties?Object.defineProperty:function(b,c,a){b!=Array.prototype&&b!=Object.prototype&&(b[c]=a.value)};$jscomp.getGlobal=function(b){return"undefined"!=typeof window&&window===b?b:"undefined"!=typeof global&&null!=global?global:b};$jscomp.global=$jscomp.getGlobal(this); -$jscomp.polyfill=function(b,c,a,d){if(c){a=$jscomp.global;b=b.split(".");for(d=0;de}function i(n){return r(n)||o(n)}return{constrain:function(t){return i(t)?r(t)?n:e:t},length:t,max:e,min:n,reachedAny:i,reachedMax:o,reachedMin:r,removeOffset:function(n){return t?n-t*Math.ceil((n-e)/t):n}}}function r(n,e,o){var i=t(0,n),a=i.min,u=i.constrain,c=n+1,s=d(e);function d(n){return o?Math.abs((c+n)%c):u(n)}function f(){return s}function l(n){return s=d(n),p}var p={add:function(n){return l(f()+n)},clone:function(){return r(n,f(),o)},get:f,set:l,min:a,max:n};return p}function o(){var n=[];var e={add:function(t,r,o,i){return void 0===i&&(i=!1),t.addEventListener(r,o,i),n.push((function(){return t.removeEventListener(r,o,i)})),e},removeAll:function(){return n=n.filter((function(n){return n()})),e}};return e}function i(n){var e=n;function t(n){return e/=n,o}function r(n){return"number"==typeof n?n:n.get()}var o={add:function(n){return e+=r(n),o},divide:t,get:function(){return e},multiply:function(n){return e*=n,o},normalize:function(){return 0!==e&&t(e),o},set:function(n){return e=r(n),o},subtract:function(n){return e-=r(n),o}};return o}function a(n){return n?n/Math.abs(n):0}function u(n,e){return Math.abs(n-e)}function c(n,e){for(var t=[],r=0;r=2,a=k||!i,c=(e=n.target,o=e.nodeName||"",!(w.indexOf(o)>-1)),f=i||k&&c;B=!0,s.pointerDown(n),E.set(r),r.set(d),p.useBaseMass().useSpeed(80),function(){var n=k?document:t;T.add(n,"touchmove",O).add(n,"touchend",N).add(n,"mousemove",O).add(n,"mouseup",N)}(),M.set(s.readPoint(n,y)),S.set(s.readPoint(n,b)),v.emit("pointerDown"),a&&(L=!1),f&&n.preventDefault()}}function O(n){if(!I&&!k){if(!n.cancelable)return N();var t=s.readPoint(n,y).get(),o=s.readPoint(n,b).get(),i=u(t,M.get()),a=u(o,S.get());if(!(I=i>a)&&!L)return N()}var c=s.pointerMove(n);!L&&c&&(L=!0),f.start(),r.add(e.applyTo(c)),n.preventDefault()}function N(){var n=g.byDistance(0,!1).index!==m.get(),t=s.pointerUp()*(c?D:C)[k?"mouse":"touch"],o=function(n,e){var t=m.clone().add(-1*a(n)),r=t.get()===m.min||t.get()===m.max,o=g.byDistance(n,!c).distance;return c||Math.abs(n)<20?o:!h&&r?.6*o:x&&e?.5*o:g.byIndex(t.get(),0).distance}(e.applyTo(t),n),i=function(n,e){if(0===n||0===e)return 0;if(Math.abs(n)<=Math.abs(e))return 0;var t=u(Math.abs(n),Math.abs(e));return 
Math.abs(t/n)}(t,o),d=u(r.get(),E.get())>=.5,f=n&&i>.75,y=Math.abs(t)<20,b=f?10:P,w=f?1+2.5*i:1;d&&!k&&(L=!0),I=!1,B=!1,T.removeAll(),p.useSpeed(y?9:b).useMass(w),l.distance(o,!c),k=!1,v.emit("pointerUp")}function F(n){L&&n.preventDefault()}return{addActivationEvents:function(){var n=t;A.add(n,"touchmove",(function(){})).add(n,"touchend",(function(){})).add(n,"touchstart",z).add(n,"mousedown",z).add(n,"touchcancel",N).add(n,"contextmenu",N).add(n,"click",F)},clickAllowed:function(){return!L},pointerDown:function(){return B},removeAllEvents:function(){A.removeAll(),T.removeAll()}}}function m(n,e,t){var r,o,u=(r=2,o=Math.pow(10,r),function(n){return Math.round(n*o)/o}),c=i(0),s=i(0),d=i(0),f=0,l=e,p=t;function g(n){return l=n,v}function m(n){return p=n,v}var v={direction:function(){return f},seek:function(e){d.set(e).subtract(n);var t,r,o,i=(t=d.get(),(o=0)+(t-(r=0))/(100-r)*(l-o));return f=a(d.get()),d.normalize().multiply(i).subtract(c),function(n){n.divide(p),s.add(n)}(d),v},settle:function(e){var t=e.get()-n.get(),r=!u(t);return r&&n.set(e),r},update:function(){c.add(s),n.add(c),s.multiply(0)},useBaseMass:function(){return m(t)},useBaseSpeed:function(){return g(e)},useMass:m,useSpeed:g};return v}function v(n,e,t,r){var o=!1;return{constrain:function(i){if(!o&&n.reachedAny(t.get())&&n.reachedAny(e.get())){var a=i?.7:.45,u=t.get()-e.get();t.subtract(u*a),!i&&Math.abs(u)<10&&(t.set(n.constrain(t.get())),r.useSpeed(10).useMass(3))}},toggleActive:function(n){o=!n}}}function h(n,e,r,o,i){var a=t(-e+n,r[0]),u=o.map(a.constrain);return{snapsContained:function(){if(e<=n)return[a.max];if("keepSnaps"===i)return u;var r=function(){var n=u[0],e=d(u),r=u.lastIndexOf(n),o=u.indexOf(e)+1;return t(r,o)}(),o=r.min,c=r.max;return u.slice(o,c)}()}}function x(n,e,r,o,i){var a=t(r.min+e.measure(.1),r.max+e.measure(.1)),u=a.reachedMin,c=a.reachedMax;return{loop:function(e){if(function(n){return 1===n?c(o.get()):-1===n&&u(o.get())}(e)){var t=n*(-1*e);i.forEach((function(n){return n.add(t)}))}}}}function y(n){var e=n.max,t=n.length;return{get:function(n){return(n-e)/-t}}}function b(n,e,t,r,o,i){var a,u,s=n.startEdge,f=n.endEdge,l=o.map((function(n){return r[s]-n[s]})).map(t.measure).map((function(n){return-Math.abs(n)})),p=(a=c(l,i).map((function(n){return n[0]})),u=c(o,i).map((function(n){return d(n)[f]-n[0][s]})).map(t.measure).map(Math.abs).map(e.measure),a.map((function(n,e){return n+u[e]})));return{snaps:l,snapsAligned:p}}function w(n,e,t,r,o){var i=r.reachedAny,a=r.removeOffset,u=r.constrain;function c(n,e){return Math.abs(n)0?n.concat([t]):n}),[])}function m(n,e){var r="start"===e,o=r?-t:t,u=i.findSlideBounds(o);return n.map((function(n){var e=r?0:-t,o=r?t:0,i=u.filter((function(e){return e.index===n}))[0][r?"end":"start"];return{point:i,getTarget:function(){return a.get()>i?e:o},index:n,location:-1}}))}return{canLoop:function(){return l.every((function(n){var t=n.index;return p(d.filter((function(n){return n!==t})),e)<=0}))},clear:function(){l.forEach((function(e){var t=e.index;u[t].style[n.startEdge]=""}))},loop:function(){l.forEach((function(e){var t=e.getTarget,r=e.location,o=e.index,i=t();i!==r&&(u[o].style[n.startEdge]=i+"%",e.location=i)}))},loopPoints:l}}function S(n,e,t){var r=o(),i=r.removeAll,a=0;function u(n){9===n.keyCode&&(a=(new Date).getTime())}function c(o,i){r.add(o,"focus",(function(){if(!((new Date).getTime()-a>10)){n.scrollLeft=0;var r=Math.floor(i/t);e.index(r,0)}}),!0)}return{addActivationEvents:function(n){r.add(document,"keydown",u,!1),n.forEach(c)},removeAllEvents:i}}function 
E(n,e,t){var r=t.style,o="x"===n.scroll?function(n){return"translate3d("+n+"%,0px,0px)"}:function(n){return"translate3d(0px,"+n+"%,0px)"},i=!1;return{clear:function(){r.transform=""},to:function(n){i||(r.transform=o(e.applyTo(n.get())))},toggleActive:function(n){i=!n}}}function A(n,o,a,u,c){var l,p=u.align,A=u.axis,T=u.direction,C=u.startIndex,D=u.inViewThreshold,P=u.loop,B=u.speed,I=u.dragFree,L=u.slidesToScroll,k=u.skipSnaps,z=u.containScroll,O=o.getBoundingClientRect(),N=a.map((function(n){return n.getBoundingClientRect()})),F=function(n){var e="rtl"===n?-1:1;return{applyTo:function(n){return n*e}}}(T),U=function(n,e){var t="y"===n?"y":"x";return{scroll:t,cross:"y"===n?"x":"y",startEdge:"y"===t?"top":"rtl"===e?"right":"left",endEdge:"y"===t?"bottom":"rtl"===e?"left":"right",measureSize:function(n){var e=n.width,r=n.height;return"x"===t?e:r}}}(A,T),V=(l=U.measureSize(O),{measure:function(n){return 0===l?0:n/l*100},totalPercent:100}),H=V.totalPercent,R=e(p,H),j=function(n,e,t,r,o){var i=n.measureSize,a=n.startEdge,u=n.endEdge,c=r.map(i);return{slideSizes:c.map(e.measure),slideSizesWithGaps:r.map((function(n,e,r){var i=e===f(r),s=window.getComputedStyle(d(t)),l=parseFloat(s.getPropertyValue("margin-"+u));return i?c[e]+(o?l:0):r[e+1][a]-n[a]})).map(e.measure).map(Math.abs)}}(U,V,a,N,P),G=j.slideSizes,q=j.slideSizesWithGaps,W=b(U,R,V,O,N,L),X=W.snaps,J=W.snapsAligned,Y=-d(X)+d(q),K=h(H,Y,X,J,z).snapsContained,Q=!P&&""!==z?K:J,Z=function(n,e,r){var o,i;return{limit:(o=e[0],i=d(e),t(r?o-n:i,o))}}(Y,Q,P).limit,$=r(f(Q),C,P),_=$.clone(),nn=s(a),en=function(n){var e=0;function t(n,t){return function(){n===!!e&&t()}}function r(){e=window.requestAnimationFrame(n)}return{proceed:t(!0,r),start:t(!1,r),stop:t(!0,(function(){window.cancelAnimationFrame(e),e=0}))}}((function(){P||dn.scrollBounds.constrain(dn.dragHandler.pointerDown()),dn.scrollBody.seek(on).update();var n=dn.scrollBody.settle(on);n&&!dn.dragHandler.pointerDown()&&(dn.animation.stop(),c.emit("settle")),n||c.emit("scroll"),P&&(dn.scrollLooper.loop(dn.scrollBody.direction()),dn.slideLooper.loop()),dn.translate.to(rn),dn.animation.proceed()})),tn=Q[$.get()],rn=i(tn),on=i(tn),an=m(rn,B,1),un=w(P,Q,Y,Z,on),cn=function(n,e,t,r,o,i){function a(r){var a=r.distance,u=r.index!==e.get();a&&(n.start(),o.add(a)),u&&(t.set(e.get()),e.set(r.index),i.emit("select"))}return{distance:function(n,e){a(r.byDistance(n,e))},index:function(n,t){var o=e.clone().set(n);a(r.byIndex(o.get(),t))}}}(en,$,_,un,on,c),sn=function(n,e,t,r,o,i){var a=Math.min(Math.max(i,.01),.99),u=(o?[0,e,-e]:[0]).reduce((function(n,e){return n.concat(c(e,a))}),[]);function c(e,o){var i=t.map((function(n){return n*(o||0)}));return r.map((function(r,o){return{start:r-t[o]+i[o]+e,end:r+n-i[o]+e,index:o}}))}return{check:function(n){return u.reduce((function(e,t){var r=t.index,o=t.start,i=t.end;return-1===e.indexOf(r)&&on?e.concat([r]):e}),[])},findSlideBounds:c}}(H,Y,G,X,P,D),dn={animation:en,axis:U,direction:F,dragHandler:g(U,F,n,on,I,function(n,e){var t=n.scroll,r={x:"clientX",y:"clientY"},o=i(0),a=i(0),u=i(0),c=i(0),s=[],d=(new Date).getTime(),f=!1;function l(n,e){f=!n.touches;var t=r[e],o=f?n[t]:n.touches[0][t];return c.set(o)}return{pointerDown:function(n){var r=l(n,t);return o.set(r),u.set(r),e.measure(o.get())},pointerMove:function(n){var r=l(n,t),o=(new Date).getTime(),i=o-d;return i>=10&&(i>=100&&(s=[]),s.push(r.get()),d=o),a.set(r).subtract(u),u.set(r),e.measure(a.get())},pointerUp:function(){var n=(new Date).getTime()-d,t=u.get(),r=s.slice(-5).map((function(n){return 
t-n})).sort((function(n,e){return Math.abs(n)100||!r?0:r),s=[],e.measure(u.get())},readPoint:l}}(U,V),rn,en,cn,an,un,$,c,P,k),pxToPercent:V,index:$,indexPrevious:_,limit:Z,location:rn,options:u,scrollBody:an,scrollBounds:v(Z,rn,on,an),scrollLooper:x(Y,V,Z,rn,[rn,on]),scrollProgress:y(Z),scrollSnaps:Q,scrollTarget:un,scrollTo:cn,slideFocus:S(n,cn,L),slideLooper:M(U,H,Y,q,Q,sn,rn,a),slidesInView:sn,slideIndexes:nn,target:on,translate:E(U,F,o)};return dn}var T={align:"center",axis:"x",containScroll:"",direction:"ltr",dragFree:!1,draggable:!0,draggableClass:"is-draggable",draggingClass:"is-dragging",inViewThreshold:0,loop:!1,skipSnaps:!0,selectedClass:"is-selected",slidesToScroll:1,speed:10,startIndex:0};return function(e,t){var r,i,a,u,c,s,d,f=function(){var n={};function e(e){return n[e]||[]}var t={emit:function(n){return e(n).forEach((function(e){return e(n)})),t},off:function(r,o){return n[r]=e(r).filter((function(n){return n!==o})),t},on:function(r,o){return n[r]=e(r).concat([o]),t}};return t}(),g=o(),m=(r=function(){if(y){var n=u.axis.measureSize(e.getBoundingClientRect());M!==n&&B(),f.emit("resize")}},i=500,a=0,function(){window.clearTimeout(a),a=window.setTimeout(r,i)||0}),v=B,h=f.on,x=f.off,y=!1,b=n({},T),w=n({},b),M=0;function S(){if(!e)throw new Error("Missing root node 😢");var n,t=e.querySelector("*");if(!t)throw new Error("Missing container node 😢");s=t,d=Array.prototype.slice.call(s.children),n=getComputedStyle(e,":before").content,c={get:function(){try{return JSON.parse(n.slice(1,-1).replace(/\\/g,""))}catch(n){}return{}}}}function E(t){if(S(),b=n({},b,t),w=n({},b,c.get()),u=A(e,s,d,w,f),g.add(window,"resize",m),u.translate.to(u.location),M=u.axis.measureSize(e.getBoundingClientRect()),w.loop){if(!u.slideLooper.canLoop())return P(),E({loop:!1});u.slideLooper.loop()}w.draggable&&s.offsetParent&&d.length&&(u.dragHandler.addActivationEvents(),w.draggableClass&&p(e,w.draggableClass),w.draggingClass&&f.on("pointerDown",C).on("pointerUp",C)),d.length&&u.slideFocus.addActivationEvents(d),w.selectedClass&&(D(),f.on("select",D).on("pointerUp",D)),y||(setTimeout((function(){return f.emit("init")}),0),y=!0)}function C(n){var t=w.draggingClass;"pointerDown"===n?p(e,t):l(e,t)}function D(){var n=w.selectedClass,e=I(!0);L(!0).forEach((function(e){return l(d[e],n)})),e.forEach((function(e){return p(d[e],n)}))}function P(){u.dragHandler.removeAllEvents(),u.slideFocus.removeAllEvents(),u.animation.stop(),g.removeAll(),u.translate.clear(),u.slideLooper.clear(),l(e,w.draggableClass),d.forEach((function(n){return l(n,w.selectedClass)})),f.off("select",D).off("pointerUp",D).off("pointerDown",C).off("pointerUp",C)}function B(e){if(y){var t=n({startIndex:z()},e);P(),E(t),f.emit("reInit")}}function I(n){var e=u[n?"target":"location"].get(),t=w.loop?"removeOffset":"constrain";return u.slidesInView.check(u.limit[t](e))}function L(n){var e=I(n);return u.slideIndexes.filter((function(n){return-1===e.indexOf(n)}))}function k(n,e,t){u.scrollBody.useBaseMass().useSpeed(e?100:w.speed),y&&u.scrollTo.index(n,t||0)}function z(){return u.index.get()}return E(t),{canScrollNext:function(){return u.index.clone().add(1).get()!==z()},canScrollPrev:function(){return u.index.clone().add(-1).get()!==z()},clickAllowed:function(){return u.dragHandler.clickAllowed()},containerNode:function(){return s},dangerouslyGetEngine:function(){return u},destroy:function(){y&&(P(),y=!1,f.emit("destroy"))},off:x,on:h,previousScrollSnap:function(){return u.indexPrevious.get()},reInit:v,rootNode:function(){return 
e},scrollNext:function(n){k(u.index.clone().add(1).get(),!0===n,-1)},scrollPrev:function(n){k(u.index.clone().add(-1).get(),!0===n,1)},scrollProgress:function(){return u.scrollProgress.get(u.location.get())},scrollSnapList:function(){return u.scrollSnaps.map(u.scrollProgress.get)},scrollTo:k,selectedScrollSnap:z,slideNodes:function(){return d},slidesInView:I,slidesNotInView:L}}})); \ No newline at end of file diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/embla/script.js b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/embla/script.js deleted file mode 100644 index 410e44ea..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/embla/script.js +++ /dev/null @@ -1,27 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -var $jscomp=$jscomp||{};$jscomp.scope={};$jscomp.findInternal=function(a,e,f){a instanceof String&&(a=String(a));for(var g=a.length,b=0;b .mbr-iconfont { - font-size: 6.25rem; -} -.display-2 { - font-family: 'Inter Tight', sans-serif; - font-size: 4rem; - line-height: 1; -} -.display-2 > .mbr-iconfont { - font-size: 5rem; -} -.display-4 { - font-family: 'Inter Tight', sans-serif; - font-size: 1.4rem; - line-height: 1.5; -} -.display-4 > .mbr-iconfont { - font-size: 1.75rem; -} -.display-5 { - font-family: 'Inter Tight', sans-serif; - font-size: 2.5rem; - line-height: 1.5; -} -.display-5 > .mbr-iconfont { - font-size: 3.125rem; -} -.display-7 { - font-family: 'Inter Tight', sans-serif; - font-size: 1.4rem; - line-height: 1.3; -} -.display-7 > .mbr-iconfont { - font-size: 1.75rem; -} -/* ---- Fluid typography for mobile devices ---- */ -/* 1.4 - font scale ratio ( bootstrap == 1.42857 ) */ -/* 100vw - current viewport width */ -/* (48 - 20) 48 == 48rem == 768px, 20 == 20rem == 320px(minimal supported viewport) */ -/* 0.65 - min scale variable, may vary */ -@media (max-width: 992px) { - .display-1 { - font-size: 4rem; - } -} -@media (max-width: 768px) { - .display-1 { - font-size: 3.5rem; - font-size: calc( 2.4rem + (5 - 2.4) * ((100vw - 20rem) / (48 - 20))); - line-height: calc( 1.1 * (2.4rem + (5 - 2.4) * ((100vw - 20rem) / (48 - 20)))); - } - .display-2 { - font-size: 3.2rem; - font-size: calc( 2.05rem + (4 - 2.05) * ((100vw - 20rem) / (48 - 20))); - line-height: calc( 1.3 * (2.05rem + (4 - 2.05) * ((100vw - 20rem) / (48 - 20)))); - } - .display-4 { - font-size: 1.12rem; - font-size: calc( 1.14rem + (1.4 - 1.14) * ((100vw - 20rem) / (48 - 20))); - line-height: calc( 1.4 * (1.14rem + (1.4 - 1.14) * ((100vw - 20rem) / (48 - 20)))); - } - .display-5 { - font-size: 2rem; - font-size: calc( 1.525rem + (2.5 - 1.525) * ((100vw - 20rem) / (48 - 20))); - line-height: calc( 1.4 * (1.525rem + (2.5 - 1.525) * ((100vw - 20rem) / (48 - 20)))); - } - .display-7 { - font-size: 1.12rem; - font-size: calc( 1.14rem + (1.4 - 1.14) * ((100vw - 20rem) / (48 - 20))); - line-height: calc( 1.4 * (1.14rem + (1.4 - 1.14) * ((100vw - 20rem) / (48 - 20)))); - } -} -/* Buttons */ -.btn { - padding: 1.25rem 2rem; - border-radius: 4px; -} -@media (max-width: 767px) { - .btn { - padding: 0.75rem 1.5rem; - } -} -.btn-sm { - padding: 0.6rem 1.2rem; - border-radius: 4px; -} -.btn-md { - padding: 0.6rem 1.2rem; - border-radius: 4px; -} 
-.btn-lg { - padding: 1.25rem 2rem; - border-radius: 4px; -} -.bg-primary { - background-color: #164fd3 !important; -} -.bg-success { - background-color: #3a341c !important; -} -.bg-info { - background-color: #320707 !important; -} -.bg-warning { - background-color: #a0e2e1 !important; -} -.bg-danger { - background-color: #ffffff !important; -} -.btn-primary, -.btn-primary:active { - background-color: #164fd3 !important; - border-color: #164fd3 !important; - color: #ffffff !important; - box-shadow: none; -} -.btn-primary:hover, -.btn-primary:focus, -.btn-primary.focus, -.btn-primary.active { - color: inherit; - background-color: #326aea !important; - border-color: #326aea !important; - box-shadow: none; -} -.btn-primary.disabled, -.btn-primary:disabled { - color: #ffffff !important; - background-color: #326aea !important; - border-color: #326aea !important; -} -.btn-secondary, -.btn-secondary:active { - background-color: #ffd7ef !important; - border-color: #ffd7ef !important; - color: #d70081 !important; - box-shadow: none; -} -.btn-secondary:hover, -.btn-secondary:focus, -.btn-secondary.focus, -.btn-secondary.active { - color: inherit; - background-color: #ffffff !important; - border-color: #ffffff !important; - box-shadow: none; -} -.btn-secondary.disabled, -.btn-secondary:disabled { - color: #d70081 !important; - background-color: #ffffff !important; - border-color: #ffffff !important; -} -.btn-info, -.btn-info:active { - background-color: #320707 !important; - border-color: #320707 !important; - color: #ffffff !important; - box-shadow: none; -} -.btn-info:hover, -.btn-info:focus, -.btn-info.focus, -.btn-info.active { - color: inherit; - background-color: #5f0d0d !important; - border-color: #5f0d0d !important; - box-shadow: none; -} -.btn-info.disabled, -.btn-info:disabled { - color: #ffffff !important; - background-color: #5f0d0d !important; - border-color: #5f0d0d !important; -} -.btn-success, -.btn-success:active { - background-color: #3a341c !important; - border-color: #3a341c !important; - color: #ffffff !important; - box-shadow: none; -} -.btn-success:hover, -.btn-success:focus, -.btn-success.focus, -.btn-success.active { - color: inherit; - background-color: #5c532d !important; - border-color: #5c532d !important; - box-shadow: none; -} -.btn-success.disabled, -.btn-success:disabled { - color: #ffffff !important; - background-color: #5c532d !important; - border-color: #5c532d !important; -} -.btn-warning, -.btn-warning:active { - background-color: #a0e2e1 !important; - border-color: #a0e2e1 !important; - color: #1f6463 !important; - box-shadow: none; -} -.btn-warning:hover, -.btn-warning:focus, -.btn-warning.focus, -.btn-warning.active { - color: inherit; - background-color: #c7eeed !important; - border-color: #c7eeed !important; - box-shadow: none; -} -.btn-warning.disabled, -.btn-warning:disabled { - color: #1f6463 !important; - background-color: #c7eeed !important; - border-color: #c7eeed !important; -} -.btn-danger, -.btn-danger:active { - background-color: #ffffff !important; - border-color: #ffffff !important; - color: #808080 !important; - box-shadow: none; -} -.btn-danger:hover, -.btn-danger:focus, -.btn-danger.focus, -.btn-danger.active { - color: inherit; - background-color: #ffffff !important; - border-color: #ffffff !important; - box-shadow: none; -} -.btn-danger.disabled, -.btn-danger:disabled { - color: #808080 !important; - background-color: #ffffff !important; - border-color: #ffffff !important; -} -.btn-white, -.btn-white:active { - background-color: #eff0ec 
!important; - border-color: #eff0ec !important; - color: #757b62 !important; - box-shadow: none; -} -.btn-white:hover, -.btn-white:focus, -.btn-white.focus, -.btn-white.active { - color: inherit; - background-color: #ffffff !important; - border-color: #ffffff !important; - box-shadow: none; -} -.btn-white.disabled, -.btn-white:disabled { - color: #757b62 !important; - background-color: #ffffff !important; - border-color: #ffffff !important; -} -.btn-black, -.btn-black:active { - background-color: #232323 !important; - border-color: #232323 !important; - color: #ffffff !important; - box-shadow: none; -} -.btn-black:hover, -.btn-black:focus, -.btn-black.focus, -.btn-black.active { - color: inherit; - background-color: #3d3d3d !important; - border-color: #3d3d3d !important; - box-shadow: none; -} -.btn-black.disabled, -.btn-black:disabled { - color: #ffffff !important; - background-color: #3d3d3d !important; - border-color: #3d3d3d !important; -} -.btn-primary-outline, -.btn-primary-outline:active { - background-color: transparent !important; - border-color: #164fd3; - color: #164fd3; -} -.btn-primary-outline:hover, -.btn-primary-outline:focus, -.btn-primary-outline.focus, -.btn-primary-outline.active { - color: #0e3284 !important; - background-color: transparent !important; - border-color: #0e3284 !important; - box-shadow: none !important; -} -.btn-primary-outline.disabled, -.btn-primary-outline:disabled { - color: #ffffff !important; - background-color: #164fd3 !important; - border-color: #164fd3 !important; -} -.btn-secondary-outline, -.btn-secondary-outline:active { - background-color: transparent !important; - border-color: #ffd7ef; - color: #ffd7ef; -} -.btn-secondary-outline:hover, -.btn-secondary-outline:focus, -.btn-secondary-outline.focus, -.btn-secondary-outline.active { - color: #ff80cc !important; - background-color: transparent !important; - border-color: #ff80cc !important; - box-shadow: none !important; -} -.btn-secondary-outline.disabled, -.btn-secondary-outline:disabled { - color: #d70081 !important; - background-color: #ffd7ef !important; - border-color: #ffd7ef !important; -} -.btn-info-outline, -.btn-info-outline:active { - background-color: transparent !important; - border-color: #320707; - color: #320707; -} -.btn-info-outline:hover, -.btn-info-outline:focus, -.btn-info-outline.focus, -.btn-info-outline.active { - color: #000000 !important; - background-color: transparent !important; - border-color: #000000 !important; - box-shadow: none !important; -} -.btn-info-outline.disabled, -.btn-info-outline:disabled { - color: #ffffff !important; - background-color: #320707 !important; - border-color: #320707 !important; -} -.btn-success-outline, -.btn-success-outline:active { - background-color: transparent !important; - border-color: #3a341c; - color: #3a341c; -} -.btn-success-outline:hover, -.btn-success-outline:focus, -.btn-success-outline.focus, -.btn-success-outline.active { - color: #000000 !important; - background-color: transparent !important; - border-color: #000000 !important; - box-shadow: none !important; -} -.btn-success-outline.disabled, -.btn-success-outline:disabled { - color: #ffffff !important; - background-color: #3a341c !important; - border-color: #3a341c !important; -} -.btn-warning-outline, -.btn-warning-outline:active { - background-color: transparent !important; - border-color: #a0e2e1; - color: #a0e2e1; -} -.btn-warning-outline:hover, -.btn-warning-outline:focus, -.btn-warning-outline.focus, -.btn-warning-outline.active { - color: #5ececc !important; - 
background-color: transparent !important; - border-color: #5ececc !important; - box-shadow: none !important; -} -.btn-warning-outline.disabled, -.btn-warning-outline:disabled { - color: #1f6463 !important; - background-color: #a0e2e1 !important; - border-color: #a0e2e1 !important; -} -.btn-danger-outline, -.btn-danger-outline:active { - background-color: transparent !important; - border-color: #ffffff; - color: #ffffff; -} -.btn-danger-outline:hover, -.btn-danger-outline:focus, -.btn-danger-outline.focus, -.btn-danger-outline.active { - color: #d4d4d4 !important; - background-color: transparent !important; - border-color: #d4d4d4 !important; - box-shadow: none !important; -} -.btn-danger-outline.disabled, -.btn-danger-outline:disabled { - color: #808080 !important; - background-color: #ffffff !important; - border-color: #ffffff !important; -} -.btn-black-outline, -.btn-black-outline:active { - background-color: transparent !important; - border-color: #232323; - color: #232323; -} -.btn-black-outline:hover, -.btn-black-outline:focus, -.btn-black-outline.focus, -.btn-black-outline.active { - color: #000000 !important; - background-color: transparent !important; - border-color: #000000 !important; - box-shadow: none !important; -} -.btn-black-outline.disabled, -.btn-black-outline:disabled { - color: #ffffff !important; - background-color: #232323 !important; - border-color: #232323 !important; -} -.btn-white-outline, -.btn-white-outline:active { - background-color: transparent !important; - border-color: #fafafa; - color: #fafafa; -} -.btn-white-outline:hover, -.btn-white-outline:focus, -.btn-white-outline.focus, -.btn-white-outline.active { - color: #cfcfcf !important; - background-color: transparent !important; - border-color: #cfcfcf !important; - box-shadow: none !important; -} -.btn-white-outline.disabled, -.btn-white-outline:disabled { - color: #7a7a7a !important; - background-color: #fafafa !important; - border-color: #fafafa !important; -} -.text-primary { - color: #164fd3 !important; -} -.text-secondary { - color: #ffd7ef !important; -} -.text-success { - color: #3a341c !important; -} -.text-info { - color: #320707 !important; -} -.text-warning { - color: #a0e2e1 !important; -} -.text-danger { - color: #ffffff !important; -} -.text-white { - color: #fafafa !important; -} -.text-black { - color: #232323 !important; -} -a.text-primary:hover, -a.text-primary:focus, -a.text-primary.active { - color: #0c2c77 !important; -} -a.text-secondary:hover, -a.text-secondary:focus, -a.text-secondary.active { - color: #ff71c6 !important; -} -a.text-success:hover, -a.text-success:focus, -a.text-success.active { - color: #000000 !important; -} -a.text-info:hover, -a.text-info:focus, -a.text-info.active { - color: #000000 !important; -} -a.text-warning:hover, -a.text-warning:focus, -a.text-warning.active { - color: #52cac8 !important; -} -a.text-danger:hover, -a.text-danger:focus, -a.text-danger.active { - color: #cccccc !important; -} -a.text-white:hover, -a.text-white:focus, -a.text-white.active { - color: #c7c7c7 !important; -} -a.text-black:hover, -a.text-black:focus, -a.text-black.active { - color: #000000 !important; -} -a[class*="text-"]:not(.nav-link):not(.dropdown-item):not([role]):not(.navbar-caption) { - transition: 0.2s; - position: relative; - background-image: linear-gradient(currentColor 50%, currentColor 50%); - background-size: 10000px 2px; - background-repeat: no-repeat; - background-position: 0 1.2em; -} -.nav-tabs .nav-link.active { - color: #164fd3; -} -.nav-tabs 
.nav-link:not(.active) { - color: #232323; -} -.alert-success { - background-color: #70c770; -} -.alert-info { - background-color: #320707; -} -.alert-warning { - background-color: #a0e2e1; -} -.alert-danger { - background-color: #ffffff; -} -.mbr-section-btn .btn:not(.btn-form) { - border-radius: 100px; -} -.mbr-gallery-filter li a { - border-radius: 100px !important; -} -.mbr-gallery-filter li.active .btn { - background-color: #164fd3; - border-color: #164fd3; - color: #ffffff; -} -.mbr-gallery-filter li.active .btn:focus { - box-shadow: none; -} -.nav-tabs .nav-link { - border-radius: 100px !important; -} -a, -a:hover { - color: #164fd3; -} -.mbr-plan-header.bg-primary .mbr-plan-subtitle, -.mbr-plan-header.bg-primary .mbr-plan-price-desc { - color: #bdcff8; -} -.mbr-plan-header.bg-success .mbr-plan-subtitle, -.mbr-plan-header.bg-success .mbr-plan-price-desc { - color: #c0b27c; -} -.mbr-plan-header.bg-info .mbr-plan-subtitle, -.mbr-plan-header.bg-info .mbr-plan-price-desc { - color: #f3abab; -} -.mbr-plan-header.bg-warning .mbr-plan-subtitle, -.mbr-plan-header.bg-warning .mbr-plan-price-desc { - color: #ffffff; -} -.mbr-plan-header.bg-danger .mbr-plan-subtitle, -.mbr-plan-header.bg-danger .mbr-plan-price-desc { - color: #ffffff; -} -/* Scroll to top button*/ -.scrollToTop_wraper { - display: none; -} -.form-control { - font-family: 'Inter Tight', sans-serif; - font-size: 1.4rem; - line-height: 1.3; - font-weight: 400; - border-radius: 40px !important; -} -.form-control > .mbr-iconfont { - font-size: 1.75rem; -} -.form-control:hover, -.form-control:focus { - box-shadow: rgba(0, 0, 0, 0.07) 0px 1px 1px 0px, rgba(0, 0, 0, 0.07) 0px 1px 3px 0px, rgba(0, 0, 0, 0.03) 0px 0px 0px 1px; - border-color: #164fd3 !important; -} -.form-control:-webkit-input-placeholder { - font-family: 'Inter Tight', sans-serif; - font-size: 1.4rem; - line-height: 1.3; - font-weight: 400; -} -.form-control:-webkit-input-placeholder > .mbr-iconfont { - font-size: 1.75rem; -} -blockquote { - border-color: #164fd3; -} -/* Forms */ -.mbr-form .input-group-btn .btn { - border-radius: 100px !important; -} -.mbr-form .input-group-btn .btn:hover { - box-shadow: 0 10px 40px 0 rgba(0, 0, 0, 0.2); -} -.mbr-form .input-group-btn button[type="submit"] { - border-radius: 100px !important; - padding: 1rem 3rem; -} -.mbr-form .input-group-btn button[type="submit"]:hover { - box-shadow: 0 10px 40px 0 rgba(0, 0, 0, 0.2); -} -.jq-selectbox li:hover, -.jq-selectbox li.selected { - background-color: #164fd3; - color: #ffffff; -} -.jq-number__spin { - transition: 0.25s ease; -} -.jq-number__spin:hover { - border-color: #164fd3; -} -.jq-selectbox .jq-selectbox__trigger-arrow, -.jq-number__spin.minus:after, -.jq-number__spin.plus:after { - transition: 0.4s; - border-top-color: #232323; - border-bottom-color: #232323; -} -.jq-selectbox:hover .jq-selectbox__trigger-arrow, -.jq-number__spin.minus:hover:after, -.jq-number__spin.plus:hover:after { - border-top-color: #164fd3; - border-bottom-color: #164fd3; -} -.xdsoft_datetimepicker .xdsoft_calendar td.xdsoft_default, -.xdsoft_datetimepicker .xdsoft_calendar td.xdsoft_current, -.xdsoft_datetimepicker .xdsoft_timepicker .xdsoft_time_box > div > div.xdsoft_current { - color: #ffffff !important; - background-color: #164fd3 !important; - box-shadow: none !important; -} -.xdsoft_datetimepicker .xdsoft_calendar td:hover, -.xdsoft_datetimepicker .xdsoft_timepicker .xdsoft_time_box > div > div:hover { - color: #000000 !important; - background: #ffd7ef !important; - box-shadow: none !important; -} 
-.lazy-bg { - background-image: none !important; -} -.lazy-placeholder:not(section), -.lazy-none { - display: block; - position: relative; - padding-bottom: 56.25%; - width: 100%; - height: auto; -} -iframe.lazy-placeholder, -.lazy-placeholder:after { - content: ''; - position: absolute; - width: 200px; - height: 200px; - background: transparent no-repeat center; - background-size: contain; - top: 50%; - left: 50%; - transform: translateX(-50%) translateY(-50%); - background-image: url("data:image/svg+xml;charset=UTF-8,%3csvg width='32' height='32' viewBox='0 0 64 64' xmlns='http://www.w3.org/2000/svg' stroke='%23164fd3' %3e%3cg fill='none' fill-rule='evenodd'%3e%3cg transform='translate(16 16)' stroke-width='2'%3e%3ccircle stroke-opacity='.5' cx='16' cy='16' r='16'/%3e%3cpath d='M32 16c0-9.94-8.06-16-16-16'%3e%3canimateTransform attributeName='transform' type='rotate' from='0 16 16' to='360 16 16' dur='1s' repeatCount='indefinite'/%3e%3c/path%3e%3c/g%3e%3c/g%3e%3c/svg%3e"); -} -section.lazy-placeholder:after { - opacity: 0.5; -} -body { - overflow-x: hidden; -} -a { - transition: color 0.6s; -} -.cid-tPyNpcR3AV { - z-index: 1000; - width: 100%; - position: relative; -} -.cid-tPyNpcR3AV .dropdown-item:before { - font-family: Moririse2 !important; - content: "\e966"; - display: inline-block; - width: 0; - position: absolute; - left: 1rem; - top: 0.5rem; - margin-right: 0.5rem; - line-height: 1; - font-size: inherit; - vertical-align: middle; - text-align: center; - overflow: hidden; - transform: scale(0, 1); - transition: all 0.25s ease-in-out; -} -@media (max-width: 767px) { - .cid-tPyNpcR3AV .navbar-toggler { - transform: scale(0.8); - } -} -.cid-tPyNpcR3AV .navbar-brand { - flex-shrink: 0; - align-items: center; - margin-right: 0; - padding: 10px 0; - transition: all 0.3s; - word-break: break-word; - z-index: 1; -} -.cid-tPyNpcR3AV .navbar-brand img { - max-width: 100%; - max-height: 100%; - border-radius: 0px !important; -} -.cid-tPyNpcR3AV .navbar-brand .navbar-caption { - line-height: inherit !important; -} -.cid-tPyNpcR3AV .navbar-brand .navbar-logo a { - outline: none; -} -.cid-tPyNpcR3AV .navbar-nav { - margin: auto; - margin-left: 0; - margin-left: auto; -} -.cid-tPyNpcR3AV .navbar-nav .nav-item { - padding: 0 !important; - transition: .3s all !important; -} -.cid-tPyNpcR3AV .navbar-nav .nav-item .nav-link { - padding: 16px !important; - margin: 0 !important; - border-radius: 1rem !important; - transition: .3s all !important; -} -.cid-tPyNpcR3AV .navbar-nav .nav-item .nav-link:hover { - background-color: rgba(27, 31, 10, 0.06); -} -.cid-tPyNpcR3AV .navbar-nav .open .nav-link::after { - transform: rotate(180deg); -} -@media (min-width: 992px) { - .cid-tPyNpcR3AV .navbar-nav .open .nav-link::before { - content: ""; - width: 100%; - height: 20px; - top: 100%; - background: transparent; - position: absolute; - } -} -.cid-tPyNpcR3AV .navbar-nav .dropdown-item { - padding: 12px !important; - border-radius: 0.5rem !important; - margin: 0 8px !important; - transition: .3s all !important; -} -.cid-tPyNpcR3AV .navbar-nav .dropdown-item:hover { - background-color: rgba(27, 31, 10, 0.06); -} -@media (min-width: 992px) { - .cid-tPyNpcR3AV .navbar-nav { - padding-left: 1.5rem; - } -} -.cid-tPyNpcR3AV .nav-link { - width: fit-content; - position: relative; -} -.cid-tPyNpcR3AV .navbar-logo { - padding-left: 2rem; - margin: 0 !important; -} -@media (max-width: 767px) { - .cid-tPyNpcR3AV .navbar-logo { - padding-left: 1rem; - } -} -.cid-tPyNpcR3AV .navbar-caption { - padding-left: 1rem; - 
padding-right: .5rem; -} -@media (max-width: 767px) { - .cid-tPyNpcR3AV .nav-dropdown { - padding-bottom: 0.5rem; - } -} -.cid-tPyNpcR3AV .nav-dropdown .link.dropdown-toggle::after { - margin-left: 0.5rem; - margin-top: 0.2rem; - transition: .3s all; -} -.cid-tPyNpcR3AV .container { - display: flex; - height: 90px; - padding: 0.5rem 0.6rem; - flex-wrap: nowrap; - background: rgba(255, 255, 255, 0.9) !important; - left: 0; - right: 0; - -webkit-box-pack: justify; - -ms-flex-pack: justify; - justify-content: flex-end; - -webkit-box-align: center; - -webkit-align-items: center; - -ms-flex-align: center; - align-items: center; - border-radius: 100vw; - margin-top: 1rem; - background-color: #ffffff; - box-shadow: 0 30px 60px 0 rgba(27, 31, 10, 0.08); -} -@media (max-width: 992px) { - .cid-tPyNpcR3AV .container { - padding-right: 2rem; - } -} -@media (max-width: 767px) { - .cid-tPyNpcR3AV .container { - width: 95%; - height: 56px !important; - padding-right: 1rem; - margin-top: 0rem; - } -} -.cid-tPyNpcR3AV .iconfont-wrapper { - color: #000000 !important; - font-size: 1.5rem; - padding-right: 0.5rem; -} -.cid-tPyNpcR3AV .dropdown-menu { - flex-wrap: wrap; - flex-direction: column; - max-width: 100%; - padding: 12px 4px !important; - border-radius: 1.5rem; - transition: .3s all !important; - min-width: auto; - background: #ffffff; - background: rgba(255, 255, 255, 0.9) !important; -} -.cid-tPyNpcR3AV .nav-item:focus, -.cid-tPyNpcR3AV .nav-link:focus { - outline: none; -} -.cid-tPyNpcR3AV .dropdown .dropdown-menu .dropdown-item { - width: auto; - transition: all 0.25s ease-in-out; -} -.cid-tPyNpcR3AV .dropdown .dropdown-menu .dropdown-item::after { - right: 0.5rem; -} -.cid-tPyNpcR3AV .dropdown .dropdown-menu .dropdown-item .mbr-iconfont { - margin-right: 0.5rem; - vertical-align: sub; -} -.cid-tPyNpcR3AV .dropdown .dropdown-menu .dropdown-item .mbr-iconfont:before { - display: inline-block; - transform: scale(1, 1); - transition: all 0.25s ease-in-out; -} -.cid-tPyNpcR3AV .collapsed .dropdown-menu .dropdown-item:before { - display: none; -} -.cid-tPyNpcR3AV .collapsed .dropdown .dropdown-menu .dropdown-item { - padding: 0.235em 1.5em 0.235em 1.5em !important; - transition: none; - margin: 0 !important; -} -.cid-tPyNpcR3AV .navbar { - min-height: 90px; - transition: all 0.3s; - border-bottom: 1px solid transparent; - background: transparent !important; - padding: 0 !important; - border: none !important; - box-shadow: none !important; - border-radius: 0 !important; -} -.cid-tPyNpcR3AV .navbar.opened { - transition: all 0.3s; -} -.cid-tPyNpcR3AV .navbar .dropdown-item { - padding: 0.5rem 1.8rem; -} -.cid-tPyNpcR3AV .navbar .navbar-logo img { - width: auto; -} -.cid-tPyNpcR3AV .navbar .navbar-collapse { - z-index: 1; - justify-content: flex-end; -} -.cid-tPyNpcR3AV .navbar.collapsed { - justify-content: center; -} -.cid-tPyNpcR3AV .navbar.collapsed .nav-item .nav-link::before { - display: none; -} -.cid-tPyNpcR3AV .navbar.collapsed.opened .dropdown-menu { - top: 0; -} -@media (min-width: 992px) { - .cid-tPyNpcR3AV .navbar.collapsed.opened:not(.navbar-short) .navbar-collapse { - max-height: calc(98.5vh - 3rem); - } -} -.cid-tPyNpcR3AV .navbar.collapsed .dropdown-menu .dropdown-submenu { - left: 0 !important; -} -.cid-tPyNpcR3AV .navbar.collapsed .dropdown-menu .dropdown-item:after { - right: auto; -} -.cid-tPyNpcR3AV .navbar.collapsed .dropdown-menu .dropdown-toggle[data-toggle="dropdown-submenu"]:after { - margin-left: 0.5rem; - margin-top: 0.2rem; - border-top: 0.35em solid; - border-right: 0.35em 
solid transparent; - border-left: 0.35em solid transparent; - border-bottom: 0; - top: 41%; -} -.cid-tPyNpcR3AV .navbar.collapsed ul.navbar-nav li { - margin: auto; -} -.cid-tPyNpcR3AV .navbar.collapsed .dropdown-menu .dropdown-item { - padding: 0.25rem 1.5rem; - text-align: center; -} -.cid-tPyNpcR3AV .navbar.collapsed .icons-menu { - padding-left: 0; - padding-right: 0; - padding-top: 0.5rem; - padding-bottom: 0.5rem; -} -@media (max-width: 767px) { - .cid-tPyNpcR3AV .navbar { - min-height: 72px; - } - .cid-tPyNpcR3AV .navbar .navbar-logo img { - height: 2rem !important; - } -} -@media (max-width: 991px) { - .cid-tPyNpcR3AV .navbar .nav-item .nav-link::before { - display: none; - } - .cid-tPyNpcR3AV .navbar.opened .dropdown-menu { - top: 0; - } - .cid-tPyNpcR3AV .navbar .dropdown-menu .dropdown-submenu { - left: 0 !important; - } - .cid-tPyNpcR3AV .navbar .dropdown-menu .dropdown-item:after { - right: auto; - } - .cid-tPyNpcR3AV .navbar .dropdown-menu .dropdown-toggle[data-toggle="dropdown-submenu"]:after { - margin-left: 0.5rem; - margin-top: 0.2rem; - border-top: 0.35em solid; - border-right: 0.35em solid transparent; - border-left: 0.35em solid transparent; - border-bottom: 0; - top: 40%; - } - .cid-tPyNpcR3AV .navbar .dropdown-menu .dropdown-item { - padding: 0.25rem 1.5rem !important; - text-align: center; - } - .cid-tPyNpcR3AV .navbar .navbar-brand { - flex-shrink: initial; - flex-basis: auto; - word-break: break-word; - padding-right: 10px; - } - .cid-tPyNpcR3AV .navbar .navbar-toggler { - flex-basis: auto; - } - .cid-tPyNpcR3AV .navbar .icons-menu { - padding-left: 0; - padding-top: 0.5rem; - padding-bottom: 0.5rem; - } -} -.cid-tPyNpcR3AV .navbar.navbar-short .navbar-logo img { - height: 2rem; -} -.cid-tPyNpcR3AV .dropdown-item.active, -.cid-tPyNpcR3AV .dropdown-item:active { - background-color: transparent; -} -.cid-tPyNpcR3AV .navbar-expand-lg .navbar-nav .nav-link { - padding: 0; -} -.cid-tPyNpcR3AV .nav-dropdown .link.dropdown-toggle { - margin-right: 1.667em; -} -.cid-tPyNpcR3AV .nav-dropdown .link.dropdown-toggle[aria-expanded="true"] { - margin-right: 0; - padding: 0.667em 1.667em; -} -.cid-tPyNpcR3AV .navbar.navbar-expand-lg .dropdown .dropdown-menu { - background: #ffffff; -} -.cid-tPyNpcR3AV .navbar.navbar-expand-lg .dropdown .dropdown-menu .dropdown-submenu { - margin: 0; - left: 105%; - transform: none; - top: -12px; -} -.cid-tPyNpcR3AV .navbar .dropdown.open > .dropdown-menu { - display: flex; -} -.cid-tPyNpcR3AV ul.navbar-nav { - flex-wrap: wrap; -} -.cid-tPyNpcR3AV .navbar-buttons { - text-align: center; - min-width: 140px; -} -@media (max-width: 992px) { - .cid-tPyNpcR3AV .navbar-buttons { - text-align: left; - } -} -.cid-tPyNpcR3AV button.navbar-toggler { - outline: none; - width: 31px; - height: 20px; - cursor: pointer; - transition: all 0.2s; - position: relative; - align-self: center; -} -.cid-tPyNpcR3AV button.navbar-toggler .hamburger span { - position: absolute; - right: 0; - width: 30px; - height: 2px; - border-right: 5px; - background-color: #000000; -} -.cid-tPyNpcR3AV button.navbar-toggler .hamburger span:nth-child(1) { - top: 0; - transition: all 0.2s; -} -.cid-tPyNpcR3AV button.navbar-toggler .hamburger span:nth-child(2) { - top: 8px; - transition: all 0.15s; -} -.cid-tPyNpcR3AV button.navbar-toggler .hamburger span:nth-child(3) { - top: 8px; - transition: all 0.15s; -} -.cid-tPyNpcR3AV button.navbar-toggler .hamburger span:nth-child(4) { - top: 16px; - transition: all 0.2s; -} -.cid-tPyNpcR3AV nav.opened .hamburger span:nth-child(1) { - top: 8px; - 
width: 0; - opacity: 0; - right: 50%; - transition: all 0.2s; -} -.cid-tPyNpcR3AV nav.opened .hamburger span:nth-child(2) { - transform: rotate(45deg); - transition: all 0.25s; -} -.cid-tPyNpcR3AV nav.opened .hamburger span:nth-child(3) { - transform: rotate(-45deg); - transition: all 0.25s; -} -.cid-tPyNpcR3AV nav.opened .hamburger span:nth-child(4) { - top: 8px; - width: 0; - opacity: 0; - right: 50%; - transition: all 0.2s; -} -.cid-tPyNpcR3AV .navbar-dropdown { - padding: 0 1rem; -} -.cid-tPyNpcR3AV a.nav-link { - display: flex; - align-items: center; - justify-content: center; -} -.cid-tPyNpcR3AV .icons-menu { - flex-wrap: nowrap; - display: flex; - justify-content: center; - padding-left: 1rem; - padding-right: 1rem; - padding-top: 0.3rem; - text-align: center; -} -@media (max-width: 992px) { - .cid-tPyNpcR3AV .icons-menu { - justify-content: flex-start; - margin-bottom: .5rem; - } -} -@media screen and (-ms-high-contrast: active), (-ms-high-contrast: none) { - .cid-tPyNpcR3AV .navbar { - height: 70px; - } - .cid-tPyNpcR3AV .navbar.opened { - height: auto; - } - .cid-tPyNpcR3AV .nav-item .nav-link:hover::before { - width: 175%; - max-width: calc(100% + 2rem); - left: -1rem; - } -} -.cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu { - display: none; - width: max-content; - max-width: 500px !important; - transform: translateX(-50%); - top: calc(100% + 20px); - left: 50%; -} -.cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown-item { - line-height: 1 !important; -} -.cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown .dropdown-item { - align-items: center; - display: flex; - height: max-content !important; - min-height: max-content !important; -} -.cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown .dropdown-item::after { - display: inline-block; - position: static; - margin-left: 0.5rem; - margin-top: 0; - margin-right: 0; - margin-bottom: 0; - transition: .3s all; - transform: rotate(-90deg); -} -.cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown.open .dropdown-item::after { - transform: rotate(0deg); -} -.cid-tPyNpcR3AV .mbr-section-btn { - margin: -0.6rem -0.6rem; -} -.cid-tPyNpcR3AV .navbar-toggler { - margin-left: 12px; - margin-right: 8px; - order: 1000; -} -@media (max-width: 991px) { - .cid-tPyNpcR3AV .navbar-brand { - margin-right: auto; - } - .cid-tPyNpcR3AV .navbar-collapse { - z-index: -1 !important; - position: absolute; - top: 110%; - left: 0; - width: 100%; - padding: 1rem; - border-radius: 1.5rem; - background: #ffffff; - opacity: 1; - border-color: rgba(255, 255, 255, 0.9) !important; - background: rgba(255, 255, 255, 0.9) !important; - backdrop-filter: blur(8px); - } - .cid-tPyNpcR3AV .navbar-nav .nav-item .nav-link::after { - margin-left: 10px; - } - .cid-tPyNpcR3AV .navbar-nav .dropdown-item:hover { - background-color: rgba(27, 31, 10, 0.06); - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu { - max-width: 100% !important; - transform: translateX(0); - top: 10px; - left: 0; - padding: 8px !important; - border-radius: 1rem; - background-color: rgba(27, 31, 10, 0.04) !important; - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown-item { - padding: 8px !important; - line-height: 1 !important; - margin-bottom: 4px !important; - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown .dropdown-item { - align-items: center; - display: flex; - height: max-content !important; - min-height: max-content !important; - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown .dropdown-item::after { - 
display: inline-block; - position: static; - margin-left: 0.5rem; - margin-top: 0; - margin-right: 0; - margin-bottom: 0; - transition: .3s all; - transform: rotate(0deg); - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown.open .dropdown-item::after { - transform: rotate(180deg); - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown-submenu { - position: static; - width: 100%; - max-width: 100% !important; - transform: translateX(0) !important; - top: 0; - left: 0; - padding: 8px !important; - border-radius: 1rem; - background-color: rgba(27, 31, 10, 0.04) !important; - } - .cid-tPyNpcR3AV .navbar .dropdown.open > .dropdown-menu { - display: flex !important; - flex-direction: column; - align-items: flex-start; - } -} -@media (max-width: 575px) { - .cid-tPyNpcR3AV .navbar-collapse { - padding: 1rem; - } -} -.cid-tPsffchczI { - padding-top: 6rem; - padding-bottom: 6rem; - background-color: #ffffff; -} -.cid-tPsffchczI img, -.cid-tPsffchczI .item-img { - width: 100%; - height: 100%; - height: 300px; - object-fit: cover; -} -.cid-tPsffchczI .item:focus, -.cid-tPsffchczI span:focus { - outline: none; -} -.cid-tPsffchczI .item { - margin-bottom: 2rem; -} -@media (max-width: 767px) { - .cid-tPsffchczI .item { - margin-bottom: 1rem; - } -} -.cid-tPsffchczI .item-content { - margin-top: 2rem; - padding: 0 2.25rem 2.25rem; - display: flex; - flex-direction: column; - height: 100%; -} -@media (max-width: 767px) { - .cid-tPsffchczI .item-content { - padding: 0 2rem 1.5rem; - margin-top: 1rem; - } -} -.cid-tPsffchczI .item-wrapper { - position: relative; - border-radius: 2rem; - background: #f7f7f7; - height: 100%; - display: flex; - flex-flow: column nowrap; -} -.cid-tPsffchczI .item-wrapper .item-footer { - margin-top: auto; -} -.cid-tPsffchczI .mbr-section-title { - color: #4552ff; - text-align: center; -} -.cid-tPsffchczI .item-title { - text-align: left; - color: #4552ff; -} -.cid-tPsffchczI .item-subtitle { - text-align: left; -} -.cid-tPsffchczI .mbr-text, -.cid-tPsffchczI .item .mbr-section-btn { - text-align: left; -} -.cid-tPsffchczI .mbr-section-subtitle, -.cid-tPsffchczI .mbr-section-head .mbr-section-btn { - text-align: center; -} -.cid-tPyNpcR3AV { - z-index: 1000; - width: 100%; - position: relative; -} -.cid-tPyNpcR3AV .dropdown-item:before { - font-family: Moririse2 !important; - content: "\e966"; - display: inline-block; - width: 0; - position: absolute; - left: 1rem; - top: 0.5rem; - margin-right: 0.5rem; - line-height: 1; - font-size: inherit; - vertical-align: middle; - text-align: center; - overflow: hidden; - transform: scale(0, 1); - transition: all 0.25s ease-in-out; -} -@media (max-width: 767px) { - .cid-tPyNpcR3AV .navbar-toggler { - transform: scale(0.8); - } -} -.cid-tPyNpcR3AV .navbar-brand { - flex-shrink: 0; - align-items: center; - margin-right: 0; - padding: 10px 0; - transition: all 0.3s; - word-break: break-word; - z-index: 1; -} -.cid-tPyNpcR3AV .navbar-brand img { - max-width: 100%; - max-height: 100%; - border-radius: 0px !important; -} -.cid-tPyNpcR3AV .navbar-brand .navbar-caption { - line-height: inherit !important; -} -.cid-tPyNpcR3AV .navbar-brand .navbar-logo a { - outline: none; -} -.cid-tPyNpcR3AV .navbar-nav { - margin: auto; - margin-left: 0; - margin-left: auto; -} -.cid-tPyNpcR3AV .navbar-nav .nav-item { - padding: 0 !important; - transition: .3s all !important; -} -.cid-tPyNpcR3AV .navbar-nav .nav-item .nav-link { - padding: 16px !important; - margin: 0 !important; - border-radius: 1rem !important; - transition: .3s all 
!important; -} -.cid-tPyNpcR3AV .navbar-nav .nav-item .nav-link:hover { - background-color: rgba(27, 31, 10, 0.06); -} -.cid-tPyNpcR3AV .navbar-nav .open .nav-link::after { - transform: rotate(180deg); -} -@media (min-width: 992px) { - .cid-tPyNpcR3AV .navbar-nav .open .nav-link::before { - content: ""; - width: 100%; - height: 20px; - top: 100%; - background: transparent; - position: absolute; - } -} -.cid-tPyNpcR3AV .navbar-nav .dropdown-item { - padding: 12px !important; - border-radius: 0.5rem !important; - margin: 0 8px !important; - transition: .3s all !important; -} -.cid-tPyNpcR3AV .navbar-nav .dropdown-item:hover { - background-color: rgba(27, 31, 10, 0.06); -} -@media (min-width: 992px) { - .cid-tPyNpcR3AV .navbar-nav { - padding-left: 1.5rem; - } -} -.cid-tPyNpcR3AV .nav-link { - width: fit-content; - position: relative; -} -.cid-tPyNpcR3AV .navbar-logo { - padding-left: 2rem; - margin: 0 !important; -} -@media (max-width: 767px) { - .cid-tPyNpcR3AV .navbar-logo { - padding-left: 1rem; - } -} -.cid-tPyNpcR3AV .navbar-caption { - padding-left: 1rem; - padding-right: .5rem; -} -@media (max-width: 767px) { - .cid-tPyNpcR3AV .nav-dropdown { - padding-bottom: 0.5rem; - } -} -.cid-tPyNpcR3AV .nav-dropdown .link.dropdown-toggle::after { - margin-left: 0.5rem; - margin-top: 0.2rem; - transition: .3s all; -} -.cid-tPyNpcR3AV .container { - display: flex; - height: 90px; - padding: 0.5rem 0.6rem; - flex-wrap: nowrap; - background: rgba(255, 255, 255, 0.9) !important; - left: 0; - right: 0; - -webkit-box-pack: justify; - -ms-flex-pack: justify; - justify-content: flex-end; - -webkit-box-align: center; - -webkit-align-items: center; - -ms-flex-align: center; - align-items: center; - border-radius: 100vw; - margin-top: 1rem; - background-color: #ffffff; - box-shadow: 0 30px 60px 0 rgba(27, 31, 10, 0.08); -} -@media (max-width: 992px) { - .cid-tPyNpcR3AV .container { - padding-right: 2rem; - } -} -@media (max-width: 767px) { - .cid-tPyNpcR3AV .container { - width: 95%; - height: 56px !important; - padding-right: 1rem; - margin-top: 0rem; - } -} -.cid-tPyNpcR3AV .iconfont-wrapper { - color: #000000 !important; - font-size: 1.5rem; - padding-right: 0.5rem; -} -.cid-tPyNpcR3AV .dropdown-menu { - flex-wrap: wrap; - flex-direction: column; - max-width: 100%; - padding: 12px 4px !important; - border-radius: 1.5rem; - transition: .3s all !important; - min-width: auto; - background: #ffffff; - background: rgba(255, 255, 255, 0.9) !important; -} -.cid-tPyNpcR3AV .nav-item:focus, -.cid-tPyNpcR3AV .nav-link:focus { - outline: none; -} -.cid-tPyNpcR3AV .dropdown .dropdown-menu .dropdown-item { - width: auto; - transition: all 0.25s ease-in-out; -} -.cid-tPyNpcR3AV .dropdown .dropdown-menu .dropdown-item::after { - right: 0.5rem; -} -.cid-tPyNpcR3AV .dropdown .dropdown-menu .dropdown-item .mbr-iconfont { - margin-right: 0.5rem; - vertical-align: sub; -} -.cid-tPyNpcR3AV .dropdown .dropdown-menu .dropdown-item .mbr-iconfont:before { - display: inline-block; - transform: scale(1, 1); - transition: all 0.25s ease-in-out; -} -.cid-tPyNpcR3AV .collapsed .dropdown-menu .dropdown-item:before { - display: none; -} -.cid-tPyNpcR3AV .collapsed .dropdown .dropdown-menu .dropdown-item { - padding: 0.235em 1.5em 0.235em 1.5em !important; - transition: none; - margin: 0 !important; -} -.cid-tPyNpcR3AV .navbar { - min-height: 90px; - transition: all 0.3s; - border-bottom: 1px solid transparent; - background: transparent !important; - padding: 0 !important; - border: none !important; - box-shadow: none !important; - 
border-radius: 0 !important; -} -.cid-tPyNpcR3AV .navbar.opened { - transition: all 0.3s; -} -.cid-tPyNpcR3AV .navbar .dropdown-item { - padding: 0.5rem 1.8rem; -} -.cid-tPyNpcR3AV .navbar .navbar-logo img { - width: auto; -} -.cid-tPyNpcR3AV .navbar .navbar-collapse { - z-index: 1; - justify-content: flex-end; -} -.cid-tPyNpcR3AV .navbar.collapsed { - justify-content: center; -} -.cid-tPyNpcR3AV .navbar.collapsed .nav-item .nav-link::before { - display: none; -} -.cid-tPyNpcR3AV .navbar.collapsed.opened .dropdown-menu { - top: 0; -} -@media (min-width: 992px) { - .cid-tPyNpcR3AV .navbar.collapsed.opened:not(.navbar-short) .navbar-collapse { - max-height: calc(98.5vh - 3rem); - } -} -.cid-tPyNpcR3AV .navbar.collapsed .dropdown-menu .dropdown-submenu { - left: 0 !important; -} -.cid-tPyNpcR3AV .navbar.collapsed .dropdown-menu .dropdown-item:after { - right: auto; -} -.cid-tPyNpcR3AV .navbar.collapsed .dropdown-menu .dropdown-toggle[data-toggle="dropdown-submenu"]:after { - margin-left: 0.5rem; - margin-top: 0.2rem; - border-top: 0.35em solid; - border-right: 0.35em solid transparent; - border-left: 0.35em solid transparent; - border-bottom: 0; - top: 41%; -} -.cid-tPyNpcR3AV .navbar.collapsed ul.navbar-nav li { - margin: auto; -} -.cid-tPyNpcR3AV .navbar.collapsed .dropdown-menu .dropdown-item { - padding: 0.25rem 1.5rem; - text-align: center; -} -.cid-tPyNpcR3AV .navbar.collapsed .icons-menu { - padding-left: 0; - padding-right: 0; - padding-top: 0.5rem; - padding-bottom: 0.5rem; -} -@media (max-width: 767px) { - .cid-tPyNpcR3AV .navbar { - min-height: 72px; - } - .cid-tPyNpcR3AV .navbar .navbar-logo img { - height: 2rem !important; - } -} -@media (max-width: 991px) { - .cid-tPyNpcR3AV .navbar .nav-item .nav-link::before { - display: none; - } - .cid-tPyNpcR3AV .navbar.opened .dropdown-menu { - top: 0; - } - .cid-tPyNpcR3AV .navbar .dropdown-menu .dropdown-submenu { - left: 0 !important; - } - .cid-tPyNpcR3AV .navbar .dropdown-menu .dropdown-item:after { - right: auto; - } - .cid-tPyNpcR3AV .navbar .dropdown-menu .dropdown-toggle[data-toggle="dropdown-submenu"]:after { - margin-left: 0.5rem; - margin-top: 0.2rem; - border-top: 0.35em solid; - border-right: 0.35em solid transparent; - border-left: 0.35em solid transparent; - border-bottom: 0; - top: 40%; - } - .cid-tPyNpcR3AV .navbar .dropdown-menu .dropdown-item { - padding: 0.25rem 1.5rem !important; - text-align: center; - } - .cid-tPyNpcR3AV .navbar .navbar-brand { - flex-shrink: initial; - flex-basis: auto; - word-break: break-word; - padding-right: 10px; - } - .cid-tPyNpcR3AV .navbar .navbar-toggler { - flex-basis: auto; - } - .cid-tPyNpcR3AV .navbar .icons-menu { - padding-left: 0; - padding-top: 0.5rem; - padding-bottom: 0.5rem; - } -} -.cid-tPyNpcR3AV .navbar.navbar-short .navbar-logo img { - height: 2rem; -} -.cid-tPyNpcR3AV .dropdown-item.active, -.cid-tPyNpcR3AV .dropdown-item:active { - background-color: transparent; -} -.cid-tPyNpcR3AV .navbar-expand-lg .navbar-nav .nav-link { - padding: 0; -} -.cid-tPyNpcR3AV .nav-dropdown .link.dropdown-toggle { - margin-right: 1.667em; -} -.cid-tPyNpcR3AV .nav-dropdown .link.dropdown-toggle[aria-expanded="true"] { - margin-right: 0; - padding: 0.667em 1.667em; -} -.cid-tPyNpcR3AV .navbar.navbar-expand-lg .dropdown .dropdown-menu { - background: #ffffff; -} -.cid-tPyNpcR3AV .navbar.navbar-expand-lg .dropdown .dropdown-menu .dropdown-submenu { - margin: 0; - left: 105%; - transform: none; - top: -12px; -} -.cid-tPyNpcR3AV .navbar .dropdown.open > .dropdown-menu { - display: flex; -} 
-.cid-tPyNpcR3AV ul.navbar-nav { - flex-wrap: wrap; -} -.cid-tPyNpcR3AV .navbar-buttons { - text-align: center; - min-width: 140px; -} -@media (max-width: 992px) { - .cid-tPyNpcR3AV .navbar-buttons { - text-align: left; - } -} -.cid-tPyNpcR3AV button.navbar-toggler { - outline: none; - width: 31px; - height: 20px; - cursor: pointer; - transition: all 0.2s; - position: relative; - align-self: center; -} -.cid-tPyNpcR3AV button.navbar-toggler .hamburger span { - position: absolute; - right: 0; - width: 30px; - height: 2px; - border-right: 5px; - background-color: #000000; -} -.cid-tPyNpcR3AV button.navbar-toggler .hamburger span:nth-child(1) { - top: 0; - transition: all 0.2s; -} -.cid-tPyNpcR3AV button.navbar-toggler .hamburger span:nth-child(2) { - top: 8px; - transition: all 0.15s; -} -.cid-tPyNpcR3AV button.navbar-toggler .hamburger span:nth-child(3) { - top: 8px; - transition: all 0.15s; -} -.cid-tPyNpcR3AV button.navbar-toggler .hamburger span:nth-child(4) { - top: 16px; - transition: all 0.2s; -} -.cid-tPyNpcR3AV nav.opened .hamburger span:nth-child(1) { - top: 8px; - width: 0; - opacity: 0; - right: 50%; - transition: all 0.2s; -} -.cid-tPyNpcR3AV nav.opened .hamburger span:nth-child(2) { - transform: rotate(45deg); - transition: all 0.25s; -} -.cid-tPyNpcR3AV nav.opened .hamburger span:nth-child(3) { - transform: rotate(-45deg); - transition: all 0.25s; -} -.cid-tPyNpcR3AV nav.opened .hamburger span:nth-child(4) { - top: 8px; - width: 0; - opacity: 0; - right: 50%; - transition: all 0.2s; -} -.cid-tPyNpcR3AV .navbar-dropdown { - padding: 0 1rem; -} -.cid-tPyNpcR3AV a.nav-link { - display: flex; - align-items: center; - justify-content: center; -} -.cid-tPyNpcR3AV .icons-menu { - flex-wrap: nowrap; - display: flex; - justify-content: center; - padding-left: 1rem; - padding-right: 1rem; - padding-top: 0.3rem; - text-align: center; -} -@media (max-width: 992px) { - .cid-tPyNpcR3AV .icons-menu { - justify-content: flex-start; - margin-bottom: .5rem; - } -} -@media screen and (-ms-high-contrast: active), (-ms-high-contrast: none) { - .cid-tPyNpcR3AV .navbar { - height: 70px; - } - .cid-tPyNpcR3AV .navbar.opened { - height: auto; - } - .cid-tPyNpcR3AV .nav-item .nav-link:hover::before { - width: 175%; - max-width: calc(100% + 2rem); - left: -1rem; - } -} -.cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu { - display: none; - width: max-content; - max-width: 500px !important; - transform: translateX(-50%); - top: calc(100% + 20px); - left: 50%; -} -.cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown-item { - line-height: 1 !important; -} -.cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown .dropdown-item { - align-items: center; - display: flex; - height: max-content !important; - min-height: max-content !important; -} -.cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown .dropdown-item::after { - display: inline-block; - position: static; - margin-left: 0.5rem; - margin-top: 0; - margin-right: 0; - margin-bottom: 0; - transition: .3s all; - transform: rotate(-90deg); -} -.cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown.open .dropdown-item::after { - transform: rotate(0deg); -} -.cid-tPyNpcR3AV .mbr-section-btn { - margin: -0.6rem -0.6rem; -} -.cid-tPyNpcR3AV .navbar-toggler { - margin-left: 12px; - margin-right: 8px; - order: 1000; -} -@media (max-width: 991px) { - .cid-tPyNpcR3AV .navbar-brand { - margin-right: auto; - } - .cid-tPyNpcR3AV .navbar-collapse { - z-index: -1 !important; - position: absolute; - top: 110%; - left: 0; - width: 100%; - 
padding: 1rem; - border-radius: 1.5rem; - background: #ffffff; - opacity: 1; - border-color: rgba(255, 255, 255, 0.9) !important; - background: rgba(255, 255, 255, 0.9) !important; - backdrop-filter: blur(8px); - } - .cid-tPyNpcR3AV .navbar-nav .nav-item .nav-link::after { - margin-left: 10px; - } - .cid-tPyNpcR3AV .navbar-nav .dropdown-item:hover { - background-color: rgba(27, 31, 10, 0.06); - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu { - max-width: 100% !important; - transform: translateX(0); - top: 10px; - left: 0; - padding: 8px !important; - border-radius: 1rem; - background-color: rgba(27, 31, 10, 0.04) !important; - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown-item { - padding: 8px !important; - line-height: 1 !important; - margin-bottom: 4px !important; - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown .dropdown-item { - align-items: center; - display: flex; - height: max-content !important; - min-height: max-content !important; - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown .dropdown-item::after { - display: inline-block; - position: static; - margin-left: 0.5rem; - margin-top: 0; - margin-right: 0; - margin-bottom: 0; - transition: .3s all; - transform: rotate(0deg); - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown.open .dropdown-item::after { - transform: rotate(180deg); - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown-submenu { - position: static; - width: 100%; - max-width: 100% !important; - transform: translateX(0) !important; - top: 0; - left: 0; - padding: 8px !important; - border-radius: 1rem; - background-color: rgba(27, 31, 10, 0.04) !important; - } - .cid-tPyNpcR3AV .navbar .dropdown.open > .dropdown-menu { - display: flex !important; - flex-direction: column; - align-items: flex-start; - } -} -@media (max-width: 575px) { - .cid-tPyNpcR3AV .navbar-collapse { - padding: 1rem; - } -} -.cid-tPyxWzL8dX { - padding-top: 6rem; - padding-bottom: 6rem; - background-image: url("../../../assets/images/background3.jpg"); -} -.cid-tPyxWzL8dX .mbr-fallback-image.disabled { - display: none; -} -.cid-tPyxWzL8dX .mbr-fallback-image { - display: block; - background-size: cover; - background-position: center center; - width: 100%; - height: 100%; - position: absolute; - top: 0; -} -.cid-tPyxWzL8dX .card-wrapper { - background: #ffffff; - border-radius: 4px; -} -@media (max-width: 767px) { - .cid-tPyxWzL8dX .card-wrapper { - padding: 1rem; - } -} -@media (min-width: 768px) and (max-width: 991px) { - .cid-tPyxWzL8dX .card-wrapper { - padding: 2rem; - } -} -@media (min-width: 992px) { - .cid-tPyxWzL8dX .card-wrapper { - padding: 4rem; - } -} -.cid-tT8ds9ADxj { - padding-top: 6rem; - padding-bottom: 6rem; - background-color: #ffffff; -} -.cid-tT8ds9ADxj .mbr-fallback-image.disabled { - display: none; -} -.cid-tT8ds9ADxj .mbr-fallback-image { - display: block; - background-size: cover; - background-position: center center; - width: 100%; - height: 100%; - position: absolute; - top: 0; -} -.cid-tT8ds9ADxj .row { - flex-direction: row-reverse; -} -.cid-tPyNpcR3AV { - z-index: 1000; - width: 100%; - position: relative; -} -.cid-tPyNpcR3AV .dropdown-item:before { - font-family: Moririse2 !important; - content: "\e966"; - display: inline-block; - width: 0; - position: absolute; - left: 1rem; - top: 0.5rem; - margin-right: 0.5rem; - line-height: 1; - font-size: inherit; - vertical-align: middle; - text-align: center; - overflow: hidden; - transform: scale(0, 1); - transition: all 0.25s 
ease-in-out; -} -@media (max-width: 767px) { - .cid-tPyNpcR3AV .navbar-toggler { - transform: scale(0.8); - } -} -.cid-tPyNpcR3AV .navbar-brand { - flex-shrink: 0; - align-items: center; - margin-right: 0; - padding: 10px 0; - transition: all 0.3s; - word-break: break-word; - z-index: 1; -} -.cid-tPyNpcR3AV .navbar-brand img { - max-width: 100%; - max-height: 100%; - border-radius: 0px !important; -} -.cid-tPyNpcR3AV .navbar-brand .navbar-caption { - line-height: inherit !important; -} -.cid-tPyNpcR3AV .navbar-brand .navbar-logo a { - outline: none; -} -.cid-tPyNpcR3AV .navbar-nav { - margin: auto; - margin-left: 0; - margin-left: auto; -} -.cid-tPyNpcR3AV .navbar-nav .nav-item { - padding: 0 !important; - transition: .3s all !important; -} -.cid-tPyNpcR3AV .navbar-nav .nav-item .nav-link { - padding: 16px !important; - margin: 0 !important; - border-radius: 1rem !important; - transition: .3s all !important; -} -.cid-tPyNpcR3AV .navbar-nav .nav-item .nav-link:hover { - background-color: rgba(27, 31, 10, 0.06); -} -.cid-tPyNpcR3AV .navbar-nav .open .nav-link::after { - transform: rotate(180deg); -} -@media (min-width: 992px) { - .cid-tPyNpcR3AV .navbar-nav .open .nav-link::before { - content: ""; - width: 100%; - height: 20px; - top: 100%; - background: transparent; - position: absolute; - } -} -.cid-tPyNpcR3AV .navbar-nav .dropdown-item { - padding: 12px !important; - border-radius: 0.5rem !important; - margin: 0 8px !important; - transition: .3s all !important; -} -.cid-tPyNpcR3AV .navbar-nav .dropdown-item:hover { - background-color: rgba(27, 31, 10, 0.06); -} -@media (min-width: 992px) { - .cid-tPyNpcR3AV .navbar-nav { - padding-left: 1.5rem; - } -} -.cid-tPyNpcR3AV .nav-link { - width: fit-content; - position: relative; -} -.cid-tPyNpcR3AV .navbar-logo { - padding-left: 2rem; - margin: 0 !important; -} -@media (max-width: 767px) { - .cid-tPyNpcR3AV .navbar-logo { - padding-left: 1rem; - } -} -.cid-tPyNpcR3AV .navbar-caption { - padding-left: 1rem; - padding-right: .5rem; -} -@media (max-width: 767px) { - .cid-tPyNpcR3AV .nav-dropdown { - padding-bottom: 0.5rem; - } -} -.cid-tPyNpcR3AV .nav-dropdown .link.dropdown-toggle::after { - margin-left: 0.5rem; - margin-top: 0.2rem; - transition: .3s all; -} -.cid-tPyNpcR3AV .container { - display: flex; - height: 90px; - padding: 0.5rem 0.6rem; - flex-wrap: nowrap; - background: rgba(255, 255, 255, 0.9) !important; - left: 0; - right: 0; - -webkit-box-pack: justify; - -ms-flex-pack: justify; - justify-content: flex-end; - -webkit-box-align: center; - -webkit-align-items: center; - -ms-flex-align: center; - align-items: center; - border-radius: 100vw; - margin-top: 1rem; - background-color: #ffffff; - box-shadow: 0 30px 60px 0 rgba(27, 31, 10, 0.08); -} -@media (max-width: 992px) { - .cid-tPyNpcR3AV .container { - padding-right: 2rem; - } -} -@media (max-width: 767px) { - .cid-tPyNpcR3AV .container { - width: 95%; - height: 56px !important; - padding-right: 1rem; - margin-top: 0rem; - } -} -.cid-tPyNpcR3AV .iconfont-wrapper { - color: #000000 !important; - font-size: 1.5rem; - padding-right: 0.5rem; -} -.cid-tPyNpcR3AV .dropdown-menu { - flex-wrap: wrap; - flex-direction: column; - max-width: 100%; - padding: 12px 4px !important; - border-radius: 1.5rem; - transition: .3s all !important; - min-width: auto; - background: #ffffff; - background: rgba(255, 255, 255, 0.9) !important; -} -.cid-tPyNpcR3AV .nav-item:focus, -.cid-tPyNpcR3AV .nav-link:focus { - outline: none; -} -.cid-tPyNpcR3AV .dropdown .dropdown-menu .dropdown-item { - width: auto; - 
transition: all 0.25s ease-in-out; -} -.cid-tPyNpcR3AV .dropdown .dropdown-menu .dropdown-item::after { - right: 0.5rem; -} -.cid-tPyNpcR3AV .dropdown .dropdown-menu .dropdown-item .mbr-iconfont { - margin-right: 0.5rem; - vertical-align: sub; -} -.cid-tPyNpcR3AV .dropdown .dropdown-menu .dropdown-item .mbr-iconfont:before { - display: inline-block; - transform: scale(1, 1); - transition: all 0.25s ease-in-out; -} -.cid-tPyNpcR3AV .collapsed .dropdown-menu .dropdown-item:before { - display: none; -} -.cid-tPyNpcR3AV .collapsed .dropdown .dropdown-menu .dropdown-item { - padding: 0.235em 1.5em 0.235em 1.5em !important; - transition: none; - margin: 0 !important; -} -.cid-tPyNpcR3AV .navbar { - min-height: 90px; - transition: all 0.3s; - border-bottom: 1px solid transparent; - background: transparent !important; - padding: 0 !important; - border: none !important; - box-shadow: none !important; - border-radius: 0 !important; -} -.cid-tPyNpcR3AV .navbar.opened { - transition: all 0.3s; -} -.cid-tPyNpcR3AV .navbar .dropdown-item { - padding: 0.5rem 1.8rem; -} -.cid-tPyNpcR3AV .navbar .navbar-logo img { - width: auto; -} -.cid-tPyNpcR3AV .navbar .navbar-collapse { - z-index: 1; - justify-content: flex-end; -} -.cid-tPyNpcR3AV .navbar.collapsed { - justify-content: center; -} -.cid-tPyNpcR3AV .navbar.collapsed .nav-item .nav-link::before { - display: none; -} -.cid-tPyNpcR3AV .navbar.collapsed.opened .dropdown-menu { - top: 0; -} -@media (min-width: 992px) { - .cid-tPyNpcR3AV .navbar.collapsed.opened:not(.navbar-short) .navbar-collapse { - max-height: calc(98.5vh - 3rem); - } -} -.cid-tPyNpcR3AV .navbar.collapsed .dropdown-menu .dropdown-submenu { - left: 0 !important; -} -.cid-tPyNpcR3AV .navbar.collapsed .dropdown-menu .dropdown-item:after { - right: auto; -} -.cid-tPyNpcR3AV .navbar.collapsed .dropdown-menu .dropdown-toggle[data-toggle="dropdown-submenu"]:after { - margin-left: 0.5rem; - margin-top: 0.2rem; - border-top: 0.35em solid; - border-right: 0.35em solid transparent; - border-left: 0.35em solid transparent; - border-bottom: 0; - top: 41%; -} -.cid-tPyNpcR3AV .navbar.collapsed ul.navbar-nav li { - margin: auto; -} -.cid-tPyNpcR3AV .navbar.collapsed .dropdown-menu .dropdown-item { - padding: 0.25rem 1.5rem; - text-align: center; -} -.cid-tPyNpcR3AV .navbar.collapsed .icons-menu { - padding-left: 0; - padding-right: 0; - padding-top: 0.5rem; - padding-bottom: 0.5rem; -} -@media (max-width: 767px) { - .cid-tPyNpcR3AV .navbar { - min-height: 72px; - } - .cid-tPyNpcR3AV .navbar .navbar-logo img { - height: 2rem !important; - } -} -@media (max-width: 991px) { - .cid-tPyNpcR3AV .navbar .nav-item .nav-link::before { - display: none; - } - .cid-tPyNpcR3AV .navbar.opened .dropdown-menu { - top: 0; - } - .cid-tPyNpcR3AV .navbar .dropdown-menu .dropdown-submenu { - left: 0 !important; - } - .cid-tPyNpcR3AV .navbar .dropdown-menu .dropdown-item:after { - right: auto; - } - .cid-tPyNpcR3AV .navbar .dropdown-menu .dropdown-toggle[data-toggle="dropdown-submenu"]:after { - margin-left: 0.5rem; - margin-top: 0.2rem; - border-top: 0.35em solid; - border-right: 0.35em solid transparent; - border-left: 0.35em solid transparent; - border-bottom: 0; - top: 40%; - } - .cid-tPyNpcR3AV .navbar .dropdown-menu .dropdown-item { - padding: 0.25rem 1.5rem !important; - text-align: center; - } - .cid-tPyNpcR3AV .navbar .navbar-brand { - flex-shrink: initial; - flex-basis: auto; - word-break: break-word; - padding-right: 10px; - } - .cid-tPyNpcR3AV .navbar .navbar-toggler { - flex-basis: auto; - } - .cid-tPyNpcR3AV 
.navbar .icons-menu { - padding-left: 0; - padding-top: 0.5rem; - padding-bottom: 0.5rem; - } -} -.cid-tPyNpcR3AV .navbar.navbar-short .navbar-logo img { - height: 2rem; -} -.cid-tPyNpcR3AV .dropdown-item.active, -.cid-tPyNpcR3AV .dropdown-item:active { - background-color: transparent; -} -.cid-tPyNpcR3AV .navbar-expand-lg .navbar-nav .nav-link { - padding: 0; -} -.cid-tPyNpcR3AV .nav-dropdown .link.dropdown-toggle { - margin-right: 1.667em; -} -.cid-tPyNpcR3AV .nav-dropdown .link.dropdown-toggle[aria-expanded="true"] { - margin-right: 0; - padding: 0.667em 1.667em; -} -.cid-tPyNpcR3AV .navbar.navbar-expand-lg .dropdown .dropdown-menu { - background: #ffffff; -} -.cid-tPyNpcR3AV .navbar.navbar-expand-lg .dropdown .dropdown-menu .dropdown-submenu { - margin: 0; - left: 105%; - transform: none; - top: -12px; -} -.cid-tPyNpcR3AV .navbar .dropdown.open > .dropdown-menu { - display: flex; -} -.cid-tPyNpcR3AV ul.navbar-nav { - flex-wrap: wrap; -} -.cid-tPyNpcR3AV .navbar-buttons { - text-align: center; - min-width: 140px; -} -@media (max-width: 992px) { - .cid-tPyNpcR3AV .navbar-buttons { - text-align: left; - } -} -.cid-tPyNpcR3AV button.navbar-toggler { - outline: none; - width: 31px; - height: 20px; - cursor: pointer; - transition: all 0.2s; - position: relative; - align-self: center; -} -.cid-tPyNpcR3AV button.navbar-toggler .hamburger span { - position: absolute; - right: 0; - width: 30px; - height: 2px; - border-right: 5px; - background-color: #000000; -} -.cid-tPyNpcR3AV button.navbar-toggler .hamburger span:nth-child(1) { - top: 0; - transition: all 0.2s; -} -.cid-tPyNpcR3AV button.navbar-toggler .hamburger span:nth-child(2) { - top: 8px; - transition: all 0.15s; -} -.cid-tPyNpcR3AV button.navbar-toggler .hamburger span:nth-child(3) { - top: 8px; - transition: all 0.15s; -} -.cid-tPyNpcR3AV button.navbar-toggler .hamburger span:nth-child(4) { - top: 16px; - transition: all 0.2s; -} -.cid-tPyNpcR3AV nav.opened .hamburger span:nth-child(1) { - top: 8px; - width: 0; - opacity: 0; - right: 50%; - transition: all 0.2s; -} -.cid-tPyNpcR3AV nav.opened .hamburger span:nth-child(2) { - transform: rotate(45deg); - transition: all 0.25s; -} -.cid-tPyNpcR3AV nav.opened .hamburger span:nth-child(3) { - transform: rotate(-45deg); - transition: all 0.25s; -} -.cid-tPyNpcR3AV nav.opened .hamburger span:nth-child(4) { - top: 8px; - width: 0; - opacity: 0; - right: 50%; - transition: all 0.2s; -} -.cid-tPyNpcR3AV .navbar-dropdown { - padding: 0 1rem; -} -.cid-tPyNpcR3AV a.nav-link { - display: flex; - align-items: center; - justify-content: center; -} -.cid-tPyNpcR3AV .icons-menu { - flex-wrap: nowrap; - display: flex; - justify-content: center; - padding-left: 1rem; - padding-right: 1rem; - padding-top: 0.3rem; - text-align: center; -} -@media (max-width: 992px) { - .cid-tPyNpcR3AV .icons-menu { - justify-content: flex-start; - margin-bottom: .5rem; - } -} -@media screen and (-ms-high-contrast: active), (-ms-high-contrast: none) { - .cid-tPyNpcR3AV .navbar { - height: 70px; - } - .cid-tPyNpcR3AV .navbar.opened { - height: auto; - } - .cid-tPyNpcR3AV .nav-item .nav-link:hover::before { - width: 175%; - max-width: calc(100% + 2rem); - left: -1rem; - } -} -.cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu { - display: none; - width: max-content; - max-width: 500px !important; - transform: translateX(-50%); - top: calc(100% + 20px); - left: 50%; -} -.cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown-item { - line-height: 1 !important; -} -.cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu 
.dropdown .dropdown-item { - align-items: center; - display: flex; - height: max-content !important; - min-height: max-content !important; -} -.cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown .dropdown-item::after { - display: inline-block; - position: static; - margin-left: 0.5rem; - margin-top: 0; - margin-right: 0; - margin-bottom: 0; - transition: .3s all; - transform: rotate(-90deg); -} -.cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown.open .dropdown-item::after { - transform: rotate(0deg); -} -.cid-tPyNpcR3AV .mbr-section-btn { - margin: -0.6rem -0.6rem; -} -.cid-tPyNpcR3AV .navbar-toggler { - margin-left: 12px; - margin-right: 8px; - order: 1000; -} -@media (max-width: 991px) { - .cid-tPyNpcR3AV .navbar-brand { - margin-right: auto; - } - .cid-tPyNpcR3AV .navbar-collapse { - z-index: -1 !important; - position: absolute; - top: 110%; - left: 0; - width: 100%; - padding: 1rem; - border-radius: 1.5rem; - background: #ffffff; - opacity: 1; - border-color: rgba(255, 255, 255, 0.9) !important; - background: rgba(255, 255, 255, 0.9) !important; - backdrop-filter: blur(8px); - } - .cid-tPyNpcR3AV .navbar-nav .nav-item .nav-link::after { - margin-left: 10px; - } - .cid-tPyNpcR3AV .navbar-nav .dropdown-item:hover { - background-color: rgba(27, 31, 10, 0.06); - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu { - max-width: 100% !important; - transform: translateX(0); - top: 10px; - left: 0; - padding: 8px !important; - border-radius: 1rem; - background-color: rgba(27, 31, 10, 0.04) !important; - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown-item { - padding: 8px !important; - line-height: 1 !important; - margin-bottom: 4px !important; - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown .dropdown-item { - align-items: center; - display: flex; - height: max-content !important; - min-height: max-content !important; - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown .dropdown-item::after { - display: inline-block; - position: static; - margin-left: 0.5rem; - margin-top: 0; - margin-right: 0; - margin-bottom: 0; - transition: .3s all; - transform: rotate(0deg); - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown.open .dropdown-item::after { - transform: rotate(180deg); - } - .cid-tPyNpcR3AV .navbar .dropdown > .dropdown-menu .dropdown-submenu { - position: static; - width: 100%; - max-width: 100% !important; - transform: translateX(0) !important; - top: 0; - left: 0; - padding: 8px !important; - border-radius: 1rem; - background-color: rgba(27, 31, 10, 0.04) !important; - } - .cid-tPyNpcR3AV .navbar .dropdown.open > .dropdown-menu { - display: flex !important; - flex-direction: column; - align-items: flex-start; - } -} -@media (max-width: 575px) { - .cid-tPyNpcR3AV .navbar-collapse { - padding: 1rem; - } -} -.cid-tJS6uM4N87 { - padding-top: 12rem; - padding-bottom: 2rem; - background-color: #edefeb; -} -.cid-tJS6uM4N87 .mbr-fallback-image.disabled { - display: none; -} -.cid-tJS6uM4N87 .mbr-fallback-image { - display: block; - background-size: cover; - background-position: center center; - width: 100%; - height: 100%; - position: absolute; - bottom: 0; -} -.cid-tJS6uM4N87 .topbg { - position: absolute; - bottom: 0; - left: 0; - width: 100%; - height: 30%; - background: #6d97fa; -} -.cid-tJS6uM4N87 .mbr-section-title { - color: #000000; -} -.cid-tJS6uM4N87 .mbr-text, -.cid-tJS6uM4N87 .mbr-section-btn { - color: #000000; -} -.cid-tMlEXTHLbS { - padding-top: 4rem; - padding-bottom: 6rem; - background-color: 
#909fc9; -} -.cid-tMlEXTHLbS img, -.cid-tMlEXTHLbS .item-img { - width: 100%; - height: 100%; - height: 300px; - object-fit: cover; -} -.cid-tMlEXTHLbS .item:focus, -.cid-tMlEXTHLbS span:focus { - outline: none; -} -.cid-tMlEXTHLbS .item { - margin-bottom: 2rem; -} -@media (max-width: 767px) { - .cid-tMlEXTHLbS .item { - margin-bottom: 1rem; - } -} -.cid-tMlEXTHLbS .item-content { - margin-top: 2rem; - padding: 0 2.25rem 2.25rem; - display: flex; - flex-direction: column; - height: 100%; -} -@media (max-width: 767px) { - .cid-tMlEXTHLbS .item-content { - padding: 0 2rem 1.5rem; - margin-top: 1rem; - } -} -.cid-tMlEXTHLbS .item-wrapper { - position: relative; - background: #ffffff; - height: 100%; - display: flex; - flex-flow: column nowrap; -} -.cid-tMlEXTHLbS .item-wrapper .item-footer { - margin-top: auto; -} -.cid-tMlEXTHLbS .mbr-section-title { - color: #ffffff; - text-align: center; -} -.cid-tMlEXTHLbS .item-title { - text-align: left; -} -.cid-tMlEXTHLbS .item-subtitle { - text-align: left; -} -.cid-tMlEXTHLbS .mbr-text, -.cid-tMlEXTHLbS .item .mbr-section-btn { - text-align: left; -} -.cid-tMlEXTHLbS .content-head { - max-width: 800px; -} -.cid-tMlEXTHLbS .mbr-section-subtitle, -.cid-tMlEXTHLbS .mbr-section-head .mbr-section-btn { - color: #ffffff; - text-align: center; -} -.cid-tPrNVj3BLt { - padding-top: 6rem; - padding-bottom: 6rem; - background-color: #ededed; -} -.cid-tPrNVj3BLt img, -.cid-tPrNVj3BLt .item-img { - width: 100%; - height: 100%; - height: 400px; - object-fit: cover; -} -.cid-tPrNVj3BLt .item:focus, -.cid-tPrNVj3BLt span:focus { - outline: none; -} -.cid-tPrNVj3BLt .item-wrapper { - position: relative; -} -.cid-tPrNVj3BLt .slide-content { - position: relative; - border-radius: 4px; - background: #ffffff; - height: 100%; - display: flex; - overflow: hidden; - flex-flow: column nowrap; -} -@media (min-width: 992px) { - .cid-tPrNVj3BLt .slide-content .item-content { - padding: 2.25rem 2.25rem 0; - } - .cid-tPrNVj3BLt .slide-content .item-footer { - padding: 0 2.25rem 2.25rem; - } -} -@media (max-width: 991px) { - .cid-tPrNVj3BLt .slide-content .item-content { - padding: 1.5rem 1.5rem 0; - } - .cid-tPrNVj3BLt .slide-content .item-footer { - padding: 0 1.5rem 1.5rem; - } -} -.cid-tPrNVj3BLt .mbr-section-btn { - margin-top: auto !important; -} -.cid-tPrNVj3BLt .mbr-section-title { - color: #232323; -} -.cid-tPrNVj3BLt .mbr-text, -.cid-tPrNVj3BLt .mbr-section-btn { - text-align: left; -} -.cid-tPrNVj3BLt .item-title { - text-align: left; -} -.cid-tPrNVj3BLt .item-subtitle { - text-align: left; - color: #bbbbbb; -} -.cid-tPrNVj3BLt .embla__slide { - display: flex; - justify-content: center; - position: relative; - min-width: 370px; - max-width: 370px; -} -@media (max-width: 768px) { - .cid-tPrNVj3BLt .embla__slide { - min-width: 85%; - max-width: initial; - } -} -.cid-tPrNVj3BLt .embla__button--next, -.cid-tPrNVj3BLt .embla__button--prev { - display: flex; -} -.cid-tPrNVj3BLt .embla__button { - top: 50%; - width: 60px; - height: 60px; - margin-top: -1.5rem; - font-size: 22px; - background-color: rgba(0, 0, 0, 0.5); - color: #fff; - border: 2px solid #fff; - border-radius: 50%; - transition: all 0.3s; - position: absolute; - display: flex; - justify-content: center; - align-items: center; -} -@media (max-width: 768px) { - .cid-tPrNVj3BLt .embla__button { - display: none; - } -} -.cid-tPrNVj3BLt .embla__button:disabled { - cursor: default; - display: none; -} -.cid-tPrNVj3BLt .embla__button:hover { - background: #000; - color: rgba(255, 255, 255, 0.5); -} -.cid-tPrNVj3BLt 
.embla__button.embla__button--prev { - left: 0; - margin-left: 2.5rem; -} -.cid-tPrNVj3BLt .embla__button.embla__button--next { - right: 0; - margin-right: 2.5rem; -} -@media (max-width: 767px) { - .cid-tPrNVj3BLt .embla__button { - top: auto; - } -} -.cid-tPrNVj3BLt .embla { - position: relative; - width: 100%; -} -.cid-tPrNVj3BLt .embla__viewport { - overflow: hidden; - width: 100%; -} -.cid-tPrNVj3BLt .embla__viewport.is-draggable { - cursor: grab; -} -.cid-tPrNVj3BLt .embla__viewport.is-dragging { - cursor: grabbing; -} -.cid-tPrNVj3BLt .embla__container { - display: flex; - user-select: none; - -webkit-touch-callout: none; - -khtml-user-select: none; - -webkit-tap-highlight-color: transparent; -} -@media (max-width: 768px) { - .cid-tPrNVj3BLt .embla__container .embla__slide:first-child { - margin-left: 2rem !important; - } - .cid-tPrNVj3BLt .embla__container .embla__slide:last-child { - margin-right: 2rem !important; - } -} -.cid-tPrNVj3BLt .content-head { - max-width: 800px; -} diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/smoothscroll/smooth-scroll.js b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/smoothscroll/smooth-scroll.js deleted file mode 100644 index 49842514..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/smoothscroll/smooth-scroll.js +++ /dev/null @@ -1,24 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -(function(){function C(){if(!D&&document.body){D=!0;var a=document.body,b=document.documentElement,d=window.innerHeight,c=a.scrollHeight;l=0<=document.compatMode.indexOf("CSS")?b:a;m=a;f.keyboardSupport&&window.addEventListener("keydown",M,!1);if(top!=self)v=!0;else if(ca&&c>d&&(a.offsetHeight<=d||b.offsetHeight<=d)){var e=document.createElement("div");e.style.cssText="position:absolute; z-index:-10000; top:0; left:0; right:0; height:"+l.scrollHeight+"px";document.body.appendChild(e);var g;w=function(){g|| -(g=setTimeout(function(){e.style.height="0";e.style.height=l.scrollHeight+"px";g=null},500))};setTimeout(w,10);window.addEventListener("resize",w,!1);z=new da(w);z.observe(a,{attributes:!0,childList:!0,characterData:!1});l.offsetHeight<=d&&(d=document.createElement("div"),d.style.clear="both",a.appendChild(d))}f.fixedBackground||(a.style.backgroundAttachment="scroll",b.style.backgroundAttachment="scroll")}}function N(a,b,d){ea(b,d);if(1!=f.accelerationMax){var c=Date.now()-E;cb?.99:-.99,lastY:0>d?.99:-.99,start:Date.now()});if(!F){c=O();var e=a===c||a===document.body;null==a.$scrollBehavior&&fa(a)&&(a.$scrollBehavior=a.style.scrollBehavior,a.style.scrollBehavior="auto");var g=function(c){c=Date.now();for(var k=0,l=0,h=0;h=f.animationTime,q=m?1:p/f.animationTime;f.pulseAlgorithm&&(p=q,1<=p?q=1:0>=p?q=0:(1==f.pulseNormalize&&(f.pulseNormalize/= -P(1)),q=P(p)));p=n.x*q-n.lastX>>0;q=n.y*q-n.lastY>>0;k+=p;l+=q;n.lastX+=p;n.lastY+=q;m&&(t.splice(h,1),h--)}e?window.scrollBy(k,l):(k&&(a.scrollLeft+=k),l&&(a.scrollTop+=l));b||d||(t=[]);t.length?Q(g,a,1E3/f.frameRate+1):(F=!1,null!=a.$scrollBehavior&&(a.style.scrollBehavior=a.$scrollBehavior,a.$scrollBehavior=null))};Q(g,a,0);F=!0}}function R(a){D||C();var 
b=a.target;if(a.defaultPrevented||a.ctrlKey||r(m,"embed")||r(b,"embed")&&/\.pdf/i.test(b.src)||r(m,"object")||b.shadowRoot)return!0;var d=-a.wheelDeltaX|| -a.deltaX||0,c=-a.wheelDeltaY||a.deltaY||0;ha&&(a.wheelDeltaX&&x(a.wheelDeltaX,120)&&(d=a.wheelDeltaX/Math.abs(a.wheelDeltaX)*-120),a.wheelDeltaY&&x(a.wheelDeltaY,120)&&(c=a.wheelDeltaY/Math.abs(a.wheelDeltaY)*-120));d||c||(c=-a.wheelDelta||0);1===a.deltaMode&&(d*=40,c*=40);b=S(b);if(!b)return v&&G?(Object.defineProperty(a,"target",{value:window.frameElement}),parent.wheel(a)):!0;if(ia(c))return!0;1.2a?!0:b}}function x(a,b){return Math.floor(a/b)==a/b}function K(a){return x(h[0],a)&&x(h[1],a)&&x(h[2],a)}function P(a){a*=f.pulseScale;if(1>a)var b= -a-(1-Math.exp(-a));else b=Math.exp(-1),a=1-Math.exp(-(a-1)),b+=a*(1-b);return b*f.pulseNormalize}function y(a){for(var b in a)aa.hasOwnProperty(b)&&(f[b]=a[b])}var aa={frameRate:150,animationTime:400,stepSize:100,pulseAlgorithm:!0,pulseScale:4,pulseNormalize:1,accelerationDelta:50,accelerationMax:3,keyboardSupport:!0,arrowScroll:50,fixedBackground:!0,excluded:""},f=aa,v=!1,B={x:0,y:0},D=!1,l=document.documentElement,m,z,w,h=[],Z,ha=/^Mac/.test(navigator.platform),g={left:37,up:38,right:39,down:40, -spacebar:32,pageup:33,pagedown:34,end:35,home:36},ja={37:1,38:1,39:1,40:1},t=[],F=!1,E=Date.now(),J=function(){var a=0;return function(b){return b.uniqueID||(b.uniqueID=a++)}}(),W={},H={},V,A={};if(window.localStorage&&localStorage.SS_deltaBuffer)try{h=localStorage.SS_deltaBuffer.split(",")}catch(a){}var Q=function(){return window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||function(a,b,d){window.setTimeout(a,d||1E3/60)}}(),da=window.MutationObserver|| -window.WebKitMutationObserver||window.MozMutationObserver,O=function(){var a=document.scrollingElement;return function(){if(!a){var b=document.createElement("div");b.style.cssText="height:10000px;width:1px;";document.body.appendChild(b);var d=document.body.scrollTop;window.scrollBy(0,3);a=document.body.scrollTop!=d?document.body:document.documentElement;window.scrollBy(0,-3);document.body.removeChild(b)}return a}}(),k=window.navigator.userAgent,u=/Edge/.test(k),G=/chrome/i.test(k)&&!u;u=/safari/i.test(k)&& -!u;var ka=/mobile/i.test(k),la=/Windows NT 6.1/i.test(k)&&/rv:11/i.test(k),ca=u&&(/Version\/8/i.test(k)||/Version\/9/i.test(k));k=(G||u||la)&&!ka;var ba=!1;try{window.addEventListener("test",null,Object.defineProperty({},"passive",{get:function(){ba=!0}}))}catch(a){}u=ba?{passive:!1}:!1;var L="onwheel"in document.createElement("div")?"wheel":"mousewheel";L&&k&&(window.addEventListener(L,R,u||!1),window.addEventListener("mousedown",U,!1),window.addEventListener("load",C,!1));y.destroy=function(){z&& -z.disconnect();window.removeEventListener(L,R,!1);window.removeEventListener("mousedown",U,!1);window.removeEventListener("keydown",M,!1);window.removeEventListener("resize",w,!1);window.removeEventListener("load",C,!1)};window.SmoothScrollOptions&&y(window.SmoothScrollOptions);"function"===typeof define&&define.amd?define(function(){return y}):"object"==typeof exports?module.exports=y:window.SmoothScroll=y})(); diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/theme/css/style.css b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/theme/css/style.css deleted file mode 100644 index 0b1381af..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/theme/css/style.css +++ /dev/null @@ -1,965 +0,0 @@ -@charset "UTF-8"; -section { - background-color: #ffffff; 
-} - -body { - font-style: normal; - line-height: 1.5; - font-weight: 400; - color: #232323; - position: relative; -} - -button { - background-color: transparent; - border-color: transparent; -} - -.embla__button, -.carousel-control { - background-color: #edefea !important; - opacity: 0.8 !important; - color: #464845 !important; - border-color: #edefea !important; -} - -.carousel .close, -.modalWindow .close { - background-color: #edefea !important; - color: #464845 !important; - border-color: #edefea !important; - opacity: 0.8 !important; -} - -.carousel .close:hover, -.modalWindow .close:hover { - opacity: 1 !important; -} - -.carousel-indicators li { - background-color: #edefea !important; - border: 2px solid #464845 !important; -} - -.carousel-indicators li:hover, -.carousel-indicators li:active { - opacity: 0.8 !important; -} - -.embla__button:hover, -.carousel-control:hover { - background-color: #edefea !important; - opacity: 1 !important; -} - -.modalWindow-video-container { - height: 80%; -} - -section, -.container, -.container-fluid { - position: relative; - word-wrap: break-word; -} - -a.mbr-iconfont:hover { - text-decoration: none; -} - -.article .lead p, -.article .lead ul, -.article .lead ol, -.article .lead pre, -.article .lead blockquote { - margin-bottom: 0; -} - -a { - font-style: normal; - font-weight: 400; - cursor: pointer; -} -a, a:hover { - text-decoration: none; -} - -.mbr-section-title { - font-style: normal; - line-height: 1.3; -} - -.mbr-section-subtitle { - line-height: 1.3; -} - -.mbr-text { - font-style: normal; - line-height: 1.7; -} - -h1, -h2, -h3, -h4, -h5, -h6, -.display-1, -.display-2, -.display-4, -.display-5, -.display-7, -span, -p, -a { - line-height: 1; - word-break: break-word; - word-wrap: break-word; - font-weight: 400; -} - -b, -strong { - font-weight: bold; -} - -input:-webkit-autofill, input:-webkit-autofill:hover, input:-webkit-autofill:focus, input:-webkit-autofill:active { - transition-delay: 9999s; - -webkit-transition-property: background-color, color; - transition-property: background-color, color; -} - -textarea[type=hidden] { - display: none; -} - -section { - background-position: 50% 50%; - background-repeat: no-repeat; - background-size: cover; -} -section .mbr-background-video, -section .mbr-background-video-preview { - position: absolute; - bottom: 0; - left: 0; - right: 0; - top: 0; -} - -.hidden { - visibility: hidden; -} - -.mbr-z-index20 { - z-index: 20; -} - -/*! Base colors */ -.mbr-white { - color: #ffffff; -} - -.mbr-black { - color: #111111; -} - -.mbr-bg-white { - background-color: #ffffff; -} - -.mbr-bg-black { - background-color: #000000; -} - -/*! Text-aligns */ -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/*! Font-weight */ -.mbr-light { - font-weight: 300; -} - -.mbr-regular { - font-weight: 400; -} - -.mbr-semibold { - font-weight: 500; -} - -.mbr-bold { - font-weight: 700; -} - -/*! 
Media */ -.media-content { - flex-basis: 100%; -} - -.media-container-row { - display: flex; - flex-direction: row; - flex-wrap: wrap; - justify-content: center; - align-content: center; - align-items: start; -} -.media-container-row .media-size-item { - width: 400px; -} - -.media-container-column { - display: flex; - flex-direction: column; - flex-wrap: wrap; - justify-content: center; - align-content: center; - align-items: stretch; -} -.media-container-column > * { - width: 100%; -} - -@media (min-width: 992px) { - .media-container-row { - flex-wrap: nowrap; - } -} -figure { - margin-bottom: 0; - overflow: hidden; -} - -figure[mbr-media-size] { - transition: width 0.1s; -} - -img, -iframe { - display: block; - width: 100%; -} - -.card { - background-color: transparent; - border: none; -} - -.card-box { - width: 100%; -} - -.card-img { - text-align: center; - flex-shrink: 0; - -webkit-flex-shrink: 0; -} - -.media { - max-width: 100%; - margin: 0 auto; -} - -.mbr-figure { - align-self: center; -} - -.media-container > div { - max-width: 100%; -} - -.mbr-figure img, -.card-img img { - width: 100%; -} - -@media (max-width: 991px) { - .media-size-item { - width: auto !important; - } - .media { - width: auto; - } - .mbr-figure { - width: 100% !important; - } -} -/*! Buttons */ -.mbr-section-btn { - margin-left: -0.6rem; - margin-right: -0.6rem; - font-size: 0; -} - -.btn { - font-weight: 600; - border-width: 1px; - font-style: normal; - margin: 0.6rem 0.6rem; - white-space: normal; - transition: all 0.2s ease-in-out; - display: inline-flex; - align-items: center; - justify-content: center; - word-break: break-word; -} - -.btn-sm { - font-weight: 600; - letter-spacing: 0px; - transition: all 0.3s ease-in-out; -} - -.btn-md { - font-weight: 600; - letter-spacing: 0px; - transition: all 0.3s ease-in-out; -} - -.btn-lg { - font-weight: 600; - letter-spacing: 0px; - transition: all 0.3s ease-in-out; -} - -.btn-form { - margin: 0; -} -.btn-form:hover { - cursor: pointer; -} - -nav .mbr-section-btn { - margin-left: 0rem; - margin-right: 0rem; -} - -/*! Btn icon margin */ -.btn .mbr-iconfont, -.btn.btn-sm .mbr-iconfont { - order: 1; - cursor: pointer; - margin-left: 0.5rem; - vertical-align: sub; -} - -.btn.btn-md .mbr-iconfont, -.btn.btn-md .mbr-iconfont { - margin-left: 0.8rem; -} - -.mbr-regular { - font-weight: 400; -} - -.mbr-semibold { - font-weight: 500; -} - -.mbr-bold { - font-weight: 700; -} - -[type=submit] { - -webkit-appearance: none; -} - -/*! Full-screen */ -.mbr-fullscreen .mbr-overlay { - min-height: 100vh; -} - -.mbr-fullscreen { - display: flex; - display: -moz-flex; - display: -ms-flex; - display: -o-flex; - align-items: center; - min-height: 100vh; - padding-top: 3rem; - padding-bottom: 3rem; -} - -/*! Map */ -.map { - height: 25rem; - position: relative; -} -.map iframe { - width: 100%; - height: 100%; -} - -/*! 
Scroll to top arrow */ -.mbr-arrow-up { - bottom: 25px; - right: 90px; - position: fixed; - text-align: right; - z-index: 5000; - color: #ffffff; - font-size: 22px; -} - -.mbr-arrow-up a { - background: rgba(0, 0, 0, 0.2); - border-radius: 50%; - color: #fff; - display: inline-block; - height: 60px; - width: 60px; - border: 2px solid #fff; - outline-style: none !important; - position: relative; - text-decoration: none; - transition: all 0.3s ease-in-out; - cursor: pointer; - text-align: center; -} -.mbr-arrow-up a:hover { - background-color: rgba(0, 0, 0, 0.4); -} -.mbr-arrow-up a i { - line-height: 60px; -} - -.mbr-arrow-up-icon { - display: block; - color: #fff; -} - -.mbr-arrow-up-icon::before { - content: "›"; - display: inline-block; - font-family: serif; - font-size: 22px; - line-height: 1; - font-style: normal; - position: relative; - top: 6px; - left: -4px; - transform: rotate(-90deg); -} - -/*! Arrow Down */ -.mbr-arrow { - position: absolute; - bottom: 45px; - left: 50%; - width: 60px; - height: 60px; - cursor: pointer; - background-color: rgba(80, 80, 80, 0.5); - border-radius: 50%; - transform: translateX(-50%); -} -@media (max-width: 767px) { - .mbr-arrow { - display: none; - } -} -.mbr-arrow > a { - display: inline-block; - text-decoration: none; - outline-style: none; - animation: arrowdown 1.7s ease-in-out infinite; - color: #ffffff; -} -.mbr-arrow > a > i { - position: absolute; - top: -2px; - left: 15px; - font-size: 2rem; -} - -#scrollToTop a i::before { - content: ""; - position: absolute; - display: block; - border-bottom: 2.5px solid #fff; - border-left: 2.5px solid #fff; - width: 27.8%; - height: 27.8%; - left: 50%; - top: 51%; - transform: translateY(-30%) translateX(-50%) rotate(135deg); -} - -@keyframes arrowdown { - 0% { - transform: translateY(0px); - } - 50% { - transform: translateY(-5px); - } - 100% { - transform: translateY(0px); - } -} -@media (max-width: 500px) { - .mbr-arrow-up { - left: 0; - right: 0; - text-align: center; - } -} -/*Gradients animation*/ -@keyframes gradient-animation { - from { - background-position: 0% 100%; - animation-timing-function: ease-in-out; - } - to { - background-position: 100% 0%; - animation-timing-function: ease-in-out; - } -} -.bg-gradient { - background-size: 200% 200%; - animation: gradient-animation 5s infinite alternate; - -webkit-animation: gradient-animation 5s infinite alternate; -} - -.menu .navbar-brand { - display: -webkit-flex; -} -.menu .navbar-brand span { - display: flex; - display: -webkit-flex; -} -.menu .navbar-brand .navbar-caption-wrap { - display: -webkit-flex; -} -.menu .navbar-brand .navbar-logo img { - display: -webkit-flex; - width: auto; -} -@media (min-width: 768px) and (max-width: 991px) { - .menu .navbar-toggleable-sm .navbar-nav { - display: -ms-flexbox; - } -} -@media (max-width: 991px) { - .menu .navbar-collapse { - max-height: 93.5vh; - } - .menu .navbar-collapse.show { - overflow: auto; - } -} -@media (min-width: 992px) { - .menu .navbar-nav.nav-dropdown { - display: -webkit-flex; - } - .menu .navbar-toggleable-sm .navbar-collapse { - display: -webkit-flex !important; - } - .menu .collapsed .navbar-collapse { - max-height: 93.5vh; - } - .menu .collapsed .navbar-collapse.show { - overflow: auto; - } -} -@media (max-width: 767px) { - .menu .navbar-collapse { - max-height: 80vh; - } -} - -.nav-link .mbr-iconfont { - margin-right: 0.5rem; -} - -.navbar { - display: -webkit-flex; - -webkit-flex-wrap: wrap; - -webkit-align-items: center; - -webkit-justify-content: space-between; -} - 
-.navbar-collapse { - -webkit-flex-basis: 100%; - -webkit-flex-grow: 1; - -webkit-align-items: center; -} - -.nav-dropdown .link { - padding: 0.667em 1.667em !important; - margin: 0 !important; -} - -.nav { - display: -webkit-flex; - -webkit-flex-wrap: wrap; -} - -.row { - display: -webkit-flex; - -webkit-flex-wrap: wrap; -} - -.justify-content-center { - -webkit-justify-content: center; -} - -.form-inline { - display: -webkit-flex; -} - -.card-wrapper { - -webkit-flex: 1; -} - -.carousel-control { - z-index: 10; - display: -webkit-flex; -} - -.carousel-controls { - display: -webkit-flex; -} - -.media { - display: -webkit-flex; -} - -.form-group:focus { - outline: none; -} - -.jq-selectbox__select { - padding: 7px 0; - position: relative; -} - -.jq-selectbox__dropdown { - overflow: hidden; - border-radius: 10px; - position: absolute; - top: 100%; - left: 0 !important; - width: 100% !important; -} - -.jq-selectbox__trigger-arrow { - right: 0; - transform: translateY(-50%); -} - -.jq-selectbox li { - padding: 1.07em 0.5em; -} - -input[type=range] { - padding-left: 0 !important; - padding-right: 0 !important; -} - -.modal-dialog, -.modal-content { - height: 100%; -} - -.modal-dialog .carousel-inner { - height: calc(100vh - 1.75rem); -} -@media (max-width: 575px) { - .modal-dialog .carousel-inner { - height: calc(100vh - 1rem); - } -} - -.carousel-item { - text-align: center; -} - -.carousel-item img { - margin: auto; -} - -.navbar-toggler { - align-self: flex-start; - padding: 0.25rem 0.75rem; - font-size: 1.25rem; - line-height: 1; - background: transparent; - border: 1px solid transparent; - border-radius: 0.25rem; -} - -.navbar-toggler:focus, -.navbar-toggler:hover { - text-decoration: none; - box-shadow: none; -} - -.navbar-toggler-icon { - display: inline-block; - width: 1.5em; - height: 1.5em; - vertical-align: middle; - content: ""; - background: no-repeat center center; - background-size: 100% 100%; -} - -.navbar-toggler-left { - position: absolute; - left: 1rem; -} - -.navbar-toggler-right { - position: absolute; - right: 1rem; -} - -.card-img { - width: auto; -} - -.menu .navbar.collapsed:not(.beta-menu) { - flex-direction: column; -} - -.carousel-item.active, -.carousel-item-next, -.carousel-item-prev { - display: flex; -} - -.note-air-layout .dropup .dropdown-menu, -.note-air-layout .navbar-fixed-bottom .dropdown .dropdown-menu { - bottom: initial !important; -} - -html, -body { - height: auto; - min-height: 100vh; -} - -.dropup .dropdown-toggle::after { - display: none; -} - -.form-asterisk { - font-family: initial; - position: absolute; - top: -2px; - font-weight: normal; -} - -.form-control-label { - position: relative; - cursor: pointer; - margin-bottom: 0.357em; - padding: 0; -} - -.alert { - color: #ffffff; - border-radius: 0; - border: 0; - font-size: 1.1rem; - line-height: 1.5; - margin-bottom: 1.875rem; - padding: 1.25rem; - position: relative; - text-align: center; -} -.alert.alert-form::after { - background-color: inherit; - bottom: -7px; - content: ""; - display: block; - height: 14px; - left: 50%; - margin-left: -7px; - position: absolute; - transform: rotate(45deg); - width: 14px; -} - -.form-control { - background-color: #ffffff; - background-clip: border-box; - color: #232323; - line-height: 1rem !important; - height: auto; - padding: 1.2rem 2rem; - transition: border-color 0.25s ease 0s; - border: 1px solid transparent !important; - border-radius: 4px; - box-shadow: rgba(0, 0, 0, 0.07) 0px 1px 1px 0px, rgba(0, 0, 0, 0.07) 0px 1px 3px 0px, rgba(0, 0, 0, 0.03) 0px 
0px 0px 1px; -} -.form-active .form-control:invalid { - border-color: red; -} - -.row > * { - padding-right: 1rem; - padding-left: 1rem; -} - -form .row { - margin-left: -0.6rem; - margin-right: -0.6rem; -} -form .row [class*=col] { - padding-left: 0.6rem; - padding-right: 0.6rem; -} - -form .mbr-section-btn { - padding-left: 0.6rem; - padding-right: 0.6rem; -} - -form .form-check-input { - margin-top: 0.5; -} - -textarea.form-control { - line-height: 1.5rem !important; -} - -.form-group { - margin-bottom: 1.2rem; -} - -.form-control, -form .btn { - min-height: 48px; -} - -.gdpr-block label span.textGDPR input[name=gdpr] { - top: 7px; -} - -.form-control:focus { - box-shadow: none; -} - -:focus { - outline: none; -} - -.mbr-overlay { - background-color: #000; - bottom: 0; - left: 0; - opacity: 0.5; - position: absolute; - right: 0; - top: 0; - z-index: 0; - pointer-events: none; -} - -blockquote { - font-style: italic; - padding: 3rem; - font-size: 1.09rem; - position: relative; - border-left: 3px solid; -} - -ul, -ol, -pre, -blockquote { - margin-bottom: 2.3125rem; -} - -.mt-4 { - margin-top: 2rem !important; -} - -.mb-4 { - margin-bottom: 2rem !important; -} - -.container, -.container-fluid { - padding-left: 16px; - padding-right: 16px; -} - -.row { - margin-left: -16px; - margin-right: -16px; -} -.row > [class*=col] { - padding-left: 16px; - padding-right: 16px; -} - -@media (min-width: 992px) { - .container-fluid { - padding-left: 32px; - padding-right: 32px; - } -} -@media (max-width: 991px) { - .mbr-container { - padding-left: 16px; - padding-right: 16px; - } -} -.app-video-wrapper > img { - opacity: 1; -} - -.app-video-wrapper { - background: transparent; -} - -.item { - position: relative; -} - -.dropdown-menu .dropdown-menu { - left: 100%; -} - -.dropdown-item + .dropdown-menu { - display: none; -} - -.dropdown-item:hover + .dropdown-menu, -.dropdown-menu:hover { - display: block; -} - -@media (min-aspect-ratio: 16/9) { - .mbr-video-foreground { - height: 300% !important; - top: -100% !important; - } -} -@media (max-aspect-ratio: 16/9) { - .mbr-video-foreground { - width: 300% !important; - left: -100% !important; - } -}.engine { - position: absolute; - text-indent: -2400px; - text-align: center; - padding: 0; - top: 0; - left: -2400px; -} \ No newline at end of file diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/theme/js/script.js b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/theme/js/script.js deleted file mode 100644 index 1d258572..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/theme/js/script.js +++ /dev/null @@ -1,72 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// =============================================================================
[The remainder of this deleted file is a minified Mobirise theme script (ES5 $jscomp polyfills plus carousel, parallax, background-video, scroll, accordion and slider helpers, and an obfuscated routine); the minified body is corrupted in this extract and is not reproduced here.]
[A deleted IcoMoon-generated icon-font SVG (apparently assets/web/assets/mobirise-icons2/mobirise2.svg) followed; its diff header and glyph data did not survive extraction.]
diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/web/assets/mobirise-icons2/mobirise2.ttf b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/web/assets/mobirise-icons2/mobirise2.ttf deleted file mode 100644 index fe7394d0..00000000 Binary files a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/web/assets/mobirise-icons2/mobirise2.ttf and /dev/null differ
diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/web/assets/mobirise-icons2/mobirise2.woff b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/web/assets/mobirise-icons2/mobirise2.woff deleted file mode 100644 index 433d3942..00000000 Binary files a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/web/assets/mobirise-icons2/mobirise2.woff and /dev/null differ
diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/ytplayer/index.js b/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/ytplayer/index.js deleted file mode 100644 index 2feac6e1..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/assets/ytplayer/index.js +++ /dev/null @@ -1,79 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -/* - yt-player. MIT License. Feross Aboukhadijeh */
[The remaining deleted lines are the closure-compiled yt-player wrapper around the YouTube IFrame API (play/pause/seek/mute/volume/size/playback-rate and related prototype methods); the minified body is corrupted in this extract and is not reproduced here.]
[The diff header of the next deleted file was lost during extraction; it appears to be another "AI Solutions" Electron UI page, presumably the Image Enhancement counterpart of the index_is/index_od/index_sr pages deleted below. Only stray text survives: "Image Enhancement", "Input Dims : 640 x 480", "Output Dims : ----".]
diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/index_is.html b/ai-solutions/windows/electron-app-cv/electron_app_ui/index_is.html deleted file mode 100644 index 11b3d8a2..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/index_is.html +++ /dev/null @@ -1,523 +0,0 @@
[Deleted 523-line Electron UI page titled "AI Solutions"; its markup was stripped during extraction and only the section heading "Image Segmentation" survives.]
diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/index_od.html b/ai-solutions/windows/electron-app-cv/electron_app_ui/index_od.html deleted file mode 100644 index ccb4e4a8..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/index_od.html +++ /dev/null @@ -1,529 +0,0 @@
[Deleted 529-line Electron UI page titled "AI Solutions"; its markup was stripped during extraction and only the section heading "Object Detection" survives.]
diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/index_sr.html b/ai-solutions/windows/electron-app-cv/electron_app_ui/index_sr.html deleted file mode 100644 index ea2e269f..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/index_sr.html +++ /dev/null @@ -1,616 +0,0 @@
[Deleted 616-line Electron UI page titled "AI Solutions"; its markup was stripped during extraction and only the section heading "Super Resolution" and the labels "Input Dims : 128 x 128" / "Output Dims : ----" survive.]
- - - - - - - - \ No newline at end of file diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/main.js b/ai-solutions/windows/electron-app-cv/electron_app_ui/main.js deleted file mode 100644 index 7880d135..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/main.js +++ /dev/null @@ -1,189 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -// Modules to control application life and create native browser window -const { app, BrowserWindow } = require('electron') -const path = require('path') -var processes = []; //global list to hold PID(s) - -//Module to kill child processes -const killSubprocesses = (main_pid) => { - let cleanup_completed = false; - const psTree = require("ps-tree"); - console.log("killSubprocesses: "); - psTree(main_pid, function (err, children) { - let child_pids_array = [main_pid].concat(children.map(function (p){ - console.log("PID: ",p.PID); - return p.PID})); - child_pids_array.forEach(function (pid) { - console.log("Killing PIDS: ", pid); - process.kill(pid); - }); - cleanup_completed= true; - }); - return new Promise(function (resolve, reject) { - (function waitForSubProcessCleanup() { - if (cleanup_completed) return resolve(); - setTimeout(waitForSubProcessCleanup, 30); - })(); - }); -}; - -function createWindow () { - // Create the browser window. - const mainWindow = new BrowserWindow({ - width: 800, - height: 600, - webPreferences: { - preload: path.join(__dirname, 'preload.js') - } - }) - - mainWindow.maximize() - // and load the index.html of the app. - mainWindow.loadFile('ai-solutions.html') - console.log("Opened") - // Open the DevTools. - // mainWindow.webContents.openDevTools() -} - -// This method will be called when Electron has finished -// initialization and is ready to create browser windows. -// Some APIs can only be used after this event occurs. 
-app.whenReady().then(() => { - - - console.log("APP ready") - - server_exe_path = path.join( - __dirname, - 'dist-python', - 'server', - 'server.exe' - ); - - // console.log("EXE path:", server_exe_path) - - //Run Flask Server - const execFile = require("child_process").spawn(server_exe_path); - processes.push(execFile); - execFile.stdout.on('data', (data) => { - console.log(`stdout: ${data}`); - console.log("stdout"); - }); - - execFile.stderr.on('data', (data) => { - console.error(`stderr: ${data}`); - console.log("stderr"); - }); - - execFile.on('close', (code) => { - console.log(`server child process exited with code ${code}`); - }); - - execFile.on('exit', function(code, signal) { - console.log(`EXITING CHILD PROCESS ${code} ${signal} ${execFile.pid}`); - }); - - execFile.on('error', function(err) { - console.log('Exe Not present at specified path (Use npm run package to make .exe) and paste it at ' + server_exe_path); - processes = processes.filter(function (iter_el) { - return iter_el != execFile; - }); - }); - - - - const { exec } = require("child_process"); - const processName = "server.exe" - const command = "tasklist /fi \"imagename eq " + processName + "\""; - - // Run the command - exec(command, (err, stdout, stderr) => { - if (err) { - console.error(`Error executing the command: ${err.message}`); - return; - } - - // Check if the output contains the process name - const regex = new RegExp(processName, "i"); - const isRunning = regex.test(stdout); - - // Print the result - console.log(`${processName} is ${isRunning ? "running" : "not running"}`); - }); - - - - - - //Run SNPE exe - cpp_exe_path = path.join( - __dirname, - 'Release', - 'snpe-sample.exe' - ); - - // console.log("cpp_exe_path path:", cpp_exe_path) - const cppexecFile = require("child_process").spawn(cpp_exe_path); - processes.push(cppexecFile); - cppexecFile.stdout.on('data', (data) => { - console.log(`stdout: ${data}`); - console.log("stdout"); - }); - - cppexecFile.stderr.on('data', (data) => { - console.error(`stderr: ${data}`); - console.log("stderr"); - }); - - cppexecFile.on('close', (code) => { - console.log(`snpe-sample child process exited with code ${code}`); - }); - - cppexecFile.on('exit', function(code, signal) { - console.log(`EXITING CHILD PROCESS ${code} ${signal} ${cppexecFile.pid}`); - }); - - cppexecFile.on('error', function(err) { - console.log('Exe Not present at specified path (Use npm run package to make .exe) and paste Release folder from SNPE_CPP_CODE at ' + cpp_exe_path); - processes = processes.filter(function (iter_el) { - return iter_el != cppexecFile; - }); - }); - createWindow(); - - - app.on('activate', function () { - console.log("ACTIVATE FUNCTION"); - // On macOS it's common to re-create a window in the app when the - // dock icon is clicked and there are no other windows open. - if (BrowserWindow.getAllWindows().length === 0) createWindow(); - }) - -}); - -// Quit when all windows are closed, except on macOS. There, it's common -// for applications and their menu bar to stay active until the user quits -// explicitly with Cmd + Q. 
-app.on('window-all-closed', function () { - if (process.platform !== 'darwin'){ - console.log("Inside not darwin"); - if(processes.length!=0){ - processes.forEach(function(proc) { - killSubprocesses(proc.pid).then(()=>{app.quit(); - }); - }); - } - else - { - app.quit(); - } - } -}); diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/package-lock.json b/ai-solutions/windows/electron-app-cv/electron_app_ui/package-lock.json deleted file mode 100644 index 5af810e6..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/package-lock.json +++ /dev/null @@ -1,2742 +0,0 @@ -{ - "name": "AI-SOLUTIONS", - "version": "1.0.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "AI-SOLUTIONS", - "version": "1.0.0", - "license": "BSD", - "dependencies": { - "node-cmd": "^5.0.0", - "ps-tree": "^1.2.0" - }, - "devDependencies": { - "electron": "^21.0.0", - "electron-builder": "^21.0.0" - } - }, - "node_modules/@electron/get": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/@electron/get/-/get-1.14.1.tgz", - "integrity": "sha512-BrZYyL/6m0ZXz/lDxy/nlVhQz+WF+iPS6qXolEU8atw7h6v1aYkjwJZ63m+bJMBTxDE66X+r2tPS4a/8C82sZw==", - "dev": true, - "dependencies": { - "debug": "^4.1.1", - "env-paths": "^2.2.0", - "fs-extra": "^8.1.0", - "got": "^9.6.0", - "progress": "^2.0.3", - "semver": "^6.2.0", - "sumchecker": "^3.0.1" - }, - "engines": { - "node": ">=8.6" - }, - "optionalDependencies": { - "global-agent": "^3.0.0", - "global-tunnel-ng": "^2.7.1" - } - }, - "node_modules/@sindresorhus/is": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz", - "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/@szmarczak/http-timer": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz", - "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==", - "dev": true, - "dependencies": { - "defer-to-connect": "^1.0.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/@types/debug": { - "version": "4.1.10", - "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.10.tgz", - "integrity": "sha512-tOSCru6s732pofZ+sMv9o4o3Zc+Sa8l3bxd/tweTQudFn06vAzb13ZX46Zi6m6EJ+RUbRTHvgQJ1gBtSgkaUYA==", - "dev": true, - "dependencies": { - "@types/ms": "*" - } - }, - "node_modules/@types/ms": { - "version": "0.7.33", - "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.33.tgz", - "integrity": "sha512-AuHIyzR5Hea7ij0P9q7vx7xu4z0C28ucwjAZC0ja7JhINyCnOw8/DnvAPQQ9TfOlCtZAmCERKQX9+o1mgQhuOQ==", - "dev": true - }, - "node_modules/@types/node": { - "version": "18.15.11", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.11.tgz", - "integrity": "sha512-E5Kwq2n4SbMzQOn6wnmBjuK9ouqlURrcZDVfbo9ftDDTFt3nk7ZKK4GMOzoYgnpQJKcxwQw+lGaBvvlMo0qN/Q==", - "dev": true, - "optional": true - }, - "node_modules/@types/yauzl": { - "version": "2.10.0", - "resolved": "https://registry.npmjs.org/@types/yauzl/-/yauzl-2.10.0.tgz", - "integrity": "sha512-Cn6WYCm0tXv8p6k+A8PvbDG763EDpBoTzHdA+Q/MF6H3sapGjCm9NzoaJncJS9tUKSuCoDs9XHxYYsQDgxR6kw==", - "dev": true, - "optional": true, - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@xmldom/xmldom": { - "version": "0.8.10", - "resolved": "https://registry.npmjs.org/@xmldom/xmldom/-/xmldom-0.8.10.tgz", - "integrity": 
"sha512-2WALfTl4xo2SkGCYRt6rDTFfk9R1czmBvUQy12gK2KuRKIpWEhcbbzy8EZXtz/jkRqHX8bFEc6FC1HjX4TUWYw==", - "dev": true, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/7zip-bin": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/7zip-bin/-/7zip-bin-4.1.0.tgz", - "integrity": "sha512-AsnBZN3a8/JcNt+KPkGGODaA4c7l3W5+WpeKgGSbstSLxqWtTXqd1ieJGBQ8IFCtRg8DmmKUcSkIkUc0A4p3YA==", - "dev": true - }, - "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "dev": true, - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/ansi-align": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", - "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", - "dev": true, - "dependencies": { - "string-width": "^4.1.0" - } - }, - "node_modules/ansi-align/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-align/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, - "node_modules/ansi-align/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-align/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-align/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-regex": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.1.tgz", - "integrity": 
"sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/app-builder-bin": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/app-builder-bin/-/app-builder-bin-3.0.1.tgz", - "integrity": "sha512-eSC1ZxQIGuvhXKB9m7rRwTbou522FbQuc95XoqOMqMjf50//So1IjNPFOzXd6SDHcMx7f5SYEW5AYSs1Pwovtw==", - "dev": true - }, - "node_modules/app-builder-lib": { - "version": "21.0.0", - "resolved": "https://registry.npmjs.org/app-builder-lib/-/app-builder-lib-21.0.0.tgz", - "integrity": "sha512-KOneTGye7fVU4TPrgt+Q9RqIwcQ4/etBUCI0AbxE+N55JrjMmFd4L85N0qduDksmO3rOlIGRI2e3FshyBvOp1A==", - "dev": true, - "dependencies": { - "7zip-bin": "~4.1.0", - "app-builder-bin": "3.0.1", - "async-exit-hook": "^2.0.1", - "bluebird-lst": "^1.0.9", - "builder-util": "10.2.0", - "builder-util-runtime": "8.2.5", - "chromium-pickle-js": "^0.2.0", - "debug": "^4.1.1", - "ejs": "^2.6.2", - "electron-osx-sign": "0.4.11", - "electron-publish": "21.0.0", - "fs-extra-p": "^8.0.2", - "hosted-git-info": "^2.7.1", - "is-ci": "^2.0.0", - "isbinaryfile": "^4.0.1", - "js-yaml": "^3.13.1", - "lazy-val": "^1.0.4", - "minimatch": "^3.0.4", - "normalize-package-data": "^2.5.0", - "plist": "^3.0.1", - "read-config-file": "3.3.0", - "sanitize-filename": "^1.6.1", - "semver": "^6.1.2", - "temp-file": "^3.3.3" - }, - "engines": { - "node": ">=6.11.4" - } - }, - "node_modules/argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "dev": true, - "dependencies": { - "sprintf-js": "~1.0.2" - } - }, - "node_modules/argparse/node_modules/sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", - "dev": true - }, - "node_modules/async-exit-hook": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/async-exit-hook/-/async-exit-hook-2.0.1.tgz", - "integrity": "sha512-NW2cX8m1Q7KPA7a5M2ULQeZ2wR5qI5PAbw5L0UOMxdioVk9PMZ0h1TmyZEkPYrCvYjDlFICusOu1dlEKAAeXBw==", - "dev": true, - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/bluebird": { - "version": "3.7.2", - "resolved": 
"https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", - "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==", - "dev": true - }, - "node_modules/bluebird-lst": { - "version": "1.0.9", - "resolved": "https://registry.npmjs.org/bluebird-lst/-/bluebird-lst-1.0.9.tgz", - "integrity": "sha512-7B1Rtx82hjnSD4PGLAjVWeYH3tHAcVUmChh85a3lltKQm6FresXh9ErQo6oAv6CqxttczC3/kEg8SY5NluPuUw==", - "dev": true, - "dependencies": { - "bluebird": "^3.5.5" - } - }, - "node_modules/boolean": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/boolean/-/boolean-3.2.0.tgz", - "integrity": "sha512-d0II/GO9uf9lfUHH2BQsjxzRJZBdsjgsBiW4BvhWk/3qoKwQFjIDVN19PfX8F2D/r9PCMTtLWjYVCFrpeYUzsw==", - "dev": true, - "optional": true - }, - "node_modules/boxen": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-3.2.0.tgz", - "integrity": "sha512-cU4J/+NodM3IHdSL2yN8bqYqnmlBTidDR4RC7nJs61ZmtGz8VZzM3HLQX0zY5mrSmPtR3xWwsq2jOUQqFZN8+A==", - "dev": true, - "dependencies": { - "ansi-align": "^3.0.0", - "camelcase": "^5.3.1", - "chalk": "^2.4.2", - "cli-boxes": "^2.2.0", - "string-width": "^3.0.0", - "term-size": "^1.2.0", - "type-fest": "^0.3.0", - "widest-line": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/boxen/node_modules/type-fest": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.3.1.tgz", - "integrity": "sha512-cUGJnCdr4STbePCgqNFbpVNCepa+kAVohJs1sLhxzdH+gnEoOd8VhbYa7pD3zZYGiURWM2xzEII3fQcRizDkYQ==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/buffer-alloc": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/buffer-alloc/-/buffer-alloc-1.2.0.tgz", - "integrity": "sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==", - "dev": true, - "dependencies": { - "buffer-alloc-unsafe": "^1.1.0", - "buffer-fill": "^1.0.0" - } - }, - "node_modules/buffer-alloc-unsafe": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", - "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==", - "dev": true - }, - "node_modules/buffer-crc32": { - "version": "0.2.13", - "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", - "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/buffer-fill": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz", - "integrity": "sha512-T7zexNBwiiaCOGDg9xNX9PBmjrubblRkENuptryuI64URkXDFum9il/JGL8Lm8wYfAXpredVXXZz7eMHilimiQ==", - "dev": true - }, - "node_modules/buffer-from": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", - "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", - "dev": true - }, - "node_modules/builder-util": { - "version": "10.2.0", - "resolved": 
"https://registry.npmjs.org/builder-util/-/builder-util-10.2.0.tgz", - "integrity": "sha512-9x+RpqKl50VnestgFa+0r9atMq7NUF0b719W8NSzRg/WOrPI9z3oJiLJv0L+JOSzLiu80Fu97kvEb/1l3+/9TQ==", - "dev": true, - "dependencies": { - "@types/debug": "^4.1.4", - "7zip-bin": "~4.1.0", - "app-builder-bin": "3.0.1", - "bluebird-lst": "^1.0.9", - "builder-util-runtime": "^8.2.5", - "chalk": "^2.4.2", - "debug": "^4.1.1", - "fs-extra-p": "^8.0.2", - "is-ci": "^2.0.0", - "js-yaml": "^3.13.1", - "source-map-support": "^0.5.12", - "stat-mode": "^0.3.0", - "temp-file": "^3.3.3" - } - }, - "node_modules/builder-util-runtime": { - "version": "8.2.5", - "resolved": "https://registry.npmjs.org/builder-util-runtime/-/builder-util-runtime-8.2.5.tgz", - "integrity": "sha512-YILT+YUlxrE3yNB6mDC1tF+Q24mr1LSYdjP5U861jbBeDZfvy1/VPDzW3boMVrDtzYnDnvkYrzLJnoh6TXA75w==", - "dev": true, - "dependencies": { - "bluebird-lst": "^1.0.9", - "debug": "^4.1.1", - "fs-extra-p": "^8.0.2", - "sax": "^1.2.4" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/cacheable-request": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz", - "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==", - "dev": true, - "dependencies": { - "clone-response": "^1.0.2", - "get-stream": "^5.1.0", - "http-cache-semantics": "^4.0.0", - "keyv": "^3.0.0", - "lowercase-keys": "^2.0.0", - "normalize-url": "^4.1.0", - "responselike": "^1.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cacheable-request/node_modules/lowercase-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", - "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dev": true, - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/chalk/node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", - "dev": true, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/chromium-pickle-js": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/chromium-pickle-js/-/chromium-pickle-js-0.2.0.tgz", - "integrity": "sha512-1R5Fho+jBq0DDydt+/vHWj5KJNJCKdARKOCwZUen84I5BreWoLqRLANH1U87eJy1tiASPtMnGqJJq0ZsLoRPOw==", - "dev": true - }, - "node_modules/ci-info": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz", - "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==", - "dev": true - }, - "node_modules/cli-boxes": { - "version": 
"2.2.1", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", - "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==", - "dev": true, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cliui": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", - "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", - "dev": true, - "dependencies": { - "string-width": "^3.1.0", - "strip-ansi": "^5.2.0", - "wrap-ansi": "^5.1.0" - } - }, - "node_modules/clone-response": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.3.tgz", - "integrity": "sha512-ROoL94jJH2dUVML2Y/5PEDNaSHgeOdSDicUyS7izcF63G6sTc/FTjLub4b8Il9S8S0beOfYt0TaA5qvFK+w0wA==", - "dev": true, - "dependencies": { - "mimic-response": "^1.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", - "dev": true - }, - "node_modules/compare-version": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/compare-version/-/compare-version-0.1.2.tgz", - "integrity": "sha512-pJDh5/4wrEnXX/VWRZvruAGHkzKdr46z11OlTPN+VrATlWWhSKewNCJ1futCO5C7eJB3nPMFZA1LeYtcFboZ2A==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "dev": true - }, - "node_modules/config-chain": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", - "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", - "dev": true, - "optional": true, - "dependencies": { - "ini": "^1.3.4", - "proto-list": "~1.2.1" - } - }, - "node_modules/configstore": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/configstore/-/configstore-4.0.0.tgz", - "integrity": "sha512-CmquAXFBocrzaSM8mtGPMM/HiWmyIpr4CcJl/rgY2uCObZ/S7cKU0silxslqJejl+t/T9HS8E0PUNQD81JGUEQ==", - "dev": true, - "dependencies": { - "dot-prop": "^4.1.0", - "graceful-fs": "^4.1.2", - "make-dir": "^1.0.0", - "unique-string": "^1.0.0", - "write-file-atomic": "^2.0.0", - "xdg-basedir": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cross-spawn": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz", - "integrity": "sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A==", - "dev": true, - "dependencies": { - "lru-cache": "^4.0.1", - "shebang-command": "^1.2.0", - "which": "^1.2.9" - } - }, - "node_modules/cross-spawn/node_modules/lru-cache": { - "version": "4.1.5", - 
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", - "dev": true, - "dependencies": { - "pseudomap": "^1.0.2", - "yallist": "^2.1.2" - } - }, - "node_modules/cross-spawn/node_modules/yallist": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": "sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==", - "dev": true - }, - "node_modules/crypto-random-string": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-1.0.0.tgz", - "integrity": "sha512-GsVpkFPlycH7/fRR7Dhcmnoii54gV1nz7y4CWyeFS14N+JVBBhY+r8amRHE4BwSYal7BPTDp8isvAlCxyFt3Hg==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/decompress-response": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", - "integrity": "sha512-BzRPQuY1ip+qDonAOz42gRm/pg9F768C+npV/4JOsxRC2sq+Rlk+Q4ZCAsOhnIaMrgarILY+RMUIvMmmX1qAEA==", - "dev": true, - "dependencies": { - "mimic-response": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", - "dev": true, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/defer-to-connect": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz", - "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==", - "dev": true - }, - "node_modules/define-data-property": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.1.tgz", - "integrity": "sha512-E7uGkTzkk1d0ByLeSc6ZsFS79Axg+m1P/VsgYsxHgiuc3tFSj+MjMIwe90FC4lOAZzNBdY7kkO2P2wKdsQ1vgQ==", - "dev": true, - "optional": true, - "dependencies": { - "get-intrinsic": "^1.2.1", - "gopd": "^1.0.1", - "has-property-descriptors": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/define-properties": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", - "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", - "dev": true, - "optional": true, - "dependencies": { - "define-data-property": "^1.0.1", - "has-property-descriptors": "^1.0.0", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": 
"https://github.com/sponsors/ljharb" - } - }, - "node_modules/detect-node": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", - "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==", - "dev": true, - "optional": true - }, - "node_modules/dmg-builder": { - "version": "6.8.0", - "resolved": "https://registry.npmjs.org/dmg-builder/-/dmg-builder-6.8.0.tgz", - "integrity": "sha512-YKPcGSpUc8L5HhZhzB2IlPm0kYCwDw4pCfOuXROjUR/EWACnUUIcYZ1suL4/AX26xAcBkh/FP1phPCzOjYy5rA==", - "dev": true, - "dependencies": { - "app-builder-lib": "~21.0.0", - "bluebird-lst": "^1.0.9", - "builder-util": "~10.2.0", - "fs-extra-p": "^8.0.2", - "iconv-lite": "^0.4.24", - "js-yaml": "^3.13.1", - "parse-color": "^1.0.0", - "sanitize-filename": "^1.6.1" - } - }, - "node_modules/dot-prop": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-4.2.1.tgz", - "integrity": "sha512-l0p4+mIuJIua0mhxGoh4a+iNL9bmeK5DvnSVQa6T0OhrVmaEa1XScX5Etc673FePCJOArq/4Pa2cLGODUWTPOQ==", - "dev": true, - "dependencies": { - "is-obj": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/dotenv": { - "version": "8.6.0", - "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-8.6.0.tgz", - "integrity": "sha512-IrPdXQsk2BbzvCBGBOTmmSH5SodmqZNt4ERAZDmW4CT+tL8VtvinqywuANaFu4bOMWki16nqf0e4oC0QIaDr/g==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/dotenv-expand": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/dotenv-expand/-/dotenv-expand-5.1.0.tgz", - "integrity": "sha512-YXQl1DSa4/PQyRfgrv6aoNjhasp/p4qs9FjJ4q4cQk+8m4r6k4ZSiEyytKG8f8W9gi8WsQtIObNmKd+tMzNTmA==", - "dev": true - }, - "node_modules/duplexer": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", - "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" - }, - "node_modules/duplexer3": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.5.tgz", - "integrity": "sha512-1A8za6ws41LQgv9HrE/66jyC5yuSjQ3L/KOpFtoBilsAK2iA2wuS5rTt1OCzIvtS2V7nVmedsUU+DGRcjBmOYA==", - "dev": true - }, - "node_modules/ejs": { - "version": "2.7.4", - "resolved": "https://registry.npmjs.org/ejs/-/ejs-2.7.4.tgz", - "integrity": "sha512-7vmuyh5+kuUyJKePhQfRQBhXV5Ce+RnaeeQArKu1EAMpL3WbgMt5WG6uQZpEVvYSSsxMXRKOewtDk9RaTKXRlA==", - "dev": true, - "hasInstallScript": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/electron": { - "version": "21.0.0", - "resolved": "https://registry.npmjs.org/electron/-/electron-21.0.0.tgz", - "integrity": "sha512-7HGxgaH0goYsq5m23rbLuKNwxOP4wS/JTNVTYt4n+a4sPkxI97Fcngh55pHaIvvMO3jKZ9yzll7L/D1dHwMdLA==", - "dev": true, - "hasInstallScript": true, - "dependencies": { - "@electron/get": "^1.14.1", - "@types/node": "^16.11.26", - "extract-zip": "^2.0.1" - }, - "bin": { - "electron": "cli.js" - }, - "engines": { - "node": ">= 10.17.0" - } - }, - "node_modules/electron-builder": { - "version": "21.0.0", - "resolved": "https://registry.npmjs.org/electron-builder/-/electron-builder-21.0.0.tgz", - "integrity": "sha512-XEVsCqU2gVy1sbRo2DYlzrXgmXvjrBo5FOjstqoL+IbkTVAsSpYcrUno/SWWdyFf0JDtOF6r/Hb0sV6rh4WoNA==", - "dev": true, - "dependencies": { - "app-builder-lib": "21.0.0", - "bluebird-lst": "^1.0.9", - "builder-util": "10.2.0", - "builder-util-runtime": "8.2.5", - "chalk": "^2.4.2", - "dmg-builder": "6.8.0", - "fs-extra-p": "^8.0.2", - "is-ci": 
"^2.0.0", - "lazy-val": "^1.0.4", - "read-config-file": "3.3.0", - "sanitize-filename": "^1.6.1", - "update-notifier": "^3.0.0", - "yargs": "^13.2.4" - }, - "bin": { - "build": "out/cli/cli.js", - "electron-builder": "out/cli/cli.js", - "install-app-deps": "out/cli/install-app-deps.js" - }, - "engines": { - "node": ">=8.12.0" - } - }, - "node_modules/electron-osx-sign": { - "version": "0.4.11", - "resolved": "https://registry.npmjs.org/electron-osx-sign/-/electron-osx-sign-0.4.11.tgz", - "integrity": "sha512-VVd40nrnVqymvFrY9ZkOYgHJOvexHHYTR3di/SN+mjJ0OWhR1I8BRVj3U+Yamw6hnkZZNKZp52rqL5EFAAPFkQ==", - "deprecated": "Please use @electron/osx-sign moving forward. Be aware the API is slightly different", - "dev": true, - "dependencies": { - "bluebird": "^3.5.0", - "compare-version": "^0.1.2", - "debug": "^2.6.8", - "isbinaryfile": "^3.0.2", - "minimist": "^1.2.0", - "plist": "^3.0.1" - }, - "bin": { - "electron-osx-flat": "bin/electron-osx-flat.js", - "electron-osx-sign": "bin/electron-osx-sign.js" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/electron-osx-sign/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/electron-osx-sign/node_modules/isbinaryfile": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-3.0.3.tgz", - "integrity": "sha512-8cJBL5tTd2OS0dM4jz07wQd5g0dCCqIhUxPIGtZfa5L6hWlvV5MHTITy/DBAsF+Oe2LS1X3krBUhNwaGUWpWxw==", - "dev": true, - "dependencies": { - "buffer-alloc": "^1.2.0" - }, - "engines": { - "node": ">=0.6.0" - } - }, - "node_modules/electron-osx-sign/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "dev": true - }, - "node_modules/electron-publish": { - "version": "21.0.0", - "resolved": "https://registry.npmjs.org/electron-publish/-/electron-publish-21.0.0.tgz", - "integrity": "sha512-LDTNfqpbwvVwB9KvR5ehDrFkaaJWf/hnkUUFUh1+r7qGlDjyYHn82falV3FZW8bcjVtaq4xhy4OhglVf2x/oiA==", - "dev": true, - "dependencies": { - "bluebird-lst": "^1.0.9", - "builder-util": "~10.2.0", - "builder-util-runtime": "^8.2.5", - "chalk": "^2.4.2", - "fs-extra-p": "^8.0.2", - "lazy-val": "^1.0.4", - "mime": "^2.4.4" - } - }, - "node_modules/electron/node_modules/@types/node": { - "version": "16.18.59", - "resolved": "https://registry.npmjs.org/@types/node/-/node-16.18.59.tgz", - "integrity": "sha512-PJ1w2cNeKUEdey4LiPra0ZuxZFOGvetswE8qHRriV/sUkL5Al4tTmPV9D2+Y/TPIxTHHgxTfRjZVKWhPw/ORhQ==", - "dev": true - }, - "node_modules/emoji-regex": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", - "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==", - "dev": true - }, - "node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", - "dev": true, - "optional": true, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - 
"integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "dev": true, - "dependencies": { - "once": "^1.4.0" - } - }, - "node_modules/env-paths": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", - "integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/es6-error": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/es6-error/-/es6-error-4.1.1.tgz", - "integrity": "sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg==", - "dev": true, - "optional": true - }, - "node_modules/escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", - "dev": true, - "optional": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "dev": true, - "bin": { - "esparse": "bin/esparse.js", - "esvalidate": "bin/esvalidate.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/event-stream": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/event-stream/-/event-stream-3.3.4.tgz", - "integrity": "sha512-QHpkERcGsR0T7Qm3HNJSyXKEEj8AHNxkY3PK8TS2KJvQ7NiSHe3DDpwVKKtoYprL/AreyzFBeIkBIWChAqn60g==", - "dependencies": { - "duplexer": "~0.1.1", - "from": "~0", - "map-stream": "~0.1.0", - "pause-stream": "0.0.11", - "split": "0.3", - "stream-combiner": "~0.0.4", - "through": "~2.3.1" - } - }, - "node_modules/execa": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-0.7.0.tgz", - "integrity": "sha512-RztN09XglpYI7aBBrJCPW95jEH7YF1UEPOoX9yDhUTPdp7mK+CQvnLTuD10BNXZ3byLTu2uehZ8EcKT/4CGiFw==", - "dev": true, - "dependencies": { - "cross-spawn": "^5.0.1", - "get-stream": "^3.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/execa/node_modules/get-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/extract-zip": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-2.0.1.tgz", - "integrity": "sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==", - "dev": true, - "dependencies": { - "debug": "^4.1.1", - "get-stream": "^5.1.0", - "yauzl": "^2.10.0" - }, - "bin": { - "extract-zip": "cli.js" - }, - "engines": { - "node": ">= 10.17.0" - }, - "optionalDependencies": { - "@types/yauzl": "^2.9.1" - } - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": 
"sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true - }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", - "dev": true - }, - "node_modules/fd-slicer": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", - "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==", - "dev": true, - "dependencies": { - "pend": "~1.2.0" - } - }, - "node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dev": true, - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/from": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/from/-/from-0.1.7.tgz", - "integrity": "sha512-twe20eF1OxVxp/ML/kq2p1uc6KvFK/+vs8WjEbeKmV2He22MKm7YF2ANIt+EOqhJ5L3K/SuuPhk0hWQDjOM23g==" - }, - "node_modules/fs-extra": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz", - "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" - }, - "engines": { - "node": ">=6 <7 || >=8" - } - }, - "node_modules/fs-extra-p": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/fs-extra-p/-/fs-extra-p-8.1.0.tgz", - "integrity": "sha512-sCLpU5kk5CvrWZvFM9dUlqPgHrE02AEt6XYzF7kDscr5COc7DHfhNfODTXt0bkVNmt5DkvU2uJSYjorxY3bRKA==", - "dev": true, - "dependencies": { - "bluebird-lst": "^1.0.9", - "fs-extra": "^8.1.0" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/function-bind": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", - "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", - "dev": true, - "optional": true, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "dev": true, - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, - "node_modules/get-intrinsic": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.1.tgz", - "integrity": "sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw==", - "dev": true, - "optional": true, - "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-proto": "^1.0.1", - "has-symbols": "^1.0.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "dev": true, - "dependencies": { - 
"pump": "^3.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/global-agent": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/global-agent/-/global-agent-3.0.0.tgz", - "integrity": "sha512-PT6XReJ+D07JvGoxQMkT6qji/jVNfX/h364XHZOWeRzy64sSFr+xJ5OX7LI3b4MPQzdL4H8Y8M0xzPpsVMwA8Q==", - "dev": true, - "optional": true, - "dependencies": { - "boolean": "^3.0.1", - "es6-error": "^4.1.1", - "matcher": "^3.0.0", - "roarr": "^2.15.3", - "semver": "^7.3.2", - "serialize-error": "^7.0.1" - }, - "engines": { - "node": ">=10.0" - } - }, - "node_modules/global-agent/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", - "dev": true, - "optional": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/global-dirs": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-0.1.1.tgz", - "integrity": "sha512-NknMLn7F2J7aflwFOlGdNIuCDpN3VGoSoB+aap3KABFWbHVn1TCgFC+np23J8W2BiZbjfEw3BFBycSMv1AFblg==", - "dev": true, - "dependencies": { - "ini": "^1.3.4" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/global-tunnel-ng": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/global-tunnel-ng/-/global-tunnel-ng-2.7.1.tgz", - "integrity": "sha512-4s+DyciWBV0eK148wqXxcmVAbFVPqtc3sEtUE/GTQfuU80rySLcMhUmHKSHI7/LDj8q0gDYI1lIhRRB7ieRAqg==", - "dev": true, - "optional": true, - "dependencies": { - "encodeurl": "^1.0.2", - "lodash": "^4.17.10", - "npm-conf": "^1.1.3", - "tunnel": "^0.0.6" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/globalthis": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz", - "integrity": "sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==", - "dev": true, - "optional": true, - "dependencies": { - "define-properties": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/gopd": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", - "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", - "dev": true, - "optional": true, - "dependencies": { - "get-intrinsic": "^1.1.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/got": { - "version": "9.6.0", - "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", - "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==", - "dev": true, - "dependencies": { - "@sindresorhus/is": "^0.14.0", - "@szmarczak/http-timer": "^1.1.2", - "cacheable-request": "^6.0.0", - "decompress-response": "^3.3.0", - "duplexer3": "^0.1.4", - "get-stream": "^4.1.0", - "lowercase-keys": "^1.0.1", - "mimic-response": "^1.0.1", - "p-cancelable": "^1.0.0", - "to-readable-stream": "^1.0.0", - "url-parse-lax": "^3.0.0" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/got/node_modules/get-stream": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", - "integrity": 
"sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", - "dev": true, - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.10", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", - "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==", - "dev": true - }, - "node_modules/has": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.4.tgz", - "integrity": "sha512-qdSAmqLF6209RFj4VVItywPMbm3vWylknmB3nvNiUIs72xAimcM8nVYxYr7ncvZq5qzk9MKIZR8ijqD/1QuYjQ==", - "dev": true, - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/has-property-descriptors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz", - "integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==", - "dev": true, - "optional": true, - "dependencies": { - "get-intrinsic": "^1.1.1" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", - "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", - "dev": true, - "optional": true, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", - "dev": true, - "optional": true, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-yarn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz", - "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/hosted-git-info": { - "version": "2.8.9", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", - "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", - "dev": true - }, - "node_modules/http-cache-semantics": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", - "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==", - "dev": true - }, - "node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "dev": true, - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/import-lazy": { - "version": "2.1.0", 
- "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz", - "integrity": "sha512-m7ZEHgtw69qOGw+jwxXkHlrlIPdTGkyh66zXZ1ajZbxkDBNjSY/LGbmjc7h0s2ELsUDTAhFr55TrPSSqJGPG0A==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", - "dev": true, - "engines": { - "node": ">=0.8.19" - } - }, - "node_modules/ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", - "dev": true - }, - "node_modules/is-ci": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz", - "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==", - "dev": true, - "dependencies": { - "ci-info": "^2.0.0" - }, - "bin": { - "is-ci": "bin.js" - } - }, - "node_modules/is-core-module": { - "version": "2.13.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.0.tgz", - "integrity": "sha512-Z7dk6Qo8pOCp3l4tsX2C5ZVas4V+UxwQodwZhLopL91TX8UyyHEXafPcyoeeWuLrwzHcr3igO78wNLwHJHsMCQ==", - "dev": true, - "dependencies": { - "has": "^1.0.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha512-VHskAKYM8RfSFXwee5t5cbN5PZeq1Wrh6qd5bkyiXIf6UQcN6w/A0eXM9r6t8d+GYOh+o6ZhiEnb88LN/Y8m2w==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/is-installed-globally": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.1.0.tgz", - "integrity": "sha512-ERNhMg+i/XgDwPIPF3u24qpajVreaiSuvpb1Uu0jugw7KKcxGyCX8cgp8P5fwTmAuXku6beDHHECdKArjlg7tw==", - "dev": true, - "dependencies": { - "global-dirs": "^0.1.0", - "is-path-inside": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/is-npm": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-3.0.0.tgz", - "integrity": "sha512-wsigDr1Kkschp2opC4G3yA6r9EgVA6NjRpWzIi9axXqeIaAATPRJc4uLujXe3Nd9uO8KoDyA4MD6aZSeXTADhA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-obj": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", - "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-path-inside": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-1.0.1.tgz", - "integrity": "sha512-qhsCR/Esx4U4hg/9I19OVUAJkGWtjRYHMRgUMZE2TDdj+Ag+kttZanLupfddNyglzz50cUlmWzUaI37GDfNx/g==", - "dev": true, - "dependencies": { - "path-is-inside": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/is-yarn-global": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz", - "integrity": "sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw==", - "dev": true - }, - "node_modules/isbinaryfile": { - "version": "4.0.10", - "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-4.0.10.tgz", - "integrity": "sha512-iHrqe5shvBUcFbmZq9zOQHBoeOhZJu6RQGrDpBgenUm/Am+F3JM2MgQj+rK3Z601fzrL5gLZWtAPH2OBaSVcyw==", - "dev": true, - "engines": { - "node": ">= 8.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/gjtorikian/" - } - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true - }, - "node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dev": true, - "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/json-buffer": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", - "integrity": "sha512-CuUqjv0FUZIdXkHPI8MezCnFCdaTAacej1TZYulLoAg1h/PhwkdXFN4V/gzY4g+fMBCOV2xF+rp7t2XD2ns/NQ==", - "dev": true - }, - "node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true - }, - "node_modules/json-stringify-safe": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==", - "dev": true, - "optional": true - }, - "node_modules/json5": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", - "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", - "dev": true, - "bin": { - "json5": "lib/cli.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==", - "dev": true, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/keyv": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz", - "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==", - "dev": true, - "dependencies": { - "json-buffer": "3.0.0" - } - }, - "node_modules/latest-version": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz", - "integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==", - "dev": true, - "dependencies": { - "package-json": "^6.3.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/lazy-val": { - "version": "1.0.5", - "resolved": 
"https://registry.npmjs.org/lazy-val/-/lazy-val-1.0.5.tgz", - "integrity": "sha512-0/BnGCCfyUMkBpeDgWihanIAF9JmZhHBgUhEqzvf+adhNGLoP6TaiI5oF8oyb3I45P+PcnrqihSf01M0l0G5+Q==", - "dev": true - }, - "node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dev": true, - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true, - "optional": true - }, - "node_modules/lowercase-keys": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", - "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "optional": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/make-dir": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", - "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", - "dev": true, - "dependencies": { - "pify": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/map-stream": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/map-stream/-/map-stream-0.1.0.tgz", - "integrity": "sha512-CkYQrPYZfWnu/DAmVCpTSX/xHpKZ80eKh2lAkyA6AJTef6bW+6JpbQZN5rofum7da+SyN1bi5ctTm+lTfcCW3g==" - }, - "node_modules/matcher": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/matcher/-/matcher-3.0.0.tgz", - "integrity": "sha512-OkeDaAZ/bQCxeFAozM55PKcKU0yJMPGifLwV4Qgjitu+5MoAfSQN4lsLJeXZ1b8w0x+/Emda6MZgXS1jvsapng==", - "dev": true, - "optional": true, - "dependencies": { - "escape-string-regexp": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/mime": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-2.6.0.tgz", - "integrity": "sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg==", - "dev": true, - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/mimic-response": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", - "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/minimist": { - "version": "1.2.8", - "resolved": 
"https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", - "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", - "dev": true, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/node-cmd": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/node-cmd/-/node-cmd-5.0.0.tgz", - "integrity": "sha512-4sQTJmsS5uZKAPz/Df9fnIbmvOySfGdW+UreH4X5NcAOOpKjaE+K5wf4ehNBbZVPo0vQ36RkRnhhsXXJAT+Syw==", - "engines": { - "node": ">=6.4.0" - } - }, - "node_modules/normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "dev": true, - "dependencies": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - } - }, - "node_modules/normalize-package-data/node_modules/semver": { - "version": "5.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", - "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", - "dev": true, - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/normalize-url": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.1.tgz", - "integrity": "sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm-conf": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/npm-conf/-/npm-conf-1.1.3.tgz", - "integrity": "sha512-Yic4bZHJOt9RCFbRP3GgpqhScOY4HH3V2P8yBj6CeYq118Qr+BLXqT2JvpJ00mryLESpgOxf5XlFv4ZjXxLScw==", - "dev": true, - "optional": true, - "dependencies": { - "config-chain": "^1.1.11", - "pify": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/npm-run-path": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha512-lJxZYlT4DW/bRUtFh1MQIWqmLwQfAxnqWG4HhEdjMlkrJYnJn0Jrr2u3mgxqaWsdiBc76TYkTG/mhrnYTuzfHw==", - "dev": true, - "dependencies": { - "path-key": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "dev": true, - "optional": true, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "dev": true, - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/p-cancelable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz", - "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/p-finally": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dev": true, - "dependencies": { - "p-try": "^2.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dev": true, - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/package-json": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz", - "integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==", - "dev": true, - "dependencies": { - "got": "^9.6.0", - "registry-auth-token": "^4.0.0", - "registry-url": "^5.0.0", - "semver": "^6.2.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/parse-color": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/parse-color/-/parse-color-1.0.0.tgz", - "integrity": "sha512-fuDHYgFHJGbpGMgw9skY/bj3HL/Jrn4l/5rSspy00DoT4RyLnDcRvPxdZ+r6OFwIsgAuhDh4I09tAId4mI12bw==", - "dev": true, - "dependencies": { - "color-convert": "~0.5.0" - } - }, - "node_modules/parse-color/node_modules/color-convert": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-0.5.3.tgz", - "integrity": "sha512-RwBeO/B/vZR3dfKL1ye/vx8MHZ40ugzpyfeVG5GsiuGnrlMWe2o8wxBbLCpw9CsxV+wHuzYlCiWnybrIA0ling==", - "dev": true - }, - "node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/path-is-inside": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", - "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==", - "dev": true - }, - "node_modules/path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", - "dev": true - }, - "node_modules/pause-stream": { - "version": "0.0.11", - "resolved": 
"https://registry.npmjs.org/pause-stream/-/pause-stream-0.0.11.tgz", - "integrity": "sha512-e3FBlXLmN/D1S+zHzanP4E/4Z60oFAa3O051qt1pxa7DEJWKAyil6upYVXCWadEnuoqa4Pkc9oUx9zsxYeRv8A==", - "dependencies": { - "through": "~2.3" - } - }, - "node_modules/pend": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", - "integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==", - "dev": true - }, - "node_modules/pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/plist": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/plist/-/plist-3.1.0.tgz", - "integrity": "sha512-uysumyrvkUX0rX/dEVqt8gC3sTBzd4zoWfLeS29nb53imdaXVvLINYXTI2GNqzaMuvacNx4uJQ8+b3zXR0pkgQ==", - "dev": true, - "dependencies": { - "@xmldom/xmldom": "^0.8.8", - "base64-js": "^1.5.1", - "xmlbuilder": "^15.1.1" - }, - "engines": { - "node": ">=10.4.0" - } - }, - "node_modules/prepend-http": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", - "integrity": "sha512-ravE6m9Atw9Z/jjttRUZ+clIXogdghyZAuWJ3qEzjT+jI/dL1ifAqhZeC5VHzQp1MSt1+jxKkFNemj/iO7tVUA==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/progress": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", - "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", - "dev": true, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/proto-list": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", - "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==", - "dev": true, - "optional": true - }, - "node_modules/ps-tree": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/ps-tree/-/ps-tree-1.2.0.tgz", - "integrity": "sha512-0VnamPPYHl4uaU/nSFeZZpR21QAWRz+sRv4iW9+v/GS/J5U5iZB5BNN6J0RMoOvdx2gWM2+ZFMIm58q24e4UYA==", - "dependencies": { - "event-stream": "=3.3.4" - }, - "bin": { - "ps-tree": "bin/ps-tree.js" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/pseudomap": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz", - "integrity": "sha512-b/YwNhb8lk1Zz2+bXXpS/LK9OisiZZ1SNsSLxN1x2OXVEhW2Ckr/7mWE5vrC1ZTiJlD9g19jWszTmJsB+oEpFQ==", - "dev": true - }, - "node_modules/pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "dev": true, - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "node_modules/punycode": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", - "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/rc": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "dev": true, - "dependencies": { - 
"deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - }, - "bin": { - "rc": "cli.js" - } - }, - "node_modules/read-config-file": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/read-config-file/-/read-config-file-3.3.0.tgz", - "integrity": "sha512-VzA1UVvNwEYQi9wGVa7Cji/E6YNr3VwbPXUcHed00cYoZsqg1kYcMnHgIx8nt0NQjFz/ai/8n8Xq0rmHD153Gg==", - "dev": true, - "dependencies": { - "ajv": "^6.10.0", - "ajv-keywords": "^3.4.0", - "bluebird-lst": "^1.0.9", - "dotenv": "^8.0.0", - "dotenv-expand": "^5.1.0", - "fs-extra-p": "^8.0.2", - "js-yaml": "^3.13.1", - "json5": "^2.1.0", - "lazy-val": "^1.0.4" - } - }, - "node_modules/registry-auth-token": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.2.tgz", - "integrity": "sha512-PC5ZysNb42zpFME6D/XlIgtNGdTl8bBOCw90xQLVMpzuuubJKYDWFAEuUNc+Cn8Z8724tg2SDhDRrkVEsqfDMg==", - "dev": true, - "dependencies": { - "rc": "1.2.8" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/registry-url": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz", - "integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==", - "dev": true, - "dependencies": { - "rc": "^1.2.8" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/require-main-filename": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", - "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", - "dev": true - }, - "node_modules/resolve": { - "version": "1.22.8", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", - "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", - "dev": true, - "dependencies": { - "is-core-module": "^2.13.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - }, - "bin": { - "resolve": "bin/resolve" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/responselike": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", - "integrity": "sha512-/Fpe5guzJk1gPqdJLJR5u7eG/gNY4nImjbRDaVWVMRhne55TCmj2i9Q+54PBRfatRC8v/rIiv9BN0pMd9OV5EQ==", - "dev": true, - "dependencies": { - "lowercase-keys": "^1.0.0" - } - }, - "node_modules/roarr": { - "version": "2.15.4", - "resolved": "https://registry.npmjs.org/roarr/-/roarr-2.15.4.tgz", - "integrity": "sha512-CHhPh+UNHD2GTXNYhPWLnU8ONHdI+5DI+4EYIAOaiD63rHeYlZvyh8P+in5999TTSFgUYuKUAjzRI4mdh/p+2A==", - "dev": true, - "optional": true, - "dependencies": { - "boolean": "^3.0.1", - "detect-node": "^2.0.4", - "globalthis": "^1.0.1", - "json-stringify-safe": "^5.0.1", - "semver-compare": "^1.0.0", - "sprintf-js": "^1.1.2" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": 
"sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", - "dev": true - }, - "node_modules/sanitize-filename": { - "version": "1.6.3", - "resolved": "https://registry.npmjs.org/sanitize-filename/-/sanitize-filename-1.6.3.tgz", - "integrity": "sha512-y/52Mcy7aw3gRm7IrcGDFx/bCk4AhRh2eI9luHOQM86nZsqwiRkkq2GekHXBBD+SmPidc8i2PqtYZl+pWJ8Oeg==", - "dev": true, - "dependencies": { - "truncate-utf8-bytes": "^1.0.0" - } - }, - "node_modules/sax": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.3.0.tgz", - "integrity": "sha512-0s+oAmw9zLl1V1cS9BtZN7JAd0cW5e0QH4W3LWEK6a4LaLEA2OTpGYWDY+6XasBLtz6wkm3u1xRw95mRuJ59WA==", - "dev": true - }, - "node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/semver-compare": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/semver-compare/-/semver-compare-1.0.0.tgz", - "integrity": "sha512-YM3/ITh2MJ5MtzaM429anh+x2jiLVjqILF4m4oyQB18W7Ggea7BfqdH/wGMK7dDiMghv/6WG7znWMwUDzJiXow==", - "dev": true, - "optional": true - }, - "node_modules/semver-diff": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-2.1.0.tgz", - "integrity": "sha512-gL8F8L4ORwsS0+iQ34yCYv///jsOq0ZL7WP55d1HnJ32o7tyFYEFQZQA22mrLIacZdU6xecaBBZ+uEiffGNyXw==", - "dev": true, - "dependencies": { - "semver": "^5.0.3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/semver-diff/node_modules/semver": { - "version": "5.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", - "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", - "dev": true, - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/serialize-error": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/serialize-error/-/serialize-error-7.0.1.tgz", - "integrity": "sha512-8I8TjW5KMOKsZQTvoxjuSIa7foAwPWGOts+6o7sgjz41/qMD9VQHEDxi6PBvK2l0MXUmqZyNpUK+T2tQaaElvw==", - "dev": true, - "optional": true, - "dependencies": { - "type-fest": "^0.13.1" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/set-blocking": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==", - "dev": true - }, - "node_modules/shebang-command": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==", - "dev": true, - "dependencies": { - "shebang-regex": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": 
"sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "dev": true - }, - "node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-support": { - "version": "0.5.21", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", - "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", - "dev": true, - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "node_modules/spdx-correct": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.2.0.tgz", - "integrity": "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==", - "dev": true, - "dependencies": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - "node_modules/spdx-exceptions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", - "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==", - "dev": true - }, - "node_modules/spdx-expression-parse": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", - "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", - "dev": true, - "dependencies": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "node_modules/spdx-license-ids": { - "version": "3.0.16", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.16.tgz", - "integrity": "sha512-eWN+LnM3GR6gPu35WxNgbGl8rmY1AEmoMDvL/QD6zYmPWgywxWqJWNdLGT+ke8dKNWrcYgYjPpG5gbTfghP8rw==", - "dev": true - }, - "node_modules/split": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/split/-/split-0.3.3.tgz", - "integrity": "sha512-wD2AeVmxXRBoX44wAycgjVpMhvbwdI2aZjCkvfNcH1YqHQvJVa1duWc73OyVGJUc05fhFaTZeQ/PYsrmyH0JVA==", - "dependencies": { - "through": "2" - }, - "engines": { - "node": "*" - } - }, - "node_modules/sprintf-js": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.3.tgz", - "integrity": "sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA==", - "dev": true, - "optional": true - }, - "node_modules/stat-mode": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/stat-mode/-/stat-mode-0.3.0.tgz", - "integrity": "sha512-QjMLR0A3WwFY2aZdV0okfFEJB5TRjkggXZjxP3A1RsWsNHNu3YPv8btmtc6iCFZ0Rul3FE93OYogvhOUClU+ng==", - "dev": true - }, - "node_modules/stream-combiner": { - "version": "0.0.4", - "resolved": "https://registry.npmjs.org/stream-combiner/-/stream-combiner-0.0.4.tgz", - "integrity": "sha512-rT00SPnTVyRsaSz5zgSPma/aHSOic5U1prhYdRy5HS2kTZviFpmDgzilbtsJsxiroqACmayynDN/9VzIbX5DOw==", - "dependencies": { - "duplexer": "~0.1.1" - } - }, - "node_modules/string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dev": true, - "dependencies": { - 
"emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dev": true, - "dependencies": { - "ansi-regex": "^4.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/strip-eof": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", - "integrity": "sha512-7FCwGGmx8mD5xQd3RPUvnSpUXHM3BWuzjtpD4TXsfcZ9EL4azvVVUscFYwD9nx8Kh+uCBC00XBtAykoMHwTh8Q==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/sumchecker": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/sumchecker/-/sumchecker-3.0.1.tgz", - "integrity": "sha512-MvjXzkz/BOfyVDkG0oFOtBxHX2u3gKbMHIF/dXblZsgD3BWOFLmHovIpZY7BykJdAjcqRCBi1WYBNdEC9yI7vg==", - "dev": true, - "dependencies": { - "debug": "^4.1.0" - }, - "engines": { - "node": ">= 8.0" - } - }, - "node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dev": true, - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/supports-preserve-symlinks-flag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", - "dev": true, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/temp-file": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/temp-file/-/temp-file-3.4.0.tgz", - "integrity": "sha512-C5tjlC/HCtVUOi3KWVokd4vHVViOmGjtLwIh4MuzPo/nMYTV/p1urt3RnMz2IWXDdKEGJH3k5+KPxtqRsUYGtg==", - "dev": true, - "dependencies": { - "async-exit-hook": "^2.0.1", - "fs-extra": "^10.0.0" - } - }, - "node_modules/temp-file/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/temp-file/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/temp-file/node_modules/universalify": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/term-size": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/term-size/-/term-size-1.2.0.tgz", - "integrity": "sha512-7dPUZQGy/+m3/wjVz3ZW5dobSoD/02NxJpoXUX0WIyjfVS3l0c+b/+9phIDFA7FHzkYtwtMFgeGZ/Y8jVTeqQQ==", - "dev": true, - "dependencies": { - "execa": "^0.7.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==" - }, - "node_modules/to-readable-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz", - "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/truncate-utf8-bytes": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/truncate-utf8-bytes/-/truncate-utf8-bytes-1.0.2.tgz", - "integrity": "sha512-95Pu1QXQvruGEhv62XCMO3Mm90GscOCClvrIUwCM0PYOXK3kaF3l3sIHxx71ThJfcbM2O5Au6SO3AWCSEfW4mQ==", - "dev": true, - "dependencies": { - "utf8-byte-length": "^1.0.1" - } - }, - "node_modules/tunnel": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/tunnel/-/tunnel-0.0.6.tgz", - "integrity": "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==", - "dev": true, - "optional": true, - "engines": { - "node": ">=0.6.11 <=0.7.0 || >=0.7.3" - } - }, - "node_modules/type-fest": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.13.1.tgz", - "integrity": "sha512-34R7HTnG0XIJcBSn5XhDd7nNFPRcXYRZrBB2O2jdKqYODldSzBAqzsWoZYYvduky73toYS/ESqxPvkDf/F0XMg==", - "dev": true, - "optional": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/unique-string": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-1.0.0.tgz", - "integrity": "sha512-ODgiYu03y5g76A1I9Gt0/chLCzQjvzDy7DsZGsLOE/1MrF6wriEskSncj1+/C58Xk/kPZDppSctDybCwOSaGAg==", - "dev": true, - "dependencies": { - "crypto-random-string": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", - "dev": true, - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/update-notifier": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-3.0.1.tgz", - "integrity": "sha512-grrmrB6Zb8DUiyDIaeRTBCkgISYUgETNe7NglEbVsrLWXeESnlCSP50WfRSj/GmzMPl6Uchj24S/p80nP/ZQrQ==", - "dev": true, - "dependencies": { - "boxen": "^3.0.0", - "chalk": "^2.0.1", - "configstore": "^4.0.0", - "has-yarn": "^2.1.0", - "import-lazy": "^2.1.0", - "is-ci": "^2.0.0", - "is-installed-globally": "^0.1.0", - "is-npm": "^3.0.0", - "is-yarn-global": "^0.3.0", - "latest-version": "^5.0.0", - "semver-diff": "^2.0.0", - "xdg-basedir": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - 
"node_modules/uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dev": true, - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/url-parse-lax": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", - "integrity": "sha512-NjFKA0DidqPa5ciFcSrXnAltTtzz84ogy+NebPvfEgAck0+TNg4UJ4IN+fB7zRZfbgUf0syOo9MDxFkDSMuFaQ==", - "dev": true, - "dependencies": { - "prepend-http": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/utf8-byte-length": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/utf8-byte-length/-/utf8-byte-length-1.0.4.tgz", - "integrity": "sha512-4+wkEYLBbWxqTahEsWrhxepcoVOJ+1z5PGIjPZxRkytcdSUaNjIjBM7Xn8E+pdSuV7SzvWovBFA54FO0JSoqhA==", - "dev": true - }, - "node_modules/validate-npm-package-license": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", - "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", - "dev": true, - "dependencies": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" - } - }, - "node_modules/which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "dev": true, - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "which": "bin/which" - } - }, - "node_modules/which-module": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.1.tgz", - "integrity": "sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==", - "dev": true - }, - "node_modules/widest-line": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-2.0.1.tgz", - "integrity": "sha512-Ba5m9/Fa4Xt9eb2ELXt77JxVDV8w7qQrH0zS/TWSJdLyAwQjWoOzpzj5lwVftDz6n/EOu3tNACS84v509qwnJA==", - "dev": true, - "dependencies": { - "string-width": "^2.1.1" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/widest-line/node_modules/ansi-regex": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.1.tgz", - "integrity": "sha512-+O9Jct8wf++lXxxFc4hc8LsjaSq0HFzzL7cVsw8pRDIPdjKD2mT4ytDZlLuSBZ4cLKZFXIrMGO7DbQCtMJJMKw==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/widest-line/node_modules/string-width": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", - "dev": true, - "dependencies": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/widest-line/node_modules/strip-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha512-4XaJ2zQdCzROZDivEVIDPkcQn8LMFSa8kj8Gxb/Lnwzv9A8VctNZ+lfivC/sV3ivW8ElJTERXZoPBRrZKkNKow==", - "dev": true, - "dependencies": { - "ansi-regex": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/wrap-ansi": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", 
- "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", - "dev": true, - "dependencies": { - "ansi-styles": "^3.2.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", - "dev": true - }, - "node_modules/write-file-atomic": { - "version": "2.4.3", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-2.4.3.tgz", - "integrity": "sha512-GaETH5wwsX+GcnzhPgKcKjJ6M2Cq3/iZp1WyY/X1CSqrW+jVNM9Y7D8EC2sM4ZG/V8wZlSniJnCKWPmBYAucRQ==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.1.11", - "imurmurhash": "^0.1.4", - "signal-exit": "^3.0.2" - } - }, - "node_modules/xdg-basedir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-3.0.0.tgz", - "integrity": "sha512-1Dly4xqlulvPD3fZUQJLY+FUIeqN3N2MM3uqe4rCJftAvOjFa3jFGfctOgluGx4ahPbUCsZkmJILiP0Vi4T6lQ==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/xmlbuilder": { - "version": "15.1.1", - "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-15.1.1.tgz", - "integrity": "sha512-yMqGBqtXyeN1e3TGYvgNgDVZ3j84W4cwkOXQswghol6APgZWaff9lnbvN7MHYJOiXsvGPXtjTYJEiC9J2wv9Eg==", - "dev": true, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/y18n": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", - "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==", - "dev": true - }, - "node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true, - "optional": true - }, - "node_modules/yargs": { - "version": "13.3.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", - "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", - "dev": true, - "dependencies": { - "cliui": "^5.0.0", - "find-up": "^3.0.0", - "get-caller-file": "^2.0.1", - "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", - "string-width": "^3.0.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^13.1.2" - } - }, - "node_modules/yargs-parser": { - "version": "13.1.2", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", - "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", - "dev": true, - "dependencies": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - } - }, - "node_modules/yauzl": { - "version": "2.10.0", - "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", - "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==", - "dev": true, - "dependencies": { - "buffer-crc32": "~0.2.3", - "fd-slicer": "~1.1.0" - } - } - } -} diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/package.json b/ai-solutions/windows/electron-app-cv/electron_app_ui/package.json deleted file mode 100644 index d6947f30..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/package.json +++ /dev/null @@ -1,71 +0,0 @@ -{ - "name": "AI-SOLUTIONS", - 
"version": "1.0.0", - "description": "Application to showcase various AI Models", - "main": "main.js", - "scripts": { - "test": "echo \"Error: no test specified\" && exit 1", - "start": "electron .", - "package": "node package_snpe_cpp.js && node package_python.js && npm run -s package-electron", - "package-electron": "electron-builder --publish=never" - }, - "build": { - "appId": "com.ai-solutions.ai-demos", - "productName": "AI-SOLUTIONS", - "asar": false, - "asarUnpack": [ - "**/*.node" - ], - "mac": { - "category": "public.app-category.utilities" - }, - "files": [ - "assets", - "main.js", - "ai-solutions.html", - "index_sr.html", - "index_ie.html", - "index_od.html", - "index_is.html", - "icon.png", - "preload.js", - "node_modules/**/*" - ], - "extraResources": [ - { - "from": "dist-python/", - "to": "app/dist-python", - "filter": [ - "**/*" - ] - }, - { - "from": "../SNPE_CPP_Code/build/Release/", - "to": "app/Release", - "filter": [ - "**/*" - ] - } - ] - }, - "keywords": [ - "Solutions", - "Electron", - "Qualcomm", - "AI", - "ai-demos", - "AI-SOLUTIONS", - "demo" - ], - "author": "Qualcomm", - "license": "BSD", - "devDependencies": { - "electron": "^21.0.0", - "electron-builder": "^21.0.0" - }, - "homepage": "https://github.qualcomm.com/qualcomm-model-zoo-public-mirror/ai-solutions/tree/main/windows#readme", - "dependencies": { - "node-cmd": "^5.0.0", - "ps-tree": "^1.2.0" - } -} diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/package_python.js b/ai-solutions/windows/electron-app-cv/electron_app_ui/package_python.js deleted file mode 100644 index 04eb53a6..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/package_python.js +++ /dev/null @@ -1,37 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -const path = require("path"); - -const spawn = require("child_process").spawn, - ls = spawn( - "pyinstaller", - [ - "-w", - `--add-data ../python_flask_server/templates${path.delimiter}templates`, - `--add-data ../python_flask_server/static${path.delimiter}static`, - "--distpath dist-python", - "../python_flask_server/server.py", - ], - { - shell: true, - } - ); - -ls.stdout.on("data", function (data) { - // stream output of build process - console.log("INFO: ", data.toString()); -}); - -ls.stderr.on("data", function (data) { - console.log( data.toString()); -}); -ls.on("exit", function (code) { - console.log("pyinstaller process exited with code " + code.toString()); -}); diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/package_snpe_cpp.js b/ai-solutions/windows/electron-app-cv/electron_app_ui/package_snpe_cpp.js deleted file mode 100644 index abbf08f8..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/package_snpe_cpp.js +++ /dev/null @@ -1,19 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -const nodeCmd = require('node-cmd') - -let command = String.raw `cd ..\SNPE_CPP_Code && (if not exist build (mkdir build && cd build) else (cd build)) && cmake ../. -G "Visual Studio 17 2022" -A ARM64 -DCHISPSET SC8380 && cmake --build ./ --config Release`; - -nodeCmd.runSync(command, (err, data, stderr) => { -if(data) { - return res.json(data); -} -return err; -}); \ No newline at end of file diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/preload.js b/ai-solutions/windows/electron-app-cv/electron_app_ui/preload.js deleted file mode 100644 index f464ed3d..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/preload.js +++ /dev/null @@ -1,26 +0,0 @@ -// -*- mode: js -*- -// ============================================================================= -// @@-COPYRIGHT-START-@@ -// -// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -// SPDX-License-Identifier: BSD-3-Clause -// -// @@-COPYRIGHT-END-@@ -// ============================================================================= -/** - * The preload script runs before. It has access to web APIs - * as well as Electron's renderer process modules and some - * polyfilled Node.js functions. - * - * https://www.electronjs.org/docs/latest/tutorial/sandbox - */ -window.addEventListener('DOMContentLoaded', () => { - const replaceText = (selector, text) => { - const element = document.getElementById(selector) - if (element) element.innerText = text - } - - for (const type of ['chrome', 'node', 'electron']) { - replaceText(`${type}-version`, process.versions[type]) - } -}) diff --git a/ai-solutions/windows/electron-app-cv/electron_app_ui/styles.css b/ai-solutions/windows/electron-app-cv/electron_app_ui/styles.css deleted file mode 100644 index ed8a34f4..00000000 --- a/ai-solutions/windows/electron-app-cv/electron_app_ui/styles.css +++ /dev/null @@ -1,3 +0,0 @@ -/* styles.css */ - -/* Add styles here to customize the appearance of your app */ \ No newline at end of file diff --git a/ai-solutions/windows/electron-app-cv/python_flask_server/ImageEnhancement_blueprint.py b/ai-solutions/windows/electron-app-cv/python_flask_server/ImageEnhancement_blueprint.py deleted file mode 100644 index d3282680..00000000 --- a/ai-solutions/windows/electron-app-cv/python_flask_server/ImageEnhancement_blueprint.py +++ /dev/null @@ -1,238 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -from flask import Blueprint -from flask import request, jsonify, make_response, send_file -from PIL import Image -from empatches import EMPatches -import io, os -import cv2 -import numpy as np -import time -import functools -import zmq -# from utils import pyinstaller_absolute_path - -import globalvar -time_taken_model = "" -upscaled_img_dims = "" -# old_runtime = "" -# old_model_name = "" - -imageEnhance_bp = Blueprint("ImageEnhance",__name__) - -runtime_name_decoder={'DSP':b"DSP",'GPU':b"GPU", 'CPU':b"CPU"} -# dlc_name_decoder={'EnhancementGAN':'quant_enhancement_240_320_8350.dlc', 'MBLLEN':'quant_mbllen_214.dlc', 'RUAS':'quant_ruas_214.dlc','SCI':'quant_sci_214.dlc','StableLLVE':'quant_StableLLVE_214.dlc','Zero-DCE':'quant_zerodce_80_214.dlc','Zero-DCE++':'quant_zerodce++_214.dlc'} -# dlc_name_decoder={'MBLLEN':'quant_mbllen_214.dlc', 'RUAS':'quant_ruas_214.dlc','SCI':'quant_sci_214.dlc','StableLLVE':'quant_StableLLVE_214.dlc','Zero-DCE':'quant_zerodce_80_214.dlc'} -dlc_name_decoder={'MBLLEN':'quant_mbllen_480_640_8350_212.dlc', 'RUAS':'quant_ruas_480_640_8350_212.dlc','SCI':'quant_sci_480_640_8350_212.dlc','StableLLVE':'quant_stablellve_480_640_8350_212.dlc','Zero-DCE':'quant_zerodce_480_640_212_8350_80_out.dlc'} - - -@imageEnhance_bp.route('/image_enhancement/ie_checkdlc', methods=['POST']) -def checkdlc(): - print("checkdlc: ") - from flask import jsonify - import os - model_name = request.form.get('model_name') - - print("MODEL NAME IN CHECKDLC: ", model_name) - dlc_path = os.path.join("C:\Qualcomm\AIStack\AI_Solutions\DLC","enhancement", dlc_name_decoder.get(model_name)) - if(os.path.isfile(dlc_path)): - print("found") - output_new = { - "dlc_available": "yes", - "dlc_path" : dlc_path - } - else: - print("not found") - output_new = { - "dlc_available": "no", - "dlc_path" : dlc_path - } - return jsonify(output_new), 200 - - -def buildnetwork_ie(socket, model_name, run_time): - - print("BUILDING NETWORK Low light") - print("Model name: ",model_name) - first_str = b"networkbuild" - - dlc_path = bytes(os.path.join("C:\Qualcomm\AIStack\AI_Solutions\DLC","enhancement", dlc_name_decoder.get(model_name)),'utf-8') - - socket.send_multipart([first_str,dlc_path, runtime_name_decoder.get(run_time)]) - - print("Messages sent for building network, waiting for reply") - message_build = socket.recv() - print(message_build) - - -def runmodel_ie(socket, patch, model_name, run_time, scaling_factor=4 ): - - try: - print("LOW LIGHT MODEL") - - ## PREPROC ## - if model_name=='MBLLEN': - patch = cv2.resize(patch, (640,480)) - patch = patch/255 - elif model_name=='RUAS': - patch = cv2.resize(patch, (640,480)) - patch = patch/255 - elif model_name=='SCI': - patch = cv2.resize(patch, (640,480)) - patch = patch/255 - elif model_name=='StableLLVE': - patch = cv2.resize(patch, (640,480)) - patch = patch/255 - elif model_name=='Zero-DCE': - patch = cv2.resize(patch, (640,480)) - patch = patch/255 - else: - print("Out of Context: Model Specified is wrong") - - - img = np.array(patch) - img = img.astype(np.float32) - img = img.tobytes() - - print("Preproc done") - - socket.send_multipart([b"infer",img]) - - print("Messages Image sent, waiting for reply") - message_img_out = socket.recv() - - prediction = np.frombuffer(message_img_out, dtype=np.float32) - print("Message received from server :: Shape: ", prediction.shape) #," data: ", prediction) - - 
socket.send(b"get_infer_time") - message_infer_time = socket.recv() - print("message_infer_time", message_infer_time.decode('UTF-8')) - elapsed_time = 0.0 - elapsed_time = float(message_infer_time.decode('UTF-8'))/1000 - - print("post start") - - if model_name=='MBLLEN': - prediction = prediction.reshape(480,640,3) - prediction = prediction*255 - elif model_name=='RUAS': - prediction = prediction.reshape(480,640,3) - prediction = prediction*255 - elif model_name=='SCI': - prediction = prediction.reshape(480,640,3) - prediction = prediction*255 - elif model_name=='StableLLVE': - prediction = prediction.reshape(480,640,3) - prediction = prediction*255 - elif model_name=='Zero-DCE': - prediction = prediction.reshape(480,640,3) - prediction = prediction*255 - else: - print("Out of Context: Model Specified is wrong") - - # for all other models, post proc is same # - # prediction = prediction*255 - - upscaled_patch = np.clip(prediction, 0, 255).astype(np.uint8) - - except Exception as e: - print("Exception",str(e)) - - return upscaled_patch, elapsed_time - - -# Endpoint for super resolution -@imageEnhance_bp.route('/image_enhancement', methods=['POST']) -def image_enhancement(): - try: - print("Image enhancement blueprint") - - ## GETTING DATA FROM ELECTRON ## - print("Fetching image data from the POST request") - image_data = request.files['imageData'] - - model_name = request.form['model_name'] - print("MODEL NAME:",model_name) - - runtime = request.form['runtime'] - print("RUN TIME:",runtime) - - print("load as PIL IMG") - image_data = Image.open(image_data) - #image_data.save("input_img.png") - width, height = image_data.size - print(f"Received img height = {height} ; width = {width}") - - - ## MAKING CONNECTION WITH SNPE EXE ## - context = zmq.Context() - - # Create a REQ (request) socket - socket = context.socket(zmq.REQ) - server_address = "tcp://localhost:5555" # Replace with your server's address - socket.connect(server_address) - - - ## BUILDING NETWORK ## - # global old_model_name - # global old_runtime - - if model_name != globalvar.old_model_name or runtime != globalvar.old_runtime: - print("___________________BUILDINGNETWORK________________") - print("old_model_name: ", globalvar.old_model_name, "::model_name: ",model_name) - print("old_runtime: ", globalvar.old_runtime, "::runtime: ",runtime) - buildnetwork_ie(socket, model_name, runtime) ##build network when there is some change other than image - globalvar.old_model_name = model_name - globalvar.old_runtime = runtime - - - ## INFERENCING ON NETWORK ## - - # Step 1: Read Image and Extract 128x128 patches from the image - image_np = np.array(image_data) - - merged_img, time_taken = runmodel_ie(socket, image_np, model_name, runtime) - - print("Received Enhanced Image") - - global time_taken_model - global upscaled_img_dims - time_taken_model = str(f'{time_taken*1000:.2f}')+" ms" - - - # Step 3: Getting image dimensions - - upscaled_img_dims = str(merged_img.shape[1]) + " x " +str(merged_img.shape[0]); - print("upscaled_img_dims: ",upscaled_img_dims) - merged_img = Image.fromarray(np.uint8(merged_img)) - # merged_img.save("upscaled_lowlight.png") - - # Convert the upscaled image to a binary response - output_buffer = io.BytesIO() - - merged_img.save(output_buffer, format='PNG') - - print("Sending enhanced image as output to electron ...") - output_buffer.seek(0) - return send_file(output_buffer, mimetype='image/png') - - except Exception as e: - print("#############EXCEPTION####################") - print(str(e)) - return 
jsonify({'error': str(e)}), 400 - -# Endpoint for super resolution -@imageEnhance_bp.route('/low-light/timer_string', methods=['POST']) -def timer_string(): - output_new = { - "infertime": time_taken_model, - "outputdims": upscaled_img_dims, - } - return jsonify(output_new), 200 \ No newline at end of file diff --git a/ai-solutions/windows/electron-app-cv/python_flask_server/ImageSegmentation_blueprint.py b/ai-solutions/windows/electron-app-cv/python_flask_server/ImageSegmentation_blueprint.py deleted file mode 100644 index c45a1f8f..00000000 --- a/ai-solutions/windows/electron-app-cv/python_flask_server/ImageSegmentation_blueprint.py +++ /dev/null @@ -1,275 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -from flask import Blueprint -from flask import request, jsonify, make_response, send_file, render_template -from flask_cors import cross_origin -from PIL import Image -import io -import os -import cv2 -import numpy as np -import zmq -from torch import from_numpy - -from threading import Lock -from datetime import datetime - -import globalvar -# from utils import pyinstaller_absolute_path - - -__sockets = [] -time_taken_model = "" -upscaled_img_dims = "" -old_runtime = "" -old_model_name = "" - -lockmut = Lock() - -imageSegment_bp = Blueprint("ImageSegment", __name__) - -runtime_name_decoder={'DSP':b"DSP",'GPU':b"GPU", 'CPU':b"CPU"} -dlc_name_decoder={'FCN_RESNET50':'fcn_resnet50_quant16_w8a16.dlc', 'FCN_RESNET101':'fcn_resnet101_quant16_w8a16.dlc',"LRASPP":"lraspp_mobilenet_v3_large_quant16_w8a16.dlc", "DEEPLABV3_RESNET50":"deeplabv3_resnet50_quant_w8a8.dlc", "DEEPLABV3_RESNET101":"deeplabv3_resnet101_quant_w8a8.dlc" } - - -from torchvision import transforms as T - -transform = T.Compose([ - T.Resize((400,400)), - - T.ToTensor(), - T.Normalize(mean=[0.485, 0.456, 0.406], - std=[0.229, 0.224, 0.225]), - ]) - -label_map = [ - (0, 0, 0), # background - (128, 0, 0), # aeroplane - (0, 128, 0), # bicycle - (128, 128, 0), # bird - (0, 0, 128), # boat - (128, 0, 128), # bottle - (0, 128, 128), # bus - (128, 128, 128), # car - (64, 0, 0), # cat - (192, 0, 0), # chair - (64, 128, 0), # cow - (192, 128, 0), # dining table - (64, 0, 128), # dog - (192, 0, 128), # horse - (64, 128, 128), # motorbike - (192, 128, 128), # person - (0, 64, 0), # potted plant - (128, 64, 0), # sheep - (0, 192, 0), # sofa - (128, 192, 0), # train - (0, 64, 128) # tv/monitor -] - -def image_overlay(image, segmented_image): - alpha = 1 # transparency for the original image - beta = 0.8 # transparency for the segmentation map - gamma = 0 # scalar added to each sum - segmented_image = cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR) - image = np.array(image) - image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) - cv2.addWeighted(image, alpha, segmented_image, beta, gamma, image) - return image - -def get_segment_labels(image, model, device): - # transform the image to tensor and load into computation device - image = transform(image).to(device) - image = image.unsqueeze(0) # add a batch dimension - outputs = model(image) - return outputs - -def draw_segmentation_map(outputs): - labels = outputs.detach().cpu().numpy() - # create Numpy arrays containing zeros - # later to be used to fill them with respective red, 
green, and blue pixels - red_map = np.zeros_like(labels).astype(np.uint8) - green_map = np.zeros_like(labels).astype(np.uint8) - blue_map = np.zeros_like(labels).astype(np.uint8) - - for label_num in range(0, len(label_map)): - index = labels == label_num - red_map[index] = np.array(label_map)[label_num, 0] - green_map[index] = np.array(label_map)[label_num, 1] - blue_map[index] = np.array(label_map)[label_num, 2] - - segmentation_map = np.stack([red_map, green_map, blue_map], axis=2) - return segmentation_map - -@imageSegment_bp.route('/is_checkdlc', methods=['POST']) -def checkdlc(): - from flask import jsonify - import os - model_name = request.form.get('model_name') - dlc_path = os.path.join("C:\Qualcomm\AIStack\AI_Solutions\DLC","imageseg", dlc_name_decoder.get(model_name)) - - if(os.path.isfile(dlc_path)): - print("found") - output_new = { - "dlc_available": "yes", - "dlc_path" : dlc_path - } - else: - print("not found") - output_new = { - "dlc_available": "no", - "dlc_path" : dlc_path - } - return jsonify(output_new), 200 -def buildnetwork_is(socket, model_name, run_time): - - print("BUILDING NETWORK imageSegment") - print("Model name: ",model_name) - first_str = b"networkbuild" - - dlc_path = bytes(os.path.join("C:\Qualcomm\AIStack\AI_Solutions\DLC","imageseg", dlc_name_decoder.get(model_name)),'utf-8') - - socket.send_multipart([first_str,dlc_path, runtime_name_decoder.get(run_time)]) - - print("Messages sent for building network, waiting for reply") - message_build = socket.recv() - print(message_build) - -@imageSegment_bp.route('/is_stop_infer', methods=['POST']) -def stop_infer(): - print("***********************************************************************************************************************************************************************") - print("Setting Global var 'Stop Infer'") - print("***********************************************************************************************************************************************************************") - lockmut.acquire() - globalvar.stop_infer = 1 - lockmut.release() - return jsonify({'Stopped Server': 'Done'}), 200 - -@imageSegment_bp.route('/is_build_model', methods=['POST']) -def build_model(): - - try: - ## GETTING DATA FROM ELECTRON ## - model_name = request.form['model_name'] - runtime = request.form['runtime_name'] - - - # print(" flag_modelbuild_inprocess: ",globalvar.flag_modelbuild_inprocess) - # print("model_name: ",model_name) - # print("old_model_name: ",globalvar.old_model_name) - # print("runtime: ",runtime) - # print("old_runtime: ",globalvar.old_runtime) - - - if (model_name != globalvar.old_model_name or runtime != globalvar.old_runtime) and (globalvar. flag_modelbuild_inprocess==0): - # print("Building NEW Model") - globalvar.flag_modelbuild_inprocess = 1 - print("___________________BUILDINGNETWORK________________") - print("old_model_name: ", globalvar.old_model_name, "::model_name: ",model_name) - print("old_runtime: ", globalvar.old_runtime, "::runtime: ",runtime) - buildnetwork_is(globalvar.__sockets[0]["socket"], model_name, runtime) ##build network when there is some change other than image - globalvar.flag_modelbuild_inprocess = 0 - globalvar.old_model_name = model_name - globalvar.old_runtime = runtime - globalvar.stop_infer = 0 - print("___________________DONE___________________________") - return jsonify({'msg': 'model build successfully'}), 200 - - print("Model already built") - globalvar.stop_infer = 0 - return jsonify({'msg': 'model already build'}), 200 - except Exception as e: - print("<<<<<<<<<<<<<<>>>>>>>>>>>>>>") - print(str(e)) - return jsonify({'msg': str(e)}), 400 - -@imageSegment_bp.route('/image_segmentation', methods=['POST']) -def image_segment(): - try: - ## GETTING DATA FROM ELECTRON ## - lockmut.acquire() - if globalvar.stop_infer == 1: - return jsonify({'error': 'busy'}), 400 - lockmut.release() - image_data = request.files['image_data'] - - - # MAKING CONNECTION WITH SNPE EXE ## - - - - - model_name = globalvar.old_model_name - - - ## INFERENCING ON NETWORK ## - print("INFERENCING") - - start = datetime.now() - - original_image = Image.open(image_data) - - input_ = preprocess(image_data) - print("preprocessed") - data = input_.transpose(0, 2, 3, 1) - - end = datetime.now() - print("preprocess Time: ", end-start) - - for inds, s in enumerate(globalvar.__sockets): - if s["lock"].acquire(blocking=False): - now = datetime.now() - s["socket"].send_multipart([b"infer", data.tobytes()], zmq.NOBLOCK) - print("data sent") - message = s["socket"].recv() - end = datetime.now() - print("infer Time: ", end-now) - # print("data received") - s["lock"].release() - - # print("lock release") - inf_result = np.frombuffer(message, dtype=np.float32) - now = datetime.now() - # print("result") - # print("RESULT shape:",inf_result.shape) - - - - ret = postprocess(original_image, inf_result) - _, frame_encoded = cv2.imencode(".jpg", ret) - output_buffer = io.BytesIO(frame_encoded.tobytes()) - end = datetime.now() - print("postprocess time: ",end-now) - return send_file(output_buffer, mimetype='image/jpeg') - - return jsonify({'error': 'busy'}), 400 - - except Exception as e: - print("<<<<<<<<<<<<<<>>>>>>>>>>>>>>") - print(str(e)) - return jsonify({'error': str(e)}), 400 - - - -def preprocess(input): #TODO Preprocessing depends on model architecture and it can be different for different models. Here all models have the same pre and post processing - img = Image.open(input).convert('RGB') - img = transform(img).unsqueeze(0) # To tensor of NCHW - img = img.numpy() - return img - - -def postprocess(input,output): - res_reshape = output.reshape((1,400,400)).astype(np.float32) - model_img = from_numpy(res_reshape) - segmented_image = draw_segmentation_map(model_img[0]) - input = input.resize((400,400)) - final_image = image_overlay(input, segmented_image) - return final_image - - diff --git a/ai-solutions/windows/electron-app-cv/python_flask_server/MobileNetSSd.py deleted file mode 100644 index 2ddf6b5b..00000000 --- a/ai-solutions/windows/electron-app-cv/python_flask_server/MobileNetSSd.py +++ /dev/null @@ -1,194 +0,0 @@ -# -*- mode: python -*- -# 
============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -import cv2 -import numpy as np - -# from torch import Tensor, clamp, max, min, from_numpy, cat -import torch - -from utils import draw_box - -class_name = ["BACKGROUND", - "aeroplane", - "bicycle", - "bird", - "boat", - "bottle", - "bus", - "car", - "cat", - "chair", - "cow", - "diningtable", - "dog", - "horse", - "motorbike", - "person", - "pottedplant", - "sheep", - "sofa", - "train", - "tvmonitor"] -label2class={str(i):x for i,x in enumerate(class_name)} -colors = np.random.uniform(0, 255, size=(len(list(label2class.values())), 3)) - -class MobileNetSSD: - def __init__(self): - self.mean = [123,117,104] - - - def area_of(self, left_top, right_bottom) -> torch.Tensor: - """Compute the areas of rectangles given two corners. - - Args: - left_top (N, 2): left top corner. - right_bottom (N, 2): right bottom corner. - - Returns: - area (N): return the area. - """ - hw = torch.clamp(right_bottom - left_top, min=0.0) - return hw[..., 0] * hw[..., 1] - - - - - def iou_of(self, boxes0, boxes1, eps=1e-5): - """Return intersection-over-union (Jaccard index) of boxes. - - Args: - boxes0 (N, 4): ground truth boxes. - boxes1 (N or 1, 4): predicted boxes. - eps: a small number to avoid 0 as denominator. - Returns: - iou (N): IoU values. - """ - overlap_left_top = torch.max(boxes0[..., :2], boxes1[..., :2]) - overlap_right_bottom = torch.min(boxes0[..., 2:], boxes1[..., 2:]) - - overlap_area = self.area_of(overlap_left_top, overlap_right_bottom) - area0 = self.area_of(boxes0[..., :2], boxes0[..., 2:]) - area1 = self.area_of(boxes1[..., :2], boxes1[..., 2:]) - return overlap_area / (area0 + area1 - overlap_area + eps) - - - - - def hard_nms(self, box_scores, iou_threshold, top_k=-1, candidate_size=200): - """ - - Args: - box_scores (N, 5): boxes in corner-form and probabilities. - iou_threshold: intersection over union threshold. - top_k: keep top_k results. If k <= 0, keep all the results. - candidate_size: only consider the candidates with the highest scores. 
- Returns: - picked: a list of indexes of the kept boxes - """ - scores = box_scores[:, -1] - boxes = box_scores[:, :-1] - picked = [] - print(boxes.shape) - _, indexes = scores.sort(descending=True) - indexes = indexes[:candidate_size] - while len(indexes) > 0: - current = indexes[0] - picked.append(current.item()) - if 0 < top_k == len(picked) or len(indexes) == 1: - break - current_box = boxes[current, :] - indexes = indexes[1:] - rest_boxes = boxes[indexes, :] - iou = self.iou_of( - rest_boxes, - current_box.unsqueeze(0), - ) - indexes = indexes[iou <= iou_threshold] - - return box_scores[picked, :] - - - - def nms(self, box_scores, nms_method=None, score_threshold=None, iou_threshold=None, - sigma=0.5, top_k=-1, candidate_size=200): - if nms_method == "soft": - return self.soft_nms(box_scores, score_threshold, sigma, top_k) - else: - return self.hard_nms(box_scores, iou_threshold, top_k, candidate_size=candidate_size) - - def preprocessing(self,img_bgr): - img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB) - img = cv2.resize(img, (320,320)) - # print("image shape: ",img.shape) - # print("before->",img[0][0][:]) - # img = img - self.mean - # print("end->",img[0][0][:]) - # img = img* 1.070312500000 - return img - - def postProcessinghelper(self, scores, boxes, original_image): - height,width,_=original_image.shape - # print("originam_image shaoe",original_image.shape) - prob_threshold = 0.4 - picked_box_probs = [] - picked_labels = [] - # print("scores shape: ",scores.shape) - for class_index in range(1, scores.shape[1]): - - probs = scores[:, class_index] - print("highest: ",np.max(probs)) - # print("probs",probs) - mask = probs > prob_threshold - probs = probs[mask] - - if probs.shape[0] == 0: - # print("Continue") - continue - subset_boxes = boxes[mask, :] - subset_boxes = torch.from_numpy(subset_boxes) - probs = torch.from_numpy(probs) - box_probs = torch.cat([subset_boxes, probs.reshape(-1, 1)], dim=1) - box_probs = self.nms(box_probs, None, - score_threshold=prob_threshold, - iou_threshold=0.2, - sigma=0.2, - top_k=-1, - candidate_size=200) - # print("box_prods is calculated") - picked_box_probs.extend([box_probs]) - picked_labels.extend([class_index] * box_probs.size(0)) - picked_box_probs = torch.cat(picked_box_probs) - picked_box_probs[:, 0] *= width - picked_box_probs[:, 1] *= height - picked_box_probs[:, 2] *= width - picked_box_probs[:, 3] *= height - label = class_name[picked_labels[0]] - - for i in range(0,len(picked_box_probs)): - x,y=int(picked_box_probs[i, 0].numpy()),int(picked_box_probs[i, 1].numpy()) - x_plus_w,y_plus_h=int(picked_box_probs[i, 2].numpy()),int(picked_box_probs[i, 3].numpy()) - # print("cords: ", x, "::", y,"::", x_plus_w, "::", y_plus_h) - # print(picked_box_probs) - # original_image = cv2.rectangle(original_image,(x, y), (x_plus_w,y_plus_h),colors[class_index],2) - # original_image=cv2.putText(original_image, label,(int(picked_box_probs[i, 0])+9, int(picked_box_probs[i, 1])-10),cv2.FONT_HERSHEY_SIMPLEX,1,(255, 40, 255),2) # line - draw_box(original_image,[x,y,x_plus_w,y_plus_h],label,picked_box_probs[i,4].tolist(),colors[class_index]) - - picked_box_probs = [] - picked_labels = [] - return original_image - - def postprocessing(self,img, inf_result): - # print("dona") - # scores = np.fromfile(result_path+'/942.raw', dtype="float32") - scores = inf_result[:67914].copy().reshape((3234,21)) - # boxes=np.fromfile(result_path+'/993.raw', dtype="float32") - boxes= inf_result[67914:].copy().reshape((3234,4)) - return 
self.postProcessinghelper(scores,boxes,cv2.cvtColor(np.array(img), cv2.COLOR_BGR2RGB)) - \ No newline at end of file diff --git a/ai-solutions/windows/electron-app-cv/python_flask_server/ObjectDetection_blueprint.py deleted file mode 100644 index 1a2fd635..00000000 --- a/ai-solutions/windows/electron-app-cv/python_flask_server/ObjectDetection_blueprint.py +++ /dev/null @@ -1,224 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -from flask import Blueprint -from flask import request, jsonify, make_response, send_file, render_template -from flask_cors import cross_origin -from PIL import Image -import io -import os -import cv2 -import numpy as np -import zmq - -from utils import draw_box -from threading import Lock - -from datetime import datetime - -import MobileNetSSd -import YoloNas -import YoloX -import SSDLite - -import globalvar - -# from utils import pyinstaller_absolute_path - -runtime_name_decoder={'DSP':b"DSP",'GPU':b"GPU", 'CPU':b"CPU"} -# dlc_name_decoder={'yolonas':'Quant_yoloNas_s_320.dlc', 'ssdlite':'yolo_nas_s.dlc'} -dlc_name_decoder={'yolonas':'quant_yolo_nas_s.dlc', 'mobilenetssd':'ssd_mobilenetV2_without_ABP-NMS_Q.dlc', 'yolox':'yolox_x_212_Q.dlc'} - -objectDetect_bp = Blueprint("ObjectDetect", __name__) - -lockmut = Lock() - - - -@objectDetect_bp.route('/') -def index(): - return render_template('index.html') - - - -@objectDetect_bp.route('/od_checkdlc', methods=['POST']) -def checkdlc(): - from flask import jsonify - import os - model_name = request.form.get('model_name') - dlc_path = os.path.join("C:\Qualcomm\AIStack\AI_Solutions\DLC","objectdetect", dlc_name_decoder.get(model_name)) - - if(os.path.isfile(dlc_path)): - print("found") - output_new = { - "dlc_available": "yes", - "dlc_path" : dlc_path - } - else: - print("not found") - output_new = { - "dlc_available": "no", - "dlc_path" : dlc_path - } - return jsonify(output_new), 200 - -def buildnetwork_od(socket, model_name, run_time): - - print("BUILDING NETWORK ObjectDetect") - print("Model name: ",model_name) - first_str = b"networkbuild" - - dlc_path = bytes(os.path.join("C:\Qualcomm\AIStack\AI_Solutions\DLC","objectdetect", dlc_name_decoder.get(model_name)),'utf-8') - - # global flag_modelbuild_inprocess - - # if flag_modelbuild_inprocess==0: - # flag_modelbuild_inprocess = 1 - socket.send_multipart([first_str,dlc_path, runtime_name_decoder.get(run_time)]) - print("Messages sent for building network, waiting for reply") - message_build = socket.recv() - # flag_modelbuild_inprocess = 0 - print(message_build) - - - - -@objectDetect_bp.route('/od_stop_infer', methods=['POST']) -def stop_infer(): - print("***********************************************************************************************************************************************************************") - print("Setting Global var 'Stop Infer'") - print("***********************************************************************************************************************************************************************") - lockmut.acquire() - globalvar.stop_infer = 1 - lockmut.release() - return jsonify({'Stopped Server': 'Done'}), 200 - 
-@objectDetect_bp.route('/od_build_model', methods=['POST']) -def build_model(): - - try: - ## GETTING DATA FROM ELECTRON ## - model_name = request.form['model_name'] - runtime = request.form['runtime_name'] - - - # print(" flag_modelbuild_inprocess: ",globalvar.flag_modelbuild_inprocess) - # print("model_name: ",model_name) - # print("old_model_name: ",globalvar.old_model_name) - # print("runtime: ",runtime) - # print("old_runtime: ",globalvar.old_runtime) - - - if (model_name != globalvar.old_model_name or runtime != globalvar.old_runtime) and (globalvar. flag_modelbuild_inprocess==0): - # print("Building NEW Model") - globalvar.flag_modelbuild_inprocess = 1 - print("___________________BUILDINGNETWORK________________") - print("old_model_name: ", globalvar.old_model_name, "::model_name: ",model_name) - print("old_runtime: ", globalvar.old_runtime, "::runtime: ",runtime) - buildnetwork_od(globalvar.__sockets[0]["socket"], model_name, runtime) ##build network when there is some change other than image - globalvar.flag_modelbuild_inprocess = 0 - globalvar.old_model_name = model_name - globalvar.old_runtime = runtime - globalvar.stop_infer = 0 - print("___________________DONE___________________________") - return jsonify({'msg': 'model build successfully'}), 200 - - print("Model already built") - globalvar.stop_infer = 0 - return jsonify({'msg': 'model already build'}), 200 - except Exception as e: - print("<<<<<<<<<<<<<<>>>>>>>>>>>>>>") - print(str(e)) - return jsonify({'msg': str(e)}), 400 - -@objectDetect_bp.route('/object_detection', methods=['POST']) -def object_detection(): - - try: - # print("Value of globalvar.stop_infer: ",globalvar.stop_infer) - lockmut.acquire() - if globalvar.stop_infer == 1: - return jsonify({'error': 'busy'}), 400 - lockmut.release() - - image_data = request.files['image_data'] - - ## INFERENCING ON NETWORK ## - # print("INFERNCECING") - start = datetime.now() - - image_data = Image.open(image_data) - - image_np = np.array(image_data) - image_np = image_np.astype(np.float32) - - model_name = globalvar.old_model_name - - if model_name == 'yolonas': - od_model = YoloNas.YoloNAS() - elif model_name == 'ssdlite': - od_model = SSDLite.SSDLITE() - elif model_name == 'mobilenetssd': - od_model = MobileNetSSd.MobileNetSSD() - elif model_name == 'yolox': - od_model = YoloX.YoloX() - else: - print("FATAL ISSUE") - print('model_name: ',model_name) - od_model = YoloNas.YoloNAS() - - image_pre = od_model.preprocessing(image_np) - end = datetime.now() - print("preprocess Time: ", end-start) - - for inds, s in enumerate(globalvar.__sockets): - if s["lock"].acquire(blocking=False): - # socket.send_multipart([b"infer", image_np]) - # socket.send_multipart([b"infer", data.tobytes()]) - # message = socket.recv() - now = datetime.now() - # s["socket"].send_multipart([b"infer", data.tobytes()], zmq.NOBLOCK) - print("index of socket:", inds) - s["socket"].send_multipart([b"infer", image_pre.tobytes()], zmq.NOBLOCK) - print("DATA sent to snpe") - # if globalvar.stop_infer == 1: - # s["lock"].release() - # return jsonify({'error': 'busy'}), 400 - # print("data sent") - message = s["socket"].recv() - print("data received from snpe") - end = datetime.now() - print("infer Time: ", end-now) - # print("data received") - s["lock"].release() - - print("lock release") - inf_result = np.frombuffer(message, dtype=np.float32) - # print("inf_result.shape:: ",inf_result.shape) - # print("First Value of vector in python: ",inf_result[:5]) - # print("Last 5 Value of vector in python: 
",inf_result[-5:]) - now = datetime.now() - # print("resuly") - # print("RSULT shape:",inf_result.shape) - - img = od_model.postprocessing(image_data, inf_result) - # print("postprocess done") - _, frame_encoded = cv2.imencode(".jpeg", img) - - output_buffer = io.BytesIO(frame_encoded.tobytes()) - end = datetime.now() - print("postprocess time: ",end-now) - return send_file(output_buffer, mimetype='image/jpeg') - - return jsonify({'error': 'busy'}), 400 - - except Exception as e: - print("<<<<<<<<<<<<<<>>>>>>>>>>>>>>") - print(str(e)) - return jsonify({'error': str(e)}), 400 \ No newline at end of file diff --git a/ai-solutions/windows/electron-app-cv/python_flask_server/SSDLite.py b/ai-solutions/windows/electron-app-cv/python_flask_server/SSDLite.py deleted file mode 100644 index fe933bc3..00000000 --- a/ai-solutions/windows/electron-app-cv/python_flask_server/SSDLite.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -class SSDLITE: - def __init__(self): - self.mean = [127,127,127] - self.stddev = 128 - self.class_name = ["BACKGROUND", - "aeroplane", - "bicycle", - "bird", - "boat", - "bottle", - "bus", - "car", - "cat", - "chair", - "cow", - "diningtable", - "dog", - "horse", - "motorbike", - "person", - "pottedplant", - "sheep", - "sofa", - "train", - "tvmonitor"] - self.label2class={str(i):x for i,x in enumerate(self.class_name)} - self.colors = np.random.uniform(0, 255, size=(len(list(self.label2class.values())), 3)) - - def preprocessing(self,img): - img = cv2.resize(img, (300,300)) - img = img - self.mean - img = img/self.stddev - return img - - def postProcessinghelper(self, scores,boxes,original_image_path): - height,width,_=original_image.shape - prob_threshold = 0.2 - # this version of nms is slower on GPU, so we move data to CPU. 
- picked_box_probs = [] - picked_labels = [] - for class_index in range(1, scores.shape[1]): - - probs = scores[:, class_index] - - mask = probs > prob_threshold - probs = probs[mask] - - if probs.shape[0] == 0: - continue - subset_boxes = boxes[mask, :] - subset_boxes = torch.from_numpy(subset_boxes) - probs = torch.from_numpy(probs) - box_probs = torch.cat([subset_boxes, probs.reshape(-1, 1)], dim=1) - box_probs = nms(box_probs, None, - score_threshold=prob_threshold, - iou_threshold=0.2, - sigma=0.2, - top_k=-1, - candidate_size=200) - picked_box_probs.extend([box_probs]) - picked_labels.extend([class_index] * box_probs.size(0)) - picked_box_probs = torch.cat(picked_box_probs) - picked_box_probs[:, 0] *= width - picked_box_probs[:, 1] *= height - picked_box_probs[:, 2] *= width - picked_box_probs[:, 3] *= height - label = class_name[picked_labels[0]] - - for i in range(0,len(picked_box_probs)): - x,y=int(picked_box_probs[i, 0].numpy()),int(picked_box_probs[i, 1].numpy()) - x_plus_w,y_plus_h=int(picked_box_probs[i, 2].numpy()),int(picked_box_probs[i, 3].numpy()) - original_image = cv2.rectangle(original_image,(x, y), (x_plus_w,y_plus_h),colors[class_index],2) - original_image=cv2.putText(original_image, label,(int(picked_box_probs[i, 0])+9, int(picked_box_probs[i, 1])+20),cv2.FONT_HERSHEY_SIMPLEX,1,(255, 40, 255),2) # line type - picked_box_probs = [] - picked_labels = [] - return original_image - - def postprocessing(self,img, inf_result): - print("dona") - # scores = np.fromfile(result_path+'/942.raw', dtype="float32") - scores = inf_result[:12000].copy().reshape((3000,4)) - # boxes=np.fromfile(result_path+'/993.raw', dtype="float32") - boxes= inf_result[12000:].copy().reshape((3000,21)) - return postProcessinghelper(scores,boxes,img) - \ No newline at end of file diff --git a/ai-solutions/windows/electron-app-cv/python_flask_server/SuperResolution_blueprint.py b/ai-solutions/windows/electron-app-cv/python_flask_server/SuperResolution_blueprint.py deleted file mode 100644 index 4ed5794c..00000000 --- a/ai-solutions/windows/electron-app-cv/python_flask_server/SuperResolution_blueprint.py +++ /dev/null @@ -1,247 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -from flask import Blueprint -from flask import request, jsonify, make_response, send_file, render_template -from PIL import Image -from empatches import EMPatches -import io, os -import cv2 -import numpy as np -import zmq - -from datetime import datetime -import time - -import globalvar -# from utils import pyinstaller_absolute_path - -time_taken_model = "" -upscaled_img_dims = "" - -superRes_bp = Blueprint("SuperRes",__name__) - -runtime_name_decoder={'DSP':b"DSP",'GPU':b"GPU", 'CPU':b"CPU"} -# dlc_name_decoder={'ESRGAN':'quant_ESRGAN_128_512_8350_214.dlc', 'SRGAN':'quant_SRGAN_128_512_8350_214.dlc', 'SESR':'quant_SESR_128_512_8350_214.dlc','QuickSR_large':'quant_quickSRnet_large_128_512_8350_214.dlc','QuickSR_medium':'quant_quickSRnet_medium_128_512_8350_214.dlc','QuickSR_small':'quant_quickSRnet_small_128_512_8350_214.dlc','XLSR':'quant_XLSR_128_512_8350_214.dlc'} -dlc_name_decoder={'ESRGAN':'quant_ESRGAN_128_4_8350.dlc', 'SRGAN':'quant_SRGAN_128_512_8350.dlc', 'SESR':'quant_SESR_128_512_8350.dlc','QuickSR_large':'quant_quickSRnet_large_128_512_8350.dlc','QuickSR_medium':'quant_quickSRnet_medium_128_512_8350.dlc','QuickSR_small':'quant_quickSRnet_small_128_512_8350.dlc','XLSR':'quant_XLSR_128_512_8350.dlc'} - -@superRes_bp.route('/sr_checkdlc', methods=['POST']) -def checkdlc(): - print("checkdlc: ") - from flask import jsonify - import os - model_name = request.form.get('model_name') - - print("MODEL NAME IN CHECKDLC: ", model_name) - dlc_path = os.path.join("C:\Qualcomm\AIStack\AI_Solutions\DLC","superresolution", dlc_name_decoder.get(model_name)) - if(os.path.isfile(dlc_path)): - print("found") - output_new = { - "dlc_available": "yes", - "dlc_path" : dlc_path - } - else: - print("not found") - output_new = { - "dlc_available": "no", - "dlc_path" : dlc_path - } - return jsonify(output_new), 200 -def buildnetwork(socket, model_name, run_time): - - print("BUILDING NETWORK") - first_str = b"networkbuild" - - - dlc_path = bytes(os.path.join("C:\Qualcomm\AIStack\AI_Solutions\DLC","superresolution", dlc_name_decoder.get(model_name)),'utf-8') - - socket.send_multipart([first_str,dlc_path, runtime_name_decoder.get(run_time)]) - - print("Messages sent for building network, waiting for reply") - message_build = socket.recv() - print(message_build) - -def upscale_patch(socket, patch, model_name, run_time, scaling_factor=4 ): - - try: - print("MODEL::::::::::::::::::::::") - runtime_name_decoder={'DSP':"--use_dsp",'GPU':"--use_gpu", 'CPU':""} - # dlc_name_decoder={'ESRGAN':'quant_ESRGAN_128_4_8350.dlc', 'SRGAN':'quant_SRGAN_128_512_8350.dlc', 'SESR':'quant_SESR_128_512_8350.dlc','QuickSR_large':'quant_quickSRnet_large_128_512_8350.dlc','QuickSR_medium':'quant_quickSRnet_medium_128_512_8350.dlc','QuickSR_small':'quant_quickSRnet_small_128_512_8350.dlc','XLSR':'quant_XLSR_128_512_8350.dlc'} - # dlc_path = os.path.join("sr_dlc", dlc_name_decoder.get(model_name)) - - ## PREPROC ## - start = time.time() - if model_name=='ESRGAN': - # do nothing # - print("no preproc needed---Only resize") - - else: - patch = patch/255 - end = time.time() - print("preprocess Time: ", end-start) - - img = np.array(patch) - img = img.astype(np.float32) - img = img.tobytes() - - socket.send_multipart([b"infer",img]) - - print("Messages Image sent, waiting for reply") - message_img_out = socket.recv() - - prediction = np.frombuffer(message_img_out, dtype=np.float32) - 
#print("Message received from server :: Shape: ", prediction.shape," data: ", prediction) - - print("inf_result.shape:: ",prediction.shape) - print("First Value of vector in python: ",prediction[0]) - print("Last 5 Value of vector in python: ",prediction[prediction.shape[0]-5:]) - - socket.send(b"get_infer_time") - message_infer_time = socket.recv() - print("message_infer_time", message_infer_time.decode('UTF-8')) - elapsed_time = 0.0 - elapsed_time = float(message_infer_time.decode('UTF-8'))/1000 - - start = time.time() - prediction = prediction.reshape(512,512,3) - - ## POSTPROC ## - if model_name=='ESRGAN': - # do nothing # - print("no postproc needed for ESRGAN") - else: - # for all other models, post proc is same # - prediction = prediction*255 - - upscaled_patch = np.clip(prediction, 0, 255).astype(np.uint8) - end = time.time() - print("postprocess Time: ", end-start) - - except Exception as e: - print("Exception",str(e)) - return - - return upscaled_patch, elapsed_time - -# Serve INDEX HTML file -@superRes_bp.route('/') -def index(): - return render_template('index.html') - -# Endpoint for super resolution -@superRes_bp.route('/timer_string', methods=['POST']) -def timer_string(): - output_new = { - "infertime": time_taken_model, - "outputdims": upscaled_img_dims, - } - return jsonify(output_new), 200 - -# Endpoint for super resolution -@superRes_bp.route('/super_resolution', methods=['POST']) -def super_resolution(): - try: - - ## GETTING DATA FROM ELECTRON ## - print("Fetching image data from the POST request") - image_data = request.files['imageData'] - - model_name = request.form['model_name'] - print("MODEL NAME:",model_name) - - runtime = request.form['runtime'] - print("RUN TIME:",runtime) - - print("load as PIL IMG") - image_data = Image.open(image_data) - #image_data.save("input_img.png") - width, height = image_data.size - print(f"Received img height = {height} ; width = {width}") - - - ## MAKING CONNECTION WITH SNPE EXE ## - context = zmq.Context() - # Create a REQ (request) socket - socket = context.socket(zmq.REQ) - server_address = "tcp://localhost:5555" # Replace with your server's address - socket.connect(server_address) - - - ## BUILDING NETWORK ## - - if model_name != globalvar.old_model_name or runtime != globalvar.old_runtime: - print("___________________BUILDINGNETWORK________________") - print("old_model_name: ", globalvar.old_model_name, "::model_name: ",model_name) - print("old_runtime: ", globalvar.old_runtime, "::runtime: ",runtime) - buildnetwork(socket, model_name, runtime) ##build network when there is some change other than image - globalvar.old_model_name = model_name - globalvar.old_runtime = runtime - - - ## INFERENCING ON NETWORK ## - - - # Step 0: Set upscaling params - patch_size = 128 - overlap_factor = 0.1 - scaling_factor= 4 - - - # Step 1: Read Image and Extract 128x128 patches from the image - image_np = np.array(image_data) - - # Dividing image into small patches - emp = EMPatches() - img_patches, indices = emp.extract_patches(image_np, patchsize=patch_size, overlap=overlap_factor) - print(f"Num of patches of 128 = {len(img_patches)}") - - - # Step 2: Upscale each patch by a factor of 4 - upscaled_patches= [] - infer_time_list = [] - time_taken = 0 - for patch in img_patches: - pt, single_infer_time = upscale_patch(socket, patch, model_name, runtime) - upscaled_patches.append(pt) - time_taken = time_taken + single_infer_time ##Adding time for all patches - - print("Received upscaled patches") - - global time_taken_model - global 
upscaled_img_dims - time_taken_model = str(f'{time_taken*1000:.2f}')+" ms" - - - - # Step 3: Stitch back the upscaled patches into a single image - - # Calculate the upscaled stiching indices - up_img = np.zeros((image_np.shape[0]*scaling_factor, image_np.shape[1]*scaling_factor, image_np.shape[2]), np.uint8) - _, new_indices = emp.extract_patches(up_img, patchsize=patch_size*scaling_factor, overlap=overlap_factor) - - # merge with new_indices - merged_img = emp.merge_patches(upscaled_patches, new_indices, mode='min') - upscaled_img_dims = str(merged_img.shape[1]) + " x " +str(merged_img.shape[0]); - - merged_img = Image.fromarray(np.uint8(merged_img)) - # merged_img.save("upscaled_model.png") - - # Convert the upscaled image to a binary response - output_buffer = io.BytesIO() - - merged_img.save(output_buffer, format='PNG') - - print("Sending upscaled image as output to electron ...") - output_buffer.seek(0) - return send_file(output_buffer, mimetype='image/png') - - except Exception as e: - print("#############EXCEPTION####################") - print(str(e)) - return jsonify({'error': str(e)}), 400 diff --git a/ai-solutions/windows/electron-app-cv/python_flask_server/YoloNas.py b/ai-solutions/windows/electron-app-cv/python_flask_server/YoloNas.py deleted file mode 100644 index 0a7d79a4..00000000 --- a/ai-solutions/windows/electron-app-cv/python_flask_server/YoloNas.py +++ /dev/null @@ -1,100 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -import cv2 -import numpy as np - -from utils import draw_box - -label2class = {'0': 'person', '1': 'bicycle', '2': 'car', '3': 'motorcycle', '4': 'airplane', '5': 'bus', - '6': 'train', '7': 'truck', '8': 'boat', '9': 'traffic', '10': 'fire', '11': 'stop', '12': 'parking', - '13': 'bench', '14': 'bird', '15': 'cat', '16': 'dog', '17': 'horse', '18': 'sheep', '19': 'cow', - '20': 'elephant', '21': 'bear', '22': 'zebra', '23': 'giraffe', '24': 'backpack', '25': 'umbrella', - '26': 'handbag', '27': 'tie', '28': 'suitcase', '29': 'frisbee', '30': 'skis', '31': 'snowboard', - '32': 'sports', '33': 'kite', '34': 'baseball', '35': 'baseball', '36': 'skateboard', '37': 'surfboard', - '38': 'tennis', '39': 'bottle', '40': 'wine', '41': 'cup', '42': 'fork', '43': 'knife', '44': 'spoon', - '45': 'bowl', '46': 'banana', '47': 'apple', '48': 'sandwich', '49': 'orange', '50': 'broccoli', - '51': 'carrot', '52': 'hot', '53': 'pizza', '54': 'donut', '55': 'cake', '56': 'chair', '57': 'couch', - '58': 'potted', '59': 'bed', '60': 'dining', '61': 'toilet', '62': 'tv', '63': 'laptop', '64': 'mouse', - '65': 'remote', '66': 'keyboard', '67': 'cell', '68': 'microwave', '69': 'oven', '70': 'toaster', - '71': 'sink', '72': 'refrigerator', '73': 'book', '74': 'clock', '75': 'vase', '76': 'scissors', - '77': 'teddy', '78': 'hair', '79': 'toothbrush'} - -colors = np.random.uniform(0, 255, size=(len(list(label2class.values())), 3)) - -class YoloNAS: - - def preprocessing(self,img): - img = cv2.resize(img, (320,320)) - img = img/255 - return img - - def postprocessing(self, image_data, inf_result): - # print("step-2") - print(inf_result) - raw_scores = inf_result[:168000].copy() #.reshape(YOLONAS_MODEL_CLASSES_OUTPUT_SIZE) - output_909_reshape = 
raw_scores.reshape(2100,80) - # print("step-1") - output = output_909_reshape - raw_boxes = inf_result[168000:].copy() #.reshape(YOLONAS_MODEL_BOXES_OUTPUT_SIZE) - output_917_reshape = raw_boxes.reshape(2100,4) - - # print("step0") - boxes = [] - scores = [] - class_ids = [] - original_image = np.array(image_data, np.float32) - ratio_1 = original_image.shape[0]/320 - ratio_2 = original_image.shape[1]/320 - - # print("step 1") - for i in range(0, output.shape[0]): - classes_scores = output[i] - (minScore, maxScore, minClassLoc, (x, maxClassIndex)) = cv2.minMaxLoc(classes_scores) - if maxScore >= 0.05: - x = round(output_917_reshape[i][0]) ; y = round(output_917_reshape[i][1]); - w = round(output_917_reshape[i][2]) ; h = round(output_917_reshape[i][3]); - - x1, y1 = x, y - x2, y2 = w, h - box = [x1, y1, x2, y2] - boxes.append(box) - scores.append(float(maxScore)) - class_ids.append(maxClassIndex) - - # print("step2") - result_boxes = cv2.dnn.NMSBoxes(boxes, scores, 0.40, 0.5, 0.5) #32b CPU - - # print("result_boxes :: ",result_boxes) - detections = [] - img = np.array(image_data) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - - for i in range(len(result_boxes)): - index = result_boxes[i] - box = boxes[index] - - print("-----------") - detection = { - 'class_id': class_ids[index], - 'class_name': label2class[str(class_ids[index])], - 'confidence': scores[index], - 'box': box - } - detections.append(detection) - img = self.draw_bounding_box(img, class_ids[index], scores[index], int(box[0]*ratio_2), int(box[1]*ratio_1), int(box[2]*ratio_2), int(box[3]*ratio_1)) - - return img - - def draw_bounding_box(self, img, class_id, confidence, x, y, x_plus_w, y_plus_h): - label = f'{label2class[str(class_id)]} ({confidence:.2f})' - color = colors[class_id] - draw_box(img,[x,y,x_plus_w,y_plus_h],label,confidence,color) - return img - diff --git a/ai-solutions/windows/electron-app-cv/python_flask_server/YoloX.py b/ai-solutions/windows/electron-app-cv/python_flask_server/YoloX.py deleted file mode 100644 index e8c9fcdb..00000000 --- a/ai-solutions/windows/electron-app-cv/python_flask_server/YoloX.py +++ /dev/null @@ -1,151 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -from PIL import Image -import io -import os -import cv2 -import numpy as np -import time -import zmq -import sys -from utils import draw_box - -label2class = {'0': 'person', '1': 'bicycle', '2': 'car', '3': 'motorcycle', '4': 'airplane', '5': 'bus', - '6': 'train', '7': 'truck', '8': 'boat', '9': 'traffic', '10': 'fire', '11': 'stop', '12': 'parking', - '13': 'bench', '14': 'bird', '15': 'cat', '16': 'dog', '17': 'horse', '18': 'sheep', '19': 'cow', - '20': 'elephant', '21': 'bear', '22': 'zebra', '23': 'giraffe', '24': 'backpack', '25': 'umbrella', - '26': 'handbag', '27': 'tie', '28': 'suitcase', '29': 'frisbee', '30': 'skis', '31': 'snowboard', - '32': 'sports', '33': 'kite', '34': 'baseball', '35': 'baseball', '36': 'skateboard', '37': 'surfboard', - '38': 'tennis', '39': 'bottle', '40': 'wine', '41': 'cup', '42': 'fork', '43': 'knife', '44': 'spoon', - '45': 'bowl', '46': 'banana', '47': 'apple', '48': 'sandwich', '49': 'orange', '50': 'broccoli', - '51': 'carrot', '52': 'hot', '53': 'pizza', '54': 'donut', '55': 'cake', '56': 'chair', '57': 'couch', - '58': 'potted', '59': 'bed', '60': 'dining', '61': 'toilet', '62': 'tv', '63': 'laptop', '64': 'mouse', - '65': 'remote', '66': 'keyboard', '67': 'cell', '68': 'microwave', '69': 'oven', '70': 'toaster', - '71': 'sink', '72': 'refrigerator', '73': 'book', '74': 'clock', '75': 'vase', '76': 'scissors', - '77': 'teddy', '78': 'hair', '79': 'toothbrush'} -colors = np.random.uniform(0, 255, size=(len(list(label2class.values())), 3)) - -class YoloX: - def __init__(self): - self.input_shape = tuple(map(int,[640,640])) - - - def preproc_helper(self, img, input_size, swap=(2, 0, 1)): - if len(img.shape) == 3: - padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114 - else: - padded_img = np.ones(input_size, dtype=np.uint8) * 114 - - r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1]) - resized_img = cv2.resize( - img, - (int(img.shape[1] * r), int(img.shape[0] * r)), - interpolation=cv2.INTER_LINEAR, - ).astype(np.uint8) - padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img - - padded_img = padded_img.transpose(swap) - padded_img = np.ascontiguousarray(padded_img, dtype=np.float32) - return padded_img, r - - def preprocessing(self,img): - img = np.array(img, np.float32) - img, self.ratio = self.preproc_helper(img, self.input_shape) - img = np.transpose(img,(1,2,0)) - - return img - - def draw_bounding_box(self,img, class_id, confidence, x, y, x_plus_w, y_plus_h): - label = f'{label2class[str(class_id)]}' - color = colors[class_id] - draw_box(img,[x,y,x_plus_w,y_plus_h],label,confidence,color) - # img = cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2) - # img = cv2.putText(img, label, (x +2, y -10), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color, 2) - # print("Label of oject:", label) - # print("confidence", confidence) - return img - - def demo_postprocess(self, outputs, img_size, p6=False): - grids = [] - expanded_strides = [] - strides = [8, 16, 32] if not p6 else [8, 16, 32, 64] - - hsizes = [img_size[0] // stride for stride in strides] - wsizes = [img_size[1] // stride for stride in strides] - - for hsize, wsize, stride in zip(hsizes, wsizes, strides): - xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize)) - grid = np.stack((xv, yv), 2).reshape(1, -1, 2) - grids.append(grid) - shape = grid.shape[:2] - 
expanded_strides.append(np.full((*shape, 1), stride)) - - grids = np.concatenate(grids, 1) - expanded_strides = np.concatenate(expanded_strides, 1) - newoutputs = np.copy(outputs) - newoutputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides - finaloutputs = np.copy(newoutputs) - finaloutputs[..., 2:4] = np.exp(newoutputs[..., 2:4]) * expanded_strides - - return finaloutputs - - def postprocessing(self, image_data, inf_result): - - inf_result = inf_result.reshape((1,8400, 85)) - output = self.demo_postprocess(inf_result,self.input_shape)[0] - - #Initializing the lists - boxes_updated = [] - scores_updated = [] - class_ids = [] - - # Preprocessing the boxes and scores - #format of output is first 4 is the bounding boxes, 5th one is objectness score, last 80 column is score of each classes - boxes = output[:, :4] - scores = output[:, 4:5] * output[:, 5:] - - #Processing of bounding boxes - boxes_xyxy = np.ones_like(boxes) - boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2. - boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2. - boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2. - boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2. - boxes_xyxy /= self.ratio - - #For each prediction from 8400 predictions finding the results - for i in range(0, output.shape[0]): - (minScore, maxScore, minClassLoc, (x, maxClassIndex)) = cv2.minMaxLoc(scores[i]) - if maxScore >= 0.2: - boxes_updated.append(boxes_xyxy[i]) - # print("boxes_xyxy",boxes_xyxy[i][:]) - scores_updated.append(float(maxScore)) - class_ids.append(maxClassIndex) - - # Removing Overlapping predictions - result_boxes = cv2.dnn.NMSBoxes(boxes_updated, scores_updated, 0.40, 0.5, 0.5) #32b CPU - # detections = [] - img = np.array(image_data) ##int8 - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - - #For each prediction showing drawing the bounding boxes - for i in range(len(result_boxes)): - index = result_boxes[i] - box = boxes_updated[index] - - detection = { - 'class_id': class_ids[index], - 'class_name': label2class[str(class_ids[index])], - 'confidence': scores_updated[index], - 'box': box - } - # detections.append(detection) - img = self.draw_bounding_box(img, class_ids[index],detection['confidence'], int(box[0]), int(box[1]), int(box[2]), int(box[3])) - - return img diff --git a/ai-solutions/windows/electron-app-cv/python_flask_server/globalvar.py b/ai-solutions/windows/electron-app-cv/python_flask_server/globalvar.py deleted file mode 100644 index 43a820b4..00000000 --- a/ai-solutions/windows/electron-app-cv/python_flask_server/globalvar.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -old_runtime = "" -old_model_name = "" -flag_modelbuild_inprocess = 0 -stop_infer = 0 -__sockets = [] \ No newline at end of file diff --git a/ai-solutions/windows/electron-app-cv/python_flask_server/server.py b/ai-solutions/windows/electron-app-cv/python_flask_server/server.py deleted file mode 100644 index a0c07281..00000000 --- a/ai-solutions/windows/electron-app-cv/python_flask_server/server.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. 
All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -from flask import Flask -from flask_cors import CORS - -from ImageEnhancement_blueprint import imageEnhance_bp -from SuperResolution_blueprint import superRes_bp -from ObjectDetection_blueprint import objectDetect_bp -from ImageSegmentation_blueprint import imageSegment_bp -from utils import objectDetect_init - -from waitress import serve - - -app = Flask(__name__, - static_url_path='', - static_folder='static') -CORS(app) - -app.register_blueprint(objectDetect_bp) -app.register_blueprint(imageEnhance_bp) -app.register_blueprint(superRes_bp) -app.register_blueprint(imageSegment_bp) - - -if __name__ == '__main__': - objectDetect_init() - - ## Debug/developer Mode - # app.run(host='0.0.0.0', port=9081, debug=True, threaded=True) - - ##Production server - serve(app, host='0.0.0.0', port=9081, threads=4) - diff --git a/ai-solutions/windows/electron-app-cv/python_flask_server/static/assets/test_face_lite.jpg b/ai-solutions/windows/electron-app-cv/python_flask_server/static/assets/test_face_lite.jpg deleted file mode 100644 index f0ef1300..00000000 Binary files a/ai-solutions/windows/electron-app-cv/python_flask_server/static/assets/test_face_lite.jpg and /dev/null differ diff --git a/ai-solutions/windows/electron-app-cv/python_flask_server/static/assets/test_face_lr.jpg b/ai-solutions/windows/electron-app-cv/python_flask_server/static/assets/test_face_lr.jpg deleted file mode 100644 index f0ef1300..00000000 Binary files a/ai-solutions/windows/electron-app-cv/python_flask_server/static/assets/test_face_lr.jpg and /dev/null differ diff --git a/ai-solutions/windows/electron-app-cv/python_flask_server/templates/index.html b/ai-solutions/windows/electron-app-cv/python_flask_server/templates/index.html deleted file mode 100644 index b0ff9c08..00000000 --- a/ai-solutions/windows/electron-app-cv/python_flask_server/templates/index.html +++ /dev/null @@ -1,283 +0,0 @@ - - - - - Slider Example - - - - -
- - - - - - \ No newline at end of file diff --git a/ai-solutions/windows/electron-app-cv/python_flask_server/utils.py b/ai-solutions/windows/electron-app-cv/python_flask_server/utils.py deleted file mode 100644 index 69a24012..00000000 --- a/ai-solutions/windows/electron-app-cv/python_flask_server/utils.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -# https://github.com/Hyuto/yolo-nas-onnx -import cv2 -import numpy as np -import globalvar -import zmq -from threading import Lock -import sys -import os - -def draw_box(source, box, label, score, color, alpha=0.25): - """Draw boxes on images""" - - # print("::::source:::",source.shape) - # fill box - crop_box = source[ - box[1]: (box[1] + box[3]), box[0]: (box[0] + box[2]) - ] # crop box from source - color_box = np.ones([*crop_box.shape[:2], 1], dtype=np.uint8) * np.asarray( - color, dtype=np.uint8 - ) # color box (same size with crop). [h, w, 1] * [c] => [h, w, c] - cv2.addWeighted( - crop_box, 1 - alpha, color_box, alpha, 1.0, crop_box - ) # weighted from color box to source - - cv2.rectangle(source, box, color, 2) # draw box - - # measuring text - size = min(source.shape[:2]) * 0.0007 - thickness = int(min(source.shape[:2]) * 0.001) - (label_width, label_height), _ = cv2.getTextSize( - f"{label} - {round(score, 2)}%", - cv2.FONT_HERSHEY_SIMPLEX, - size, - thickness, - ) - # draw labels (filled rect with text inside) - cv2.rectangle( - source, - (box[0] - 1, box[1] - int(label_height * 2)), - (box[0] + int(label_width * 1.1), box[1]), - color, - cv2.FILLED, - ) - cv2.putText( - source, - f"{label} - {round(score, 2)}%", - (box[0], box[1] - int(label_height * 0.7)), - cv2.FONT_HERSHEY_SIMPLEX, - size, - [255, 255, 255], - thickness, - cv2.LINE_AA, - ) - -def objectDetect_init(): - for _ in range(4): - socket = zmq.Context().socket(zmq.REQ) - socket.connect("tcp://localhost:5555") - socket.setsockopt(zmq.RCVTIMEO, 30000) - globalvar.__sockets.append({ - "socket": socket, - "lock": Lock(), - }) - -def pyinstaller_absolute_path(relative_path): - """ For PyInstaller, getting absolute path of resources""" - base_path = getattr( sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__))) - abs_path = os.path.join(base_path, relative_path) - return abs_path \ No newline at end of file diff --git a/models-accuracy/readme.md b/models-accuracy/readme.md deleted file mode 100644 index 0b54a467..00000000 --- a/models-accuracy/readme.md +++ /dev/null @@ -1 +0,0 @@ -# To be released soon. 
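For quick reference, the `draw_box` helper in the deleted `utils.py` above blends a translucent color fill over the box region, draws the rectangle outline, and stamps a `label - score%` caption, all in place on the source image. Below is a minimal usage sketch that is not part of the original diff: it assumes the `draw_box(source, box, label, score, color, alpha=0.25)` signature shown above with `box` given as `[x, y, w, h]`, and the image path, box coordinates, and label are hypothetical example values.

```python
# Minimal sketch of calling the deleted draw_box() helper directly.
# Assumes utils.py from the diff above is importable; the image path,
# box coordinates, and label below are hypothetical example values.
import cv2

from utils import draw_box  # same import used by the deleted YoloNas.py

img = cv2.imread("sample.jpg")        # hypothetical test image (BGR)
box = [50, 80, 120, 160]              # [x, y, w, h] in pixels
draw_box(img, box, "person", 0.92, (0, 255, 0))  # draws on img in place
cv2.imwrite("sample_with_box.jpg", img)
```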
diff --git a/models-for-solutions/01-super-resolution/SESR/README.md b/models-for-solutions/01-super-resolution/SESR/README.md deleted file mode 100644 index 67d3ae40..00000000 --- a/models-for-solutions/01-super-resolution/SESR/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# Image Super Resolution - SESR - -| Field | Description | -| --- | --- | -| Model Name | SESR | -| DNN Framwork | ONNX | -| Public Repo | https://github.com/quic/aimet-model-zoo/#pytorch-model-zoo | -| Paper | https://arxiv.org/abs/2103.09404 | -| Accuracy Metric | PSNR | -| Input Resolution | 128 x 128 | -| Output Resolution | 512 x 512 | -| Pre-Processing | cv2.resize, cv2.cvtColor(img, cv2.COLOR_BGR2RGB), np.clip | -| Post-Processing | np.reshape, np.clip, transpose | - - -## Pre-requisites - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - -## Pre-Trained Model - -Please refer to notebook for detailed steps to prepare Pre-Trained Model - -## Convert Model to DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Quantization of DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Make Inference, Verify output. - -Please refer to notebook for detailed steps to making inference, verifying model output - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. and/or its subsidiaries.* diff --git a/models-for-solutions/01-super-resolution/SESR/SESR.patch b/models-for-solutions/01-super-resolution/SESR/SESR.patch deleted file mode 100644 index 510ff1fb..00000000 --- a/models-for-solutions/01-super-resolution/SESR/SESR.patch +++ /dev/null @@ -1,59 +0,0 @@ -diff --git a/aimet_zoo_torch/sesr/model/model_definition.py b/aimet_zoo_torch/sesr/model/model_definition.py -index b5ce935..e18c228 100644 ---- a/aimet_zoo_torch/sesr/model/model_definition.py -+++ b/aimet_zoo_torch/sesr/model/model_definition.py -@@ -15,9 +15,8 @@ - import json - import os - import torch --from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim --from aimet_zoo_torch.common.downloader import Downloader --from aimet_zoo_torch.common.super_resolution.models import SESRRelease -+from utils.super_resolution.downloader import Downloader -+from utils.super_resolution.models import SESRRelease - - - class SESR(SESRRelease, Downloader): -@@ -92,40 +91,7 @@ class SESR(SESRRelease, Downloader): - self.load_state_dict(state_dict) - self.cuda() - else: -- state_dict = torch.load(self.path_pre_opt_weights)["state_dict"] -+ state_dict = torch.load(self.path_pre_opt_weights, map_location = torch.device('cpu'))["state_dict"] - self.load_state_dict(state_dict) -- self.cuda() -+ #self.cuda() - self.eval() -- -- def get_quantsim(self, quantized=False): -- """get quantsim object with pre-loaded encodings""" -- if not self.cfg: -- raise NotImplementedError( -- "There is no Quantization Simulation available for the model_config passed" -- ) -- if quantized: -- self.from_pretrained(quantized=True) -- else: -- self.from_pretrained(quantized=False) -- device = torch.device("cuda") -- dummy_input = torch.rand(self.input_shape, device=device) -- kwargs = { -- "quant_scheme": self.cfg["optimization_config"][ -- "quantization_configuration" -- ]["quant_scheme"], -- "default_param_bw": self.cfg["optimization_config"][ -- "quantization_configuration" -- ]["param_bw"], -- "default_output_bw": self.cfg["optimization_config"][ -- "quantization_configuration" -- 
]["output_bw"], -- "config_file": self.path_aimet_config, -- "dummy_input": dummy_input, -- } -- sim = QuantizationSimModel(self, **kwargs) -- if self.path_aimet_encodings and quantized: -- load_encodings_to_sim(sim, self.path_aimet_encodings) -- if self.path_adaround_encodings and quantized: -- sim.set_and_freeze_param_encodings(self.path_adaround_encodings) -- sim.model.eval() -- return sim diff --git a/models-for-solutions/01-super-resolution/SESR/sesr.ipynb b/models-for-solutions/01-super-resolution/SESR/sesr.ipynb deleted file mode 100644 index d77bd473..00000000 --- a/models-for-solutions/01-super-resolution/SESR/sesr.ipynb +++ /dev/null @@ -1,763 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "12380c63", - "metadata": {}, - "source": [ - "# Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "855257d0", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/snpe/2.16.0.231029/\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"input/raw\"\n", - "os.environ['DLC32']=\"models/sesr_fp32.dlc\"\n", - "os.environ['DLC8']=\"models/sesr_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"input/input.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"sesr\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c9ec6316-8d97-45a2-be1b-8896b204f808", - "metadata": {}, - "outputs": [], - "source": [ - "import cv2\n", - "import glob\n", - "import os\n", - "import numpy as np\n", - "import torch" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0d8f2e7c-4033-47a2-9a68-e51180a5b6af", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('utils', exist_ok= True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2a65f5f5-181b-48f0-9bc9-95a47a212316", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "git clone https://github.com/quic/aimet-model-zoo/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d8e23cb1-451b-4084-9e8a-041b24deb031", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp -r SESR.patch aimet-model-zoo\n", - "cd aimet-model-zoo\n", - "git apply SESR.patch" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "621b9a25-9c70-4dcd-9d01-4c10de2505a1", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/common/super_resolution/ utils/\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/common/downloader.py utils/super_resolution/\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/sesr/model/ utils/\n", - "rm -rf aimet-model-zoo/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "01b7c905-b52f-4c36-ae0a-4369469698ca", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cd utils\n", - "touch __init__.py" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ab280bf2-28c0-4dd6-a848-6959ee8dc192", - "metadata": {}, - "outputs": [], - "source": [ - "from utils.model.model_definition import SESR\n", - "from utils.super_resolution.imresize import imresize" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": 
"2b55eb27-0ffb-488c-b3ca-a42dc4df9fcf", - "metadata": {}, - "outputs": [], - "source": [ - "model_fp32 = SESR(\"sesr_m11_4x_w8a8\",scaling_factor=4)\n", - "model_fp32.from_pretrained(quantized=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "29e440d2-2496-4fb3-a5d9-56638d54a108", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('models', exist_ok= True)\n", - "os.makedirs('output', exist_ok= True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "675c3487-d8ce-487e-a48f-b3ccb7939e41", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "dummy_input = torch.randn(1,3, 128, 128).type(torch.FloatTensor).to('cpu')\n", - "torch.onnx.export(model_fp32, dummy_input, \"./models/sesr.onnx\",opset_version=11)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b07e763d-d689-42be-bdd8-4763679a09a7", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc --input_network models/sesr.onnx --output_path models/sesr_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "6c1a3a1d", - "metadata": {}, - "source": [ - "# Download dataset\n", - "
    \n", - "
  • Dataset link wget https://figshare.com/ndownloader/files/38256855
  • \n", - "
  • Below block will automatically download datsest, but in case if it fails please download from above link.
  • \n", - "
  • Recommended, to comment below code, if already downloaded dataset once.
  • \n", - "
      " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7940d5f2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "wget https://figshare.com/ndownloader/files/38256855\n", - "unzip 38256855 -d input\n", - "rm -rf 38256855\n", - "rm -rf input/Set14/image_SRF_4\n", - "rm -rf input/Set14/image_SRF_3\n", - "mkdir input/raw\n", - "find input/Set14/image_SRF_2 -name '*_LR*' -delete\n", - "mv input/Set14/image_SRF_2/* input/Set14/\n", - "rm -rf input/Set14/image_SRF_2/" - ] - }, - { - "cell_type": "markdown", - "id": "3762f717", - "metadata": {}, - "source": [ - "# Pre-processing data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "137cbe80-cf28-4a4d-b3f0-6d36fbfedfef", - "metadata": {}, - "outputs": [], - "source": [ - "img_paths = glob.glob(os.path.join(\"input/Set14/\", '*'))\n", - "img_paths = sorted(img_paths)\n", - "img_paths" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bd33905d-6988-47cc-87b4-04a3f3290855", - "metadata": {}, - "outputs": [], - "source": [ - "RGB_WEIGHTS = torch.FloatTensor([65.481, 128.553, 24.966])\n", - "def preprocess(img, scaling_factor=2):\n", - " lr_img, hr_img = create_hr_lr_pair(img, scaling_factor)\n", - " return lr_img, hr_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7db661f3-8ea6-493c-b99e-2588462fed8c", - "metadata": {}, - "outputs": [], - "source": [ - "def create_hr_lr_pair(img, scaling_factor=2):\n", - " height, width = img.shape[0:2]\n", - "\n", - " # Take the largest possible center-crop of it such that its dimensions are perfectly divisible by the scaling factor\n", - " x_remainder = width % (scaling_factor)\n", - " y_remainder = height % (scaling_factor)\n", - " left = x_remainder // 2\n", - " top = y_remainder // 2\n", - " right = left + (width - x_remainder)\n", - " bottom = top + (height - y_remainder)\n", - " hr_img = img[top:bottom, left:right]\n", - "\n", - " hr_height, hr_width = hr_img.shape[0:2]\n", - "\n", - " hr_img = np.array(hr_img, dtype='float32')\n", - " lr_img = imresize(hr_img, 1. 
/ scaling_factor) # equivalent to matlab's imresize\n", - " flag=0\n", - " lr_img = np.uint8(np.clip(lr_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " hr_img = np.uint8(hr_img)\n", - " lr_height, lr_width = lr_img.shape[0:2]\n", - "\n", - " # Sanity check\n", - " assert hr_width == lr_width * scaling_factor and hr_height == lr_height * scaling_factor\n", - " lr_img = convert_image(lr_img, source='array', target='[0, 1]')\n", - " hr_img = convert_image(hr_img, source='array', target='[0, 1]')\n", - "\n", - " return lr_img, hr_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "802bb5cb-7aad-40ef-a17e-15b15f86d773", - "metadata": {}, - "outputs": [], - "source": [ - "def convert_image(img, source, target):\n", - " if source == 'array':\n", - " img = torch.from_numpy(img.transpose((2, 0, 1))).contiguous()#chw\n", - " img = img.to(dtype=torch.float32).div(255)\n", - " elif source == '[0, 1]':\n", - " img = torch.clamp(img, 0, 1) # useful to post-process output of models that can overspill\n", - " \n", - " if target == '[0, 1]':\n", - " pass # already in [0, 1]\n", - " elif target == 'y-channel':\n", - " img = torch.matmul(img.permute(0, 2, 3, 1), RGB_WEIGHTS.to(img.device)) + 16.\n", - " \n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aedc81bd-184d-488b-85d8-3ec626c15447", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process(img):\n", - " img = img.detach().cpu().numpy()\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2c7f677f-2989-4cce-9add-d31ba5efd5c4", - "metadata": {}, - "outputs": [], - "source": [ - "def load_dataset(test_images_dir, scaling_factor=2):\n", - " # Input images for the model\n", - " INPUTS_LR = []\n", - " # Post-processed images for visualization\n", - " IMAGES_LR = []\n", - " IMAGES_HR = [] \n", - " # Load the test images\n", - " count=0\n", - " img_paths = glob.glob(os.path.join(test_images_dir, '*'))\n", - " img_paths = sorted(img_paths)\n", - " for img_path in img_paths:\n", - " img = cv2.resize(cv2.imread(img_path),[512,512],interpolation=cv2.INTER_CUBIC)\n", - " img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n", - " lr_img, hr_img = preprocess(img, scaling_factor)#chw\n", - " INPUTS_LR.append(lr_img)#chw\n", - " IMAGES_LR.append(post_process(lr_img))#hwc\n", - " IMAGES_HR.append(post_process(hr_img))#hwc\n", - "\n", - " return INPUTS_LR, IMAGES_LR, IMAGES_HR" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17ec75e4", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"input/Set14\"\n", - "INPUTS_LR, IMAGES_LR, IMAGES_HR = load_dataset(test_images_dir, scaling_factor=4)\n", - "for i, img_lr in enumerate(INPUTS_LR):\n", - " img_lr = img_lr.cpu().detach().numpy()\n", - " img_lr = img_lr.astype(np.float32)\n", - " fid = open(\"input/raw/img\"+str(i)+ \".raw\", 'wb')\n", - " img_lr.tofile(fid)\n", - " fid.close()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "37bd17b7-3a3f-42a0-a662-8df8a13f544e", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "plt.imshow(IMAGES_LR[0])\n", - "plt.imshow(IMAGES_HR[0])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b1db2108-45f9-4047-b2cc-f68bd9b6febc", - "metadata": {}, - "outputs": [], - "source": [ - "with open(\"input/input.txt\", \"w\") as 
f:\n", - " for i in range(14):\n", - " file_path = f\"./raw/img{i}.raw\"\n", - " f.write(file_path + \"\\n\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3b9d8c9c-e18d-4d20-a868-d55f267fe4fd", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input\n", - "snpe-dlc-quantize --input_dlc ../models/sesr_fp32.dlc --input_list input.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc ../models/sesr_w8a8.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "f4921976", - "metadata": {}, - "source": [ - "# Post-process model output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7fa25133", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process_sr(img):\n", - "# img = img.detach().cpu().numpy()\n", - " img = np.fromfile(img, np.float32)\n", - " img = img.reshape((3,512, 512)).astype(np.float32)\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "markdown", - "id": "0b94790c", - "metadata": {}, - "source": [ - "# Method to calcualte PSNR" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d4103113", - "metadata": {}, - "outputs": [], - "source": [ - "def compute_psnr(img_pred, img_true, data_range=255., eps=1e-8):\n", - " \"\"\"\n", - " Compute PSNR between super-resolved and original images.\n", - " \n", - " :param img_pred:\n", - " The super-resolved image obtained from the model\n", - " :param img_true:\n", - " The original high-res image\n", - " :param data_range:\n", - " Default = 255\n", - " :param eps:\n", - " Default = 1e-8\n", - " :return:\n", - " PSNR value\n", - " \"\"\"\n", - " err = (img_pred - img_true) ** 2\n", - " err = err.mean(dim=-1).mean(dim=-1)\n", - " return 10. 
* torch.log10((data_range ** 2) / (err + eps))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e230a6fe", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_psnr(y_pred, y_true):\n", - " \"\"\"\n", - " Evaluate individual PSNR metric for each super-res and actual high-res image-pair.\n", - " \n", - " :param y_pred:\n", - " The super-resolved image from the model\n", - " :param y_true:\n", - " The original high-res image\n", - " :return:\n", - " The evaluated PSNR metric for the image-pair\n", - " \"\"\"\n", - " y_pred = y_pred.transpose(2, 0, 1)[None] / 255.\n", - " y_true = y_true.transpose(2, 0, 1)[None] / 255.\n", - "\n", - " sr_img = convert_image(torch.FloatTensor(y_pred),\n", - " source='[0, 1]',\n", - " target='y-channel')\n", - " hr_img = convert_image(torch.FloatTensor(y_true),\n", - " source='[0, 1]',\n", - " target='y-channel')\n", - " return compute_psnr(sr_img, hr_img)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b389e852", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_average_psnr(sr_images, hr_images):\n", - " \"\"\"\n", - " Evaluate the avg PSNR metric for all test-set super-res and high-res images.\n", - "\n", - " :param sr_images:\n", - " The list of super-resolved images obtained from the model for the given test-images\n", - " :param hr_images:\n", - " The list of original high-res test-images\n", - " :return:\n", - " Average PSNR metric for all super-resolved and high-res test-set image-pairs\n", - " \"\"\"\n", - " psnr = []\n", - " for sr_img, hr_img in zip(sr_images, hr_images):\n", - " psnr.append(evaluate_psnr(sr_img, hr_img))\n", - "\n", - " # Convert the list of tensor values to a tensor array\n", - " psnr_tensor = torch.cat(psnr)\n", - " # Calculate the mean of the tensor array\n", - " average_psnr = torch.mean(psnr_tensor)\n", - " return average_psnr" - ] - }, - { - "cell_type": "markdown", - "id": "63a1687e", - "metadata": {}, - "source": [ - "# Creating Bin and Lib Folder on Device " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a8c2b9ce", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "ea80b847", - "metadata": {}, - "source": [ - "# Pushing all Lib and Bin files onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed6634e3", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "markdown", - "id": "5bf854eb", - "metadata": {}, - "source": [ - "# Pushing Artifacts on to Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "088a8923", - "metadata": {}, - "outputs": [], - 
"source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7d34aeac", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "#find ./raw -name *.raw > list.txt\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "1efd9588", - "metadata": {}, - "source": [ - "# Inferencing 8 bit DLC on DSP Runtime\n", - "Give name of DLC in OUTPUT_DLC_QUANTIZED8 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "484a6e4c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=sesr_w8a8.dlc\n", - "export ONDEVICE_FOLDER=\"sesr\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list input.txt --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "c93ea4c5", - "metadata": {}, - "source": [ - "# Inferencing 32b DLC on CPU Runtime\n", - "Give name of DLC in OUTPUT_DLC_32 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "83c3a198", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=sesr_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"sesr\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list input.txt --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "ec8c1af8", - "metadata": {}, - "source": [ - "# Pulling Output folder generated on different precision and cores" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3bf075ed-ae3f-400d-97b8-e75acc863a20", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "rm -rf output/OUTPUT_8b_DSP\n", - "rm -rf output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3015b52c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP output/OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "8b3b27e6", - "metadata": {}, - "source": [ - "# Calculate PSNR\n", - "* Pass path of two raw image in Argument." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22c037ca-46aa-4dc8-95ca-e14b69d94ae5", - "metadata": {}, - "outputs": [], - "source": [ - "val = []\n", - "for i in range(10):\n", - " val.append(IMAGES_HR[i])\n", - "val[0].shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ff341bf7-5590-4a0e-9df6-cfadb807179d", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e7cea900", - "metadata": {}, - "outputs": [], - "source": [ - "folder = [\"output/OUTPUT_8b_DSP/\",\"output/OUTPUT_32b_CPU\"]\n", - "RGB_WEIGHTS = torch.FloatTensor([65.481, 128.553, 24.966])\n", - "for j in range(0,len(folder)):\n", - " IMAGES_SR = []\n", - " for i in range(0,10):\n", - " IMAGES_SR.append(post_process_sr(folder[j]+\"/Result_\"+str(i)+\"/94.raw\"))\n", - " print(folder[j],\" (Average PSNR) :: \",evaluate_average_psnr(IMAGES_SR, IMAGES_HR))\n", - " print(\"\\n============================\\n\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dad3b82e", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "plt.imshow(IMAGES_SR[5])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d7bc9d09-8de6-443e-b99f-1e10c1d00686", - "metadata": {}, - "outputs": [], - "source": [ - "plt.imshow(IMAGES_HR[5])" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/01-super-resolution/SRGAN/mmsr-changes.patch b/models-for-solutions/01-super-resolution/SRGAN/mmsr-changes.patch deleted file mode 100644 index cf77ff97..00000000 --- a/models-for-solutions/01-super-resolution/SRGAN/mmsr-changes.patch +++ /dev/null @@ -1,160 +0,0 @@ -diff --git a/codes/models/SRGAN_model.py b/codes/models/SRGAN_model.py -index 051f507..594b724 100644 ---- a/codes/models/SRGAN_model.py -+++ b/codes/models/SRGAN_model.py -@@ -3,10 +3,10 @@ from collections import OrderedDict - import torch - import torch.nn as nn - from torch.nn.parallel import DataParallel, DistributedDataParallel --import models.networks as networks --import models.lr_scheduler as lr_scheduler -+import codes.models.networks as networks -+import codes.models.lr_scheduler as lr_scheduler - from .base_model import BaseModel --from models.loss import GANLoss -+from codes.models.loss import GANLoss - - logger = logging.getLogger('base') - -diff --git a/codes/models/SR_model.py b/codes/models/SR_model.py -index 6782762..29a43ac 100644 ---- a/codes/models/SR_model.py -+++ b/codes/models/SR_model.py -@@ -4,10 +4,10 @@ from collections import OrderedDict - import torch - import torch.nn as nn - from torch.nn.parallel import DataParallel, DistributedDataParallel --import models.networks as networks --import models.lr_scheduler as lr_scheduler -+import codes.models.networks as networks -+import codes.models.lr_scheduler as lr_scheduler - from .base_model import BaseModel --from models.loss import CharbonnierLoss -+from codes.models.loss import CharbonnierLoss - - logger = logging.getLogger('base') - -@@ -23,7 +23,7 @@ class SRModel(BaseModel): - train_opt = opt['train'] - - # define network 
and load pretrained models -- self.netG = networks.define_G(opt).to(self.device) -+ self.netG = networks.define_G(opt).to(torch.device('cpu')) - if opt['dist']: - self.netG = DistributedDataParallel(self.netG, device_ids=[torch.cuda.current_device()]) - else: -diff --git a/codes/models/archs/EDVR_arch.py b/codes/models/archs/EDVR_arch.py -index df9c032..fa79ef1 100644 ---- a/codes/models/archs/EDVR_arch.py -+++ b/codes/models/archs/EDVR_arch.py -@@ -3,11 +3,12 @@ import functools - import torch - import torch.nn as nn - import torch.nn.functional as F --import models.archs.arch_util as arch_util -+import codes.models.archs.arch_util as arch_util - try: - from models.archs.dcn.deform_conv import ModulatedDeformConvPack as DCN - except ImportError: -- raise ImportError('Failed to import DCNv2 module.') -+ #raise ImportError('Failed to import DCNv2 module.') -+ pass - - - class Predeblur_ResNet_Pyramid(nn.Module): -diff --git a/codes/models/archs/RRDBNet_arch.py b/codes/models/archs/RRDBNet_arch.py -index 9d61256..9dbc413 100644 ---- a/codes/models/archs/RRDBNet_arch.py -+++ b/codes/models/archs/RRDBNet_arch.py -@@ -2,7 +2,7 @@ import functools - import torch - import torch.nn as nn - import torch.nn.functional as F --import models.archs.arch_util as arch_util -+import codes.models.archs.arch_util as arch_util - - - class ResidualDenseBlock_5C(nn.Module): -diff --git a/codes/models/archs/SRResNet_arch.py b/codes/models/archs/SRResNet_arch.py -index 6e622ac..cbe3049 100644 ---- a/codes/models/archs/SRResNet_arch.py -+++ b/codes/models/archs/SRResNet_arch.py -@@ -1,7 +1,7 @@ - import functools - import torch.nn as nn - import torch.nn.functional as F --import models.archs.arch_util as arch_util -+import codes.models.archs.arch_util as arch_util - - - class MSRResNet(nn.Module): -diff --git a/codes/models/archs/TOF_arch.py b/codes/models/archs/TOF_arch.py -old mode 100755 -new mode 100644 -diff --git a/codes/models/archs/arch_util.py b/codes/models/archs/arch_util.py -index ca5d7fa..e920c23 100644 ---- a/codes/models/archs/arch_util.py -+++ b/codes/models/archs/arch_util.py -@@ -41,13 +41,15 @@ class ResidualBlock_noBN(nn.Module): - super(ResidualBlock_noBN, self).__init__() - self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) - self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) -+ self.relu = nn.ReLU() - - # initialization - initialize_weights([self.conv1, self.conv2], 0.1) - - def forward(self, x): - identity = x -- out = F.relu(self.conv1(x), inplace=True) -+ # out = F.relu(self.conv1(x), inplace=True) -+ out = self.relu(self.conv1(x)) - out = self.conv2(out) - return identity + out - -diff --git a/codes/models/base_model.py b/codes/models/base_model.py -index 8a5d222..b01ed77 100644 ---- a/codes/models/base_model.py -+++ b/codes/models/base_model.py -@@ -84,7 +84,7 @@ class BaseModel(): - def load_network(self, load_path, network, strict=True): - if isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel): - network = network.module -- load_net = torch.load(load_path) -+ load_net = torch.load('MSRGANx4.pth') - load_net_clean = OrderedDict() # remove unnecessary 'module.' 
- for k, v in load_net.items(): - if k.startswith('module.'): -diff --git a/codes/models/networks.py b/codes/models/networks.py -index 2a67913..3b2c1d6 100644 ---- a/codes/models/networks.py -+++ b/codes/models/networks.py -@@ -1,8 +1,8 @@ - import torch --import models.archs.SRResNet_arch as SRResNet_arch --import models.archs.discriminator_vgg_arch as SRGAN_arch --import models.archs.RRDBNet_arch as RRDBNet_arch --import models.archs.EDVR_arch as EDVR_arch -+import codes.models.archs.SRResNet_arch as SRResNet_arch -+import codes.models.archs.discriminator_vgg_arch as SRGAN_arch -+import codes.models.archs.RRDBNet_arch as RRDBNet_arch -+import codes.models.archs.EDVR_arch as EDVR_arch - - - # Generator -diff --git a/codes/options/options.py b/codes/options/options.py -index 99181b3..3929cf8 100644 ---- a/codes/options/options.py -+++ b/codes/options/options.py -@@ -2,7 +2,7 @@ import os - import os.path as osp - import logging - import yaml --from utils.util import OrderedYaml -+from codes.utils.util import OrderedYaml - Loader, Dumper = OrderedYaml() - - -diff --git a/experiments/pretrained_models/Put pretrained models here. b/experiments/pretrained_models/Put pretrained models here. -deleted file mode 100644 -index e69de29..0000000 diff --git a/models-for-solutions/01-super-resolution/SRGAN/readme.md b/models-for-solutions/01-super-resolution/SRGAN/readme.md deleted file mode 100644 index 23ad6f83..00000000 --- a/models-for-solutions/01-super-resolution/SRGAN/readme.md +++ /dev/null @@ -1,105 +0,0 @@ -# Super Image Resolution with SRGAN - -| Field | Description | -| --- | --- | -| Model Name | SRGAN | -| DNN Framwork | PyTorch | -| Public Repo | https://github.com/quic/aimet-model-zoo/ | -| Paper | NA | -| Accuracy Metric | PSNR | -| Input Resolution | 128 x 128 | -| Output Resolution | 512 x 512 | -| Pre-Process |
      1. Read the input image [BGR, LQ (128 x 128)] as a numpy array
      2. Expand the input dimensions, i.e., add the channel (C) dimension
      3. BGR -> RGB
      4. HWC -> CHW
      5. numpy -> tensor
      6. Pass the tensor to the model
      | -| Post-Process |
      1. Read the model output tensor [RGB, 512 x 512 x 3], i.e., a flattened array
      2. Reshape it to a 512 x 512 x 3 array
      3. numpy -> tensor
      4. Clamp the values to Min(0), Max(1)
      5. tensor -> numpy
      6. Multiply the array by 255.0 and round it to the nearest integer
      7. RGB -> BGR
      8. Export the array as UINT8
      | - -## Pre-requisites - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - -- Please follow the instructions for setting up Qualcomm Neural Processing SDK using the [link] (https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) provided. -- Install onnx v1.6.0. Installation instruction can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) - -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. - -- Install OpenCV ```pip install cv2``` - -- Install mxnet ```pip install mxnet``` - - - -## Modify srgan_quanteval.py to get model ? - -1. In mmsr/codes folder create a ```__init__.py ``` file. - -2. Copy the ```srgan_quanteval.py``` from ```aimet_zoo_torch/srgan/evaluators/``` into your ```mmsr``` directory -3. In the ```mmsr``` repo , go to ```srgan_quanteval.py``` file and do the following changes: -4. Comment imports -```python -# from aimet_torch import quantsim - -# from aimet_zoo_torch.common.utils import utils - - -``` -5. Replace the main function of srgan_quanteval.py to this code -```Python -def main(args): - """Evaluation main script""" - - # Adding hardcoded values to config on top of args - config = ModelConfig(args) - - download_weights() - print("download complete!") - - print("configuration complete!") - - print(f"Parsing file {config.yml}...") - opt = option.parse(config.yml, is_train=False) - opt = option.dict_to_nonedict(opt) - - print("Loading test images...") # comment - - device = torch.device('cuda' if torch.cuda.is_available() and config.use_cuda else 'cpu') - - model = create_model(opt) - generator = model.netG.module - generator.eval() - dummy_input = torch.randn(1,3, 128, 128).type(torch.FloatTensor).to('cpu') - torch.onnx.export(generator, dummy_input, "srgan.onnx",opset_version=11) - - -``` - -6. Replace the ModelConfig function of srgan_quanteval.py to this code -```python - class ModelConfig: - """Adding hardcoded values into args from parseargs() and return config object""" - - def __init__(self, args): - self.yml = "./codes/options/test/test_SRGAN.yml" - self.quant_scheme = "tf_enhanced" - for arg in vars(args): - setattr(self, arg, getattr(args, arg)) -``` - - -## follow notebook for further steps - - -## Convert Model to DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Quantization of DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Make Inference, Verify output. - -Please refer to notebook for detailed steps to making inference, verifying model output - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. 
and/or its subsidiaries.* - diff --git a/models-for-solutions/01-super-resolution/SRGAN/srgan.ipynb b/models-for-solutions/01-super-resolution/SRGAN/srgan.ipynb deleted file mode 100644 index db099a54..00000000 --- a/models-for-solutions/01-super-resolution/SRGAN/srgan.ipynb +++ /dev/null @@ -1,867 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "12380c63", - "metadata": {}, - "source": [ - "# Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "72d9ad17-0fe3-4982-b0d0-d8f187641cb0", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/snpe/2.16.0.231029/\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"input/raw\"\n", - "os.environ['DLC32']=\"models/srgan_fp32.dlc\"\n", - "os.environ['DLC8']=\"models/srgan_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"input/input.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"srgan\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cad4a5b8-78c0-4935-8583-44c079125549", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "git clone https://github.com/andreas128/mmsr.git" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "21ee2808-b3d4-463b-af10-caeefc2a2328", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cd mmsr \n", - "git reset --hard a73b318f0f07feb6505ef5cb1abf0db33e33807a" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ac945d00-d3c1-4cdd-9d06-e38e8669d3b4", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "cd mmsr\n", - "git apply ../mmsr-changes.patch" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8f32a089-da28-4e61-bbe8-284874b3efe3", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "git clone https://github.com/quic/aimet-model-zoo/\n", - "cp -r srgan.patch aimet-model-zoo\n", - "cd aimet-model-zoo\n", - "git apply srgan.patch" - ] - }, - { - "cell_type": "markdown", - "id": "aeb89fb1-a5fa-46f4-91a3-dea34a2d1ca4", - "metadata": {}, - "source": [ - "## Steps to generate ONNX model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c9067ab6-b34a-4171-a15a-921595b380be", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/srgan/evaluators/srgan_quanteval.py mmsr/\n", - "rm -rf aimet-model-zoo/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ac7ff871-3b61-47cc-8252-5e4c1b189108", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cd mmsr/codes/\n", - "touch __init__.py\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ab608f72-b48f-4d33-8c4d-dbb687573a79", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cd mmsr/\n", - "python srgan_quanteval.py --mmsr-path '../mmsr'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "855257d0", - "metadata": {}, - "outputs": [], - "source": [ - "import cv2\n", - "import glob\n", - "import os\n", - "import numpy as 
np\n", - "import torch\n", - "from math import ceil, floor\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "29e440d2-2496-4fb3-a5d9-56638d54a108", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('models', exist_ok= True)\n", - "os.makedirs('output', exist_ok= True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "382fd637-e42c-416e-82fe-728bb0d7f218", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp -r mmsr/srgan.onnx models/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "050ba477-6649-4c53-85ae-611b6bfb6ca5", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b07e763d-d689-42be-bdd8-4763679a09a7", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc --input_network models/srgan.onnx --output_path models/srgan_fp32.dlc\n" - ] - }, - { - "cell_type": "markdown", - "id": "6c1a3a1d", - "metadata": {}, - "source": [ - "# Download dataset\n", - "
        \n", - "
      • Dataset link wget https://figshare.com/ndownloader/files/38256855
      • \n", - "
      • Below block will automatically download datsest, but in case if it fails please download from above link.
      • \n", - "
      • Recommended, to comment below code, if already downloaded dataset once.
      • \n", - "
          " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7940d5f2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "wget https://figshare.com/ndownloader/files/38256855\n", - "unzip 38256855 -d input\n", - "rm -rf 38256855\n", - "rm -rf input/Set14/image_SRF_4\n", - "rm -rf input/Set14/image_SRF_3\n", - "mkdir input/raw\n", - "find input/Set14/image_SRF_2 -name '*_LR*' -delete\n", - "mv input/Set14/image_SRF_2/* input/Set14/\n", - "rm -rf input/Set14/image_SRF_2/" - ] - }, - { - "cell_type": "markdown", - "id": "3762f717", - "metadata": {}, - "source": [ - "# Pre-processing data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "137cbe80-cf28-4a4d-b3f0-6d36fbfedfef", - "metadata": {}, - "outputs": [], - "source": [ - "img_paths = glob.glob(os.path.join(\"input/Set14/\", '*'))\n", - "img_paths = sorted(img_paths)\n", - "img_paths" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12f4bfe6-1eb9-4c12-bbda-1a1a8e96b1e6", - "metadata": {}, - "outputs": [], - "source": [ - "import requests\n", - "\n", - "url = 'https://raw.githubusercontent.com/quic/aimet-model-zoo/develop/aimet_zoo_torch/common/super_resolution/imresize.py'\n", - "response = requests.get(url)\n", - "\n", - "if response.status_code == 200:\n", - " with open('imresize.py', 'wb') as f:\n", - " f.write(response.content)\n", - " print('done')\n", - "else:\n", - " print(\"error\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6effdbc2-b0f2-401e-9276-512d82afeccc", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b528f62f-8628-4010-94af-dc30f4516ffe", - "metadata": {}, - "outputs": [], - "source": [ - "RGB_WEIGHTS = torch.FloatTensor([65.481, 128.553, 24.966])\n", - "from imresize import *\n", - "def preprocess(img, scaling_factor=2):\n", - " lr_img, hr_img = create_hr_lr_pair(img, scaling_factor)\n", - "\n", - " return lr_img, hr_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1e64def8-1471-4408-b9af-ff9d7944ac5a", - "metadata": {}, - "outputs": [], - "source": [ - "def create_hr_lr_pair(img, scaling_factor=2):\n", - " height, width = img.shape[0:2]\n", - "\n", - " # Take the largest possible center-crop of it such that its dimensions are perfectly divisible by the scaling factor\n", - " x_remainder = width % (scaling_factor)\n", - " y_remainder = height % (scaling_factor)\n", - " left = x_remainder // 2\n", - " top = y_remainder // 2\n", - " right = left + (width - x_remainder)\n", - " bottom = top + (height - y_remainder)\n", - " hr_img = img[top:bottom, left:right]\n", - "\n", - " hr_height, hr_width = hr_img.shape[0:2]\n", - "\n", - " hr_img = np.array(hr_img, dtype='float32')\n", - " lr_img = imresize(hr_img, 1. 
/ scaling_factor) # equivalent to matlab's imresize\n", - " flag=0\n", - " lr_img = np.uint8(np.clip(lr_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " hr_img = np.uint8(hr_img)\n", - " lr_height, lr_width = lr_img.shape[0:2]\n", - "\n", - " # Sanity check\n", - " assert hr_width == lr_width * scaling_factor and hr_height == lr_height * scaling_factor\n", - " lr_img = convert_image(lr_img, source='array', target='[0, 1]')\n", - " hr_img = convert_image(hr_img, source='array', target='[0, 1]')\n", - "\n", - " return lr_img, hr_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "87550bcf-a8cf-454f-9db0-75d67b69f809", - "metadata": {}, - "outputs": [], - "source": [ - "def convert_image(img, source, target):\n", - " if source == 'array':\n", - " img = torch.from_numpy(img.transpose((2, 0, 1))).contiguous()#chw\n", - " img = img.to(dtype=torch.float32).div(255)\n", - " elif source == '[0, 1]':\n", - " img = torch.clamp(img, 0, 1) # useful to post-process output of models that can overspill\n", - " \n", - " if target == '[0, 1]':\n", - " pass # already in [0, 1]\n", - " elif target == 'y-channel':\n", - " img = torch.matmul(img.permute(0, 2, 3, 1), RGB_WEIGHTS.to(img.device)) + 16.\n", - " \n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fc69e106-46e9-49de-bbc5-5a702b15cee1", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process(img):\n", - " img = img.detach().cpu().numpy()\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5efab17b-342b-43bf-9def-b9fc80b7c985", - "metadata": {}, - "outputs": [], - "source": [ - "def load_dataset(test_images_dir, scaling_factor=2):\n", - " # Input images for the model\n", - " INPUTS_LR = []\n", - "\n", - " # Post-processed images for visualization\n", - " IMAGES_LR = []\n", - " IMAGES_HR = []\n", - " \n", - " # Load the test images\n", - " count=0\n", - " img_paths = glob.glob(os.path.join(test_images_dir, '*'))\n", - " img_paths = sorted(img_paths)\n", - " for img_path in img_paths:\n", - " img = cv2.resize(cv2.imread(img_path),[512,512],interpolation=cv2.INTER_CUBIC)\n", - " img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n", - " lr_img, hr_img = preprocess(img, scaling_factor)#chw\n", - " INPUTS_LR.append(lr_img)#chw\n", - " IMAGES_LR.append(post_process(lr_img))#hwc\n", - " IMAGES_HR.append(post_process(hr_img))#hwc\n", - "\n", - " return INPUTS_LR, IMAGES_LR, IMAGES_HR" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed304b6e-cd34-4064-a471-ddad10278445", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bf059f9e-ce60-4264-91c2-22a45b58edec", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ec5beb0d-faa5-4bd7-bede-45ec131c5afc", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17ec75e4", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"input/Set14\"\n", - "INPUTS_LR, IMAGES_LR, IMAGES_HR = load_dataset(test_images_dir, scaling_factor=4)\n", - "for i, img_lr in enumerate(INPUTS_LR):\n", - " img_lr = img_lr.cpu().detach().numpy()\n", - " img_lr = img_lr.astype(np.float32)\n", - " fid = open(\"input/raw/img\"+str(i)+ \".raw\", 'wb')\n", - " 
img_lr.tofile(fid)\n", - " fid.close()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "37bd17b7-3a3f-42a0-a662-8df8a13f544e", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "plt.imshow(IMAGES_LR[0])\n", - "plt.imshow(IMAGES_HR[0])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b1db2108-45f9-4047-b2cc-f68bd9b6febc", - "metadata": {}, - "outputs": [], - "source": [ - "with open(\"input/input.txt\", \"w\") as f:\n", - " for i in range(14):\n", - " file_path = f\"./raw/img{i}.raw\"\n", - " f.write(file_path + \"\\n\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3b9d8c9c-e18d-4d20-a868-d55f267fe4fd", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input\n", - "snpe-dlc-quantize --input_dlc ../models/srgan_fp32.dlc --input_list input.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc ../models/srgan_w8a8.dlc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0455e1a5-51af-4e90-8dd9-993b62f52bdb", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "markdown", - "id": "f4921976", - "metadata": {}, - "source": [ - "# Post-process model output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7fa25133", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process_sr(img):\n", - "# img = img.detach().cpu().numpy()\n", - " img = np.fromfile(img, np.float32)\n", - " img = img.reshape((3,512, 512)).astype(np.float32)\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "markdown", - "id": "0b94790c", - "metadata": {}, - "source": [ - "Method to calcualte PSNR" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d4103113", - "metadata": {}, - "outputs": [], - "source": [ - "def compute_psnr(img_pred, img_true, data_range=255., eps=1e-8):\n", - " \"\"\"\n", - " Compute PSNR between super-resolved and original images.\n", - " \n", - " :param img_pred:\n", - " The super-resolved image obtained from the model\n", - " :param img_true:\n", - " The original high-res image\n", - " :param data_range:\n", - " Default = 255\n", - " :param eps:\n", - " Default = 1e-8\n", - " :return:\n", - " PSNR value\n", - " \"\"\"\n", - " err = (img_pred - img_true) ** 2\n", - " err = err.mean(dim=-1).mean(dim=-1)\n", - "\n", - " return 10. 
* torch.log10((data_range ** 2) / (err + eps))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e230a6fe", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_psnr(y_pred, y_true):\n", - " \"\"\"\n", - " Evaluate individual PSNR metric for each super-res and actual high-res image-pair.\n", - " \n", - " :param y_pred:\n", - " The super-resolved image from the model\n", - " :param y_true:\n", - " The original high-res image\n", - " :return:\n", - " The evaluated PSNR metric for the image-pair\n", - " \"\"\"\n", - " y_pred = y_pred.transpose(2, 0, 1)[None] / 255.\n", - " y_true = y_true.transpose(2, 0, 1)[None] / 255.\n", - "\n", - " sr_img = convert_image(torch.FloatTensor(y_pred),\n", - " source='[0, 1]',\n", - " target='y-channel')\n", - " hr_img = convert_image(torch.FloatTensor(y_true),\n", - " source='[0, 1]',\n", - " target='y-channel')\n", - "\n", - " return compute_psnr(sr_img, hr_img)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b389e852", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_average_psnr(sr_images, hr_images):\n", - " \"\"\"\n", - " Evaluate the avg PSNR metric for all test-set super-res and high-res images.\n", - "\n", - " :param sr_images:\n", - " The list of super-resolved images obtained from the model for the given test-images\n", - " :param hr_images:\n", - " The list of original high-res test-images\n", - " :return:\n", - " Average PSNR metric for all super-resolved and high-res test-set image-pairs\n", - " \"\"\"\n", - " psnr = []\n", - " for sr_img, hr_img in zip(sr_images, hr_images):\n", - " psnr.append(evaluate_psnr(sr_img, hr_img))\n", - " print(evaluate_psnr(sr_img, hr_img))\n", - "\n", - " \n", - "\n", - " # Convert the list of tensor values to a tensor array\n", - " psnr_tensor = torch.cat(psnr)\n", - "\n", - " \n", - "\n", - " # Calculate the mean of the tensor array\n", - " average_psnr = torch.mean(psnr_tensor)\n", - "\n", - " return average_psnr" - ] - }, - { - "cell_type": "markdown", - "id": "63a1687e", - "metadata": {}, - "source": [ - "# Creating Bin and Lib Folder on Device " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a8c2b9ce", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "ea80b847", - "metadata": {}, - "source": [ - "# Pushing all Lib and Bin files onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed6634e3", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "markdown", - "id": "5bf854eb", - "metadata": {}, - "source": [ - "# Pushing Artifacts on to Device" - ] - }, - { - 
"cell_type": "code", - "execution_count": null, - "id": "088a8923", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7d34aeac", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "#find ./raw -name *.raw > list.txt\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "1efd9588", - "metadata": {}, - "source": [ - "# Inferencing 8 bit DLC on DSP Runtime\n", - "Give name of DLC in OUTPUT_DLC_QUANTIZED8 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "484a6e4c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=srgan_w8a8.dlc\n", - "export ONDEVICE_FOLDER=\"srgan\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list input.txt --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "c93ea4c5", - "metadata": {}, - "source": [ - "# Inferencing 32b DLC on CPU Runtime\n", - "Give name of DLC in OUTPUT_DLC_32 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "83c3a198", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=srgan_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"srgan\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list input.txt --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "ec8c1af8", - "metadata": {}, - "source": [ - "# Pulling Output folder generated on different precision and cores" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3bf075ed-ae3f-400d-97b8-e75acc863a20", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "rm -rf output/OUTPUT_8b_DSP\n", - "rm -rf output/OUTPUT_32b_CPU\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "77e5bf2b-66d9-498a-9e24-8d0a227eff09", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3015b52c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "# $DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP 
output/OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "8b3b27e6", - "metadata": {}, - "source": [ - "# Calculate PSNR\n", - "* Pass path of two raw image in Argument." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22c037ca-46aa-4dc8-95ca-e14b69d94ae5", - "metadata": {}, - "outputs": [], - "source": [ - "val = []\n", - "for i in range(10):\n", - " val.append(IMAGES_HR[i])\n", - "val[0].shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ff341bf7-5590-4a0e-9df6-cfadb807179d", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e7cea900", - "metadata": {}, - "outputs": [], - "source": [ - "folder = [\"output/OUTPUT_32b_CPU\"]\n", - "RGB_WEIGHTS = torch.FloatTensor([65.481, 128.553, 24.966])\n", - "for j in range(0,len(folder)):\n", - " IMAGES_SR = []\n", - " for i in range(0,13):\n", - " IMAGES_SR.append(post_process_sr(folder[j]+\"/Result_\"+str(i)+\"/155.raw\"))\n", - " print(folder[j],\" (Average PSNR) :: \",evaluate_average_psnr(IMAGES_SR, IMAGES_HR))\n", - " print(\"\\n============================\\n\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dad3b82e", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "plt.imshow(IMAGES_SR[5])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d7bc9d09-8de6-443e-b99f-1e10c1d00686", - "metadata": {}, - "outputs": [], - "source": [ - "plt.imshow(IMAGES_HR[5])" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/01-super-resolution/SRGAN/srgan.patch b/models-for-solutions/01-super-resolution/SRGAN/srgan.patch deleted file mode 100644 index dd573d97..00000000 --- a/models-for-solutions/01-super-resolution/SRGAN/srgan.patch +++ /dev/null @@ -1,114 +0,0 @@ -diff --git a/aimet_zoo_torch/srgan/evaluators/srgan_quanteval.py b/aimet_zoo_torch/srgan/evaluators/srgan_quanteval.py -index 002d160..fcab794 100644 ---- a/aimet_zoo_torch/srgan/evaluators/srgan_quanteval.py -+++ b/aimet_zoo_torch/srgan/evaluators/srgan_quanteval.py -@@ -28,7 +28,7 @@ import shutil - import numpy as np - import torch - --from aimet_torch import quantsim -+# from aimet_torch import quantsim - - import codes.options.options as option - #pylint:disable = consider-using-from-import -@@ -38,7 +38,7 @@ from codes.data import create_dataset, create_dataloader - from codes.models import create_model - - # import common util in AIMET examples folder --from aimet_zoo_torch.common.utils import utils -+# from aimet_zoo_torch.common.utils import utils - - - def evaluate_generator( -@@ -311,84 +311,35 @@ class ModelConfig: - """Adding hardcoded values into args from parseargs() and return config object""" - - def __init__(self, args): -- self.yml = "./test_SRGAN.yml" -+ self.yml = "./codes/options/test/test_SRGAN.yml" - self.quant_scheme = "tf_enhanced" - for arg in vars(args): - setattr(self, arg, getattr(args, arg)) -- -- -+ - def main(args): - """Evaluation main 
script""" - - # Adding hardcoded values to config on top of args - config = ModelConfig(args) - -- # Download pretrained weights from github repo - download_weights() - print("download complete!") - -- # Make options file from args -- setup_mmsr_configuration(config) - print("configuration complete!") - -- # parse the options file - print(f"Parsing file {config.yml}...") - opt = option.parse(config.yml, is_train=False) - opt = option.dict_to_nonedict(opt) - -- print("Loading test images...") -- test_loaders = [] -- for _, dataset_opt in sorted(opt["datasets"].items()): -- test_set = create_dataset(dataset_opt) -- test_loader = create_dataloader(test_set, dataset_opt) -- test_loaders.append(test_loader) -+ print("Loading test images...") # comment - -- device = utils.get_device(args) -- # device = torch.device('cuda' if torch.cuda.is_available() and config.use_cuda else 'cpu') -+ device = torch.device('cuda' if torch.cuda.is_available() and config.use_cuda else 'cpu') - - model = create_model(opt) - generator = model.netG.module -- -- for test_loader in test_loaders: -- test_set_name = test_loader.dataset.opt["name"] -- print(f"Testing on dataset {test_set_name}") -- psnr_vals, ssim_vals = evaluate_generator( -- generator, test_loader, opt, device=device -- ) -- psnr_val = np.mean(psnr_vals) -- ssim_val = np.mean(ssim_vals) -- print( -- f"Mean PSNR and SSIM for {test_set_name} on original model are: [{psnr_val}, {ssim_val}]" -- ) -- -- # The input shape is chosen arbitrarily to generate dummy input for -- # creating quantsim object -- input_shapes = (1, 3, 24, 24) -- # Initialize Quantized model -- dummy_input = torch.rand(input_shapes, device=device) -- kwargs = { -- "quant_scheme": config.quant_scheme, -- "default_param_bw": config.default_param_bw, -- "default_output_bw": config.default_output_bw, -- "dummy_input": dummy_input, -- "config_file": "./default_config.json", -- } -- sim = quantsim.QuantizationSimModel(generator, **kwargs) -- -- evaluate_func = partial(evaluate_generator, options=opt, device=device) -- sim.compute_encodings(evaluate_func, test_loaders[0]) -- -- for test_loader in test_loaders: -- test_set_name = test_loader.dataset.opt["name"] -- print(f"Testing on dataset {test_set_name}") -- psnr_vals, ssim_vals = evaluate_generator( -- sim.model, test_loader, opt, device=device, output_dir=config.output_dir -- ) -- psnr_val = np.mean(psnr_vals) -- ssim_val = np.mean(ssim_vals) -- print( -- f"Mean PSNR and SSIM for {test_set_name} on quantized model are: [{psnr_val}, {ssim_val}]" -- ) -+ generator.eval() -+ dummy_input = torch.randn(1,3, 128, 128).type(torch.FloatTensor).to('cpu') -+ torch.onnx.export(generator, dummy_input, "srgan.onnx",opset_version=11) - - - if __name__ == "__main__": diff --git a/models-for-solutions/01-super-resolution/quicksrnet_large/quicksrnet_large.ipynb b/models-for-solutions/01-super-resolution/quicksrnet_large/quicksrnet_large.ipynb deleted file mode 100644 index 0782dfef..00000000 --- a/models-for-solutions/01-super-resolution/quicksrnet_large/quicksrnet_large.ipynb +++ /dev/null @@ -1,780 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "12380c63", - "metadata": {}, - "source": [ - "# Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "855257d0", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - 
"os.environ['SNPE_ROOT']=\"/local/mnt/workspace/snpe/2.16.0.231029/\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"input/raw\"\n", - "os.environ['DLC32']=\"models/quicksrnet_large_fp32.dlc\"\n", - "os.environ['DLC8']=\"models/quicksrnet_large_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"input/input.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"quicksrnet_large\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b5d5fc12-dedd-41e8-95a6-78ca2ed037a1", - "metadata": {}, - "outputs": [], - "source": [ - "import cv2\n", - "import glob\n", - "import os\n", - "import numpy as np\n", - "import torch" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6009a977-402c-449f-8e52-2b507f291097", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('utils', exist_ok= True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fc897578-810c-44ee-a6ce-7e6d523580b4", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "git clone https://github.com/quic/aimet-model-zoo/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f228a936-da97-42eb-ae4c-ce34b3900de5", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp -r quicksrnet_large.patch aimet-model-zoo\n", - "cd aimet-model-zoo\n", - "git apply quicksrnet_large.patch" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c56b9333-c15f-4962-aa1e-860df9dd8cc5", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/common/super_resolution/ utils/\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/common/downloader.py utils/super_resolution/\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/quicksrnet/model/ utils/\n", - "rm -rf aimet-model-zoo/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "16d9f89f-54e9-435c-a07f-0bbbed790588", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cd utils\n", - "touch __init__.py" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dcfe259e-1b27-4c01-954a-808de13745d8", - "metadata": {}, - "outputs": [], - "source": [ - "from utils.model.model_definition import QuickSRNet\n", - "from utils.super_resolution.imresize import imresize" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2b55eb27-0ffb-488c-b3ca-a42dc4df9fcf", - "metadata": {}, - "outputs": [], - "source": [ - "model_fp32 = QuickSRNet(\"quicksrnet_large_4x_w8a8\",scaling_factor=4)\n", - "model_fp32.from_pretrained(quantized=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "29e440d2-2496-4fb3-a5d9-56638d54a108", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('models', exist_ok= True)\n", - "os.makedirs('output', exist_ok= True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "675c3487-d8ce-487e-a48f-b3ccb7939e41", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "dummy_input = torch.randn(1,3, 128, 128).type(torch.FloatTensor).to('cpu')\n", - "torch.onnx.export(model_fp32, dummy_input, \"./models/quicksrnet_large.onnx\",opset_version=11)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"id": "b07e763d-d689-42be-bdd8-4763679a09a7", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc --input_network models/quicksrnet_large.onnx --output_path models/quicksrnet_large_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "6c1a3a1d", - "metadata": {}, - "source": [ - "# Download dataset\n", - "
            \n", - "
          • Dataset link wget https://figshare.com/ndownloader/files/38256855
          • \n", - "
          • Below block will automatically download datsest, but in case if it fails please download from above link.
          • \n", - "
          • Recommended, to comment below code, if already downloaded dataset once.
          • \n", - "
              " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7940d5f2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "wget https://figshare.com/ndownloader/files/38256855\n", - "unzip 38256855 -d input\n", - "rm -rf 38256855\n", - "rm -rf input/Set14/image_SRF_4\n", - "rm -rf input/Set14/image_SRF_3\n", - "mkdir input/raw\n", - "find input/Set14/image_SRF_2 -name '*_LR*' -delete\n", - "mv input/Set14/image_SRF_2/* input/Set14/\n", - "rm -rf input/Set14/image_SRF_2/" - ] - }, - { - "cell_type": "markdown", - "id": "3762f717", - "metadata": {}, - "source": [ - "# Pre-processing data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "137cbe80-cf28-4a4d-b3f0-6d36fbfedfef", - "metadata": {}, - "outputs": [], - "source": [ - "img_paths = glob.glob(os.path.join(\"input/Set14/\", '*'))\n", - "img_paths = sorted(img_paths)\n", - "img_paths" - ] - }, - { - "cell_type": "markdown", - "id": "82b6adc3-9df0-46a7-a38c-c925e6275003", - "metadata": {}, - "source": [ - "## Preprocess dataset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0e019c37-fe81-4aaa-842c-a27698d8b29a", - "metadata": {}, - "outputs": [], - "source": [ - "RGB_WEIGHTS = torch.FloatTensor([65.481, 128.553, 24.966])\n", - "def preprocess(img, scaling_factor=2):\n", - " lr_img, hr_img = create_hr_lr_pair(img, scaling_factor)\n", - " return lr_img, hr_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b35873a5-55cb-477d-b5b9-ae5b359f514b", - "metadata": {}, - "outputs": [], - "source": [ - "def create_hr_lr_pair(img, scaling_factor=2):\n", - " height, width = img.shape[0:2]\n", - "\n", - " # Take the largest possible center-crop of it such that its dimensions are perfectly divisible by the scaling factor\n", - " x_remainder = width % (scaling_factor)\n", - " y_remainder = height % (scaling_factor)\n", - " left = x_remainder // 2\n", - " top = y_remainder // 2\n", - " right = left + (width - x_remainder)\n", - " bottom = top + (height - y_remainder)\n", - " hr_img = img[top:bottom, left:right]\n", - "\n", - " hr_height, hr_width = hr_img.shape[0:2]\n", - "\n", - " hr_img = np.array(hr_img, dtype='float32')\n", - " lr_img = imresize(hr_img, 1. 
/ scaling_factor) # equivalent to matlab's imresize\n", - " flag=0\n", - " lr_img = np.uint8(np.clip(lr_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " hr_img = np.uint8(hr_img)\n", - " lr_height, lr_width = lr_img.shape[0:2]\n", - "\n", - " # Sanity check\n", - " assert hr_width == lr_width * scaling_factor and hr_height == lr_height * scaling_factor\n", - " lr_img = convert_image(lr_img, source='array', target='[0, 1]')\n", - " hr_img = convert_image(hr_img, source='array', target='[0, 1]')\n", - "\n", - " return lr_img, hr_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "20431195-1874-4b87-bf3d-54e7f376a4af", - "metadata": {}, - "outputs": [], - "source": [ - "def convert_image(img, source, target):\n", - " if source == 'array':\n", - " img = torch.from_numpy(img.transpose((2, 0, 1))).contiguous()#chw\n", - " img = img.to(dtype=torch.float32).div(255)\n", - " elif source == '[0, 1]':\n", - " img = torch.clamp(img, 0, 1) # useful to post-process output of models that can overspill\n", - " \n", - " if target == '[0, 1]':\n", - " pass # already in [0, 1]\n", - " elif target == 'y-channel':\n", - " img = torch.matmul(img.permute(0, 2, 3, 1), RGB_WEIGHTS.to(img.device)) + 16.\n", - " \n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c275e2de-194e-4682-bcaf-984467126d3a", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process(img):\n", - " img = img.detach().cpu().numpy()\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dfd18eaf-123d-469d-8e86-8e24a42bb795", - "metadata": {}, - "outputs": [], - "source": [ - "def load_dataset(test_images_dir, scaling_factor=2):\n", - " # Input images for the model\n", - " INPUTS_LR = []\n", - "\n", - " # Post-processed images for visualization\n", - " IMAGES_LR = []\n", - " IMAGES_HR = []\n", - " \n", - " # Load the test images\n", - " count=0\n", - " img_paths = glob.glob(os.path.join(test_images_dir, '*'))\n", - " img_paths = sorted(img_paths)\n", - " for img_path in img_paths:\n", - " img = cv2.resize(cv2.imread(img_path),[512,512],interpolation=cv2.INTER_CUBIC)\n", - " img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n", - " lr_img, hr_img = preprocess(img, scaling_factor)#chw\n", - " INPUTS_LR.append(lr_img)#chw\n", - " IMAGES_LR.append(post_process(lr_img))#hwc\n", - " IMAGES_HR.append(post_process(hr_img))#hwc\n", - "\n", - " return INPUTS_LR, IMAGES_LR, IMAGES_HR" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17ec75e4", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"input/Set14\"\n", - "INPUTS_LR, IMAGES_LR, IMAGES_HR = load_dataset(test_images_dir, scaling_factor=4)\n", - "for i, img_lr in enumerate(INPUTS_LR):\n", - " img_lr = img_lr.cpu().detach().numpy()\n", - " img_lr = img_lr.astype(np.float32)\n", - " fid = open(\"input/raw/img\"+str(i)+ \".raw\", 'wb')\n", - " img_lr.tofile(fid)\n", - " fid.close()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "37bd17b7-3a3f-42a0-a662-8df8a13f544e", - "metadata": {}, - "outputs": [], - "source": [ - "INPUTS_LR[0].shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b1db2108-45f9-4047-b2cc-f68bd9b6febc", - "metadata": {}, - "outputs": [], - "source": [ - "with open(\"input/input.txt\", \"w\") as f:\n", - " for i in range(14):\n", - " file_path = 
f\"./raw/img{i}.raw\"\n", - " f.write(file_path + \"\\n\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3b9d8c9c-e18d-4d20-a868-d55f267fe4fd", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input\n", - "snpe-dlc-quantize --input_dlc ../models/quicksrnet_large_fp32.dlc --input_list input.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc ../models/quicksrnet_large_w8a8.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "f4921976", - "metadata": {}, - "source": [ - "# Post-process model output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7fa25133", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process_sr(img):\n", - "# img = img.detach().cpu().numpy()\n", - " img = np.fromfile(img, np.float32)\n", - " img = img.reshape((3,512, 512)).astype(np.float32)\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "markdown", - "id": "0b94790c", - "metadata": {}, - "source": [ - "# Method to calcualte PSNR" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d4103113", - "metadata": {}, - "outputs": [], - "source": [ - "def compute_psnr(img_pred, img_true, data_range=255., eps=1e-8):\n", - " \"\"\"\n", - " Compute PSNR between super-resolved and original images.\n", - " \n", - " :param img_pred:\n", - " The super-resolved image obtained from the model\n", - " :param img_true:\n", - " The original high-res image\n", - " :param data_range:\n", - " Default = 255\n", - " :param eps:\n", - " Default = 1e-8\n", - " :return:\n", - " PSNR value\n", - " \"\"\"\n", - " err = (img_pred - img_true) ** 2\n", - " err = err.mean(dim=-1).mean(dim=-1)\n", - "\n", - " return 10. 
* torch.log10((data_range ** 2) / (err + eps))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e230a6fe", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_psnr(y_pred, y_true):\n", - " \"\"\"\n", - " Evaluate individual PSNR metric for each super-res and actual high-res image-pair.\n", - " \n", - " :param y_pred:\n", - " The super-resolved image from the model\n", - " :param y_true:\n", - " The original high-res image\n", - " :return:\n", - " The evaluated PSNR metric for the image-pair\n", - " \"\"\"\n", - " y_pred = y_pred.transpose(2, 0, 1)[None] / 255.\n", - " y_true = y_true.transpose(2, 0, 1)[None] / 255.\n", - "\n", - " sr_img = convert_image(torch.FloatTensor(y_pred),\n", - " source='[0, 1]',\n", - " target='y-channel')\n", - " hr_img = convert_image(torch.FloatTensor(y_true),\n", - " source='[0, 1]',\n", - " target='y-channel')\n", - "\n", - " return compute_psnr(sr_img, hr_img)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b389e852", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_average_psnr(sr_images, hr_images):\n", - " \"\"\"\n", - " Evaluate the avg PSNR metric for all test-set super-res and high-res images.\n", - "\n", - " :param sr_images:\n", - " The list of super-resolved images obtained from the model for the given test-images\n", - " :param hr_images:\n", - " The list of original high-res test-images\n", - " :return:\n", - " Average PSNR metric for all super-resolved and high-res test-set image-pairs\n", - " \"\"\"\n", - " psnr = []\n", - " for sr_img, hr_img in zip(sr_images, hr_images):\n", - " psnr.append(evaluate_psnr(sr_img, hr_img))\n", - "\n", - " \n", - "\n", - " # Convert the list of tensor values to a tensor array\n", - " psnr_tensor = torch.cat(psnr)\n", - "\n", - " \n", - "\n", - " # Calculate the mean of the tensor array\n", - " average_psnr = torch.mean(psnr_tensor)\n", - "\n", - " return average_psnr" - ] - }, - { - "cell_type": "markdown", - "id": "63a1687e", - "metadata": {}, - "source": [ - "# Creating Bin and Lib Folder on Device " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a8c2b9ce", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "ea80b847", - "metadata": {}, - "source": [ - "# Pushing all Lib and Bin files onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed6634e3", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "markdown", - "id": "5bf854eb", - "metadata": {}, - "source": [ - "# Pushing Artifacts on to Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"id": "088a8923", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7d34aeac", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "#find ./raw -name *.raw > list.txt\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "1efd9588", - "metadata": {}, - "source": [ - "# Inferencing 8 bit DLC on DSP Runtime\n", - "Give name of DLC in OUTPUT_DLC_QUANTIZED8 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "484a6e4c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=quicksrnet_large_w8a8.dlc\n", - "export ONDEVICE_FOLDER=\"quicksrnet_large\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list input.txt --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "c93ea4c5", - "metadata": {}, - "source": [ - "# Inferencing 32b DLC on CPU Runtime\n", - "Give name of DLC in OUTPUT_DLC_32 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "83c3a198", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=quicksrnet_large_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"quicksrnet_large\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list input.txt --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "ec8c1af8", - "metadata": {}, - "source": [ - "# Pulling Output folder generated on different precision and cores" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3bf075ed-ae3f-400d-97b8-e75acc863a20", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "rm -rf output/OUTPUT_8b_DSP\n", - "rm -rf output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3015b52c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP output/OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "8b3b27e6", 
- "metadata": {}, - "source": [ - "# Calculate PSNR\n", - "* Pass path of two raw image in Argument." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22c037ca-46aa-4dc8-95ca-e14b69d94ae5", - "metadata": {}, - "outputs": [], - "source": [ - "val = []\n", - "for i in range(10):\n", - " val.append(IMAGES_HR[i])\n", - "val[0].shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e7cea900", - "metadata": {}, - "outputs": [], - "source": [ - "folder = [\"output/OUTPUT_8b_DSP/\",\"output/OUTPUT_32b_CPU\"]\n", - "RGB_WEIGHTS = torch.FloatTensor([65.481, 128.553, 24.966])\n", - "for j in range(0,len(folder)):\n", - " IMAGES_SR = []\n", - " for i in range(0,10):\n", - " IMAGES_SR.append(post_process_sr(folder[j]+\"/Result_\"+str(i)+\"/83.raw\"))\n", - " print(folder[j],\" (Average PSNR) :: \",evaluate_average_psnr(IMAGES_SR, IMAGES_HR))\n", - " print(\"\\n============================\\n\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dad3b82e", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "plt.imshow(IMAGES_SR[5])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d7bc9d09-8de6-443e-b99f-1e10c1d00686", - "metadata": {}, - "outputs": [], - "source": [ - "plt.imshow(IMAGES_HR[5])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8d50c962-41c0-400e-bda6-32e55652a0e0", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/01-super-resolution/quicksrnet_large/quicksrnet_large.patch b/models-for-solutions/01-super-resolution/quicksrnet_large/quicksrnet_large.patch deleted file mode 100644 index b2c71668..00000000 --- a/models-for-solutions/01-super-resolution/quicksrnet_large/quicksrnet_large.patch +++ /dev/null @@ -1,61 +0,0 @@ -diff --git a/aimet_zoo_torch/quicksrnet/model/model_definition.py b/aimet_zoo_torch/quicksrnet/model/model_definition.py -index 172a493..f514056 100644 ---- a/aimet_zoo_torch/quicksrnet/model/model_definition.py -+++ b/aimet_zoo_torch/quicksrnet/model/model_definition.py -@@ -15,9 +15,8 @@ - import json - import os - import torch --from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim --from aimet_zoo_torch.common.downloader import Downloader --from aimet_zoo_torch.quicksrnet.model.models import QuickSRNetBase -+from utils.super_resolution.downloader import Downloader -+from utils.super_resolution.models import QuickSRNetBase - - - class QuickSRNet(QuickSRNetBase, Downloader): -@@ -95,42 +94,8 @@ class QuickSRNet(QuickSRNetBase, Downloader): - self.load_state_dict(state_dict) - self.cuda() - else: -- state_dict = torch.load(self.path_pre_opt_weights)["state_dict"] -+ state_dict = torch.load(self.path_pre_opt_weights, map_location = torch.device('cpu'))["state_dict"] - self.load_state_dict(state_dict) -- self.cuda() -+ #self.cuda() - self.eval() - -- def get_quantsim(self, quantized=False): -- """get quantsim object with pre-loaded encodings""" -- if not self.cfg: -- raise NotImplementedError( -- "There is no Quantization Simulation 
available for the model_config passed" -- ) -- if quantized: -- self.from_pretrained(quantized=True) -- else: -- self.from_pretrained(quantized=False) -- device = torch.device("cuda") -- dummy_input = torch.rand(self.input_shape, device=device) -- kwargs = { -- "quant_scheme": self.cfg["optimization_config"][ -- "quantization_configuration" -- ]["quant_scheme"], -- "default_param_bw": self.cfg["optimization_config"][ -- "quantization_configuration" -- ]["param_bw"], -- "default_output_bw": self.cfg["optimization_config"][ -- "quantization_configuration" -- ]["output_bw"], -- "config_file": self.path_aimet_config, -- "dummy_input": dummy_input, -- } -- sim = QuantizationSimModel(self, **kwargs) -- if self.path_aimet_encodings and quantized: -- load_encodings_to_sim(sim, self.path_aimet_encodings) -- print("load_encodings_to_sim finished!") -- if self.path_adaround_encodings and quantized: -- sim.set_and_freeze_param_encodings(self.path_adaround_encodings) -- print("set_and_freeze_param_encodings finished!") -- sim.model.eval() -- return sim diff --git a/models-for-solutions/01-super-resolution/quicksrnet_large/readme.md b/models-for-solutions/01-super-resolution/quicksrnet_large/readme.md deleted file mode 100644 index 726995db..00000000 --- a/models-for-solutions/01-super-resolution/quicksrnet_large/readme.md +++ /dev/null @@ -1,47 +0,0 @@ -# Image Super Resolution - quicksrnet_large - -| Field | Description | -| --- | --- | -| Model Name | quicksrnet_large | -| DNN Framwork | ONNX | -| Public Repo | https://github.com/quic/aimet-model-zoo/ | -| Paper | https://arxiv.org/abs/2303.04336 | -| Accuracy Metric | PSNR | -| Input Resolution | 128 x 128 | -| Output Resolution | 512 x 512 | -| Pre-Processing | Resize, Normalize on tensor input; unsqueeze and transpose | -| Post-Processing | reshape, transpose, max prediction value, decoding depending on dataset, image from array | - - -## Pre-requisites - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - -- Please follow the instructions for setting up Qualcomm Neural Processing SDK using the [link] (https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) provided. -- Install onnx v1.6.0. Installation instruction can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) - -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. - -- Install OpenCV ```pip install cv2``` - -- Install mxnet ```pip install mxnet``` - -## Pre-Trained Model - -Please refer to notebook for detailed steps to prepare Pre-Trained Model - -## Convert Model to DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Quantization of DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Make Inference, Verify output. - -Please refer to notebook for detailed steps to making inference, verifying model output - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. 
and/or its subsidiaries.* diff --git a/models-for-solutions/01-super-resolution/quicksrnet_medium/quicksrnet_medium.ipynb b/models-for-solutions/01-super-resolution/quicksrnet_medium/quicksrnet_medium.ipynb deleted file mode 100644 index 32135a12..00000000 --- a/models-for-solutions/01-super-resolution/quicksrnet_medium/quicksrnet_medium.ipynb +++ /dev/null @@ -1,772 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "12380c63", - "metadata": {}, - "source": [ - "# Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9bf741e0-aaa8-4ebd-9ccb-e8c6d86c3532", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/snpe/2.16.0.231029/\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"input/raw\"\n", - "os.environ['DLC32']=\"models/quicksrnet_medium_fp32.dlc\"\n", - "os.environ['DLC8']=\"models/quicksrnet_medium_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"input/input.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"quicksrnet_medium\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "855257d0", - "metadata": {}, - "outputs": [], - "source": [ - "import cv2\n", - "import glob\n", - "import os\n", - "import numpy as np\n", - "import torch" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6009a977-402c-449f-8e52-2b507f291097", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('utils', exist_ok= True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fc897578-810c-44ee-a6ce-7e6d523580b4", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "git clone https://github.com/quic/aimet-model-zoo/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "05282739-a373-48a8-b063-419e06f84d6a", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp -r quicksrnet_medium.patch aimet-model-zoo\n", - "cd aimet-model-zoo\n", - "git apply quicksrnet_medium.patch" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c56b9333-c15f-4962-aa1e-860df9dd8cc5", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/common/super_resolution/ utils/\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/common/downloader.py utils/super_resolution/\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/quicksrnet/model/ utils/\n", - "rm -rf aimet-model-zoo/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ee319200-942c-4836-a20d-5182d4e6fabd", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cd utils\n", - "touch __init__.py" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dcfe259e-1b27-4c01-954a-808de13745d8", - "metadata": {}, - "outputs": [], - "source": [ - "from utils.model.model_definition import QuickSRNet\n", - "from utils.super_resolution.imresize import imresize" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2b55eb27-0ffb-488c-b3ca-a42dc4df9fcf", - "metadata": {}, - "outputs": [], - "source": [ - "model_fp32 = QuickSRNet(\"quicksrnet_medium_4x_w8a8\",scaling_factor=4)\n", - 
"model_fp32.from_pretrained(quantized=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "29e440d2-2496-4fb3-a5d9-56638d54a108", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('models', exist_ok= True)\n", - "os.makedirs('output', exist_ok= True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "675c3487-d8ce-487e-a48f-b3ccb7939e41", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "dummy_input = torch.randn(1,3, 128, 128).type(torch.FloatTensor).to('cpu')\n", - "torch.onnx.export(model_fp32, dummy_input, \"./models/quicksrnet_medium.onnx\",opset_version=11)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b07e763d-d689-42be-bdd8-4763679a09a7", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc --input_network models/quicksrnet_medium.onnx --output_path models/quicksrnet_medium_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "6c1a3a1d", - "metadata": {}, - "source": [ - "# Download dataset\n", - "
                \n", - "
              • Dataset link wget https://figshare.com/ndownloader/files/38256855
              • \n", - "
              • Below block will automatically download datsest, but in case if it fails please download from above link.
              • \n", - "
              • Recommended, to comment below code, if already downloaded dataset once.
              • \n", - "
                  " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7940d5f2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "wget https://figshare.com/ndownloader/files/38256855\n", - "unzip 38256855 -d input\n", - "rm -rf 38256855\n", - "rm -rf input/Set14/image_SRF_4\n", - "rm -rf input/Set14/image_SRF_3\n", - "mkdir input/raw\n", - "find input/Set14/image_SRF_2 -name '*_LR*' -delete\n", - "mv input/Set14/image_SRF_2/* input/Set14/\n", - "rm -rf input/Set14/image_SRF_2/" - ] - }, - { - "cell_type": "markdown", - "id": "3762f717", - "metadata": {}, - "source": [ - "# Pre-processing data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "137cbe80-cf28-4a4d-b3f0-6d36fbfedfef", - "metadata": {}, - "outputs": [], - "source": [ - "img_paths = glob.glob(os.path.join(\"input/Set14/\", '*'))\n", - "img_paths = sorted(img_paths)\n", - "img_paths" - ] - }, - { - "cell_type": "markdown", - "id": "82b6adc3-9df0-46a7-a38c-c925e6275003", - "metadata": {}, - "source": [ - "## Preprocess dataset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0e019c37-fe81-4aaa-842c-a27698d8b29a", - "metadata": {}, - "outputs": [], - "source": [ - "RGB_WEIGHTS = torch.FloatTensor([65.481, 128.553, 24.966])\n", - "def preprocess(img, scaling_factor=2):\n", - " lr_img, hr_img = create_hr_lr_pair(img, scaling_factor)\n", - " return lr_img, hr_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b35873a5-55cb-477d-b5b9-ae5b359f514b", - "metadata": {}, - "outputs": [], - "source": [ - "def create_hr_lr_pair(img, scaling_factor=2):\n", - " height, width = img.shape[0:2]\n", - "\n", - " # Take the largest possible center-crop of it such that its dimensions are perfectly divisible by the scaling factor\n", - " x_remainder = width % (scaling_factor)\n", - " y_remainder = height % (scaling_factor)\n", - " left = x_remainder // 2\n", - " top = y_remainder // 2\n", - " right = left + (width - x_remainder)\n", - " bottom = top + (height - y_remainder)\n", - " hr_img = img[top:bottom, left:right]\n", - "\n", - " hr_height, hr_width = hr_img.shape[0:2]\n", - "\n", - " hr_img = np.array(hr_img, dtype='float32')\n", - " lr_img = imresize(hr_img, 1. 
/ scaling_factor) # equivalent to matlab's imresize\n", - " flag=0\n", - " lr_img = np.uint8(np.clip(lr_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " hr_img = np.uint8(hr_img)\n", - " lr_height, lr_width = lr_img.shape[0:2]\n", - "\n", - " # Sanity check\n", - " assert hr_width == lr_width * scaling_factor and hr_height == lr_height * scaling_factor\n", - " lr_img = convert_image(lr_img, source='array', target='[0, 1]')\n", - " hr_img = convert_image(hr_img, source='array', target='[0, 1]')\n", - "\n", - " return lr_img, hr_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "20431195-1874-4b87-bf3d-54e7f376a4af", - "metadata": {}, - "outputs": [], - "source": [ - "def convert_image(img, source, target):\n", - " if source == 'array':\n", - " img = torch.from_numpy(img.transpose((2, 0, 1))).contiguous()#chw\n", - " img = img.to(dtype=torch.float32).div(255)\n", - " elif source == '[0, 1]':\n", - " img = torch.clamp(img, 0, 1) # useful to post-process output of models that can overspill\n", - " \n", - " if target == '[0, 1]':\n", - " pass # already in [0, 1]\n", - " elif target == 'y-channel':\n", - " img = torch.matmul(img.permute(0, 2, 3, 1), RGB_WEIGHTS.to(img.device)) + 16.\n", - " \n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c275e2de-194e-4682-bcaf-984467126d3a", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process(img):\n", - " img = img.detach().cpu().numpy()\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dfd18eaf-123d-469d-8e86-8e24a42bb795", - "metadata": {}, - "outputs": [], - "source": [ - "def load_dataset(test_images_dir, scaling_factor=2):\n", - " # Input images for the model\n", - " INPUTS_LR = []\n", - "\n", - " # Post-processed images for visualization\n", - " IMAGES_LR = []\n", - " IMAGES_HR = []\n", - " \n", - " # Load the test images\n", - " count=0\n", - " img_paths = glob.glob(os.path.join(test_images_dir, '*'))\n", - " img_paths = sorted(img_paths)\n", - " for img_path in img_paths:\n", - " img = cv2.resize(cv2.imread(img_path),[512,512],interpolation=cv2.INTER_CUBIC)\n", - " img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n", - " lr_img, hr_img = preprocess(img, scaling_factor)#chw\n", - " INPUTS_LR.append(lr_img)#chw\n", - " IMAGES_LR.append(post_process(lr_img))#hwc\n", - " IMAGES_HR.append(post_process(hr_img))#hwc\n", - "\n", - " return INPUTS_LR, IMAGES_LR, IMAGES_HR" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17ec75e4", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"input/Set14\"\n", - "INPUTS_LR, IMAGES_LR, IMAGES_HR = load_dataset(test_images_dir, scaling_factor=4)\n", - "for i, img_lr in enumerate(INPUTS_LR):\n", - " img_lr = img_lr.cpu().detach().numpy()\n", - " img_lr = img_lr.astype(np.float32)\n", - " fid = open(\"input/raw/img\"+str(i)+ \".raw\", 'wb')\n", - " img_lr.tofile(fid)\n", - " fid.close()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "37bd17b7-3a3f-42a0-a662-8df8a13f544e", - "metadata": {}, - "outputs": [], - "source": [ - "INPUTS_LR[0].shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b1db2108-45f9-4047-b2cc-f68bd9b6febc", - "metadata": {}, - "outputs": [], - "source": [ - "with open(\"input/input.txt\", \"w\") as f:\n", - " for i in range(14):\n", - " file_path = 
f\"./raw/img{i}.raw\"\n", - " f.write(file_path + \"\\n\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3b9d8c9c-e18d-4d20-a868-d55f267fe4fd", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input\n", - "snpe-dlc-quantize --input_dlc ../models/quicksrnet_medium_fp32.dlc --input_list input.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc ../models/quicksrnet_medium_w8a8.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "f4921976", - "metadata": {}, - "source": [ - "# Post-process model output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7fa25133", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process_sr(img):\n", - "# img = img.detach().cpu().numpy()\n", - " img = np.fromfile(img, np.float32)\n", - " img = img.reshape((3,512, 512)).astype(np.float32)\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "markdown", - "id": "0b94790c", - "metadata": {}, - "source": [ - "# Method to calcualte PSNR" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d4103113", - "metadata": {}, - "outputs": [], - "source": [ - "def compute_psnr(img_pred, img_true, data_range=255., eps=1e-8):\n", - " \"\"\"\n", - " Compute PSNR between super-resolved and original images.\n", - " \n", - " :param img_pred:\n", - " The super-resolved image obtained from the model\n", - " :param img_true:\n", - " The original high-res image\n", - " :param data_range:\n", - " Default = 255\n", - " :param eps:\n", - " Default = 1e-8\n", - " :return:\n", - " PSNR value\n", - " \"\"\"\n", - " err = (img_pred - img_true) ** 2\n", - " err = err.mean(dim=-1).mean(dim=-1)\n", - "\n", - " return 10. 
* torch.log10((data_range ** 2) / (err + eps))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e230a6fe", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_psnr(y_pred, y_true):\n", - " \"\"\"\n", - " Evaluate individual PSNR metric for each super-res and actual high-res image-pair.\n", - " \n", - " :param y_pred:\n", - " The super-resolved image from the model\n", - " :param y_true:\n", - " The original high-res image\n", - " :return:\n", - " The evaluated PSNR metric for the image-pair\n", - " \"\"\"\n", - " y_pred = y_pred.transpose(2, 0, 1)[None] / 255.\n", - " y_true = y_true.transpose(2, 0, 1)[None] / 255.\n", - "\n", - " sr_img = convert_image(torch.FloatTensor(y_pred),\n", - " source='[0, 1]',\n", - " target='y-channel')\n", - " hr_img = convert_image(torch.FloatTensor(y_true),\n", - " source='[0, 1]',\n", - " target='y-channel')\n", - "\n", - " return compute_psnr(sr_img, hr_img)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b389e852", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_average_psnr(sr_images, hr_images):\n", - " \"\"\"\n", - " Evaluate the avg PSNR metric for all test-set super-res and high-res images.\n", - "\n", - " :param sr_images:\n", - " The list of super-resolved images obtained from the model for the given test-images\n", - " :param hr_images:\n", - " The list of original high-res test-images\n", - " :return:\n", - " Average PSNR metric for all super-resolved and high-res test-set image-pairs\n", - " \"\"\"\n", - " psnr = []\n", - " for sr_img, hr_img in zip(sr_images, hr_images):\n", - " psnr.append(evaluate_psnr(sr_img, hr_img))\n", - " # Convert the list of tensor values to a tensor array\n", - " psnr_tensor = torch.cat(psnr)\n", - " # Calculate the mean of the tensor array\n", - " average_psnr = torch.mean(psnr_tensor)\n", - " return average_psnr" - ] - }, - { - "cell_type": "markdown", - "id": "63a1687e", - "metadata": {}, - "source": [ - "# Creating Bin and Lib Folder on Device " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a8c2b9ce", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "ea80b847", - "metadata": {}, - "source": [ - "# Pushing all Lib and Bin files onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed6634e3", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "markdown", - "id": "5bf854eb", - "metadata": {}, - "source": [ - "# Pushing Artifacts on to Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "088a8923", - "metadata": {}, - "outputs": [], - 
"source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7d34aeac", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "#find ./raw -name *.raw > list.txt\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "1efd9588", - "metadata": {}, - "source": [ - "# Inferencing 8 bit DLC on DSP Runtime\n", - "Give name of DLC in OUTPUT_DLC_QUANTIZED8 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "484a6e4c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=quicksrnet_medium_w8a8.dlc\n", - "export ONDEVICE_FOLDER=\"quicksrnet_medium\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list input.txt --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "c93ea4c5", - "metadata": {}, - "source": [ - "# Inferencing 32b DLC on CPU Runtime\n", - "Give name of DLC in OUTPUT_DLC_32 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "83c3a198", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=quicksrnet_medium_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"quicksrnet_medium\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list input.txt --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "ec8c1af8", - "metadata": {}, - "source": [ - "# Pulling Output folder generated on different precision and cores" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3bf075ed-ae3f-400d-97b8-e75acc863a20", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "rm -rf output/OUTPUT_8b_DSP\n", - "rm -rf output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3015b52c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP output/OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "8b3b27e6", - "metadata": {}, - "source": [ - "# Calculate 
PSNR\n", - "* Pass path of two raw image in Argument." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22c037ca-46aa-4dc8-95ca-e14b69d94ae5", - "metadata": {}, - "outputs": [], - "source": [ - "val = []\n", - "for i in range(10):\n", - " val.append(IMAGES_HR[i])\n", - "val[0].shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ff341bf7-5590-4a0e-9df6-cfadb807179d", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e7cea900", - "metadata": {}, - "outputs": [], - "source": [ - "folder = [\"output/OUTPUT_8b_DSP/\",\"output/OUTPUT_32b_CPU\"]\n", - "RGB_WEIGHTS = torch.FloatTensor([65.481, 128.553, 24.966])\n", - "for j in range(0,len(folder)):\n", - " IMAGES_SR = []\n", - " for i in range(0,10):\n", - " IMAGES_SR.append(post_process_sr(folder[j]+\"/Result_\"+str(i)+\"/43.raw\"))\n", - " print(folder[j],\" (Average PSNR) :: \",evaluate_average_psnr(IMAGES_SR, IMAGES_HR))\n", - " print(\"\\n============================\\n\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dad3b82e", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "plt.imshow(IMAGES_SR[5])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d7bc9d09-8de6-443e-b99f-1e10c1d00686", - "metadata": {}, - "outputs": [], - "source": [ - "plt.imshow(IMAGES_HR[5])" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/01-super-resolution/quicksrnet_medium/quicksrnet_medium.patch b/models-for-solutions/01-super-resolution/quicksrnet_medium/quicksrnet_medium.patch deleted file mode 100644 index 873b5a74..00000000 --- a/models-for-solutions/01-super-resolution/quicksrnet_medium/quicksrnet_medium.patch +++ /dev/null @@ -1,61 +0,0 @@ -diff --git a/aimet_zoo_torch/quicksrnet/model/model_definition.py b/aimet_zoo_torch/quicksrnet/model/model_definition.py -index 172a493..a1645b1 100644 ---- a/aimet_zoo_torch/quicksrnet/model/model_definition.py -+++ b/aimet_zoo_torch/quicksrnet/model/model_definition.py -@@ -15,9 +15,8 @@ - import json - import os - import torch --from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim --from aimet_zoo_torch.common.downloader import Downloader --from aimet_zoo_torch.quicksrnet.model.models import QuickSRNetBase -+from utils.super_resolution.downloader import Downloader -+from utils.super_resolution.models import QuickSRNetBase - - - class QuickSRNet(QuickSRNetBase, Downloader): -@@ -95,42 +94,7 @@ class QuickSRNet(QuickSRNetBase, Downloader): - self.load_state_dict(state_dict) - self.cuda() - else: -- state_dict = torch.load(self.path_pre_opt_weights)["state_dict"] -+ state_dict = torch.load(self.path_pre_opt_weights, map_location = torch.device('cpu'))["state_dict"] - self.load_state_dict(state_dict) -- self.cuda() -+ #self.cuda() - self.eval() -- -- def get_quantsim(self, quantized=False): -- """get quantsim object with pre-loaded encodings""" -- if not self.cfg: -- raise NotImplementedError( -- "There is no Quantization Simulation available for the model_config passed" -- ) 
-- if quantized: -- self.from_pretrained(quantized=True) -- else: -- self.from_pretrained(quantized=False) -- device = torch.device("cuda") -- dummy_input = torch.rand(self.input_shape, device=device) -- kwargs = { -- "quant_scheme": self.cfg["optimization_config"][ -- "quantization_configuration" -- ]["quant_scheme"], -- "default_param_bw": self.cfg["optimization_config"][ -- "quantization_configuration" -- ]["param_bw"], -- "default_output_bw": self.cfg["optimization_config"][ -- "quantization_configuration" -- ]["output_bw"], -- "config_file": self.path_aimet_config, -- "dummy_input": dummy_input, -- } -- sim = QuantizationSimModel(self, **kwargs) -- if self.path_aimet_encodings and quantized: -- load_encodings_to_sim(sim, self.path_aimet_encodings) -- print("load_encodings_to_sim finished!") -- if self.path_adaround_encodings and quantized: -- sim.set_and_freeze_param_encodings(self.path_adaround_encodings) -- print("set_and_freeze_param_encodings finished!") -- sim.model.eval() -- return sim diff --git a/models-for-solutions/01-super-resolution/quicksrnet_medium/readme.md b/models-for-solutions/01-super-resolution/quicksrnet_medium/readme.md deleted file mode 100644 index 4370f5f0..00000000 --- a/models-for-solutions/01-super-resolution/quicksrnet_medium/readme.md +++ /dev/null @@ -1,47 +0,0 @@ -# Image Super Resolution - quicksrnet_medium - -| Field | Description | -| --- | --- | -| Model Name | quicksrnet_medium | -| DNN Framwork | ONNX | -| Public Repo | https://github.com/quic/aimet-model-zoo/ | -| Paper | https://arxiv.org/abs/2303.04336 | -| Accuracy Metric | PSNR | -| Input Resolution | 128 x 128 | -| Output Resolution | 512 x 512 | -| Pre-Processing | Resize, Normalize on tensor input; unsqueeze and transpose | -| Post-Processing | reshape, transpose, max prediction value, decoding depending on dataset, image from array | - - -## Pre-requisites - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - -- Please follow the instructions for setting up Qualcomm Neural Processing SDK using the [link] (https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) provided. -- Install onnx v1.6.0. Installation instruction can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) - -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. - -- Install OpenCV ```pip install cv2``` - -- Install mxnet ```pip install mxnet``` - -## Pre-Trained Model - -Please refer to notebook for detailed steps to prepare Pre-Trained Model - -## Convert Model to DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Quantization of DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Make Inference, Verify output. - -Please refer to notebook for detailed steps to making inference, verifying model output - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. 
and/or its subsidiaries.* diff --git a/models-for-solutions/01-super-resolution/quicksrnet_small/quicksrnet_small.ipynb b/models-for-solutions/01-super-resolution/quicksrnet_small/quicksrnet_small.ipynb deleted file mode 100644 index 94ca069d..00000000 --- a/models-for-solutions/01-super-resolution/quicksrnet_small/quicksrnet_small.ipynb +++ /dev/null @@ -1,803 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "12380c63", - "metadata": {}, - "source": [ - "# Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3c652258-414b-459f-ab47-0250b2f37864", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/snpe/2.16.0.231029/\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"input/raw\"\n", - "os.environ['DLC32']=\"models/quicksrnet_small_fp32.dlc\"\n", - "os.environ['DLC8']=\"models/quicksrnet_small_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"input/input.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"quicksrnet_small\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "855257d0", - "metadata": {}, - "outputs": [], - "source": [ - "import cv2\n", - "import glob\n", - "import os\n", - "import numpy as np\n", - "import torch" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6009a977-402c-449f-8e52-2b507f291097", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('utils', exist_ok= True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fc897578-810c-44ee-a6ce-7e6d523580b4", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "git clone https://github.com/quic/aimet-model-zoo/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ee46ccb6-f50b-4db8-ac77-50130f99b0d9", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp -r quicksrnet_small.patch aimet-model-zoo\n", - "cd aimet-model-zoo\n", - "git apply quicksrnet_small.patch" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c56b9333-c15f-4962-aa1e-860df9dd8cc5", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/common/super_resolution/ utils/\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/common/downloader.py utils/super_resolution/\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/quicksrnet/model/ utils/\n", - "rm -rf aimet-model-zoo/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c53f1e20-b966-43f2-9a4a-43c22e4e042c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cd utils\n", - "touch __init__.py" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dcfe259e-1b27-4c01-954a-808de13745d8", - "metadata": {}, - "outputs": [], - "source": [ - "from utils.model.model_definition import QuickSRNet\n", - "from utils.super_resolution.imresize import imresize" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2b55eb27-0ffb-488c-b3ca-a42dc4df9fcf", - "metadata": {}, - "outputs": [], - "source": [ - "model_fp32 = QuickSRNet(\"quicksrnet_small_4x_w8a8\",scaling_factor=4)\n", - "model_fp32.from_pretrained(quantized=False)" - ] - }, - 
{ - "cell_type": "code", - "execution_count": null, - "id": "29e440d2-2496-4fb3-a5d9-56638d54a108", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('models', exist_ok= True)\n", - "os.makedirs('output', exist_ok= True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "675c3487-d8ce-487e-a48f-b3ccb7939e41", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "dummy_input = torch.randn(1,3, 128, 128).type(torch.FloatTensor).to('cpu')\n", - "torch.onnx.export(model_fp32, dummy_input, \"./models/quicksrnet_small.onnx\",opset_version=11)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b07e763d-d689-42be-bdd8-4763679a09a7", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc --input_network models/quicksrnet_small.onnx --output_path models/quicksrnet_small_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "6c1a3a1d", - "metadata": {}, - "source": [ - "# Download dataset\n", - "
                    \n", - "
- Dataset link: https://figshare.com/ndownloader/files/38256855\n", - "- The block below will automatically download the dataset; if it fails, please download it manually from the link above.\n", - "- It is recommended to comment out the block below if the dataset has already been downloaded once.
                      " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7940d5f2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "wget https://figshare.com/ndownloader/files/38256855\n", - "unzip 38256855 -d input\n", - "rm -rf 38256855\n", - "rm -rf input/Set14/image_SRF_4\n", - "rm -rf input/Set14/image_SRF_3\n", - "mkdir input/raw\n", - "find input/Set14/image_SRF_2 -name '*_LR*' -delete\n", - "mv input/Set14/image_SRF_2/* input/Set14/\n", - "rm -rf input/Set14/image_SRF_2/" - ] - }, - { - "cell_type": "markdown", - "id": "3762f717", - "metadata": {}, - "source": [ - "# Pre-processing data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "137cbe80-cf28-4a4d-b3f0-6d36fbfedfef", - "metadata": {}, - "outputs": [], - "source": [ - "img_paths = glob.glob(os.path.join(\"input/Set14/\", '*'))\n", - "img_paths = sorted(img_paths)\n", - "img_paths" - ] - }, - { - "cell_type": "markdown", - "id": "82b6adc3-9df0-46a7-a38c-c925e6275003", - "metadata": {}, - "source": [ - "## Preprocess dataset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0e019c37-fe81-4aaa-842c-a27698d8b29a", - "metadata": {}, - "outputs": [], - "source": [ - "RGB_WEIGHTS = torch.FloatTensor([65.481, 128.553, 24.966])\n", - "def preprocess(img, scaling_factor=2):\n", - " lr_img, hr_img = create_hr_lr_pair(img, scaling_factor)\n", - " return lr_img, hr_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b35873a5-55cb-477d-b5b9-ae5b359f514b", - "metadata": {}, - "outputs": [], - "source": [ - "def create_hr_lr_pair(img, scaling_factor=2):\n", - " height, width = img.shape[0:2]\n", - " # Take the largest possible center-crop of it such that its dimensions are perfectly divisible by the scaling factor\n", - " x_remainder = width % (scaling_factor)\n", - " y_remainder = height % (scaling_factor)\n", - " left = x_remainder // 2\n", - " top = y_remainder // 2\n", - " right = left + (width - x_remainder)\n", - " bottom = top + (height - y_remainder)\n", - " hr_img = img[top:bottom, left:right]\n", - "\n", - " hr_height, hr_width = hr_img.shape[0:2]\n", - "\n", - " hr_img = np.array(hr_img, dtype='float32')\n", - " lr_img = imresize(hr_img, 1. 
/ scaling_factor) # equivalent to matlab's imresize\n", - " flag=0\n", - " lr_img = np.uint8(np.clip(lr_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " hr_img = np.uint8(hr_img)\n", - " lr_height, lr_width = lr_img.shape[0:2]\n", - "\n", - " # Sanity check\n", - " assert hr_width == lr_width * scaling_factor and hr_height == lr_height * scaling_factor\n", - " lr_img = convert_image(lr_img, source='array', target='[0, 1]')\n", - " hr_img = convert_image(hr_img, source='array', target='[0, 1]')\n", - "\n", - " return lr_img, hr_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "20431195-1874-4b87-bf3d-54e7f376a4af", - "metadata": {}, - "outputs": [], - "source": [ - "def convert_image(img, source, target):\n", - " if source == 'array':\n", - " img = torch.from_numpy(img.transpose((2, 0, 1))).contiguous()#chw\n", - " img = img.to(dtype=torch.float32).div(255)\n", - " elif source == '[0, 1]':\n", - " img = torch.clamp(img, 0, 1) # useful to post-process output of models that can overspill\n", - " \n", - " if target == '[0, 1]':\n", - " pass # already in [0, 1]\n", - " elif target == 'y-channel':\n", - " img = torch.matmul(img.permute(0, 2, 3, 1), RGB_WEIGHTS.to(img.device)) + 16.\n", - " \n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c275e2de-194e-4682-bcaf-984467126d3a", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process(img):\n", - " img = img.detach().cpu().numpy()\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dfd18eaf-123d-469d-8e86-8e24a42bb795", - "metadata": {}, - "outputs": [], - "source": [ - "def load_dataset(test_images_dir, scaling_factor=2):\n", - " # Input images for the model\n", - " INPUTS_LR = []\n", - "\n", - " # Post-processed images for visualization\n", - " IMAGES_LR = []\n", - " IMAGES_HR = []\n", - " \n", - " # Load the test images\n", - " count=0\n", - " img_paths = glob.glob(os.path.join(test_images_dir, '*'))\n", - " img_paths = sorted(img_paths)\n", - " for img_path in img_paths:\n", - " img = cv2.resize(cv2.imread(img_path),[512,512],interpolation=cv2.INTER_CUBIC)\n", - " img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n", - " lr_img, hr_img = preprocess(img, scaling_factor)#chw\n", - " INPUTS_LR.append(lr_img)#chw\n", - " IMAGES_LR.append(post_process(lr_img))#hwc\n", - " IMAGES_HR.append(post_process(hr_img))#hwc\n", - "\n", - " return INPUTS_LR, IMAGES_LR, IMAGES_HR" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17ec75e4", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"input/Set14\"\n", - "INPUTS_LR, IMAGES_LR, IMAGES_HR = load_dataset(test_images_dir, scaling_factor=4)\n", - "for i, img_lr in enumerate(INPUTS_LR):\n", - " img_lr = img_lr.cpu().detach().numpy()\n", - " img_lr = img_lr.astype(np.float32)\n", - " fid = open(\"input/raw/img\"+str(i)+ \".raw\", 'wb')\n", - " img_lr.tofile(fid)\n", - " fid.close()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "37bd17b7-3a3f-42a0-a662-8df8a13f544e", - "metadata": {}, - "outputs": [], - "source": [ - "INPUTS_LR[0].shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b1db2108-45f9-4047-b2cc-f68bd9b6febc", - "metadata": {}, - "outputs": [], - "source": [ - "with open(\"input/input.txt\", \"w\") as f:\n", - " for i in range(14):\n", - " file_path = 
f\"./raw/img{i}.raw\"\n", - " f.write(file_path + \"\\n\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3b9d8c9c-e18d-4d20-a868-d55f267fe4fd", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input\n", - "snpe-dlc-quantize --input_dlc ../models/quicksrnet_small_fp32.dlc --input_list input.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc ../models/quicksrnet_small_w8a8.dlc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0455e1a5-51af-4e90-8dd9-993b62f52bdb", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "markdown", - "id": "f4921976", - "metadata": {}, - "source": [ - "# Post-process model output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7fa25133", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process_sr(img):\n", - "# img = img.detach().cpu().numpy()\n", - " img = np.fromfile(img, np.float32)\n", - " img = img.reshape((3,512, 512)).astype(np.float32)\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "markdown", - "id": "0b94790c", - "metadata": {}, - "source": [ - "Method to calcualte PSNR" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d4103113", - "metadata": {}, - "outputs": [], - "source": [ - "def compute_psnr(img_pred, img_true, data_range=255., eps=1e-8):\n", - " \"\"\"\n", - " Compute PSNR between super-resolved and original images.\n", - " \n", - " :param img_pred:\n", - " The super-resolved image obtained from the model\n", - " :param img_true:\n", - " The original high-res image\n", - " :param data_range:\n", - " Default = 255\n", - " :param eps:\n", - " Default = 1e-8\n", - " :return:\n", - " PSNR value\n", - " \"\"\"\n", - " err = (img_pred - img_true) ** 2\n", - " err = err.mean(dim=-1).mean(dim=-1)\n", - "\n", - " return 10. 
* torch.log10((data_range ** 2) / (err + eps))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e230a6fe", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_psnr(y_pred, y_true):\n", - " \"\"\"\n", - " Evaluate individual PSNR metric for each super-res and actual high-res image-pair.\n", - " \n", - " :param y_pred:\n", - " The super-resolved image from the model\n", - " :param y_true:\n", - " The original high-res image\n", - " :return:\n", - " The evaluated PSNR metric for the image-pair\n", - " \"\"\"\n", - " y_pred = y_pred.transpose(2, 0, 1)[None] / 255.\n", - " y_true = y_true.transpose(2, 0, 1)[None] / 255.\n", - "\n", - " sr_img = convert_image(torch.FloatTensor(y_pred),\n", - " source='[0, 1]',\n", - " target='y-channel')\n", - " hr_img = convert_image(torch.FloatTensor(y_true),\n", - " source='[0, 1]',\n", - " target='y-channel')\n", - "\n", - " return compute_psnr(sr_img, hr_img)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b389e852", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_average_psnr(sr_images, hr_images):\n", - " \"\"\"\n", - " Evaluate the avg PSNR metric for all test-set super-res and high-res images.\n", - "\n", - " :param sr_images:\n", - " The list of super-resolved images obtained from the model for the given test-images\n", - " :param hr_images:\n", - " The list of original high-res test-images\n", - " :return:\n", - " Average PSNR metric for all super-resolved and high-res test-set image-pairs\n", - " \"\"\"\n", - " psnr = []\n", - " for sr_img, hr_img in zip(sr_images, hr_images):\n", - " psnr.append(evaluate_psnr(sr_img, hr_img))\n", - "\n", - " \n", - "\n", - " # Convert the list of tensor values to a tensor array\n", - " psnr_tensor = torch.cat(psnr)\n", - "\n", - " \n", - "\n", - " # Calculate the mean of the tensor array\n", - " average_psnr = torch.mean(psnr_tensor)\n", - "\n", - " return average_psnr" - ] - }, - { - "cell_type": "markdown", - "id": "63a1687e", - "metadata": {}, - "source": [ - "# Creating Bin and Lib Folder on Device " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a8c2b9ce", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "ea80b847", - "metadata": {}, - "source": [ - "# Pushing all Lib and Bin files onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed6634e3", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "markdown", - "id": "5bf854eb", - "metadata": {}, - "source": [ - "# Pushing Artifacts on to Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"id": "088a8923", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7d34aeac", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "#find ./raw -name *.raw > list.txt\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "1efd9588", - "metadata": {}, - "source": [ - "# Inferencing 8 bit DLC on DSP Runtime\n", - "Give name of DLC in OUTPUT_DLC_QUANTIZED8 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "484a6e4c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=quicksrnet_small_w8a8.dlc\n", - "export ONDEVICE_FOLDER=\"quicksrnet_small\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list input.txt --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "c93ea4c5", - "metadata": {}, - "source": [ - "# Inferencing 32b DLC on CPU Runtime\n", - "Give name of DLC in OUTPUT_DLC_32 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "83c3a198", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=quicksrnet_small_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"quicksrnet_small\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list input.txt --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "ec8c1af8", - "metadata": {}, - "source": [ - "# Pulling Output folder generated on different precision and cores" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "091c14cb-1e73-46fc-883f-c16235c6315a", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3bf075ed-ae3f-400d-97b8-e75acc863a20", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "rm -rf output/OUTPUT_8b_DSP\n", - "rm -rf output/OUTPUT_32b_CPU\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "77e5bf2b-66d9-498a-9e24-8d0a227eff09", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3015b52c", - "metadata": {}, - "outputs": [], - "source": [ - 
"%%bash\n", - "\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP output/OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "8b3b27e6", - "metadata": {}, - "source": [ - "# Calculate PSNR\n", - "* Pass path of two raw image in Argument." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22c037ca-46aa-4dc8-95ca-e14b69d94ae5", - "metadata": {}, - "outputs": [], - "source": [ - "val = []\n", - "for i in range(10):\n", - " val.append(IMAGES_HR[i])\n", - "val[0].shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ff341bf7-5590-4a0e-9df6-cfadb807179d", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e7cea900", - "metadata": {}, - "outputs": [], - "source": [ - "folder = [\"output/OUTPUT_8b_DSP/\",\"output/OUTPUT_32b_CPU\"]\n", - "RGB_WEIGHTS = torch.FloatTensor([65.481, 128.553, 24.966])\n", - "for j in range(0,len(folder)):\n", - " IMAGES_SR = []\n", - " for i in range(0,10):\n", - " IMAGES_SR.append(post_process_sr(folder[j]+\"/Result_\"+str(i)+\"/25.raw\"))\n", - " print(folder[j],\" (Average PSNR) :: \",evaluate_average_psnr(IMAGES_SR, IMAGES_HR))\n", - " print(\"\\n============================\\n\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dad3b82e", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "plt.imshow(IMAGES_SR[5])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d7bc9d09-8de6-443e-b99f-1e10c1d00686", - "metadata": {}, - "outputs": [], - "source": [ - "plt.imshow(IMAGES_HR[5])" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/01-super-resolution/quicksrnet_small/quicksrnet_small.patch b/models-for-solutions/01-super-resolution/quicksrnet_small/quicksrnet_small.patch deleted file mode 100644 index 873b5a74..00000000 --- a/models-for-solutions/01-super-resolution/quicksrnet_small/quicksrnet_small.patch +++ /dev/null @@ -1,61 +0,0 @@ -diff --git a/aimet_zoo_torch/quicksrnet/model/model_definition.py b/aimet_zoo_torch/quicksrnet/model/model_definition.py -index 172a493..a1645b1 100644 ---- a/aimet_zoo_torch/quicksrnet/model/model_definition.py -+++ b/aimet_zoo_torch/quicksrnet/model/model_definition.py -@@ -15,9 +15,8 @@ - import json - import os - import torch --from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim --from aimet_zoo_torch.common.downloader import Downloader --from aimet_zoo_torch.quicksrnet.model.models import QuickSRNetBase -+from utils.super_resolution.downloader import Downloader -+from utils.super_resolution.models import QuickSRNetBase - - - class QuickSRNet(QuickSRNetBase, Downloader): -@@ -95,42 +94,7 @@ class QuickSRNet(QuickSRNetBase, Downloader): - self.load_state_dict(state_dict) - self.cuda() - else: -- state_dict = torch.load(self.path_pre_opt_weights)["state_dict"] -+ state_dict = 
torch.load(self.path_pre_opt_weights, map_location = torch.device('cpu'))["state_dict"] - self.load_state_dict(state_dict) -- self.cuda() -+ #self.cuda() - self.eval() -- -- def get_quantsim(self, quantized=False): -- """get quantsim object with pre-loaded encodings""" -- if not self.cfg: -- raise NotImplementedError( -- "There is no Quantization Simulation available for the model_config passed" -- ) -- if quantized: -- self.from_pretrained(quantized=True) -- else: -- self.from_pretrained(quantized=False) -- device = torch.device("cuda") -- dummy_input = torch.rand(self.input_shape, device=device) -- kwargs = { -- "quant_scheme": self.cfg["optimization_config"][ -- "quantization_configuration" -- ]["quant_scheme"], -- "default_param_bw": self.cfg["optimization_config"][ -- "quantization_configuration" -- ]["param_bw"], -- "default_output_bw": self.cfg["optimization_config"][ -- "quantization_configuration" -- ]["output_bw"], -- "config_file": self.path_aimet_config, -- "dummy_input": dummy_input, -- } -- sim = QuantizationSimModel(self, **kwargs) -- if self.path_aimet_encodings and quantized: -- load_encodings_to_sim(sim, self.path_aimet_encodings) -- print("load_encodings_to_sim finished!") -- if self.path_adaround_encodings and quantized: -- sim.set_and_freeze_param_encodings(self.path_adaround_encodings) -- print("set_and_freeze_param_encodings finished!") -- sim.model.eval() -- return sim diff --git a/models-for-solutions/01-super-resolution/quicksrnet_small/readme.md b/models-for-solutions/01-super-resolution/quicksrnet_small/readme.md deleted file mode 100644 index 0226568b..00000000 --- a/models-for-solutions/01-super-resolution/quicksrnet_small/readme.md +++ /dev/null @@ -1,47 +0,0 @@ -# Image Super Resolution - quicksrnet_small - -| Field | Description | -| --- | --- | -| Model Name | quicksrnet_small | -| DNN Framwork | ONNX | -| Public Repo | https://github.com/quic/aimet-model-zoo/ | -| Paper | https://arxiv.org/abs/2303.04336 | -| Accuracy Metric | PSNR | -| Input Resolution | 128 x 128 | -| Output Resolution | 512 x 512 | -| Pre-Processing | Resize, Normalize on tensor input; unsqueeze and transpose | -| Post-Processing | reshape, transpose, max prediction value, decoding depending on dataset, image from array | - - -## Pre-requisites - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - -- Please follow the instructions for setting up Qualcomm Neural Processing SDK using the [link] (https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) provided. -- Install onnx v1.6.0. Installation instruction can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) - -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. - -- Install OpenCV ```pip install cv2``` - -- Install mxnet ```pip install mxnet``` - -## Pre-Trained Model - -Please refer to notebook for detailed steps to prepare Pre-Trained Model - -## Convert Model to DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Quantization of DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Make Inference, Verify output. - -Please refer to notebook for detailed steps to making inference, verifying model output - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. 
and/or its subsidiaries.* diff --git a/models-for-solutions/01-super-resolution/xlsr/readme.md b/models-for-solutions/01-super-resolution/xlsr/readme.md deleted file mode 100644 index 8e3b04d5..00000000 --- a/models-for-solutions/01-super-resolution/xlsr/readme.md +++ /dev/null @@ -1,37 +0,0 @@ -# Image Super Resolution - XLSR - -| Field | Description | -| --- | --- | -| Model Name | XLSR | -| DNN Framwork | ONNX | -| Public Repo | https://github.com/quic/aimet-model-zoo/blob/develop/aimet_zoo_torch/xlsr/XLSR.md | -| Paper | https://arxiv.org/abs/2105.10288 | -| Accuracy Metric | PSNR | -| Input Resolution | 128 x 128 | -| Output Resolution | 512 x 512 | -| Pre-Process | Resize, Normalize on tensor input; unsqueeze and transpose | -| post-Process| reshape, transpose, max prediction value, decoding depending on dataset, image from array | - -## Pre-requisites - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - -## Pre-Trained Model - -Please refer to notebook for detailed steps to prepare Pre-Trained Model - -## Convert Model to DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Quantization of DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Make Inference, Verify output. - -Please refer to notebook for detailed steps to making inference, verifying model output - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. and/or its subsidiaries.* \ No newline at end of file diff --git a/models-for-solutions/01-super-resolution/xlsr/xlsr.ipynb b/models-for-solutions/01-super-resolution/xlsr/xlsr.ipynb deleted file mode 100644 index 9a6db805..00000000 --- a/models-for-solutions/01-super-resolution/xlsr/xlsr.ipynb +++ /dev/null @@ -1,773 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "12380c63", - "metadata": {}, - "source": [ - "# Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0eef650b-cf22-43e4-b988-9ac826def509", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/snpe/2.16.0.231029/\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"input/raw\"\n", - "os.environ['DLC32']=\"models/xlsr_fp32.dlc\"\n", - "os.environ['DLC8']=\"models/xlsr_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"input/input.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"xlsr\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. 
example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "855257d0", - "metadata": {}, - "outputs": [], - "source": [ - "import cv2\n", - "import glob\n", - "import os\n", - "import numpy as np\n", - "import torch" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6009a977-402c-449f-8e52-2b507f291097", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('utils', exist_ok = True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fc897578-810c-44ee-a6ce-7e6d523580b4", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "git clone https://github.com/quic/aimet-model-zoo/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8c981332-4eb2-45c6-be67-09b7e87a7b54", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp -r xlsr.patch aimet-model-zoo\n", - "cd aimet-model-zoo\n", - "git apply xlsr.patch" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c56b9333-c15f-4962-aa1e-860df9dd8cc5", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/common/super_resolution/ utils/\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/common/downloader.py utils/super_resolution/\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/xlsr/model/ utils/\n", - "rm -rf aimet-model-zoo/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "52f82600-a226-468e-8159-f38f5ada79fe", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cd utils\n", - "touch __init__.py\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dcfe259e-1b27-4c01-954a-808de13745d8", - "metadata": {}, - "outputs": [], - "source": [ - "from utils.model.model_definition import XLSR\n", - "from utils.super_resolution.imresize import imresize" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2b55eb27-0ffb-488c-b3ca-a42dc4df9fcf", - "metadata": {}, - "outputs": [], - "source": [ - "model_fp32 = XLSR(\"xlsr_4x_w8a8\",scaling_factor=4)\n", - "model_fp32.from_pretrained(quantized=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "29e440d2-2496-4fb3-a5d9-56638d54a108", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('models', exist_ok= True)\n", - "os.makedirs('output', exist_ok= True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "675c3487-d8ce-487e-a48f-b3ccb7939e41", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "dummy_input = torch.randn(1,3, 128, 128).type(torch.FloatTensor).to('cpu')\n", - "torch.onnx.export(model_fp32, dummy_input, \"./models/xlsr.onnx\",opset_version=11)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b07e763d-d689-42be-bdd8-4763679a09a7", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc --input_network models/xlsr.onnx --output_path models/xlsr_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "6c1a3a1d", - "metadata": {}, - "source": [ - "# Download dataset\n", - "
                        \n", - "
- Dataset link: https://figshare.com/ndownloader/files/38256855\n", - "- The block below will automatically download the dataset; if it fails, please download it manually from the link above.\n", - "- It is recommended to comment out the block below if the dataset has already been downloaded once.
                          " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7940d5f2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "wget https://figshare.com/ndownloader/files/38256855\n", - "unzip 38256855 -d input\n", - "rm -rf 38256855\n", - "rm -rf input/Set14/image_SRF_4\n", - "rm -rf input/Set14/image_SRF_3\n", - "mkdir input/raw\n", - "find input/Set14/image_SRF_2 -name '*_LR*' -delete\n", - "mv input/Set14/image_SRF_2/* input/Set14/\n", - "rm -rf input/Set14/image_SRF_2/" - ] - }, - { - "cell_type": "markdown", - "id": "3762f717", - "metadata": {}, - "source": [ - "# Pre-processing data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "137cbe80-cf28-4a4d-b3f0-6d36fbfedfef", - "metadata": {}, - "outputs": [], - "source": [ - "img_paths = glob.glob(os.path.join(\"input/Set14/\", '*'))\n", - "img_paths = sorted(img_paths)\n", - "img_paths" - ] - }, - { - "cell_type": "markdown", - "id": "82b6adc3-9df0-46a7-a38c-c925e6275003", - "metadata": {}, - "source": [ - "## Preprocess dataset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0e019c37-fe81-4aaa-842c-a27698d8b29a", - "metadata": {}, - "outputs": [], - "source": [ - "RGB_WEIGHTS = torch.FloatTensor([65.481, 128.553, 24.966])\n", - "def preprocess(img, scaling_factor=2):\n", - " lr_img, hr_img = create_hr_lr_pair(img, scaling_factor)\n", - "\n", - " return lr_img, hr_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b35873a5-55cb-477d-b5b9-ae5b359f514b", - "metadata": {}, - "outputs": [], - "source": [ - "def create_hr_lr_pair(img, scaling_factor=2):\n", - " height, width = img.shape[0:2]\n", - "\n", - " # Take the largest possible center-crop of it such that its dimensions are perfectly divisible by the scaling factor\n", - " x_remainder = width % (scaling_factor)\n", - " y_remainder = height % (scaling_factor)\n", - " left = x_remainder // 2\n", - " top = y_remainder // 2\n", - " right = left + (width - x_remainder)\n", - " bottom = top + (height - y_remainder)\n", - " hr_img = img[top:bottom, left:right]\n", - "\n", - " hr_height, hr_width = hr_img.shape[0:2]\n", - "\n", - " hr_img = np.array(hr_img, dtype='float32')\n", - " lr_img = imresize(hr_img, 1. 
/ scaling_factor) # equivalent to matlab's imresize\n", - " flag=0\n", - " lr_img = np.uint8(np.clip(lr_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " hr_img = np.uint8(hr_img)\n", - " lr_height, lr_width = lr_img.shape[0:2]\n", - "\n", - " # Sanity check\n", - " assert hr_width == lr_width * scaling_factor and hr_height == lr_height * scaling_factor\n", - " lr_img = convert_image(lr_img, source='array', target='[0, 1]')\n", - " hr_img = convert_image(hr_img, source='array', target='[0, 1]')\n", - "\n", - " return lr_img, hr_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "20431195-1874-4b87-bf3d-54e7f376a4af", - "metadata": {}, - "outputs": [], - "source": [ - "def convert_image(img, source, target):\n", - " if source == 'array':\n", - " img = torch.from_numpy(img.transpose((2, 0, 1))).contiguous()#chw\n", - " img = img.to(dtype=torch.float32).div(255)\n", - " elif source == '[0, 1]':\n", - " img = torch.clamp(img, 0, 1) # useful to post-process output of models that can overspill\n", - " \n", - " if target == '[0, 1]':\n", - " pass # already in [0, 1]\n", - " elif target == 'y-channel':\n", - " img = torch.matmul(img.permute(0, 2, 3, 1), RGB_WEIGHTS.to(img.device)) + 16.\n", - " \n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c275e2de-194e-4682-bcaf-984467126d3a", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process(img):\n", - " img = img.detach().cpu().numpy()\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dfd18eaf-123d-469d-8e86-8e24a42bb795", - "metadata": {}, - "outputs": [], - "source": [ - "def load_dataset(test_images_dir, scaling_factor=2):\n", - " # Input images for the model\n", - " INPUTS_LR = []\n", - "\n", - " # Post-processed images for visualization\n", - " IMAGES_LR = []\n", - " IMAGES_HR = []\n", - " \n", - " # Load the test images\n", - " count=0\n", - " img_paths = glob.glob(os.path.join(test_images_dir, '*'))\n", - " img_paths = sorted(img_paths)\n", - " for img_path in img_paths:\n", - " img = cv2.resize(cv2.imread(img_path),[512,512],interpolation=cv2.INTER_CUBIC)\n", - " img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n", - " lr_img, hr_img = preprocess(img, scaling_factor)#chw\n", - " INPUTS_LR.append(lr_img)#chw\n", - " IMAGES_LR.append(post_process(lr_img))#hwc\n", - " IMAGES_HR.append(post_process(hr_img))#hwc\n", - "\n", - " return INPUTS_LR, IMAGES_LR, IMAGES_HR" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17ec75e4", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"input/Set14\"\n", - "INPUTS_LR, IMAGES_LR, IMAGES_HR = load_dataset(test_images_dir, scaling_factor=4)\n", - "for i, img_lr in enumerate(INPUTS_LR):\n", - " img_lr = img_lr.cpu().detach().numpy()\n", - " img_lr = img_lr.astype(np.float32)\n", - " fid = open(\"input/raw/img\"+str(i)+ \".raw\", 'wb')\n", - " img_lr.tofile(fid)\n", - " fid.close()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "37bd17b7-3a3f-42a0-a662-8df8a13f544e", - "metadata": {}, - "outputs": [], - "source": [ - "INPUTS_LR[0].shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b1db2108-45f9-4047-b2cc-f68bd9b6febc", - "metadata": {}, - "outputs": [], - "source": [ - "with open(\"input/input.txt\", \"w\") as f:\n", - " for i in range(14):\n", - " file_path = 
f\"./raw/img{i}.raw\"\n", - " f.write(file_path + \"\\n\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3b9d8c9c-e18d-4d20-a868-d55f267fe4fd", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input\n", - "snpe-dlc-quantize --input_dlc ../models/xlsr_fp32.dlc --input_list input.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc ../models/xlsr_w8a8.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "f4921976", - "metadata": {}, - "source": [ - "# Post-process model output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7fa25133", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process_sr(img):\n", - "# img = img.detach().cpu().numpy()\n", - " img = np.fromfile(img, np.float32)\n", - " img = img.reshape((3,512, 512)).astype(np.float32)\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "markdown", - "id": "0b94790c", - "metadata": {}, - "source": [ - "# Method to calcualte PSNR" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d4103113", - "metadata": {}, - "outputs": [], - "source": [ - "def compute_psnr(img_pred, img_true, data_range=255., eps=1e-8):\n", - " \"\"\"\n", - " Compute PSNR between super-resolved and original images.\n", - " \n", - " :param img_pred:\n", - " The super-resolved image obtained from the model\n", - " :param img_true:\n", - " The original high-res image\n", - " :param data_range:\n", - " Default = 255\n", - " :param eps:\n", - " Default = 1e-8\n", - " :return:\n", - " PSNR value\n", - " \"\"\"\n", - " err = (img_pred - img_true) ** 2\n", - " err = err.mean(dim=-1).mean(dim=-1)\n", - "\n", - " return 10. 
* torch.log10((data_range ** 2) / (err + eps))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e230a6fe", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_psnr(y_pred, y_true):\n", - " \"\"\"\n", - " Evaluate individual PSNR metric for each super-res and actual high-res image-pair.\n", - " \n", - " :param y_pred:\n", - " The super-resolved image from the model\n", - " :param y_true:\n", - " The original high-res image\n", - " :return:\n", - " The evaluated PSNR metric for the image-pair\n", - " \"\"\"\n", - " y_pred = y_pred.transpose(2, 0, 1)[None] / 255.\n", - " y_true = y_true.transpose(2, 0, 1)[None] / 255.\n", - "\n", - " sr_img = convert_image(torch.FloatTensor(y_pred),\n", - " source='[0, 1]',\n", - " target='y-channel')\n", - " hr_img = convert_image(torch.FloatTensor(y_true),\n", - " source='[0, 1]',\n", - " target='y-channel')\n", - "\n", - " return compute_psnr(sr_img, hr_img)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b389e852", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_average_psnr(sr_images, hr_images):\n", - " \"\"\"\n", - " Evaluate the avg PSNR metric for all test-set super-res and high-res images.\n", - "\n", - " :param sr_images:\n", - " The list of super-resolved images obtained from the model for the given test-images\n", - " :param hr_images:\n", - " The list of original high-res test-images\n", - " :return:\n", - " Average PSNR metric for all super-resolved and high-res test-set image-pairs\n", - " \"\"\"\n", - " psnr = []\n", - " for sr_img, hr_img in zip(sr_images, hr_images):\n", - " psnr.append(evaluate_psnr(sr_img, hr_img))\n", - "\n", - " \n", - "\n", - " # Convert the list of tensor values to a tensor array\n", - " psnr_tensor = torch.cat(psnr)\n", - "\n", - " \n", - "\n", - " # Calculate the mean of the tensor array\n", - " average_psnr = torch.mean(psnr_tensor)\n", - "\n", - " return average_psnr" - ] - }, - { - "cell_type": "markdown", - "id": "63a1687e", - "metadata": {}, - "source": [ - "# Creating Bin and Lib Folder on Device " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a8c2b9ce", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "ea80b847", - "metadata": {}, - "source": [ - "# Pushing all Lib and Bin files onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed6634e3", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "markdown", - "id": "5bf854eb", - "metadata": {}, - "source": [ - "# Pushing Artifacts on to Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
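The adb push/shell sequences used throughout these cells can also be driven from Python, which reuses the environment variables set in the setup cell directly; this is only a convenience sketch and assumes the `adb` client is on the host PATH:

```python
import os
import subprocess

# Convenience sketch (not part of the original flow): wrap the same
# `adb -H <host> -s <serial> ...` invocation used by the %%bash cells.
def adb(*args):
    cmd = ["adb", "-H", os.environ["DEVICE_HOST"],
           "-s", os.environ["DEVICE_ID"], *args]
    subprocess.run(cmd, check=True)

# Example: create the on-device folder and push the quantized DLC to it.
target = "/data/local/tmp/" + os.environ["ONDEVICE_FOLDER"]
adb("shell", "mkdir", "-p", target)
adb("push", os.environ["DLC8"], target)
```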
"id": "088a8923", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7d34aeac", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "#find ./raw -name *.raw > list.txt\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "1efd9588", - "metadata": {}, - "source": [ - "# Inferencing 8 bit DLC on DSP Runtime\n", - "Give name of DLC in OUTPUT_DLC_QUANTIZED8 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "484a6e4c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=xlsr_w8a8.dlc\n", - "export ONDEVICE_FOLDER=\"xlsr\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list input.txt --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "c93ea4c5", - "metadata": {}, - "source": [ - "# Inferencing 32b DLC on CPU Runtime\n", - "Give name of DLC in OUTPUT_DLC_32 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "83c3a198", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=xlsr_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"xlsr\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list input.txt --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "ec8c1af8", - "metadata": {}, - "source": [ - "# Pulling Output folder generated on different precision and cores" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3bf075ed-ae3f-400d-97b8-e75acc863a20", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "rm -rf output/OUTPUT_8b_DSP\n", - "rm -rf output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3015b52c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP output/OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "8b3b27e6", - "metadata": {}, - "source": [ - "# Calculate 
PSNR\n", - "* Pass path of two raw image in Argument." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22c037ca-46aa-4dc8-95ca-e14b69d94ae5", - "metadata": {}, - "outputs": [], - "source": [ - "val = []\n", - "for i in range(10):\n", - " val.append(IMAGES_HR[i])\n", - "val[0].shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e7cea900", - "metadata": {}, - "outputs": [], - "source": [ - "folder = [\"output/OUTPUT_8b_DSP/\",\"output/OUTPUT_32b_CPU\"]\n", - "RGB_WEIGHTS = torch.FloatTensor([65.481, 128.553, 24.966])\n", - "for j in range(0,len(folder)):\n", - " IMAGES_SR = []\n", - " for i in range(0,10):\n", - " IMAGES_SR.append(post_process_sr(folder[j]+\"/Result_\"+str(i)+\"/43.raw\"))\n", - " print(folder[j],\" (Average PSNR) :: \",evaluate_average_psnr(IMAGES_SR, IMAGES_HR))\n", - " print(\"\\n============================\\n\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dad3b82e", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "plt.imshow(IMAGES_SR[5])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d7bc9d09-8de6-443e-b99f-1e10c1d00686", - "metadata": {}, - "outputs": [], - "source": [ - "plt.imshow(IMAGES_HR[5])" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/01-super-resolution/xlsr/xlsr.patch b/models-for-solutions/01-super-resolution/xlsr/xlsr.patch deleted file mode 100644 index e611afd5..00000000 --- a/models-for-solutions/01-super-resolution/xlsr/xlsr.patch +++ /dev/null @@ -1,59 +0,0 @@ -diff --git a/aimet_zoo_torch/xlsr/model/model_definition.py b/aimet_zoo_torch/xlsr/model/model_definition.py -index c1b446d..beee86e 100644 ---- a/aimet_zoo_torch/xlsr/model/model_definition.py -+++ b/aimet_zoo_torch/xlsr/model/model_definition.py -@@ -14,9 +14,8 @@ - import json - import os - import torch --from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim --from aimet_zoo_torch.common.downloader import Downloader --from aimet_zoo_torch.common.super_resolution.models import XLSRRelease -+from utils.super_resolution.downloader import Downloader -+from utils.super_resolution.models import XLSRRelease - - - class XLSR(XLSRRelease, Downloader): -@@ -77,40 +76,7 @@ class XLSR(XLSRRelease, Downloader): - self.load_state_dict(state_dict) - self.cuda() - else: -- state_dict = torch.load(self.path_pre_opt_weights)["state_dict"] -+ state_dict = torch.load(self.path_pre_opt_weights, map_location = torch.device('cpu'))["state_dict"] - self.load_state_dict(state_dict) -- self.cuda() -+ #self.cuda() - self.eval() -- -- def get_quantsim(self, quantized=False): -- """get quantsim object with pre-loaded encodings""" -- if not self.cfg: -- raise NotImplementedError( -- "There is no Quantization Simulation available for the model_config passed" -- ) -- if quantized: -- self.from_pretrained(quantized=True) -- else: -- self.from_pretrained(quantized=False) -- device = torch.device("cuda") -- dummy_input = torch.rand(self.input_shape, device=device) -- kwargs = { -- "quant_scheme": self.cfg["optimization_config"][ -- 
"quantization_configuration" -- ]["quant_scheme"], -- "default_param_bw": self.cfg["optimization_config"][ -- "quantization_configuration" -- ]["param_bw"], -- "default_output_bw": self.cfg["optimization_config"][ -- "quantization_configuration" -- ]["output_bw"], -- "config_file": self.path_aimet_config, -- "dummy_input": dummy_input, -- } -- sim = QuantizationSimModel(self, **kwargs) -- if self.path_aimet_encodings and quantized: -- load_encodings_to_sim(sim, self.path_aimet_encodings) -- if self.path_adaround_encodings and quantized: -- sim.set_and_freeze_param_encodings(self.path_adaround_encodings) -- sim.model.eval() -- return sim diff --git a/models-for-solutions/02-low-light-enhancement/EnlightenGAN/Accuracy_Analyzer_enlightenGAN.ipynb b/models-for-solutions/02-low-light-enhancement/EnlightenGAN/Accuracy_Analyzer_enlightenGAN.ipynb deleted file mode 100644 index 62efcf20..00000000 --- a/models-for-solutions/02-low-light-enhancement/EnlightenGAN/Accuracy_Analyzer_enlightenGAN.ipynb +++ /dev/null @@ -1,558 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "75df6b48", - "metadata": {}, - "source": [ - "# Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3879b5c0-cd54-4b42-9953-f912506cfc3d", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"EnhancementGAN\"\n", - "os.environ['DLC32']=\"enlighten_fp32.dlc\"\n", - "os.environ['DLC8']=\"enlighten_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"EnhancementGAN_comparision\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #change with your device-id. Use command \"adb devices\" to get devices names.\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6d904ce4", - "metadata": {}, - "outputs": [], - "source": [ - "import cv2\n", - "import glob\n", - "import os\n", - "import numpy as np\n", - "import torch\n", - "!pip install onnxsim" - ] - }, - { - "cell_type": "markdown", - "id": "73d25715", - "metadata": {}, - "source": [ - "# Download dataset\n", - "\n", - "Before proceeding with next steps, download dataset of your choice, and keep it ready to test on the images. Sample pre-processing and post-processing operations are shown below. They can be applied for the selected dataset. 
" - ] - }, - { - "cell_type": "markdown", - "id": "54764069", - "metadata": {}, - "source": [ - "# Pre-processing data " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0782fcfa", - "metadata": {}, - "outputs": [], - "source": [ - "def preprocess(ll_img, hl_img):\n", - " ll_img = np.array(ll_img, dtype='float32')\n", - " hl_img = np.array(hl_img, dtype='float32')\n", - " \n", - " ll_img = np.uint8(np.clip(ll_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " hl_img = np.uint8(np.clip(hl_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " \n", - " ll_img = np.uint8(ll_img)\n", - " hl_img = np.uint8(hl_img)\n", - " \n", - " ll_img = convert_image(ll_img, source='array', target='[0, 1]')\n", - " hl_img = convert_image(hl_img, source='array', target='[0, 1]')\n", - " \n", - " return ll_img, hl_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5901102f", - "metadata": {}, - "outputs": [], - "source": [ - "def convert_image(img, source, target):\n", - " if source == 'array':\n", - " img = torch.from_numpy(img.transpose((2, 0, 1))).contiguous()#chw\n", - " img = img.to(dtype=torch.float32).div(255) \n", - " elif source == '[0, 1]':\n", - " img = torch.clamp(img, 0, 1) # useful to post-process output of models that can overspill\n", - " \n", - " if target == '[0, 1]':\n", - " pass # already in [0, 1]\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "765569a7", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process(img):\n", - " img = img.detach().cpu().numpy()\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a021d75f", - "metadata": {}, - "outputs": [], - "source": [ - "def load_dataset(test_images_dir):\n", - " # Input images for the model\n", - " INPUTS_LL = []\n", - " # Post-processed images for visualization\n", - " IMAGES_LL = [] # LL:Low Light\n", - " IMAGES_HL = [] # HL:High Light\n", - " # Load the test images\n", - " count=0\n", - " for img_path in glob.glob(os.path.join(test_images_dir, '*')):\n", - " l_img = cv2.resize(cv2.imread(img_path),[320,240],interpolation=cv2.INTER_CUBIC)\n", - " l_img = cv2.cvtColor(l_img, cv2.COLOR_BGR2RGB)\n", - " h_img = cv2.resize(cv2.imread(img_path.replace(\"low\",\"high\")),[320,240],interpolation=cv2.INTER_CUBIC)\n", - " h_img = cv2.cvtColor(h_img, cv2.COLOR_BGR2RGB)\n", - " ll_img, hl_img = preprocess(l_img, h_img)#chw\n", - " INPUTS_LL.append(ll_img)#chw\n", - " IMAGES_LL.append(post_process(ll_img))#hwc\n", - " IMAGES_HL.append(post_process(hl_img))#hwc\n", - " return INPUTS_LL, IMAGES_LL, IMAGES_HL" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "128de23f-4c1a-4cca-a854-9b7f7d405941", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('raw', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7fa25133", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"Directory with test images - set this variable to directory name\"\n", - "INPUTS_LL, IMAGES_LL, IMAGES_HL = load_dataset(test_images_dir)\n", - "print(len(INPUTS_LL),len(IMAGES_LL),len(IMAGES_HL))\n", - "for i, img_ll in enumerate(INPUTS_LL):\n", - " img_ll = img_ll.cpu().detach().numpy()\n", - " img_ll = img_ll.astype(np.float32)\n", - " fid = open(\"raw/img_\"+str(i)+ \".raw\", 'wb')\n", - " 
img_ll.tofile(fid)\n", - " fid.close()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "046a874e-cb3a-4739-80dc-17882ddf0c89", - "metadata": {}, - "outputs": [], - "source": [ - "total_iter = 15\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "with open(\"list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"raw/img_{}.raw\\n\".format(i))" - ] - }, - { - "cell_type": "markdown", - "id": "289c33ca-95a5-4c3a-8aa1-4a773815c242", - "metadata": {}, - "source": [ - "## Getting the ONNX Model\n", - "- Take this from here [ https://github.com/arsenyinfo/EnlightenGAN-inference/blob/main/enlighten_inference/enlighten.onnx ] " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "777189e4-81d8-4135-b175-ef5b8727d9a1", - "metadata": {}, - "outputs": [], - "source": [ - "!git clone https://github.com/arsenyinfo/EnlightenGAN-inference.git" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f265a057-2bd8-4da3-84e5-d3db55439289", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc -i EnlightenGAN-inference/enlighten_inference/enlighten.onnx -d input 1,3,240,320 -o enlighten_fp32.dlc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ecd438c0-3e41-4339-8b41-99876d210bd0", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize --input_dlc enlighten_fp32.dlc --input_list list.txt --output_dlc enlighten_w8a8.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "aa51d4e0", - "metadata": {}, - "source": [ - "# Post-process model output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4daa7b8e", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process_enhanced(img):\n", - " img = np.fromfile(img, np.float32)\n", - " img = img.reshape((3, 240, 320)).astype(np.float32)\n", - " img = np.clip((img+1)/2 * 255. 
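Because snpe-net-run consumes flat float32 buffers with no shape header, it can help to round-trip one of the raw files written above before running on device; the expected element count below follows from this model's 3x240x320 input and is otherwise an assumption:

```python
import numpy as np

# Sanity check on the input raw files written above. The .raw format is a
# flat float32 dump, so the CHW shape must be re-imposed by hand.
arr = np.fromfile("raw/img_0.raw", dtype=np.float32)
print(arr.size)                  # expected 3 * 240 * 320 = 230400 for this model
chw = arr.reshape(3, 240, 320)   # the shape itself is not stored in the file
print(chw.min(), chw.max())      # preprocessing scaled inputs into [0, 1]
```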
, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "markdown", - "id": "63a1687e", - "metadata": {}, - "source": [ - "# Creating Bin and Lib Folder on Device " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a8c2b9ce", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "ea80b847", - "metadata": {}, - "source": [ - "# Pushing all Lib and Bin files onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed6634e3", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "markdown", - "id": "5bf854eb", - "metadata": {}, - "source": [ - "# Pushing Artifacts on to Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "088a8923", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7d34aeac", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push raw /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "74bfd5bd", - "metadata": {}, - "source": [ - "# Inferencing 8 bit DLC on DSP Runtime\n", - "* Give name of DLC in OUTPUT_DLC_QUANTIZED8 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2fbcc4ee", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=enlighten_w8a8.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"EnhancementGAN_comparision\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list list.txt --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "25c225c6", - "metadata": {}, - "source": [ - "# 
Inferencing 32b DLC on CPU Runtime\n", - "Give name of DLC in OUTPUT_DLC_32 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1a06d463", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=enlighten_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"EnhancementGAN_comparision\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list list.txt --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "ec8c1af8", - "metadata": {}, - "source": [ - "# Pulling Output folder generated on different precision and cores" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f30dd986-8f19-4c6e-af5b-ee3baa446bcf", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "rm -rf OUTPUT_8b_DSP\n", - "rm -rf OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3015b52c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "8b3b27e6", - "metadata": {}, - "source": [ - "# Calculate PSNR\n", - "* Pass path of two raw image in Argument." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "908e7a1c", - "metadata": {}, - "outputs": [], - "source": [ - "import math\n", - "def compute_psnr(img_pred, img_true, data_range=255., eps=1e-8):\n", - " err = (img_pred - img_true) ** 2 \n", - " err = np.mean(err)\n", - " return 10. 
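With both output folders pulled back, the quantization error can also be inspected directly as the element-wise gap between the FP32 CPU and W8A8 DSP results for the same input; a minimal sketch, assuming the pulls above succeeded and using the output.raw naming seen in the evaluation cell below:

```python
import numpy as np

# Element-wise comparison of the FP32 (CPU) and W8A8 (DSP) runs for one input.
cpu = np.fromfile("OUTPUT_32b_CPU/Result_0/output.raw", dtype=np.float32)
dsp = np.fromfile("OUTPUT_8b_DSP/Result_0/output.raw", dtype=np.float32)
diff = np.abs(cpu - dsp)
print("max abs diff:", diff.max(), " mean abs diff:", diff.mean())
```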
* math.log10((data_range ** 2) / (err + eps))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ea26ceba", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_average_psnr(el_images, hl_images): #(enhanced_light, high_light )\n", - " psnr = []\n", - " for i in range(len(el_images)):\n", - " el_img = cv2.imread(el_images[i], 1)\n", - " hl_img = cv2.imread(hl_images[i], 1)\n", - " psnr.append(compute_psnr(el_img,hl_img))\n", - " average_psnr = np.mean(np.array(psnr))\n", - " return average_psnr" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e7cea900", - "metadata": {}, - "outputs": [], - "source": [ - "folder = [\"OUTPUT_32b_CPU\", \"OUTPUT_8b_DSP\"]\n", - "from PIL import Image\n", - "dict_folder_wise={}\n", - "for j in range(0,len(folder)):\n", - " IMAGES_EL = []# EL: Enhanced Light (model output)\n", - " File_LL=[]\n", - " File_EL=[]\n", - " File_HL=[]\n", - " dict_folder_wise[str(folder[j])]=[]\n", - " for i in range(0,15):\n", - " # for prediction img\n", - " IMAGES_EL.append(post_process_enhanced(folder[j]+\"/Result_\"+str(i)+\"/output.raw\")) \n", - " im = Image.fromarray(IMAGES_EL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/prediction.jpeg\")\n", - " File_EL.append(folder[j]+\"/Result_\"+str(i)+\"/prediction.jpeg\")\n", - " \n", - " #for ground truth HL\n", - " im = Image.fromarray(IMAGES_HL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_HL.jpeg\")\n", - " File_HL.append(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_HL.jpeg\")\n", - " \n", - " #for ground truth LL\n", - " im = Image.fromarray(IMAGES_LL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_LL.jpeg\")\n", - " File_LL.append(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_LL.jpeg\")\n", - " dict_folder_wise[str(folder[j])].append([File_LL[i],File_HL[i],File_EL[i]])\n", - " print(folder[j],\" :: \",evaluate_average_psnr(File_EL,File_HL))\n", - " print(\"\\n============================\\n\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5466ed73-061f-4f67-85cf-85756ae79e7d", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "from matplotlib.pyplot import figure, imshow, axis\n", - "from matplotlib.image import imread\n", - "import random as rand\n", - "\n", - "def showImagesHorizontally(majors,index):\n", - " fig, axs = plt.subplots(1, 4, figsize=(12,4))\n", - " for nn, ax in enumerate(axs.flat):\n", - " if nn<=2:\n", - " column = majors[nn]\n", - " column_rec_name = column.replace('\\n', '_').replace(' ', '_')\n", - " image = imread(majors[nn])\n", - " ax.set_xlabel(majors[nn].split(\"/\")[-1])\n", - " else:\n", - " image=imread(dict_folder_wise['OUTPUT_8b_DSP'][index][-1])\n", - " ax.set_xlabel(\"Prediction 8b\")\n", - " ax.imshow(image)\n", - " \n", - " fig.suptitle(\"Comparison Between Ground Truth and Prediction\", fontsize=16)\n", - " plt.show()\n", - "\n", - "for index in range(0,15):\n", - " showImagesHorizontally(dict_folder_wise['OUTPUT_32b_CPU'][index],index)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git 
a/models-for-solutions/02-low-light-enhancement/EnlightenGAN/README.md b/models-for-solutions/02-low-light-enhancement/EnlightenGAN/README.md deleted file mode 100644 index 3a04c49c..00000000 --- a/models-for-solutions/02-low-light-enhancement/EnlightenGAN/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# Low Light Image Enhancement using EnhancedGAN - -| Field | Description | -| --- | --- | -| Model Name | EnlightenGAN | -| DNN Framwork | ONNX | -| Public Repo | https://github.com/arsenyinfo/EnlightenGAN-inference | -| Paper | https://arxiv.org/abs/1906.06972 | -| Accuracy Metric | PSNR | -| Pre-Process | cv2.resize, cv2.cvtColor(img, cv2.COLOR_BGR2RGB), np.clip | -| post-Process| np.reshape, np.clip, transpose | - - -## Pre-requisites - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - - -## Pre-Trained Model - -Please refer to notebook for detailed steps to prepare Pre-Trained Model - -## Convert Model to DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Quantization of DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Make Inference, Verify output. - -Please refer to notebook for detailed steps to making inference, verifying model output - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. and/or its subsidiaries.* \ No newline at end of file diff --git a/models-for-solutions/02-low-light-enhancement/MBLLEN/MBLLEN.ipynb b/models-for-solutions/02-low-light-enhancement/MBLLEN/MBLLEN.ipynb deleted file mode 100644 index 4026e3dd..00000000 --- a/models-for-solutions/02-low-light-enhancement/MBLLEN/MBLLEN.ipynb +++ /dev/null @@ -1,664 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "1c501062-4864-4e4b-ad35-2a5bc7e28bd2", - "metadata": {}, - "source": [ - "# Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e49eede8-22ec-42fc-b923-406169d660fb", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"#rawfiles\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"MBLLEN\"\n", - "os.environ['DLC32']=\"dlc/mbllen_fp32.dlc\"\n", - "os.environ['DLC8']=\"dlc/mbllen_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"MBLLEN\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. 
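The DEVICE_ID above can be filled in without leaving the notebook; a small helper, assuming the `adb` client is installed on the host:

```python
import subprocess

# Helper (not part of the original notebook): list attached devices so the
# DEVICE_ID value above can be copied from the notebook itself.
out = subprocess.run(["adb", "devices"], capture_output=True, text=True).stdout
serials = [line.split()[0] for line in out.splitlines()[1:]
           if line.strip() and not line.startswith("*")]
print(serials)
```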
example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d9b593f9-b33e-48c0-82b9-f1b137a36844", - "metadata": {}, - "outputs": [], - "source": [ - "from PIL import Image\n", - "import glob\n", - "import os\n", - "import cv2\n", - "import numpy as np\n", - "import torch\n", - "import os\n", - "import shutil" - ] - }, - { - "cell_type": "markdown", - "id": "f4533e0b-e3ff-461b-b94a-d6831ffdacf2", - "metadata": {}, - "source": [ - "# Getting The Model\n", - "\n", - "- **If You Already have the models in the dlc folder no need to run this cell**\n", - "- [ https://github.com/ymmshi/MBLLEN.pytorch.git ](Link of the Actual Model)" - ] - }, - { - "cell_type": "markdown", - "id": "77291007-378c-45f2-8892-65450d316f9c", - "metadata": {}, - "source": [ - "#### Installing dependencies" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17a25c9a-3b70-46ff-b6d3-49f8974acdcf", - "metadata": {}, - "outputs": [], - "source": [ - "!pip3 install pytorch_lightning\n", - "!pip3 install pytorch_msssim" - ] - }, - { - "cell_type": "markdown", - "id": "3eeac983-8015-439c-ad8a-4026199eb43b", - "metadata": {}, - "source": [ - "## Downloading the dataset" - ] - }, - { - "cell_type": "markdown", - "id": "909a3a11", - "metadata": {}, - "source": [ - "Download the dataset of your choice and proceed to verify the pre-processing and post-processing steps given in notebook. " - ] - }, - { - "cell_type": "markdown", - "id": "e1350ebc-101c-407e-bc7d-a2a7df4ea47a", - "metadata": {}, - "source": [ - "## Clonning the Repo and checking\n", - "- https://github.com/ymmshi/MBLLEN.pytorch.git" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7b97e068-de6f-4c78-9461-724226bf3c57", - "metadata": {}, - "outputs": [], - "source": [ - "!git clone https://github.com/ymmshi/MBLLEN.pytorch.git\n", - "#git reset --hard c859661f89b21bce592634b36ecec33fa6bb4b19" - ] - }, - { - "cell_type": "markdown", - "id": "9da6856d-31c5-4c65-8551-c1cb5940a5de", - "metadata": {}, - "source": [ - "#### Generating the ONNX Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6a39aeb7-b904-46b0-9037-bb2db5dead91", - "metadata": {}, - "outputs": [], - "source": [ - "command1=\"cp MBLLEN.patch MBLLEN.pytorch/;cd MBLLEN.pytorch;patch -i MBLLEN.patch\"\n", - "os.system(command1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ef858edf-64fa-4001-81fe-ee8d9930ae85", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cd MBLLEN.pytorch/utils\n", - "touch __init__.py" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3a8cdfec-ffdb-4236-9837-bbce2faf9f9a", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp generate_model.py MBLLEN.pytorch\n", - "cd MBLLEN.pytorch\n", - "python generate_model.py" - ] - }, - { - "cell_type": "markdown", - "id": "4573d79e-45cd-4c91-ab10-c1b8d1592f2f", - "metadata": {}, - "source": [ - "#### Getting the FP32 DLC File" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6941bc3c-d227-4eda-a9e8-b603a1a6ee59", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc -i mbllen.onnx -o dlc/mbllen_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "9f37929c-4cff-43ed-9717-d909f57d80b1", - "metadata": {}, - "source": [ - "#### Understanding the Architecture 
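Optionally, the exported ONNX can be exercised once on the host before relying on the DLC; a short sketch assuming `onnxruntime` is installed and that generate_model.py left mbllen.onnx in the working directory, as the conversion cell above expects:

```python
import numpy as np
import onnxruntime as ort

# Optional host-side check: run the exported ONNX once on random data to
# confirm the (1, 3, 480, 640) input layout used by generate_model.py.
session = ort.InferenceSession("mbllen.onnx")
inp = session.get_inputs()[0]
print(inp.name, inp.shape)
outputs = session.run(None, {inp.name: np.random.rand(1, 3, 480, 640).astype(np.float32)})
print([o.shape for o in outputs])
```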
of the Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ae13e54c-9428-4e43-b3ec-2f9a04aa0b3d", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash \n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-viewer -i dlc/mbllen_fp32.dlc -s mbllen.html\n", - "snpe-dlc-info -i dlc/mbllen_fp32.dlc>mbllen.txt" - ] - }, - { - "cell_type": "markdown", - "id": "8ae60b0e-bac9-4552-863a-d0de9a13cf5d", - "metadata": {}, - "source": [ - "## Preprocessing the data to generate raw file" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8f2130eb-6a08-4f22-abd0-8063d2b4cf31", - "metadata": {}, - "outputs": [], - "source": [ - "def preprocess(ll_img, hl_img):\n", - " ll_img = np.array(ll_img, dtype='float32')\n", - " hl_img = np.array(hl_img, dtype='float32')\n", - " ll_img = np.uint8(np.clip(ll_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " hl_img = np.uint8(np.clip(hl_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " ll_img = np.uint8(ll_img)\n", - " hl_img = np.uint8(hl_img)\n", - " ll_img = convert_image(ll_img, source='array', target='[0, 1]')\n", - " hl_img = convert_image(hl_img, source='array', target='[0, 1]')\n", - " return ll_img, hl_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "936d6ad9-d1fe-4afe-a0bd-a9e8fadc1b8f", - "metadata": {}, - "outputs": [], - "source": [ - "def convert_image(img, source, target):\n", - " if source == 'array':\n", - " img = torch.from_numpy(img.transpose((2, 0, 1))).contiguous()#chw\n", - " img = img.to(dtype=torch.float32).div(255) \n", - " elif source == '[0, 1]':\n", - " img = torch.clamp(img, 0, 1) # useful to post-process output of models that can overspill\n", - " if target == '[0, 1]':\n", - " pass # already in [0, 1]\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e0466e54-3935-42b4-837d-fb6c2293fc88", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process(img):\n", - " img = img.detach().cpu().numpy()\n", - " img = np.clip(255. 
* img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "306044e2-9c49-45f4-925f-8df2f61a36c6", - "metadata": {}, - "outputs": [], - "source": [ - "def load_dataset(test_images_dir):\n", - " # Input images for the model\n", - " INPUTS_LL = []\n", - " # Post-processed images for visualization\n", - " IMAGES_LL = [] # LL:Low Light\n", - " IMAGES_HL = [] # HL:High Light\n", - "\n", - " # Load the test images\n", - " count=0\n", - " for img_path in glob.glob(os.path.join(test_images_dir, '*')):\n", - " l_img = cv2.resize(cv2.imread(img_path),[480,640],interpolation=cv2.INTER_CUBIC)\n", - " l_img = cv2.cvtColor(l_img, cv2.COLOR_BGR2RGB)\n", - " \n", - " h_img = cv2.resize(cv2.imread(img_path.replace(\"low\",\"high\")),[480,640],interpolation=cv2.INTER_CUBIC)\n", - " h_img = cv2.cvtColor(h_img, cv2.COLOR_BGR2RGB)\n", - " \n", - " ll_img, hl_img = preprocess(l_img, h_img)#chw\n", - "\n", - " INPUTS_LL.append(ll_img)#chw\n", - " IMAGES_LL.append(post_process(ll_img))#hwc\n", - " IMAGES_HL.append(post_process(hl_img))#hwc\n", - " return INPUTS_LL, IMAGES_LL, IMAGES_HL" - ] - }, - { - "cell_type": "markdown", - "id": "c9da52c1-3cfa-4100-8d73-c1e6352c2041", - "metadata": {}, - "source": [ - "**Converting the low dataset to raw file format to give it to the model and do the inference**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4eaa6084-0edb-43c5-b06a-a4d4225c0158", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('raw', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b45cf168-f2d0-4b99-951a-d26912d7020e", - "metadata": {}, - "outputs": [], - "source": [ - "import glob\n", - "import os\n", - "import cv2\n", - "import numpy as np\n", - "import torch\n", - "!mkdir raw\n", - "test_images_dir = \"eval15/low\"\n", - "INPUTS_LL, IMAGES_LL, IMAGES_HL = load_dataset(test_images_dir)\n", - "print(len(INPUTS_LL),len(IMAGES_LL),len(IMAGES_HL))\n", - "for i, img_ll in enumerate(INPUTS_LL):\n", - " img_ll = img_ll.cpu().detach().numpy()\n", - " img_ll = img_ll.astype(np.float32)\n", - " fid = open(\"raw/img_\"+str(i)+ \".raw\", 'wb')\n", - " img_ll.tofile(fid)\n", - " fid.close()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8e1a5c86-388a-4799-aabd-047d6a2e1037", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process_enhanced(img):\n", - " img = np.fromfile(img, np.float32)\n", - " img = img.reshape((3,640,480)).astype(np.float32)\n", - " img = np.clip(img * 255. 
, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "markdown", - "id": "a147806b-7d8e-4831-8582-766b392e5160", - "metadata": {}, - "source": [ - "#### Getting the Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d3c39129-621d-452a-aa78-7137230f9d47", - "metadata": {}, - "outputs": [], - "source": [ - "total_iter = 15\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "with open(\"list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"raw/img_{}.raw\\n\".format(i))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1a39e880-ab84-4b1b-8231-6311653cbf08", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize --input_dlc dlc/mbllen_fp32.dlc --input_list list.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc dlc/mbllen_w8a8.dlc --enable_htp " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "007cc06a-9990-4063-8395-ecd824a6a135", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cbc05fea-5caf-473e-b173-28c8f9e0c478", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "96f5dc19-3bc0-4eef-a6bb-178919d81721", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "465dc5d9-9a9a-4cd4-85bc-fb5559299daa", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push raw /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "fa6a5028-7c0a-49e3-8bfc-fe03014c1abb", - "metadata": {}, - "source": [ - "#### Inferencing the Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c68c9669-4d6c-465c-a2dd-17f62aced46f", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export 
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=mbllen_w8a8.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"MBLLEN\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list list.txt --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "34f14305-d853-40ac-8b2e-18c9ea730b29", - "metadata": {}, - "source": [ - "#### Inferencing the Non-Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b7988a08-6d52-4e43-860a-0756df10ec58", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=mbllen_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"MBLLEN\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list list.txt --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1e9c6548-e8e5-4f2f-8f30-7e60eacbf1d6", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "rm -rf OUTPUT_8b_DSP\n", - "rm -rf OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "58e3cccc-b60b-4cf5-9759-2e470114a67c", - "metadata": {}, - "source": [ - "#### Pulling the Output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0fb7e729-592c-4411-8eea-25ec051c03cc", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "ef542396-2380-4803-8e8a-2f9ef68812ba", - "metadata": {}, - "source": [ - "### Post Processing and Calculating PSNR Value" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2eea16f1-f59f-4caa-a026-65a4165e7e66", - "metadata": {}, - "outputs": [], - "source": [ - "import math\n", - "def compute_psnr(img_pred, img_true, data_range=255., eps=1e-8):\n", - " err = (img_pred - img_true) ** 2 \n", - " err = np.mean(err)\n", - " return 10. 
* math.log10((data_range ** 2) / (err + eps))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3a85cb47-ee13-482e-8549-a6cd34b1acc5", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_average_psnr(el_images, hl_images): #(enhanced_light, high_light )\n", - " psnr = []\n", - " for i in range(len(el_images)):\n", - " el_img = cv2.imread(el_images[i], 1)\n", - " hl_img = cv2.imread(hl_images[i], 1)\n", - " psnr.append(compute_psnr(el_img,hl_img))\n", - " average_psnr = np.mean(np.array(psnr))\n", - " return average_psnr" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "46efe082-16be-41f3-89f7-79eb568c1207", - "metadata": {}, - "outputs": [], - "source": [ - "folder = [\"OUTPUT_32b_CPU\",\"OUTPUT_8b_DSP\"]\n", - "from PIL import Image\n", - "dict_folder_wise={}\n", - "for j in range(0,len(folder)):\n", - " IMAGES_EL = []# EL: Enhanced Light (model output)\n", - " File_LL=[]\n", - " File_EL=[]\n", - " File_HL=[]\n", - " dict_folder_wise[str(folder[j])]=[]\n", - " for i in range(0,15):\n", - " # for prediction img\n", - " IMAGES_EL.append(post_process_enhanced(folder[j]+\"/Result_\"+str(i)+\"/352.raw\")) \n", - " im = Image.fromarray(IMAGES_EL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/prediction.jpeg\")\n", - " File_EL.append(folder[j]+\"/Result_\"+str(i)+\"/prediction.jpeg\")\n", - " \n", - " #for ground truth HL\n", - " im = Image.fromarray(IMAGES_HL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_HL.jpeg\")\n", - " File_HL.append(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_HL.jpeg\")\n", - " \n", - " #for ground truth LL\n", - " im = Image.fromarray(IMAGES_LL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_LL.jpeg\")\n", - " File_LL.append(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_LL.jpeg\")\n", - " dict_folder_wise[str(folder[j])].append([File_LL[i],File_HL[i],File_EL[i]])\n", - " print(folder[j],\" :: \",evaluate_average_psnr(File_EL,File_HL))\n", - " print(\"\\n============================\\n\")" - ] - }, - { - "cell_type": "markdown", - "id": "52dab41d-8160-4777-a96a-c64a5db1f88e", - "metadata": {}, - "source": [ - "### Comaparison of Output between Non Quantized Model and Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6d0c3dbf-5385-42f8-b55f-a456614e79e3", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "from matplotlib.pyplot import figure, imshow, axis\n", - "from matplotlib.image import imread\n", - "import random as rand\n", - "\n", - "def showImagesHorizontally(majors,index):\n", - " fig, axs = plt.subplots(1, 4, figsize=(12,4))\n", - " for nn, ax in enumerate(axs.flat):\n", - " if nn<=2:\n", - " column = majors[nn]\n", - " column_rec_name = column.replace('\\n', '_').replace(' ', '_')\n", - " image = imread(majors[nn])\n", - " ax.set_xlabel(majors[nn].split(\"/\")[-1])\n", - " else:\n", - " image=imread(dict_folder_wise['OUTPUT_8b_DSP'][index][-1])\n", - " ax.set_xlabel(\"Prediction 8b\")\n", - " ax.imshow(image)\n", - " \n", - " fig.suptitle(\"Comparison Between Ground Truth and Predictions\", fontsize=16)\n", - " plt.show()\n", - "\n", - "for index in range(0,15):\n", - " showImagesHorizontally(dict_folder_wise['OUTPUT_32b_CPU'][index],index)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", 
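The PSNR reported above reduces to 10·log10(data_range²/MSE); a tiny standalone check of that identity, independent of the notebook helpers, makes the reported values easy to sanity-check:

```python
import numpy as np

# Standalone check of the PSNR identity used above:
#   PSNR = 10 * log10(data_range**2 / (MSE + eps))
def psnr(mse, data_range=255.0, eps=1e-8):
    return 10.0 * np.log10(data_range ** 2 / (mse + eps))

pred = np.full((64, 64), 100.0, dtype=np.float32)
true = pred + 1.0                  # every pixel off by exactly 1 -> MSE == 1
mse = float(np.mean((pred - true) ** 2))
print(psnr(mse))                   # ~48.13 dB at 8-bit range; identical images
                                   # would be capped near 128 dB by the eps term
```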
- "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/02-low-light-enhancement/MBLLEN/MBLLEN.patch b/models-for-solutions/02-low-light-enhancement/MBLLEN/MBLLEN.patch deleted file mode 100644 index cefc57da..00000000 --- a/models-for-solutions/02-low-light-enhancement/MBLLEN/MBLLEN.patch +++ /dev/null @@ -1,57 +0,0 @@ -diff --git a/config.py b/config.py -index 9db2bb3..bd454d6 100644 ---- a/config.py -+++ b/config.py -@@ -9,15 +9,3 @@ cfg['model'] = { - 'fem_channel': 32, - 'block_num': 9, - } --cfg['data'] = { -- 'data_dir': '/home/ymshi/Face/data/MBLLEN_dataset', -- 'batch_size': 16, -- 'num_workers': 4, -- 'dark_or_low': 'lowlight' --} --cfg['trainer'] = { -- 'gpus':[0,1], -- 'precision': 32, -- 'max_epochs': 80, -- 'monitor': 'val_loss' --} -\ No newline at end of file -diff --git a/main.py b/main.py -index df7af1e..04f54c9 100644 ---- a/main.py -+++ b/main.py -@@ -1,10 +1,10 @@ - import torch - from torch.nn import functional as F - from torch.utils.data import DataLoader --from torchvision import transforms -+#from torchvision import transforms - - from pytorch_lightning import LightningModule, LightningDataModule, Trainer --from pytorch_lightning.plugins import DDPPlugin -+#from pytorch_lightning.plugins import DDPPlugin - from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor - - from utils.model import MBLLEN -@@ -53,7 +53,7 @@ class Data(LightningDataModule): - self.batch_size = data_cfg['batch_size'] - self.num_workers = data_cfg['num_workers'] - self.dark_or_low = data_cfg['dark_or_low'] -- self.transform = transforms.Compose([transforms.ToTensor()]) -+ #self.transform = transforms.Compose([transforms.ToTensor()]) - - def prepare_data(self): - pass -@@ -95,7 +95,6 @@ if __name__ == '__main__': - max_epochs=cfg['trainer']['max_epochs'], - accelerator='ddp', - precision=cfg['trainer']['precision'], -- progress_bar_refresh_rate=1, -- plugins=DDPPlugin(find_unused_parameters=False), -+ progress_bar_refresh_rate=1, - callbacks=[ModelCheckpoint(monitor=cfg['trainer']['monitor']), LearningRateMonitor(logging_interval='step')]) - trainer.fit(model, data) -\ No newline at end of file diff --git a/models-for-solutions/02-low-light-enhancement/MBLLEN/README.md b/models-for-solutions/02-low-light-enhancement/MBLLEN/README.md deleted file mode 100644 index 1a7b705d..00000000 --- a/models-for-solutions/02-low-light-enhancement/MBLLEN/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# Low Light Image Enhancement using MBLLEN - -| Field | Description | -| --- | --- | -| Model Name | MBLLEN | -| DNN Framwork | ONNX | -| Public Repo | https://github.com/ymmshi/MBLLEN.pytorch.git | -| Paper | NA | -| Accuracy Metric | PSNR | -| Pre-Process | cv2.resize, cv2.cvtColor(img, cv2.COLOR_BGR2RGB), np.clip | -| post-Process| np.reshape, np.clip, transpose | - -## Pre-requisites - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). 
- -- Follow the insturctions given in SDK to setup the SDK - - -## Changes to open source Repository - -Changes made to open-source repository to generate pre-trained models is given as a patch file - MBLLEN.patch - -## Pre-Trained Model - -Please refer to python file 'generate_model.py' for detailed steps to prepare Pre-Trained Model - -## Convert Model to DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Quantization of DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Make Inference, Verify output. - -Please refer to notebook for detailed steps to making inference, verifying model output - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. and/or its subsidiaries.* \ No newline at end of file diff --git a/models-for-solutions/02-low-light-enhancement/MBLLEN/generate_model.py b/models-for-solutions/02-low-light-enhancement/MBLLEN/generate_model.py deleted file mode 100644 index f086ed6e..00000000 --- a/models-for-solutions/02-low-light-enhancement/MBLLEN/generate_model.py +++ /dev/null @@ -1,24 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= - -import torch -from main import Model -from config import cfg - -#you can download the pretrained models from here -#https://github.com/ymmshi/MBLLEN.pytorch/tree/master/pretrained_models - -model = Model(cfg['model']) -model.load_state_dict(torch.load('pretrained_models/lowlight.ckpt',map_location=torch.device('cpu'))['state_dict']) -#model = model.cuda() -model.eval() -dummy_input=torch.randn(1, 3, 480,640) -torch.onnx.export(model, dummy_input, "../mbllen.onnx", opset_version=11, verbose=False) -print("ONNX model saved Successfully") \ No newline at end of file diff --git a/models-for-solutions/02-low-light-enhancement/RUAS/README.md b/models-for-solutions/02-low-light-enhancement/RUAS/README.md deleted file mode 100644 index 64981de6..00000000 --- a/models-for-solutions/02-low-light-enhancement/RUAS/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# Low Light Image Enhancement using RUAS - -| Field | Description | -| --- | --- | -| Model Name | RUAS | -| DNN Framwork | ONNX | -| Public Repo | https://github.com/dut-media-lab/RUAS.git | -| Paper | NA | -| Accuracy Metric | PSNR | -| Pre-Process | cv2.resize, cv2.cvtColor(img, cv2.COLOR_BGR2RGB), np.clip | -| post-Process| np.reshape, np.clip, transpose | - -## Pre-requisites - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - - -## Changes to open source Repository - -Changes made to open-source repository to generate pre-trained models is given as a patch file - RUAS.patch - -## Pre-Trained Model - -Please refer to python file 'generate_model.py' for detailed steps to prepare Pre-Trained Model - -## Convert Model to DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Quantization of DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Make Inference, Verify output. 
- -Please refer to notebook for detailed steps to making inference, verifying model output - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. and/or its subsidiaries.* \ No newline at end of file diff --git a/models-for-solutions/02-low-light-enhancement/RUAS/RUAS.ipynb b/models-for-solutions/02-low-light-enhancement/RUAS/RUAS.ipynb deleted file mode 100644 index d8124f00..00000000 --- a/models-for-solutions/02-low-light-enhancement/RUAS/RUAS.ipynb +++ /dev/null @@ -1,620 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "100b7d5b-30b6-40a1-8803-5fc7f49c7afe", - "metadata": {}, - "source": [ - "# Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "94c5915b-76cd-4824-a1a9-1b93804e1518", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"#rawfiles\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"RUAS\"\n", - "os.environ['DLC32']=\"dlc/ruas_fp32.dlc\"\n", - "os.environ['DLC8']=\"dlc/ruas_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"RUAS\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "70340229-a8d0-4ede-bad4-2c4e563ef80b", - "metadata": {}, - "outputs": [], - "source": [ - "from PIL import Image\n", - "import glob\n", - "import os\n", - "import cv2\n", - "import numpy as np\n", - "import torch\n", - "import os\n", - "import shutil" - ] - }, - { - "cell_type": "markdown", - "id": "82167b78-359b-489b-8002-9c62106f7fe9", - "metadata": {}, - "source": [ - "# Getting The Model\n", - "\n", - "- **If You Already have the models in the dlc folder no need to run this cell**\n", - "- [ https://github.com/dut-media-lab/RUAS.git ](Link of the Actual Model)\n", - "- Retinex-inspired Unrolling with Cooperative Prior Architecture Search for Low-light Image Enhancement(RUAS)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2f76b0c7-9a08-4f3e-b3ef-be203127932e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "git clone https://github.com/dut-media-lab/RUAS\n", - "#git reset --hard c859661f89b21bce592634b36ecec33fa6bb4b19" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9591c582-5c47-4e45-aa26-49af0b0bfc31", - "metadata": {}, - "outputs": [], - "source": [ - "command1=\"cp RUAS.patch RUAS/;cd RUAS;patch -i RUAS.patch\"\n", - "os.system(command1)" - ] - }, - { - "cell_type": "markdown", - "id": "a6551d24-2f0b-48d9-8908-57ba79d1045e", - "metadata": {}, - "source": [ - "#### Generating the ONNX Model\n", - "- **Before Running this cell change in generate_model.py the torch.load(RUAS/ckpt/lol.pt) to torch.load(ckpt/lol.pt)**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb5ef152-633e-4f08-9020-0ba98cf9d086", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "cp generate_model.py RUAS/\n", - "cd RUAS\n", - "cp model/denoise.pt 
ckpt/\n", - "python generate_model.py\n", - "#cp -r ruas.onnx ../" - ] - }, - { - "cell_type": "markdown", - "id": "9b0f13a9-df27-4cd9-b4f8-6b1a30026cf2", - "metadata": {}, - "source": [ - "#### Getting DLC(32b) Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c1b9d54e-6d93-4dc5-b0d4-d28dc9376c18", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc -i ruas.onnx -o dlc/ruas_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "b434d983-d583-4736-9866-252060f19b8a", - "metadata": {}, - "source": [ - "##### Understanding the Model architecture" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1cf3168c-fc51-4882-b1a2-09635b9f428a", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash \n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-viewer -i dlc/ruas_fp32.dlc -s dlc/model_architecture/ruas.html\n", - "snpe-dlc-info -i dlc/ruas_fp32.dlc>ruas.txt" - ] - }, - { - "cell_type": "markdown", - "id": "44c2c218-603d-4e08-9fb4-2abd77014e62", - "metadata": {}, - "source": [ - "## Getting Dataset\n", - "\n", - "Use the dataset of your choice to validate the pre-processing, and post processing steps given in this notebook" - ] - }, - { - "cell_type": "markdown", - "id": "a701c7b0-59d8-4d0f-8e4e-861fb3b1e5a3", - "metadata": {}, - "source": [ - "## Preprocessing the data to generate raw file" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22504a00-36ff-4723-b652-d9fb7eb99b9e", - "metadata": {}, - "outputs": [], - "source": [ - "def preprocess(ll_img, hl_img):\n", - " ll_img = np.array(ll_img, dtype='float32')\n", - " hl_img = np.array(hl_img, dtype='float32')\n", - " \n", - " ll_img = np.uint8(np.clip(ll_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " hl_img = np.uint8(np.clip(hl_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " \n", - " ll_img = np.uint8(ll_img)\n", - " hl_img = np.uint8(hl_img)\n", - " \n", - " ll_img = convert_image(ll_img, source='array', target='[0, 1]')\n", - " hl_img = convert_image(hl_img, source='array', target='[0, 1]')\n", - " \n", - " return ll_img, hl_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b05d9163-74f8-4a50-869d-b7ef091ccab1", - "metadata": {}, - "outputs": [], - "source": [ - "def convert_image(img, source, target):\n", - " if source == 'array':\n", - " img = torch.from_numpy(img.transpose((2, 0, 1))).contiguous()#chw\n", - " img = img.to(dtype=torch.float32).div(255) \n", - " elif source == '[0, 1]':\n", - " img = torch.clamp(img, 0, 1) # useful to post-process output of models that can overspill\n", - " \n", - " if target == '[0, 1]':\n", - " pass # already in [0, 1]\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25ccead5-1857-40d8-8b02-61d3c3fc251c", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process(img):\n", - " img = img.detach().cpu().numpy()\n", - " img = np.clip(255. 
* img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3188d144-c3bf-42b1-bb41-3b316e54e527", - "metadata": {}, - "outputs": [], - "source": [ - "def load_dataset(test_images_dir):\n", - " # Input images for the model\n", - " INPUTS_LL = []\n", - " # Post-processed images for visualization\n", - " IMAGES_LL = [] # LL:Low Light\n", - " IMAGES_HL = [] # HL:High Light\n", - "\n", - " # Load the test images\n", - " count=0\n", - " for img_path in glob.glob(os.path.join(test_images_dir, '*')):\n", - " l_img = cv2.resize(cv2.imread(img_path),[480,640],interpolation=cv2.INTER_CUBIC)\n", - " l_img = cv2.cvtColor(l_img, cv2.COLOR_BGR2RGB)\n", - " \n", - " h_img = cv2.resize(cv2.imread(img_path.replace(\"low\",\"high\")),[480,640],interpolation=cv2.INTER_CUBIC)\n", - " h_img = cv2.cvtColor(h_img, cv2.COLOR_BGR2RGB)\n", - " \n", - " ll_img, hl_img = preprocess(l_img, h_img)#chw\n", - "\n", - " INPUTS_LL.append(ll_img)#chw\n", - " IMAGES_LL.append(post_process(ll_img))#hwc\n", - " IMAGES_HL.append(post_process(hl_img))#hwc\n", - " return INPUTS_LL, IMAGES_LL, IMAGES_HL" - ] - }, - { - "cell_type": "markdown", - "id": "d82f3a6d-8ad9-4b82-80b9-8fa50af20bee", - "metadata": {}, - "source": [ - "**Converting the low dataset to raw file format to give it to the model and do the inference**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a69187d5-1dcb-4f0e-949b-9422e32d6de3", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('raw', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "98bd0cb8-c2b7-496c-9e77-2896afbcb2dc", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"\"\n", - "INPUTS_LL, IMAGES_LL, IMAGES_HL = load_dataset(test_images_dir)\n", - "print(len(INPUTS_LL),len(IMAGES_LL),len(IMAGES_HL))\n", - "for i, img_ll in enumerate(INPUTS_LL):\n", - " img_ll = img_ll.cpu().detach().numpy()\n", - " img_ll = img_ll.astype(np.float32)\n", - " fid = open(\"raw/img_\"+str(i)+ \".raw\", 'wb')\n", - " img_ll.tofile(fid)\n", - " fid.close()" - ] - }, - { - "cell_type": "markdown", - "id": "d32eb89c-ef0f-4f4c-8ac8-d31ce20b7ab3", - "metadata": {}, - "source": [ - "#### Getting the Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22d07fac-85df-4bdf-8487-ee1a6da68d70", - "metadata": {}, - "outputs": [], - "source": [ - "total_iter = 15\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "with open(\"list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"raw/img_{}.raw\\n\".format(i))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c6b79223-ddf2-43a3-bb4c-00aeeee4daee", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize --input_dlc dlc/ruas_fp32.dlc --input_list list.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc dlc/ruas_w8a8.dlc --enable_htp --htp_socs sm8650" - ] - }, - { - "cell_type": "markdown", - "id": "fd8e1bcf-09fd-425c-88b9-fede5bd6f805", - "metadata": {}, - "source": [ - "## Inferencing the Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "654c9630-a3a1-4e21-96b2-1294e00ee571", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - 
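Before pushing the raw inputs to the device in the cells below, an optional host-side check (not part of the original notebook) can confirm that every file listed in `list.txt` exists and has the byte size `snpe-net-run` expects for a float32 CHW tensor — here 3x640x480 elements, matching the preprocessing above.

```python
# Optional sanity check (not in the original notebook): each raw file referenced
# by list.txt should hold 3*640*480 float32 values (the CHW layout written by
# the preprocessing cells above). Adjust the shape if you change the resize.
import os

expected_bytes = 3 * 640 * 480 * 4  # float32 = 4 bytes per element

with open("list.txt") as f:
    for line in f:
        path = line.strip()
        if not path:
            continue
        size = os.path.getsize(path)
        note = "OK" if size == expected_bytes else f"unexpected size: {size} bytes"
        print(f"{path}: {note}")
```

A size mismatch here usually means the resize or transpose step changed, and would otherwise only surface as garbage output after the on-device run.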
"$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f3225438-c35d-4770-abd1-de0d9fae29dc", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ea602da2-b0e7-487e-a310-12740578b856", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "48ffab7c-bddc-4353-b142-fe3c52096d32", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push raw /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "d09076f4-cea4-4b66-afbf-eb169471da67", - "metadata": {}, - "source": [ - "#### Inferencing the Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a42180a6-c989-45c4-a900-63169c94a880", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=ruas_w8a8.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"RUAS\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list list.txt --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "9ce677d5-d372-4010-a4b7-44c06683a944", - "metadata": {}, - "source": [ - "#### Inferencing the 32b Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "60b8bc5f-4d56-4b3c-ae01-e6687a2b00dd", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=ruas_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"RUAS\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run 
--container $OUTPUT_DLC_32 --input_list list.txt --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "07d6fa3e-f5a1-4acd-889e-66683232ca5f", - "metadata": {}, - "source": [ - "#### Pulling the Output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "74fdce41-ba6a-45fa-b65e-1e15ceb09355", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "cfcee5be-0666-47c5-b6f2-5c91e1711adb", - "metadata": {}, - "source": [ - "### Pos Processing and Calculating PSNR Value" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "479ca78e-b748-4593-8a7a-ed036a965556", - "metadata": {}, - "outputs": [], - "source": [ - "import math\n", - "def compute_psnr(img_pred, img_true, data_range=255., eps=1e-8):\n", - " err = (img_pred - img_true) ** 2 \n", - " err = np.mean(err)\n", - " return 10. * math.log10((data_range ** 2) / (err + eps))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "161294fd-4a07-45f0-ba63-726891fe09b7", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_average_psnr(el_images, hl_images): #(enhanced_light, high_light )\n", - " psnr = []\n", - " for i in range(len(el_images)):\n", - " el_img = cv2.imread(el_images[i], 1)\n", - " hl_img = cv2.imread(hl_images[i], 1)\n", - " psnr.append(compute_psnr(el_img,hl_img))\n", - " average_psnr = np.mean(np.array(psnr))\n", - " return average_psnr" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bcc854ae-7f38-49d4-8467-e39aef824aff", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process_enhanced(img):\n", - " img = np.fromfile(img, np.float32)\n", - " img = img.reshape((3,640,480)).astype(np.float32)\n", - " img = np.clip(img * 255. 
, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "43b17d3a-f5e9-4ae9-b2f3-86eba8c77fed", - "metadata": {}, - "outputs": [], - "source": [ - "folder = [\"OUTPUT_32b_CPU\", \"OUTPUT_8b_DSP\"]\n", - "from PIL import Image\n", - "dict_folder_wise={}\n", - "for j in range(0,len(folder)):\n", - " IMAGES_EL = []# EL: Enhanced Light (model output)\n", - " File_LL=[]\n", - " File_EL=[]\n", - " File_HL=[]\n", - " dict_folder_wise[str(folder[j])]=[]\n", - " for i in range(0,15):\n", - " # for prediction img\n", - " IMAGES_EL.append(post_process_enhanced(folder[j]+\"/Result_\"+str(i)+\"/414.raw\")) \n", - " im = Image.fromarray(IMAGES_EL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/prediction.jpeg\")\n", - " File_EL.append(folder[j]+\"/Result_\"+str(i)+\"/prediction.jpeg\")\n", - " \n", - " #for ground truth HL\n", - " im = Image.fromarray(IMAGES_HL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_HL.jpeg\")\n", - " File_HL.append(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_HL.jpeg\")\n", - " \n", - " #for ground truth LL\n", - " im = Image.fromarray(IMAGES_LL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_LL.jpeg\")\n", - " File_LL.append(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_LL.jpeg\")\n", - " dict_folder_wise[str(folder[j])].append([File_LL[i],File_HL[i],File_EL[i]])\n", - " print(folder[j],\" :: \",evaluate_average_psnr(File_EL,File_HL))\n", - " print(\"\\n============================\\n\")" - ] - }, - { - "cell_type": "markdown", - "id": "c534a829-a730-409b-b01b-976fac77c68e", - "metadata": {}, - "source": [ - "### Comaparison of Output between Non Quantized Model and Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "128e2e7c-f190-4934-a829-353bf60910b1", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "from matplotlib.pyplot import figure, imshow, axis\n", - "from matplotlib.image import imread\n", - "import random as rand\n", - "\n", - "def showImagesHorizontally(majors,index):\n", - " fig, axs = plt.subplots(1, 4, figsize=(12,4))\n", - " for nn, ax in enumerate(axs.flat):\n", - " if nn<=2:\n", - " column = majors[nn]\n", - " column_rec_name = column.replace('\\n', '_').replace(' ', '_')\n", - " image = imread(majors[nn])\n", - " ax.set_xlabel(majors[nn].split(\"/\")[-1])\n", - " else:\n", - " image=imread(dict_folder_wise['OUTPUT_8b_DSP'][index][-1])\n", - " ax.set_xlabel(\"Prediction 8b\")\n", - " ax.imshow(image)\n", - " \n", - " fig.suptitle(\"Comparison Between Ground Truth and Predictions\", fontsize=16)\n", - " plt.show()\n", - "\n", - "for index in range(0,15):\n", - " showImagesHorizontally(dict_folder_wise['OUTPUT_32b_CPU'][index],index)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/02-low-light-enhancement/RUAS/RUAS.patch b/models-for-solutions/02-low-light-enhancement/RUAS/RUAS.patch deleted file mode 100644 index 5b40f427..00000000 --- a/models-for-solutions/02-low-light-enhancement/RUAS/RUAS.patch +++ 
/dev/null @@ -1,13 +0,0 @@ -diff --git a/model.py b/model.py -index 4f10d37..1a778be 100644 ---- a/model.py -+++ b/model.py -@@ -184,7 +184,7 @@ class Network(nn.Module): - self._init_weights() - - def _init_weights(self): -- model_dict = torch.load('./model/denoise.pt') -+ model_dict = torch.load('./ckpt/denoise.pt',map_location=torch.device('cpu')) - self.denoise_net.load_state_dict(model_dict) - - def forward(self, input): diff --git a/models-for-solutions/02-low-light-enhancement/RUAS/generate_model.py b/models-for-solutions/02-low-light-enhancement/RUAS/generate_model.py deleted file mode 100644 index 55b154cf..00000000 --- a/models-for-solutions/02-low-light-enhancement/RUAS/generate_model.py +++ /dev/null @@ -1,26 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= - -import torch -import urllib, os -from model import Network - - -#You can Download from this below link, weight folder(ckpt) -# https://github.com/dut-media-lab/RUAS/tree/main/ckpt - -model = Network() -model_dict = torch.load('ckpt/lol.pt',map_location=torch.device('cpu')) -model.load_state_dict(model_dict) -dummy_input=torch.randn(1, 3,480,640) -torch.onnx.export(model, dummy_input, "../ruas.onnx", opset_version=11, verbose=False) - -print("ONNX model saved successfully") - diff --git a/models-for-solutions/02-low-light-enhancement/SCI/README.md b/models-for-solutions/02-low-light-enhancement/SCI/README.md deleted file mode 100644 index df2c1678..00000000 --- a/models-for-solutions/02-low-light-enhancement/SCI/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# Low Light Image Enhancement using SCI - -| Field | Description | -| --- | --- | -| Model Name | SCI | -| DNN Framwork | ONNX | -| Public Repo | https://github.com/vis-opt-group/SCI.git | -| Paper | NA | -| Accuracy Metric | PSNR | -| Pre-Process | cv2.resize, cv2.cvtColor(img, cv2.COLOR_BGR2RGB), np.clip | -| post-Process| np.reshape, np.clip, transpose | - -## Pre-requisites - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - - -## Changes to open source Repository - -Changes made to open-source repository to generate pre-trained models is given as a patch file - SCI.patch - -## Pre-Trained Model - -Please refer to python file 'generate_model.py' for detailed steps to prepare Pre-Trained Model - -## Convert Model to DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Quantization of DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Make Inference, Verify output. - -Please refer to notebook for detailed steps to making inference, verifying model output - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. 
and/or its subsidiaries.* \ No newline at end of file diff --git a/models-for-solutions/02-low-light-enhancement/SCI/SCI.patch b/models-for-solutions/02-low-light-enhancement/SCI/SCI.patch deleted file mode 100644 index 0a19e195..00000000 --- a/models-for-solutions/02-low-light-enhancement/SCI/SCI.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/model.py b/model.py -index 6975387..616b561 100644 ---- a/model.py -+++ b/model.py -@@ -137,7 +137,7 @@ class Finetunemodel(nn.Module): - self.enhance = EnhanceNetwork(layers=1, channels=3) - self._criterion = LossFunction() - -- base_weights = torch.load(weights) -+ base_weights = torch.load(weights,map_location=torch.device('cpu')) - pretrained_dict = base_weights - model_dict = self.state_dict() - pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict} diff --git a/models-for-solutions/02-low-light-enhancement/SCI/SCI_Accuracy_Analyzer.ipynb b/models-for-solutions/02-low-light-enhancement/SCI/SCI_Accuracy_Analyzer.ipynb deleted file mode 100644 index a1c52961..00000000 --- a/models-for-solutions/02-low-light-enhancement/SCI/SCI_Accuracy_Analyzer.ipynb +++ /dev/null @@ -1,614 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "6af20943-741d-437f-88ae-41f725497631", - "metadata": {}, - "source": [ - "# Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "30805cc7-f2a6-437a-830b-6d1b5c39b684", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"#rawfiles\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"SCI\"\n", - "os.environ['DLC32']=\"dlc/sci_difficult_fp32.dlc\"\n", - "os.environ['DLC8']=\"dlc/sci_difficult_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"SCI\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. 
example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "79e153c7-b769-417b-819d-9085c0ea06e8", - "metadata": {}, - "outputs": [], - "source": [ - "from PIL import Image\n", - "import glob\n", - "import os\n", - "import cv2\n", - "import numpy as np\n", - "import torch\n", - "import os\n", - "import shutil" - ] - }, - { - "cell_type": "markdown", - "id": "06468cb6-c5e3-4f8f-bda9-7cd205bd4fa1", - "metadata": {}, - "source": [ - "# Getting The Model\n", - "\n", - "- **If You Already have the models in the dlc folder no need to run this cell**\n", - "- [ https://github.com/vis-opt-group/SCI.git ](Link of the Actual Model)\n", - "- Self-Calibrated Illumination (SCI) Learning " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ba3b91de-ae08-496a-8a16-51c0bba903d7", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "git clone https://github.com/vis-opt-group/SCI\n", - "#git reset --hard 808e70644191a63c936bd4ce73ce3f10fbc02ec8\n", - "cp SCI.patch SCI" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f1c28a4c-76e3-4bf4-9c03-a73a8c14c13c", - "metadata": {}, - "outputs": [], - "source": [ - "command1=\"cd SCI;patch -i SCI.patch\"\n", - "os.system(command1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9da91f0f-0a4c-47b1-abff-167a668ef92b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp generate_model.py SCI/\n", - "cd SCI\n", - "python generate_model.py" - ] - }, - { - "cell_type": "markdown", - "id": "a9437302-c6c3-4c2b-baba-dc78fe9ddb3c", - "metadata": {}, - "source": [ - "#### Getting the Non Quantized DLC File" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aabc22e6-3dc4-4b02-b764-c5c80b90b12a", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc -i sci_difficult.onnx -o dlc/sci_difficult_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "5cde243b-4012-4102-9503-57e4e29072a7", - "metadata": {}, - "source": [ - "#### Understanding the Architecture of the Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fdfcf8a9-5284-42c9-9ed4-fb962a6c4e81", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash \n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-viewer -i dlc/sci_difficult_fp32.dlc -s sci_difficult.html\n", - "snpe-dlc-info -i dlc/sci_difficult_fp32.dlc>sci_difficult.txt" - ] - }, - { - "cell_type": "markdown", - "id": "3c508fa0-bd27-4df5-9ed0-5fe50a265160", - "metadata": {}, - "source": [ - "## Getting Dataset\n", - "\n", - "Use the dataset of your choice to validate the pre-processing, and post processing steps given in this notebook\n", - " " - ] - }, - { - "cell_type": "markdown", - "id": "cabb222d-55be-4f24-bc5e-100f017892ce", - "metadata": {}, - "source": [ - "## Preprocessing the data to generate raw file" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4feded9d-ebab-4485-bdf7-569693a4222c", - "metadata": {}, - "outputs": [], - "source": [ - "def preprocess(ll_img, hl_img):\n", - " ll_img = np.array(ll_img, dtype='float32')\n", - " hl_img = np.array(hl_img, dtype='float32')\n", - " \n", - " ll_img = np.uint8(np.clip(ll_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " hl_img = np.uint8(np.clip(hl_img, 0., 255.)) # this is to simulate matlab's 
imwrite operation\n", - " \n", - " ll_img = np.uint8(ll_img)\n", - " hl_img = np.uint8(hl_img)\n", - " \n", - " ll_img = convert_image(ll_img, source='array', target='[0, 1]')\n", - " hl_img = convert_image(hl_img, source='array', target='[0, 1]')\n", - " \n", - " return ll_img, hl_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bc14010d-969c-413d-8b9f-11729ffc631f", - "metadata": {}, - "outputs": [], - "source": [ - "def convert_image(img, source, target):\n", - " if source == 'array':\n", - " img = torch.from_numpy(img.transpose((2, 0, 1))).contiguous()#chw\n", - " img = img.to(dtype=torch.float32).div(255) \n", - " elif source == '[0, 1]':\n", - " img = torch.clamp(img, 0, 1) # useful to post-process output of models that can overspill\n", - " \n", - " if target == '[0, 1]':\n", - " pass # already in [0, 1]\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "78b6aed0-99ae-4cfe-a27e-e1e4bdf0f90f", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process(img):\n", - " img = img.detach().cpu().numpy()\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3068ec30-fcb6-4852-9fc3-9a57fa6b6986", - "metadata": {}, - "outputs": [], - "source": [ - "def load_dataset(test_images_dir):\n", - " # Input images for the model\n", - " INPUTS_LL = []\n", - " # Post-processed images for visualization\n", - " IMAGES_LL = [] # LL:Low Light\n", - " IMAGES_HL = [] # HL:High Light\n", - "\n", - " # Load the test images\n", - " count=0\n", - " for img_path in glob.glob(os.path.join(test_images_dir, '*')):\n", - " l_img = cv2.resize(cv2.imread(img_path),[480,640],interpolation=cv2.INTER_CUBIC)\n", - " l_img = cv2.cvtColor(l_img, cv2.COLOR_BGR2RGB)\n", - " \n", - " h_img = cv2.resize(cv2.imread(img_path.replace(\"low\",\"high\")),[480,640],interpolation=cv2.INTER_CUBIC)\n", - " h_img = cv2.cvtColor(h_img, cv2.COLOR_BGR2RGB)\n", - " \n", - " ll_img, hl_img = preprocess(l_img, h_img)#chw\n", - "\n", - " INPUTS_LL.append(ll_img)#chw\n", - " IMAGES_LL.append(post_process(ll_img))#hwc\n", - " IMAGES_HL.append(post_process(hl_img))#hwc\n", - " return INPUTS_LL, IMAGES_LL, IMAGES_HL" - ] - }, - { - "cell_type": "markdown", - "id": "cf2c94c3-9671-48a0-bbc7-1754eed9af8c", - "metadata": {}, - "source": [ - "**Converting the low dataset to raw file format to give it to the model and do the inference**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bd9cf096-9826-46ef-90c3-cbd2c7c2dade", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('raw', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9dd7ea61-64f9-4803-a895-a2d2d6d16530", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"eval15/low\"\n", - "INPUTS_LL, IMAGES_LL, IMAGES_HL = load_dataset(test_images_dir)\n", - "print(len(INPUTS_LL),len(IMAGES_LL),len(IMAGES_HL))\n", - "for i, img_ll in enumerate(INPUTS_LL):\n", - " img_ll = img_ll.cpu().detach().numpy()\n", - " img_ll = img_ll.astype(np.float32)\n", - " fid = open(\"raw/img_\"+str(i)+ \".raw\", 'wb')\n", - " img_ll.tofile(fid)\n", - " fid.close()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "75a21462-f0b4-4fff-ac8d-88b7ce539834", - "metadata": {}, - "outputs": [], - "source": [ - "total_iter = 15\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with 
{} iterations\".format(total_iter))\n", - "with open(\"list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"raw/img_{}.raw\\n\".format(i))" - ] - }, - { - "cell_type": "markdown", - "id": "04ab39bb-d77c-4cee-a471-e9aa3f431ff8", - "metadata": {}, - "source": [ - "#### Getting the Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f3c49c44-9681-4994-9fe4-d1adae64db02", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize --input_dlc dlc/sci_difficult_fp32.dlc --input_list list.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc dlc/sci_difficult_w8a8.dlc --enable_htp --htp_socs sm8550" - ] - }, - { - "cell_type": "markdown", - "id": "6b1ec798-0245-45f9-8b84-7e0816b03430", - "metadata": {}, - "source": [ - "## Inferencing the Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0cec0f4a-cb55-49d1-8dfc-79b8df2a7d3a", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "da764ceb-b825-4d47-badc-ee1b3418c7c0", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1a4d2767-0191-4c55-a503-5b33cfa1fcb8", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "55da49eb-35ce-4b10-9e62-dd99c740204a", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push raw /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "a0105ea8-354e-4bb0-a159-d387d0836ecd", - "metadata": {}, - "source": [ - "#### Inferencing the Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c529c3c3-bdbc-4904-b4df-fbd0c952e0cd", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - 
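Once the quantized (DSP) run in this cell and the FP32 (CPU) run that follows have both completed and their output folders have been pulled back to the host, an optional check (not part of the original notebook) is to compare the two sets of raw outputs directly; this isolates quantization error from the accuracy-vs-ground-truth PSNR computed later. The output file name `31.raw` below is the one this notebook uses for SCI and differs per model.

```python
# Optional sketch (not in the original notebook): PSNR between FP32 (CPU) and
# W8A8 (DSP) raw outputs, computed after both runs have been pulled back.
# Assumes the model output is nominally in [0, 1] before the *255 post-processing.
import math
import numpy as np

def raw_psnr(path_a, path_b, data_range=1.0, eps=1e-8):
    a = np.fromfile(path_a, np.float32)
    b = np.fromfile(path_b, np.float32)
    mse = np.mean((a - b) ** 2)
    return 10.0 * math.log10((data_range ** 2) / (mse + eps))

scores = [
    raw_psnr(f"OUTPUT_32b_CPU/Result_{i}/31.raw", f"OUTPUT_8b_DSP/Result_{i}/31.raw")
    for i in range(15)  # 15 inputs, matching list.txt
]
print("mean FP32-vs-W8A8 output PSNR:", float(np.mean(scores)))
```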
"export OUTPUT_DLC_QUANTIZED8=sci_difficult_w8a8.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"SCI\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list list.txt --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "1cd5e3be-da14-47fc-b646-b95a823f4bef", - "metadata": {}, - "source": [ - "#### Inferencing the 32b Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "712c3232-ddb0-42ef-9698-90127101f4a0", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=sci_difficult_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"SCI\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list list.txt --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "69943bca-744b-47aa-a6b1-fdf00732c9aa", - "metadata": {}, - "source": [ - "#### Pulling the Output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f0dc837-1af9-4b72-af4d-67d34e098dfe", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "rm -rf OUTPUT_8b_DSP\n", - "rm -rf OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "429f4154-627d-4705-8288-501f06497e1e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "889540a8-0ffe-490a-882b-6f7a5dc02f02", - "metadata": {}, - "source": [ - "### Post Processing and Calculating PSNR Value" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "643886d4-587c-4e7f-a366-b5545cec7839", - "metadata": {}, - "outputs": [], - "source": [ - "import math\n", - "def compute_psnr(img_pred, img_true, data_range=255., eps=1e-8):\n", - " err = (img_pred - img_true) ** 2 \n", - " err = np.mean(err)\n", - " return 10. * math.log10((data_range ** 2) / (err + eps))\n", - "def evaluate_average_psnr(el_images, hl_images): #(enhanced_light, high_light )\n", - " psnr = []\n", - " for i in range(len(el_images)):\n", - " el_img = cv2.imread(el_images[i], 1)\n", - " hl_img = cv2.imread(hl_images[i], 1)\n", - " psnr.append(compute_psnr(el_img,hl_img))\n", - " average_psnr = np.mean(np.array(psnr))\n", - " return average_psnr" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "61efbd61-7b86-4539-ba03-dbfc3e6da4dc", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process_enhanced(img):\n", - " img = np.fromfile(img, np.float32)\n", - " img = img.reshape((3,640,480)).astype(np.float32)\n", - " img = np.clip(img * 255. 
, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1da8b716-17d2-4467-970c-5bbddd4b126a", - "metadata": {}, - "outputs": [], - "source": [ - "folder = [\"OUTPUT_32b_CPU\", \"OUTPUT_8b_DSP\"]\n", - "from PIL import Image\n", - "dict_folder_wise={}\n", - "for j in range(0,len(folder)):\n", - " IMAGES_EL = []# EL: Enhanced Light (model output)\n", - " File_LL=[]\n", - " File_EL=[]\n", - " File_HL=[]\n", - " dict_folder_wise[str(folder[j])]=[]\n", - " for i in range(0,15):\n", - " # for prediction img\n", - " IMAGES_EL.append(post_process_enhanced(folder[j]+\"/Result_\"+str(i)+\"/31.raw\")) \n", - " im = Image.fromarray(IMAGES_EL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/prediction.jpeg\")\n", - " File_EL.append(folder[j]+\"/Result_\"+str(i)+\"/prediction.jpeg\")\n", - " \n", - " #for ground truth HL\n", - " im = Image.fromarray(IMAGES_HL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_HL.jpeg\")\n", - " File_HL.append(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_HL.jpeg\")\n", - " \n", - " #for ground truth LL\n", - " im = Image.fromarray(IMAGES_LL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_LL.jpeg\")\n", - " File_LL.append(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_LL.jpeg\")\n", - " dict_folder_wise[str(folder[j])].append([File_LL[i],File_HL[i],File_EL[i]])\n", - " print(folder[j],\" :: \",evaluate_average_psnr(File_EL,File_HL))\n", - " print(\"\\n============================\\n\")" - ] - }, - { - "cell_type": "markdown", - "id": "a25f88fd-0efa-4c62-8549-332e9dfb66a4", - "metadata": {}, - "source": [ - "### Comaparison of Output between Non Quantized Model and Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d84da7d5-d3ae-4f36-a792-19bb65df66f7", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "from matplotlib.pyplot import figure, imshow, axis\n", - "from matplotlib.image import imread\n", - "import random as rand\n", - "\n", - "def showImagesHorizontally(majors,index):\n", - " fig, axs = plt.subplots(1, 4, figsize=(12,4))\n", - " for nn, ax in enumerate(axs.flat):\n", - " if nn<=2:\n", - " column = majors[nn]\n", - " column_rec_name = column.replace('\\n', '_').replace(' ', '_')\n", - " image = imread(majors[nn])\n", - " ax.set_xlabel(majors[nn].split(\"/\")[-1])\n", - " else:\n", - " image=imread(dict_folder_wise['OUTPUT_8b_DSP'][index][-1])\n", - " ax.set_xlabel(\"Prediction 8b\")\n", - " ax.imshow(image)\n", - " \n", - " fig.suptitle(\"Comparison Between Ground Truth and Prediction\", fontsize=16)\n", - " plt.show()\n", - "\n", - "for index in range(0,15):\n", - " showImagesHorizontally(dict_folder_wise['OUTPUT_32b_CPU'][index],index)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/02-low-light-enhancement/SCI/generate_model.py b/models-for-solutions/02-low-light-enhancement/SCI/generate_model.py deleted file mode 100644 index da1e65e0..00000000 --- 
a/models-for-solutions/02-low-light-enhancement/SCI/generate_model.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= - -import torch -from model import Finetunemodel - - -#You can download the weights from the below link -#https://github.com/vis-opt-group/SCI/tree/main/weights -model = Finetunemodel('weights/difficult.pt') -dummy_input=torch.randn(1, 3, 480,640) - -torch.onnx.export(model, dummy_input, "../sci_difficult.onnx", opset_version=11, verbose=False) - -print("ONNX Model Saved Successfully") \ No newline at end of file diff --git a/models-for-solutions/02-low-light-enhancement/StableLLVE/README.md b/models-for-solutions/02-low-light-enhancement/StableLLVE/README.md deleted file mode 100644 index 229c2992..00000000 --- a/models-for-solutions/02-low-light-enhancement/StableLLVE/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# Low Light Image Enhancement using StableLLVE - -| Field | Description | -| --- | --- | -| Model Name | StableLLVE | -| DNN Framwork | ONNX | -| Public Repo | https://github.com/zkawfanx/StableLLVE.git | -| Paper | NA | -| Accuracy Metric | PSNR | -| Pre-Process | cv2.resize, cv2.cvtColor(img, cv2.COLOR_BGR2RGB), np.clip | -| post-Process| np.reshape, np.clip, transpose | - -## Pre-requisites - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - - -## Changes to open source Repository - -None - -## Pre-Trained Model - -Please refer to python file 'generate_model.py' for detailed steps to prepare Pre-Trained Model - -## Convert Model to DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Quantization of DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Make Inference, Verify output. - -Please refer to notebook for detailed steps to making inference, verifying model output - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. 
and/or its subsidiaries.* \ No newline at end of file diff --git a/models-for-solutions/02-low-light-enhancement/StableLLVE/StableLLVE.ipynb b/models-for-solutions/02-low-light-enhancement/StableLLVE/StableLLVE.ipynb deleted file mode 100644 index 5bbdc9a2..00000000 --- a/models-for-solutions/02-low-light-enhancement/StableLLVE/StableLLVE.ipynb +++ /dev/null @@ -1,588 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "812369c8-d674-413d-8a2d-18979068925f", - "metadata": {}, - "source": [ - "# Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6edfd9b8-50a1-4170-b53d-9f5b364dad5a", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"#rawfiles\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"StableLLVE\"\n", - "os.environ['DLC32']=\"dlc/StableLLVE_fp32.dlc\"\n", - "os.environ['DLC8']=\"dlc/StableLLVE_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"StableLLVE\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "51c72fa0-3922-450f-831a-d68db70dc8f0", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "git clone https://github.com/zkawfanx/StableLLVE.git" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7ae819f8-1fa0-40f4-b1ab-c154c13ecee2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp generate_model.py StableLLVE/\n", - "cd StableLLVE\n", - "python generate_model.py" - ] - }, - { - "cell_type": "markdown", - "id": "76560005-18f9-4750-898a-8fd2d4380e48", - "metadata": {}, - "source": [ - "#### Getting the Non Quantized DLC File" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e63fd5ba-38e3-42f3-ab0a-392a75956c3e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc -i StableLLVE.onnx -o dlc/StableLLVE_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "62ed60a5-c3c2-4db1-a205-6032b3fd5d1e", - "metadata": {}, - "source": [ - "#### Understanding the Architecture of the Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a7e336db-7f3d-40e9-8287-749296815174", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash \n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-viewer -i dlc/StableLLVE_fp32.dlc -s StableLLVE.html\n", - "snpe-dlc-info -i dlc/StableLLVE_fp32.dlc>StableLLVE.txt" - ] - }, - { - "cell_type": "markdown", - "id": "0a668a89-dcf2-40b2-a04e-0a1dac9505f3", - "metadata": {}, - "source": [ - "Use the dataset of your choice to validate the pre-processing, and post processing steps given in this notebook" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "46cd1837-3628-4a43-aef4-0216dbe6c250", - "metadata": {}, - "outputs": [], - "source": [ - "def preprocess(ll_img, hl_img):\n", - " ll_img = np.array(ll_img, dtype='float32')\n", - " hl_img = np.array(hl_img, dtype='float32') 
\n", - " ll_img = np.uint8(np.clip(ll_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " hl_img = np.uint8(np.clip(hl_img, 0., 255.)) # this is to simulate matlab's imwrite operation \n", - " ll_img = np.uint8(ll_img)\n", - " hl_img = np.uint8(hl_img)\n", - " ll_img = convert_image(ll_img, source='array', target='[0, 1]')\n", - " hl_img = convert_image(hl_img, source='array', target='[0, 1]')\n", - " return ll_img, hl_img" - ] - }, - { - "cell_type": "markdown", - "id": "43b9c217-3515-48c3-a627-e1abe008a8b0", - "metadata": {}, - "source": [ - "## Preprocessing the data to generate raw file" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5b7394b5-be2e-49a6-a584-cf22a4e230c7", - "metadata": {}, - "outputs": [], - "source": [ - "def convert_image(img, source, target):\n", - " if source == 'array':\n", - " img = torch.from_numpy(img.transpose((2, 0, 1))).contiguous()#chw\n", - " img = img.to(dtype=torch.float32).div(255) \n", - " elif source == '[0, 1]':\n", - " img = torch.clamp(img, 0, 1) # useful to post-process output of models that can overspill\n", - " \n", - " if target == '[0, 1]':\n", - " pass # already in [0, 1]\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5464e147-2fe8-4599-b414-449daabafc5b", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process(img):\n", - " img = img.detach().cpu().numpy()\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5d46939e-91f4-41e8-9893-428cca828e55", - "metadata": {}, - "outputs": [], - "source": [ - "def load_dataset(test_images_dir):\n", - " # Input images for the model\n", - " INPUTS_LL = []\n", - " # Post-processed images for visualization\n", - " IMAGES_LL = [] # LL:Low Light\n", - " IMAGES_HL = [] # HL:High Light\n", - "\n", - " # Load the test images\n", - " count=0\n", - " for img_path in glob.glob(os.path.join(test_images_dir, '*')):\n", - " l_img = cv2.resize(cv2.imread(img_path),[480,640],interpolation=cv2.INTER_CUBIC)\n", - " l_img = cv2.cvtColor(l_img, cv2.COLOR_BGR2RGB)\n", - " \n", - " h_img = cv2.resize(cv2.imread(img_path.replace(\"low\",\"high\")),[480,640],interpolation=cv2.INTER_CUBIC)\n", - " h_img = cv2.cvtColor(h_img, cv2.COLOR_BGR2RGB)\n", - " \n", - " ll_img, hl_img = preprocess(l_img, h_img)#chw\n", - "\n", - " INPUTS_LL.append(ll_img)#chw\n", - " IMAGES_LL.append(post_process(ll_img))#hwc\n", - " IMAGES_HL.append(post_process(hl_img))#hwc\n", - " return INPUTS_LL, IMAGES_LL, IMAGES_HL" - ] - }, - { - "cell_type": "markdown", - "id": "383721cd-2990-4bae-ac71-25ee362be4f6", - "metadata": {}, - "source": [ - "**Converting the low dataset to raw file format to give it to the model and do the inference**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "45863450-b505-439e-86ec-0b9051fafc9c", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('raw', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a143f8f0-4293-4421-a161-5d7baeed945a", - "metadata": {}, - "outputs": [], - "source": [ - "import glob\n", - "import os\n", - "import cv2\n", - "import numpy as np\n", - "import torch\n", - "\n", - "test_images_dir = \"eval15/low\"\n", - "INPUTS_LL, IMAGES_LL, IMAGES_HL = load_dataset(test_images_dir)\n", - "print(len(INPUTS_LL),len(IMAGES_LL),len(IMAGES_HL))\n", - "for i, img_ll in 
enumerate(INPUTS_LL):\n", - " img_ll = img_ll.cpu().detach().numpy()\n", - " img_ll = img_ll.astype(np.float32)\n", - " fid = open(\"raw/img_\"+str(i)+ \".raw\", 'wb')\n", - " img_ll.tofile(fid)\n", - " fid.close()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bb1240ff-32b4-43d1-9d66-51daa84912d0", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process_enhanced(img):\n", - " img = np.fromfile(img, np.float32)\n", - " img = img.reshape((3,640,480)).astype(np.float32)\n", - " img = np.clip(img* 255. , 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "markdown", - "id": "cf7caf62-b968-49c1-bd37-1f47e8e294f1", - "metadata": {}, - "source": [ - "#### Getting the Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7b82cb89-9a4f-4765-8768-95f9cad70b3c", - "metadata": {}, - "outputs": [], - "source": [ - "total_iter = 15\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "with open(\"list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"raw/img_{}.raw\\n\".format(i))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1b935f71-2506-4b8b-8b34-a3bdb1d64064", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize --input_dlc dlc/StableLLVE_fp32.dlc --input_list list.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc dlc/StableLLVE_w8a8.dlc --enable_htp --htp_socs sm8550" - ] - }, - { - "cell_type": "markdown", - "id": "70efadeb-6a42-459d-a65d-80b8585209da", - "metadata": {}, - "source": [ - "## Model Inference" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6285c5cb-d3ba-40d5-ad37-4e9ed9825248", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "76335d80-0f60-46b4-9825-92f39ab38d14", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "041088db-cbef-4fe7-9797-c4928a5e2ff9", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5bc11881-6c24-4b04-80fe-f7ce4e2d8508", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 
/data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push raw /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "83246ab3-eec9-4484-8e75-0438d699fd24", - "metadata": {}, - "source": [ - "#### Inferencing the Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "66cf75f2-91fd-4193-b62c-ac4e65b55e0f", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=StableLLVE_w8a8.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"StableLLVE\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list list.txt --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "25c7d5cc-10be-4c65-a7dc-bb7063e94c62", - "metadata": {}, - "source": [ - "#### Inferencing the Non-Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "00018b15-f86e-45d6-ab4e-29323dcf50f8", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=StableLLVE_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"StableLLVE\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list list.txt --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "daf30cd9-1f84-4cc0-8ea0-fd5615bdd4f7", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "rm -rf OUTPUT_8b_DSP\n", - "rm -rf OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "7136946e-da34-4a68-b361-f8707bd9e0e4", - "metadata": {}, - "source": [ - "#### Pulling the Output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e3f2ad57-17fd-4da4-8184-abfe052359fb", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "b2aab437-c5a8-456d-ac30-403f74380eb6", - "metadata": {}, - "source": [ - "### Post Processing and Calculating PSNR Value" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "279b123a-9c33-4833-8de0-34943927f1a2", - "metadata": {}, - "outputs": [], - "source": [ - "import math\n", - "def compute_psnr(img_pred, img_true, data_range=255., eps=1e-8):\n", - " err = (img_pred - img_true) ** 2 \n", - " err = np.mean(err)\n", - " return 10. 
* math.log10((data_range ** 2) / (err + eps))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0a180675-6ffc-4c6c-a73a-2014b4c72c3c", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_average_psnr(el_images, hl_images): #(enhanced_light, high_light )\n", - " psnr = []\n", - " for i in range(len(el_images)):\n", - " el_img = cv2.imread(el_images[i], 1)\n", - " hl_img = cv2.imread(hl_images[i], 1)\n", - " psnr.append(compute_psnr(el_img,hl_img))\n", - " average_psnr = np.mean(np.array(psnr))\n", - " return average_psnr" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fa0ccd13-8824-4237-94dd-c406811e25be", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "folder = [\"OUTPUT_32b_CPU\", \"OUTPUT_8b_DSP\"]\n", - "from PIL import Image\n", - "dict_folder_wise={}\n", - "for j in range(0,len(folder)):\n", - " IMAGES_EL = []# EL: Enhanced Light (model output)\n", - " File_LL=[]\n", - " File_EL=[]\n", - " File_HL=[]\n", - " dict_folder_wise[str(folder[j])]=[]\n", - " for i in range(0,15):\n", - " # for prediction img\n", - " IMAGES_EL.append(post_process_enhanced(folder[j]+\"/Result_\"+str(i)+\"/363.raw\")) \n", - " im = Image.fromarray(IMAGES_EL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/prediction.jpeg\")\n", - " File_EL.append(folder[j]+\"/Result_\"+str(i)+\"/prediction.jpeg\")\n", - " \n", - " #for ground truth HL\n", - " im = Image.fromarray(IMAGES_HL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_HL.jpeg\")\n", - " File_HL.append(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_HL.jpeg\")\n", - " \n", - " #for ground truth LL\n", - " im = Image.fromarray(IMAGES_LL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_LL.jpeg\")\n", - " File_LL.append(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_LL.jpeg\")\n", - " dict_folder_wise[str(folder[j])].append([File_LL[i],File_HL[i],File_EL[i]])\n", - " print(folder[j],\" :: \",evaluate_average_psnr(File_EL,File_HL))\n", - " print(\"\\n============================\\n\")" - ] - }, - { - "cell_type": "markdown", - "id": "0be9fd7f-0436-4e8c-abd9-100be31845b4", - "metadata": {}, - "source": [ - "### Comaparison of Output between Non Quantized Model and Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "302a0fd6-f69f-4e2e-a3ee-14895246c404", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "from matplotlib.pyplot import figure, imshow, axis\n", - "from matplotlib.image import imread\n", - "import random as rand\n", - "\n", - "def showImagesHorizontally(majors,index):\n", - " fig, axs = plt.subplots(1, 4, figsize=(12,4))\n", - " for nn, ax in enumerate(axs.flat):\n", - " if nn<=2:\n", - " column = majors[nn]\n", - " column_rec_name = column.replace('\\n', '_').replace(' ', '_')\n", - " image = imread(majors[nn])\n", - " ax.set_xlabel(majors[nn].split(\"/\")[-1])\n", - " else:\n", - " image=imread(dict_folder_wise['OUTPUT_8b_DSP'][index][-1])\n", - " ax.set_xlabel(\"Prediction 8b\")\n", - " ax.imshow(image)\n", - " \n", - " fig.suptitle(\"Comparison Between Ground Truth and Prediction\", fontsize=16)\n", - " plt.show()\n", - "\n", - "for index in range(0,15):\n", - " showImagesHorizontally(dict_folder_wise['OUTPUT_32b_CPU'][index],index)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b69ac507-97f6-423e-a242-8cd936f19801", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": 
"Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/02-low-light-enhancement/StableLLVE/generate_model.py b/models-for-solutions/02-low-light-enhancement/StableLLVE/generate_model.py deleted file mode 100644 index 8e829fb9..00000000 --- a/models-for-solutions/02-low-light-enhancement/StableLLVE/generate_model.py +++ /dev/null @@ -1,24 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= - -import torch -from model import UNet - - -#You can download the pretrained weight from here -#https://github.com/zkawfanx/StableLLVE/blob/main/checkpoint.pth - -model = UNet(n_channels=3, bilinear=True) -model.load_state_dict(torch.load('./checkpoint.pth',map_location=torch.device('cpu'))) - -dummy_input=torch.randn(1, 3,480,640) - -torch.onnx.export(model, dummy_input, "../StableLLVE.onnx", opset_version=11, verbose=False) -print("ONNX model saved Successfully") diff --git a/models-for-solutions/02-low-light-enhancement/Zero-DCE++/README.md b/models-for-solutions/02-low-light-enhancement/Zero-DCE++/README.md deleted file mode 100644 index b7364ee6..00000000 --- a/models-for-solutions/02-low-light-enhancement/Zero-DCE++/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# Low Light Image Enhancement using Zero-DCE++ - -| Field | Description | -| --- | --- | -| Model Name | StableLLVE | -| DNN Framwork | ONNX | -| Public Repo | https://github.com/Li-Chongyi/Zero-DCE_extension.git | -| Paper | NA | -| Accuracy Metric | PSNR | -| Pre-Process | cv2.resize, cv2.cvtColor(img, cv2.COLOR_BGR2RGB), np.clip | -| post-Process| np.reshape, np.clip, transpose | - -## Pre-requisites - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - - -## Changes to open source Repository - -None - -## Pre-Trained Model - -Please refer to python file 'generate_model.py' for detailed steps to prepare Pre-Trained Model - -## Convert Model to DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Quantization of DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Make Inference, Verify output. - -Please refer to notebook for detailed steps to making inference, verifying model output - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. 
and/or its subsidiaries.* \ No newline at end of file diff --git a/models-for-solutions/02-low-light-enhancement/Zero-DCE++/Zero-DCE++.ipynb b/models-for-solutions/02-low-light-enhancement/Zero-DCE++/Zero-DCE++.ipynb deleted file mode 100644 index 234d9893..00000000 --- a/models-for-solutions/02-low-light-enhancement/Zero-DCE++/Zero-DCE++.ipynb +++ /dev/null @@ -1,620 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "4b2a00a7-9ca4-4596-a65c-206bef03d584", - "metadata": {}, - "source": [ - "# Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c9e4c774-316f-4a8b-91c2-589e8ac0af61", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"#rawfiles\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"ZERO_DCE++\"\n", - "os.environ['DLC32']=\"zero_dce++_fp32.dlc\"\n", - "os.environ['DLC8']=\"zero_dce++_Q_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"ZERO_DCE++\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a643b251-c4e1-4afb-84d4-8f5e7b945e5c", - "metadata": {}, - "outputs": [], - "source": [ - "from PIL import Image\n", - "import glob\n", - "import os\n", - "import cv2\n", - "import numpy as np\n", - "import torch\n", - "import os\n", - "import shutil" - ] - }, - { - "cell_type": "markdown", - "id": "e460c722-cbc1-45b3-b3c9-5aa5cfbc1523", - "metadata": {}, - "source": [ - "# Getting The Model\n", - "\n", - "- **If You Already have the models in the dlc folder no need to run this cell**\n", - "- [ https://github.com/Li-Chongyi/Zero-DCE_extension.git ](Link of the Actual Model)\n", - "- Zero-Reference Deep Curve Estimation (Zero-DCE)\n", - "- This is a very light-weight model,a tiny network with just 10K parameters. 
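To double-check the parameter count quoted here, a small helper can be run against the repository's model class once the clone below has completed; the `enhance_net_nopool` import and the scale factor of 12 follow the repo's `generate_model.py` and are assumptions about the cloned sources rather than part of the original notebook:

```python
# Hedged sketch: count trainable parameters of a torch.nn.Module, e.g. to
# confirm the ~10K-parameter figure for Zero-DCE++. The commented import is
# from the cloned Zero-DCE_extension repo and is assumed to be on sys.path.
import torch

def count_parameters(net: torch.nn.Module) -> int:
    return sum(p.numel() for p in net.parameters() if p.requires_grad)

# from model import enhance_net_nopool             # provided by the cloned repo
# print(count_parameters(enhance_net_nopool(12)))  # scale factor as in generate_model.py
print(count_parameters(torch.nn.Linear(8, 4)))     # toy module, prints 36
```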
Zero-DCE++ has a fast inference speed than Zero-DCE" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6554e47f-9093-4b88-88c5-8f28c38ece72", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "git clone https://github.com/Li-Chongyi/Zero-DCE_extension\n", - "#git reset --hard 808e70644191a63c936bd4ce73ce3f10fbc02ec8" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6c360903-78cd-46de-bc01-4d86e0862904", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp generate_model.py Zero-DCE_extension/Zero-DCE++/\n", - "cd Zero-DCE_extension/Zero-DCE++/\n", - "python generate_model.py" - ] - }, - { - "cell_type": "markdown", - "id": "053f1972-42af-47d4-9387-6c14247ee205", - "metadata": {}, - "source": [ - "#### Getting the Non Quantized DLC File" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cfe155c5-a13c-4fb3-87ec-c13426f18478", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc -i zero_dce++.onnx -o dlc/zero_dce++_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "49ee56ac-31ab-4aff-9270-c936691de3a1", - "metadata": {}, - "source": [ - "#### Understanding the Architecture of the Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a0499d25-f6e7-4f6f-85d7-0f1b6fda04ee", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash \n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-viewer -i dlc/zero_dce++_fp32.dlc -s zero_dce++.html\n", - "snpe-dlc-info -i dlc/zero_dce++_fp32.dlc>zero_dce++.txt" - ] - }, - { - "cell_type": "markdown", - "id": "fcff1daa-24d1-43c0-96ff-dfd7c368f2c5", - "metadata": {}, - "source": [ - "#### Getting the data\n", - "Use the dataset of your choice to validate the pre-processing, and post processing steps given in this notebook\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2343f6e1-bc94-4e28-b611-2fe537212cab", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "pip install gdown\n", - "gdown --no-check-certificate 157bjO1_cFuSd0HWDUuAmcHRJDVyWpOxB\n", - "unzip LOLdataset.zip\n", - "rm -rf LOLdataset.zip\n", - "rm -rf our485\n", - "mkdir raw" - ] - }, - { - "cell_type": "markdown", - "id": "e45ae551-89d4-4469-ad15-70779422bc8d", - "metadata": {}, - "source": [ - "## Preprocessing the data to generate raw file" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ee426585-e4d4-47d9-b753-0bedb0edabcb", - "metadata": {}, - "outputs": [], - "source": [ - "def preprocess(ll_img, hl_img):\n", - " ll_img = np.array(ll_img, dtype='float32')\n", - " hl_img = np.array(hl_img, dtype='float32') \n", - " ll_img = np.uint8(np.clip(ll_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " hl_img = np.uint8(np.clip(hl_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " ll_img = np.uint8(ll_img)\n", - " hl_img = np.uint8(hl_img) \n", - " ll_img = convert_image(ll_img, source='array', target='[0, 1]')\n", - " hl_img = convert_image(hl_img, source='array', target='[0, 1]')\n", - " return ll_img, hl_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2c20d6a7-193e-4b26-8a91-f78bf57b70fc", - "metadata": {}, - "outputs": [], - "source": [ - "def convert_image(img, source, target):\n", - " if source == 'array':\n", - " img = torch.from_numpy(img.transpose((2, 0, 1))).contiguous()#chw\n", - " img = img.to(dtype=torch.float32).div(255) \n", - " elif source 
== '[0, 1]':\n", - " img = torch.clamp(img, 0, 1) # useful to post-process output of models that can overspill\n", - " if target == '[0, 1]':\n", - " pass # already in [0, 1]\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7853cff7-50b0-44cb-9162-dc5bce62c117", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process(img):\n", - " img = img.detach().cpu().numpy()\n", - " img = np.clip(255. * img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0797c27b-0ee9-42aa-87bb-b907e7d5cb3e", - "metadata": {}, - "outputs": [], - "source": [ - "def load_dataset(test_images_dir):\n", - " # Input images for the model\n", - " INPUTS_LL = []\n", - " # Post-processed images for visualization\n", - " IMAGES_LL = [] # LL:Low Light\n", - " IMAGES_HL = [] # HL:High Light\n", - "\n", - " # Load the test images\n", - " count=0\n", - " for img_path in glob.glob(os.path.join(test_images_dir, '*')):\n", - " l_img = cv2.resize(cv2.imread(img_path),[480,640],interpolation=cv2.INTER_CUBIC)\n", - " l_img = cv2.cvtColor(l_img, cv2.COLOR_BGR2RGB)\n", - " \n", - " h_img = cv2.resize(cv2.imread(img_path.replace(\"low\",\"high\")),[480,640],interpolation=cv2.INTER_CUBIC)\n", - " h_img = cv2.cvtColor(h_img, cv2.COLOR_BGR2RGB)\n", - " \n", - " ll_img, hl_img = preprocess(l_img, h_img)#chw\n", - "\n", - " INPUTS_LL.append(ll_img)#chw\n", - " IMAGES_LL.append(post_process(ll_img))#hwc\n", - " IMAGES_HL.append(post_process(hl_img))#hwc\n", - " return INPUTS_LL, IMAGES_LL, IMAGES_HL" - ] - }, - { - "cell_type": "markdown", - "id": "a5d3057e-95bd-4a2a-9b5b-808451286195", - "metadata": {}, - "source": [ - "**Converting the low dataset to raw file format to give it to the model and do the inference**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a6a242ba-77c2-45e4-ad32-8d4c8a55d925", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('raw', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "84b153e6-eca7-49d4-9c3b-a0c084c96da8", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"eval15/low\"\n", - "INPUTS_LL, IMAGES_LL, IMAGES_HL = load_dataset(test_images_dir)\n", - "print(len(INPUTS_LL),len(IMAGES_LL),len(IMAGES_HL))\n", - "for i, img_ll in enumerate(INPUTS_LL):\n", - " img_ll = img_ll.cpu().detach().numpy()\n", - " img_ll = img_ll.astype(np.float32)\n", - " fid = open(\"raw/img_\"+str(i)+ \".raw\", 'wb')\n", - " img_ll.tofile(fid)\n", - " fid.close()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "544ef07f-7039-4ff4-a22b-2fc9c178f7c5", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process_enhanced(img):\n", - " img = np.fromfile(img, np.float32)\n", - " img = img.reshape((3,640,480)).astype(np.float32)\n", - " img = np.clip((img+1)/2 * 255. 
, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "de77c3b3-171b-4e85-9121-603b7ce3a821", - "metadata": {}, - "outputs": [], - "source": [ - "total_iter = 15\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "with open(\"list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"raw/img_{}.raw\\n\".format(i))" - ] - }, - { - "cell_type": "markdown", - "id": "f32a298e-0465-47ab-8339-bc201fa0ec55", - "metadata": {}, - "source": [ - "#### Getting the Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "07121839-e02a-42da-91c2-d4f43f608a87", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize --input_dlc dlc/zero_dce++_fp32.dlc --input_list list.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc dlc/zero_dce++_Q_w8a8.dlc --enable_htp --htp_socs sm8550" - ] - }, - { - "cell_type": "markdown", - "id": "e8d764c2-9ce6-4b28-bf8c-e37b5fc4ca64", - "metadata": {}, - "source": [ - "## Inferencing the Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f82ac63-df97-4323-89be-1b5bfbe00ea6", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fbab7228-310a-45c9-b847-36f9edacebb2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8495895d-a16b-4915-b57e-f7d67345136a", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4564a1d7-5e80-4db2-808e-f44a89981155", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push dlc/$DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push dlc/$DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push raw /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "477485b8-6b15-4b5f-8458-0afccb7ff461", - "metadata": {}, - "source": [ - "#### Inferencing the Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d3b10768-1d1d-4cf5-aa55-fd6213907959", - "metadata": {}, 
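Before moving on to the on-device runs below, it can help to confirm the .raw round-trip on the host, since `snpe-net-run` consumes and produces flat float32 tensors with no shape metadata. A minimal sketch, assuming the same 3x640x480 CHW layout produced by the preprocessing above (the file name is illustrative):

```python
# Sketch of the .raw round-trip used with snpe-net-run: inputs are written as
# flat float32 CHW buffers (native byte order) and outputs are read back the
# same way. Shape follows the preprocessing above; the file name is illustrative.
import numpy as np

chw = np.random.rand(3, 640, 480).astype(np.float32)   # C,H,W as written by tofile()
chw.tofile("roundtrip_check.raw")

back = np.fromfile("roundtrip_check.raw", np.float32).reshape(3, 640, 480)
assert np.array_equal(chw, back)

hwc = back.transpose(1, 2, 0)                           # H,W,C for saving with PIL/cv2
print(hwc.shape)                                        # (640, 480, 3)
```

If the shape or channel order is wrong at this stage, the on-device outputs will still be produced but will decode to visibly scrambled images.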
- "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=zero_dce++_Q_w8a8.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"ZERO_DCE++\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list list.txt --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "cf8d0b61-35f1-4a9f-9d93-a0425e6a34f7", - "metadata": {}, - "source": [ - "#### Inferencing the 32b Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "96dfad6b-dc55-4da9-9497-d00164d3ce76", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=zero_dce++_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"ZERO_DCE++\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list list.txt --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "103eacb5-c00d-4309-b424-1e5f351a843d", - "metadata": {}, - "source": [ - "#### Pulling the Output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bdd0dfc5-df7b-4072-a1fc-1cead18d9fdf", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "rm -rf OUTPUT_8b_DSP\n", - "rm -rf OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f758c036-00bc-4f2f-9a80-4bc5a37ffa98", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "69824b00-6426-476f-ab1d-0edcfa2915dc", - "metadata": {}, - "source": [ - "### Post Processing and Calculating PSNR Value" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e692f417-fddb-4451-a414-99c83e3e31a3", - "metadata": {}, - "outputs": [], - "source": [ - "import math\n", - "def compute_psnr(img_pred, img_true, data_range=255., eps=1e-8):\n", - " err = (img_pred - img_true) ** 2 \n", - " err = np.mean(err)\n", - " return 10. 
* math.log10((data_range ** 2) / (err + eps))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "062f6c5d-6cee-4b01-a472-e77ee24d19a8", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_average_psnr(el_images, hl_images): #(enhanced_light, high_light )\n", - " psnr = []\n", - " for i in range(len(el_images)):\n", - " el_img = cv2.imread(el_images[i], 1)\n", - " hl_img = cv2.imread(hl_images[i], 1)\n", - " psnr.append(compute_psnr(el_img,hl_img))\n", - " average_psnr = np.mean(np.array(psnr))\n", - " return average_psnr" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8e5e243b-187d-4bbf-8c5d-2e89143bbbf6", - "metadata": {}, - "outputs": [], - "source": [ - "folder = [\"OUTPUT_32b_CPU\", \"OUTPUT_8b_DSP\"]\n", - "from PIL import Image\n", - "dict_folder_wise={}\n", - "for j in range(0,len(folder)):\n", - " IMAGES_EL = []# EL: Enhanced Light (model output)\n", - " File_LL=[]\n", - " File_EL=[]\n", - " File_HL=[]\n", - " dict_folder_wise[str(folder[j])]=[]\n", - " for i in range(0,15):\n", - " # for prediction img\n", - " IMAGES_EL.append(post_process_enhanced(folder[j]+\"/Result_\"+str(i)+\"/110.raw\")) \n", - " im = Image.fromarray(IMAGES_EL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/prediction.jpeg\")\n", - " File_EL.append(folder[j]+\"/Result_\"+str(i)+\"/prediction.jpeg\")\n", - " \n", - " #for ground truth HL\n", - " im = Image.fromarray(IMAGES_HL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_HL.jpeg\")\n", - " File_HL.append(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_HL.jpeg\")\n", - " \n", - " #for ground truth LL\n", - " im = Image.fromarray(IMAGES_LL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_LL.jpeg\")\n", - " File_LL.append(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_LL.jpeg\")\n", - " dict_folder_wise[str(folder[j])].append([File_LL[i],File_HL[i],File_EL[i]])\n", - " print(folder[j],\" :: \",evaluate_average_psnr(File_EL,File_HL))\n", - " print(\"\\n============================\\n\")" - ] - }, - { - "cell_type": "markdown", - "id": "92f76ad5-4feb-4eb6-8e46-52d659824d06", - "metadata": {}, - "source": [ - "### Comaparison of Output between Non Quantized Model and Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f96ad4a0-cf0f-481c-86e7-6f96b103cc64", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "from matplotlib.pyplot import figure, imshow, axis\n", - "from matplotlib.image import imread\n", - "import random as rand\n", - "\n", - "def showImagesHorizontally(majors,index):\n", - " fig, axs = plt.subplots(1, 4, figsize=(12,4))\n", - " for nn, ax in enumerate(axs.flat):\n", - " if nn<=2:\n", - " column = majors[nn]\n", - " column_rec_name = column.replace('\\n', '_').replace(' ', '_')\n", - " image = imread(majors[nn])\n", - " ax.set_xlabel(majors[nn].split(\"/\")[-1])\n", - " else:\n", - " image=imread(dict_folder_wise['OUTPUT_8b_DSP'][index][-1])\n", - " ax.set_xlabel(\"Prediction 8b\")\n", - " ax.imshow(image)\n", - " \n", - " fig.suptitle(\"Comparison Between Ground Truth and Prediction\", fontsize=16)\n", - " plt.show()\n", - "\n", - "for index in range(0,15):\n", - " showImagesHorizontally(dict_folder_wise['OUTPUT_32b_CPU'][index],index)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", 
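The comparison above is visual; a quick numeric check of how far the W8A8 DSP outputs drift from the FP32 CPU outputs can be run alongside it. A hedged sketch, assuming the folders pulled earlier and the Zero-DCE++ output tensor name `110.raw` used in the cells above:

```python
# Hedged sketch: per-image numeric comparison of the FP32 (CPU) and W8A8 (DSP)
# outputs pulled earlier. Paths and the output tensor name (110.raw) follow the
# Zero-DCE++ cells above; adjust them if your SDK version names tensors differently.
import numpy as np

def load_raw(path, shape=(3, 640, 480)):
    return np.fromfile(path, np.float32).reshape(shape).ravel()

for i in range(15):
    fp32 = load_raw(f"OUTPUT_32b_CPU/Result_{i}/110.raw")
    w8a8 = load_raw(f"OUTPUT_8b_DSP/Result_{i}/110.raw")
    max_diff = float(np.abs(fp32 - w8a8).max())
    cosine = float(np.dot(fp32, w8a8) / (np.linalg.norm(fp32) * np.linalg.norm(w8a8) + 1e-12))
    print(f"Result_{i}: max|diff| = {max_diff:.4f}, cosine similarity = {cosine:.5f}")
```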
- "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/02-low-light-enhancement/Zero-DCE++/generate_model.py b/models-for-solutions/02-low-light-enhancement/Zero-DCE++/generate_model.py deleted file mode 100644 index b3578131..00000000 --- a/models-for-solutions/02-low-light-enhancement/Zero-DCE++/generate_model.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= - -import torch -import model - - -#You can download the weights from this below link -#https://github.com/Li-Chongyi/Zero-DCE_extension/tree/main/Zero-DCE%2B%2B/snapshots_Zero_DCE%2B%2B -#You can put this weight to your appropiate path -scale_factor = 12 - -DCE_net = model.enhance_net_nopool(scale_factor) -DCE_net.load_state_dict(torch.load('snapshots_Zero_DCE++/Epoch99.pth',map_location=torch.device('cpu'))) - -dummy_input=torch.randn(1, 3, 2160,3840) - - -torch.onnx.export(DCE_net, dummy_input, "../../zero_dce++.onnx", opset_version=11, verbose=False) - -print("ONNX model saved Successfully") diff --git a/models-for-solutions/02-low-light-enhancement/Zero-DCE/README.md b/models-for-solutions/02-low-light-enhancement/Zero-DCE/README.md deleted file mode 100644 index c49b5485..00000000 --- a/models-for-solutions/02-low-light-enhancement/Zero-DCE/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# Low Light Image Enhancement using Zero-DCE - -| Field | Description | -| --- | --- | -| Model Name | Zero-DCE | -| DNN Framwork | ONNX | -| Public Repo | https://github.com/Li-Chongyi/Zero-DCE.git | -| Paper | NA | -| Accuracy Metric | PSNR | -| Pre-Process | cv2.resize, cv2.cvtColor(img, cv2.COLOR_BGR2RGB), np.clip | -| post-Process| np.reshape, np.clip, transpose | - -## Pre-requisites - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - - -## Changes to open source Repository - -Changes made to open-source repository to generate pre-trained models is given as a patch file - Zero_DCE.patch - -## Pre-Trained Model - -Please refer to python file 'generate_model.py' for detailed steps to prepare Pre-Trained Model - -## Convert Model to DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Quantization of DLC - -Please refer to notebook for detailed steps to converting pre-trained model to DLC - -## Make Inference, Verify output. - -Please refer to notebook for detailed steps to making inference, verifying model output - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. 
and/or its subsidiaries.* \ No newline at end of file diff --git a/models-for-solutions/02-low-light-enhancement/Zero-DCE/Zero-DCE.ipynb b/models-for-solutions/02-low-light-enhancement/Zero-DCE/Zero-DCE.ipynb deleted file mode 100644 index 24e40b02..00000000 --- a/models-for-solutions/02-low-light-enhancement/Zero-DCE/Zero-DCE.ipynb +++ /dev/null @@ -1,636 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "43183bf7-00b7-49a4-b2cc-901deb564e9b", - "metadata": {}, - "source": [ - "# Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "45f44f67-3fd7-4c6f-92ed-2988d3a65fcc", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"#rawfiles\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"ZERO_DCE\"\n", - "os.environ['DLC32']=\"dlc/zero_dce_fp32.dlc\"\n", - "os.environ['DLC8']=\"dlc/zero_dce_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"ZERO_DCE\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\"#fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "07d6bc1a-c090-4b1e-8bcc-b7d968d23ec9", - "metadata": {}, - "outputs": [], - "source": [ - "from PIL import Image\n", - "import glob\n", - "import os\n", - "import cv2\n", - "import numpy as np\n", - "import torch\n", - "import os\n", - "import shutil" - ] - }, - { - "cell_type": "markdown", - "id": "09c6c68b-b058-4eaa-8c0d-fa6cefc2f343", - "metadata": {}, - "source": [ - "# Getting The Model\n", - "\n", - "- **If You Already have the models in the dlc folder no need to run this cell**\n", - "- [ https://github.com/Li-Chongyi/Zero-DCE.git ](Link of the Actual Model)\n", - "- Zero-Reference Deep Curve Estimation (Zero-DCE)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "54314caf-0b04-4f41-9704-f0eb99c640b0", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "git clone https://github.com/Li-Chongyi/Zero-DCE\n", - "#git reset --hard 808e70644191a63c936bd4ce73ce3f10fbc02ec8\n", - "cp Zero_DCE.patch Zero-DCE/Zero-DCE_code" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9a1db4a8-8451-40b7-9c40-ff19032aff50", - "metadata": {}, - "outputs": [], - "source": [ - "command1=\"cd Zero-DCE;cd Zero-DCE_code;patch -i Zero_DCE.patch\"\n", - "os.system(command1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aa347c43-7686-41a6-8033-36d1e35d7dbf", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp generate_model.py Zero-DCE/Zero-DCE_code/\n", - "cd Zero-DCE/Zero-DCE_code/\n", - "python generate_model.py" - ] - }, - { - "cell_type": "markdown", - "id": "f442d88d-c8c2-4517-9d16-badc1623b388", - "metadata": {}, - "source": [ - "#### Getting the Non Quantized DLC File" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "62a7057f-7359-44e9-a2f6-1171f232f63d", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc -i zero_dce.onnx -o 
dlc/zero_dce_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "e3cd938b-1c03-423f-8e9a-27671ce50513", - "metadata": {}, - "source": [ - "#### Understanding the Architecture of the Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9cdee03e-498d-434d-98d9-5cfc7e8af9dc", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash \n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-viewer -i dlc/zero_dce_fp32.dlc -s zero_dce.html\n", - "snpe-dlc-info -i dlc/zero_dce_fp32.dlc>zero_dce.txt" - ] - }, - { - "cell_type": "markdown", - "id": "2cfeaa71-4ecd-40d8-a928-28c953cf8100", - "metadata": {}, - "source": [ - "#### Getting the data\n", - "Use the dataset of your choice to validate the pre-processing, and post processing steps given in this notebook\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b1f74234-5a49-4f45-b339-9273243b2244", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "pip install gdown\n", - "gdown --no-check-certificate 157bjO1_cFuSd0HWDUuAmcHRJDVyWpOxB\n", - "unzip LOLdataset.zip\n", - "rm -rf LOLdataset.zip\n", - "rm -rf our485\n", - "mkdir raw" - ] - }, - { - "cell_type": "markdown", - "id": "55639ea2-c396-4d10-bec4-35b8c54fcf4e", - "metadata": {}, - "source": [ - "## Preprocessing the data to generate raw file" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9308f87d-bf77-4ebd-9dc1-d636241b24ec", - "metadata": {}, - "outputs": [], - "source": [ - "def preprocess(ll_img, hl_img):\n", - " ll_img = np.array(ll_img, dtype='float32')\n", - " hl_img = np.array(hl_img, dtype='float32')\n", - " \n", - " ll_img = np.uint8(np.clip(ll_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " hl_img = np.uint8(np.clip(hl_img, 0., 255.)) # this is to simulate matlab's imwrite operation\n", - " \n", - " ll_img = np.uint8(ll_img)\n", - " hl_img = np.uint8(hl_img)\n", - " \n", - " ll_img = convert_image(ll_img, source='array', target='[0, 1]')\n", - " hl_img = convert_image(hl_img, source='array', target='[0, 1]')\n", - " \n", - " return ll_img, hl_img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fc03d987-3253-40c2-a1bb-614c9324f3b9", - "metadata": {}, - "outputs": [], - "source": [ - "def convert_image(img, source, target):\n", - " if source == 'array':\n", - " img = torch.from_numpy(img.transpose((2, 0, 1))).contiguous()#chw\n", - " img = img.to(dtype=torch.float32).div(255) \n", - " elif source == '[0, 1]':\n", - " img = torch.clamp(img, 0, 1) # useful to post-process output of models that can overspill\n", - " \n", - " if target == '[0, 1]':\n", - " pass # already in [0, 1]\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4bec0bdf-8131-455a-a054-a9c5e6a4adf3", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process(img):\n", - " img = img.detach().cpu().numpy()\n", - " img = np.clip(255. 
* img, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2cd61e97-1904-4b63-8ae2-da33ae80fb79", - "metadata": {}, - "outputs": [], - "source": [ - "def load_dataset(test_images_dir):\n", - " # Input images for the model\n", - " INPUTS_LL = []\n", - " # Post-processed images for visualization\n", - " IMAGES_LL = [] # LL:Low Light\n", - " IMAGES_HL = [] # HL:High Light\n", - "\n", - " # Load the test images\n", - " count=0\n", - " for img_path in glob.glob(os.path.join(test_images_dir, '*')):\n", - " l_img = cv2.resize(cv2.imread(img_path),[480,640],interpolation=cv2.INTER_CUBIC)\n", - " l_img = cv2.cvtColor(l_img, cv2.COLOR_BGR2RGB)\n", - " \n", - " h_img = cv2.resize(cv2.imread(img_path.replace(\"low\",\"high\")),[480,640],interpolation=cv2.INTER_CUBIC)\n", - " h_img = cv2.cvtColor(h_img, cv2.COLOR_BGR2RGB)\n", - " \n", - " ll_img, hl_img = preprocess(l_img, h_img)#chw\n", - "\n", - " INPUTS_LL.append(ll_img)#chw\n", - " IMAGES_LL.append(post_process(ll_img))#hwc\n", - " IMAGES_HL.append(post_process(hl_img))#hwc\n", - " return INPUTS_LL, IMAGES_LL, IMAGES_HL" - ] - }, - { - "cell_type": "markdown", - "id": "e3a6c6a7-b78f-4be1-b9bc-36e8a0828825", - "metadata": {}, - "source": [ - "**Converting the low dataset to raw file format to give it to the model and do the inference**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "514d8cc1-db8b-46ed-b50e-ec3020d899c0", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('raw', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6c14f848-3477-4997-9b31-d131a302ebbb", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"eval15/low\"\n", - "INPUTS_LL, IMAGES_LL, IMAGES_HL = load_dataset(test_images_dir)\n", - "print(len(INPUTS_LL),len(IMAGES_LL),len(IMAGES_HL))\n", - "for i, img_ll in enumerate(INPUTS_LL):\n", - " img_ll = img_ll.cpu().detach().numpy()\n", - " img_ll = img_ll.astype(np.float32)\n", - " fid = open(\"raw/img_\"+str(i)+ \".raw\", 'wb')\n", - " img_ll.tofile(fid)\n", - " fid.close()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "efd5dc8f-cf00-4b20-a3c3-3573d4feab86", - "metadata": {}, - "outputs": [], - "source": [ - "total_iter = 15\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "with open(\"list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"raw/img_{}.raw\\n\".format(i))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cc56770f-9799-42d3-bc64-535b761e73e2", - "metadata": {}, - "outputs": [], - "source": [ - "def post_process_enhanced(img):\n", - " img = np.fromfile(img, np.float32)\n", - " img = img.reshape((3,640,480)).astype(np.float32)\n", - " img = np.clip(img* 255. 
, 0., 255.)\n", - " img = np.uint8(img)\n", - " img = img.transpose(1, 2, 0)#hwc\n", - " return img" - ] - }, - { - "cell_type": "markdown", - "id": "e614714a-c056-4b90-9b1c-ca810fb7a350", - "metadata": {}, - "source": [ - "#### Getting the Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e09213a6-cb9f-47e5-a76e-e690a16e08c1", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize --input_dlc dlc/zero_dce_fp32.dlc --input_list list.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc dlc/zero_dce_w8a8.dlc --enable_htp --htp_socs sm8550" - ] - }, - { - "cell_type": "markdown", - "id": "abbd0a4b-4283-4f47-b828-0e676303d1d7", - "metadata": {}, - "source": [ - "## Inferencing the Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0f5c04c4-d929-4a82-8d57-f75d4ffc09bc", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "82daade8-c249-4734-a517-3d9388c3df4d", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "662510c5-0670-4829-95a1-166c86cdb852", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f2be12c-bac5-4954-9283-dbbcd47db253", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push raw /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "e0d48fd7-cb86-4ed8-aa76-d9ac99a744ec", - "metadata": {}, - "source": [ - "#### Inferencing the Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0923cac6-d195-4d05-aa7a-bbc14a72597a", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=zero_dce_w8a8.dlc\n", - "export 
ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"ZERO_DCE\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list list.txt --set_output_tensors 87 --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "bda3bedb-1454-4f17-af7d-41ceb9de3a85", - "metadata": {}, - "source": [ - "#### Inferencing the Non-Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3a6015ea-9d72-4da4-aca4-4dec8345f214", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=zero_dce_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"ZERO_DCE\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list list.txt --set_unconsumed_as_output --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "eab0b961-9dac-4d72-a810-6d00130e4739", - "metadata": {}, - "source": [ - "#### Pulling the Output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8aa6743d-68cf-4c49-a845-cfd5d07c49bc", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "rm -rf OUTPUT_8b_DSP\n", - "rm -rf OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5c1971ae-e8e4-4682-849f-46ab905a9462", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "974b97e3-83de-4afb-91e7-4df60dbde602", - "metadata": {}, - "source": [ - "### Post Processing and Calculating PSNR Value" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "02f5d42d-1a0c-423d-a4b4-78e59b251add", - "metadata": {}, - "outputs": [], - "source": [ - "import math\n", - "def compute_psnr(img_pred, img_true, data_range=255., eps=1e-8):\n", - " err = (img_pred - img_true) ** 2 \n", - " err = np.mean(err)\n", - " return 10. 
* math.log10((data_range ** 2) / (err + eps))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ecf3f6ff-f9fa-43ab-8e61-8993b2b41db1", - "metadata": {}, - "outputs": [], - "source": [ - "def evaluate_average_psnr(el_images, hl_images): #(enhanced_light, high_light )\n", - " psnr = []\n", - " for i in range(len(el_images)):\n", - " el_img = cv2.imread(el_images[i], 1)\n", - " hl_img = cv2.imread(hl_images[i], 1)\n", - " psnr.append(compute_psnr(el_img,hl_img))\n", - " average_psnr = np.mean(np.array(psnr))\n", - " return average_psnr" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1f28ee33-1c5a-41d2-9c4d-d193df63b3b3", - "metadata": {}, - "outputs": [], - "source": [ - "folder = [\"OUTPUT_32b_CPU\", \"OUTPUT_8b_DSP\"]\n", - "from PIL import Image\n", - "dict_folder_wise={}\n", - "for j in range(0,len(folder)):\n", - " IMAGES_EL = []# EL: Enhanced Light (model output)\n", - " File_LL=[]\n", - " File_EL=[]\n", - " File_HL=[]\n", - " dict_folder_wise[str(folder[j])]=[]\n", - " for i in range(0,15):\n", - " # for prediction img\n", - " IMAGES_EL.append(post_process_enhanced(folder[j]+\"/Result_\"+str(i)+\"/87.raw\")) \n", - " im = Image.fromarray(IMAGES_EL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/prediction.jpeg\")\n", - " File_EL.append(folder[j]+\"/Result_\"+str(i)+\"/prediction.jpeg\")\n", - " \n", - " #for ground truth HL\n", - " im = Image.fromarray(IMAGES_HL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_HL.jpeg\")\n", - " File_HL.append(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_HL.jpeg\")\n", - " \n", - " #for ground truth LL\n", - " im = Image.fromarray(IMAGES_LL[i])\n", - " im.save(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_LL.jpeg\")\n", - " File_LL.append(folder[j]+\"/Result_\"+str(i)+\"/groundtruth_LL.jpeg\")\n", - " dict_folder_wise[str(folder[j])].append([File_LL[i],File_HL[i],File_EL[i]])\n", - " print(folder[j],\" :: \",evaluate_average_psnr(File_EL,File_HL))\n", - " print(\"\\n============================\\n\")" - ] - }, - { - "cell_type": "markdown", - "id": "69476a0f-553b-49d2-9c9b-f817e34fcf2f", - "metadata": {}, - "source": [ - "### Comaparison of Output between Non Quantized Model and Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d1608474-4c3f-44da-9e4c-308bf8920078", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "from matplotlib.pyplot import figure, imshow, axis\n", - "from matplotlib.image import imread\n", - "import random as rand\n", - "\n", - "def showImagesHorizontally(majors,index):\n", - " fig, axs = plt.subplots(1, 4, figsize=(12,4))\n", - " for nn, ax in enumerate(axs.flat):\n", - " if nn<=2:\n", - " column = majors[nn]\n", - " column_rec_name = column.replace('\\n', '_').replace(' ', '_')\n", - " image = imread(majors[nn])\n", - " ax.set_xlabel(majors[nn].split(\"/\")[-1])\n", - " else:\n", - " image=imread(dict_folder_wise['OUTPUT_8b_DSP'][index][-1])\n", - " ax.set_xlabel(\"Prediction 8b\")\n", - " ax.imshow(image)\n", - " \n", - " fig.suptitle(\"Comparison Between Ground Truth and Prediction\", fontsize=16)\n", - " plt.show()\n", - "\n", - "for index in range(0,15):\n", - " showImagesHorizontally(dict_folder_wise['OUTPUT_32b_CPU'][index],index)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", 
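Note that the evaluation above writes JPEGs and reads them back before scoring, so JPEG compression is folded into the PSNR numbers. A variant that scores the raw outputs directly is sketched below; it assumes `IMAGES_HL` and `post_process_enhanced` from the earlier cells are still in scope, and uses output tensor `87.raw` as in the Zero-DCE cells above:

```python
# Hedged variant of the evaluation above: PSNR computed directly on the uint8
# arrays instead of on re-read JPEGs, so JPEG compression does not affect the
# score. Assumes IMAGES_HL and post_process_enhanced() from earlier cells.
import numpy as np

def psnr_from_arrays(pred, true, data_range=255., eps=1e-8):
    err = np.mean((pred.astype(np.float64) - true.astype(np.float64)) ** 2)
    return 10. * np.log10((data_range ** 2) / (err + eps))

for out_dir in ("OUTPUT_32b_CPU", "OUTPUT_8b_DSP"):
    scores = [psnr_from_arrays(post_process_enhanced(f"{out_dir}/Result_{i}/87.raw"),
                               IMAGES_HL[i])
              for i in range(15)]
    print(out_dir, "average PSNR (no JPEG re-encode):", float(np.mean(scores)))
```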
- "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/02-low-light-enhancement/Zero-DCE/Zero_DCE.patch b/models-for-solutions/02-low-light-enhancement/Zero-DCE/Zero_DCE.patch deleted file mode 100644 index 75268e09..00000000 --- a/models-for-solutions/02-low-light-enhancement/Zero-DCE/Zero_DCE.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/Zero-DCE_code/model.py b/Zero-DCE_code/model.py -index 3b710a5..a64cf8d 100644 ---- a/Zero-DCE_code/model.py -+++ b/Zero-DCE_code/model.py -@@ -1,8 +1,6 @@ - import torch - import torch.nn as nn - import torch.nn.functional as F --import math --#import pytorch_colors as colors - import numpy as np - - class enhance_net_nopool(nn.Module): diff --git a/models-for-solutions/02-low-light-enhancement/Zero-DCE/generate_model.py b/models-for-solutions/02-low-light-enhancement/Zero-DCE/generate_model.py deleted file mode 100644 index 5ae94191..00000000 --- a/models-for-solutions/02-low-light-enhancement/Zero-DCE/generate_model.py +++ /dev/null @@ -1,30 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= -import torch -import model - - -#You can download the weights from here -# https://github.com/Li-Chongyi/Zero-DCE/tree/master/Zero-DCE_code/snapshots - - -DCE_net = model.enhance_net_nopool() -DCE_net.load_state_dict(torch.load('snapshots/Epoch99.pth',map_location=torch.device('cpu'))) -DCE_net.eval() - -dummy_input=torch.randn(1, 3, 480,640) -torch.onnx.export(DCE_net, dummy_input, "../../zero_dce.onnx", opset_version=11, verbose=False) -print("ONNX model saved Successfully") - - - - - - diff --git a/models-for-solutions/03-object-detection/SSD MobileNet V2/README.md b/models-for-solutions/03-object-detection/SSD MobileNet V2/README.md deleted file mode 100644 index 7068dd10..00000000 --- a/models-for-solutions/03-object-detection/SSD MobileNet V2/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# Object Detection with SSD-MobileNetV2 Model - -| Field | Description | -| --- | --- | -| Model Name | SSD MobilenetV2 | -| DNN Framwork | ONNX | -| Public Repo | https://github.com/lufficc/SSD.git | -| Paper | NA | -| Accuracy Metric | mAP | - -## Pre-requisites - -* Please follow the instructions for setting up Qualcomm Neural Processing SDK using the [link](https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) provided. -* Tested this on SNPE-2.14.0 -* To install caffe follow the instructions from this [link](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) -* Please make torchvision version as 0.9.1 - - -## How to get the onnx model from opensource ? 
- - -```python -git clone https://github.com/lufficc/SSD.git -cd SSD/ -git reset --hard 68dc0a20efaf3997e58b616afaaaa21bf8ca3c05 -wget https://github.com/lufficc/SSD/releases/download/1.2/mobilenet_v2_ssd320_voc0712_v2.pth -patch -i ../changes_on_top_without_ABP-NMS.patch -python demo.py --config-file configs/mobilenet_v2_ssd320_voc0712.yaml --images -``` - - - -# Accuracy analysis - -- To check accuracy please run "SSD MobileNetV2 Accuracy Analysis.ipynb" jupyter notebook. -- To run any jupyter notebook, run below command. It will generate few links on the screen, pick the link with your machine name on it (host-name) and paste it in any browser. -- Navigate to the notebook ".ipynb" file and simply click that file. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - -# References - -1. SSD MobileNetV2 Model paper: https://arxiv.org/abs/1801.04381 -2. https://github.com/lufficc/SSD -3. 2017 Train Val dataset: http://images.cocodataset.org/annotations/annotations_trainval2017.zip diff --git a/models-for-solutions/03-object-detection/SSD MobileNet V2/SSD MobileNetV2 Accuracy Analysis.ipynb b/models-for-solutions/03-object-detection/SSD MobileNet V2/SSD MobileNetV2 Accuracy Analysis.ipynb deleted file mode 100644 index 068d81e7..00000000 --- a/models-for-solutions/03-object-detection/SSD MobileNet V2/SSD MobileNetV2 Accuracy Analysis.ipynb +++ /dev/null @@ -1,1268 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "da0b6f27-ce33-44c3-ae37-dbc170ce31ce", - "metadata": {}, - "source": [ - "#### Installing Necessary Libraries" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "81d0d960-ed61-475f-bb1c-0f78d6145636", - "metadata": {}, - "outputs": [], - "source": [ - "!pip3 install scikit-learn\n", - "!pip3 install bokeh\n", - "!pip3 install torchvision==0.9.0\n", - "!pip3 install scikit-learn\n", - "!pip3 install torch==1.8.0\n", - "!pip3 install onnx==1.8.1\n", - "!pip3 install onnxruntime==1.7.0\n", - "!pip3 install vizer\n", - "!pip3 install yacs" - ] - }, - { - "cell_type": "markdown", - "id": "aa6c9fd6-d8ed-4338-8dec-83341654d51e", - "metadata": {}, - "source": [ - "#### Importing Libraries" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3c977b77-b812-4536-92a8-2d3bb1c76efb", - "metadata": {}, - "outputs": [], - "source": [ - "import cv2\n", - "import glob\n", - "import os\n", - "import numpy as np\n", - "from PIL import Image\n", - "import re\n", - "import matplotlib.image as mpimg\n", - "import matplotlib.pyplot as plt\n", - "import cv2\n", - "import glob\n", - "import os\n", - "import numpy as np\n", - "from PIL import Image\n", - "import re\n", - "import matplotlib.image as mpimg\n", - "import matplotlib.pyplot as plt\n", - "import torch\n", - "import sklearn\n", - "from bokeh import plotting\n", - "from numpy.linalg import norm\n", - "from sklearn.metrics.pairwise import cosine_similarity\n", - "from sklearn.metrics import mean_squared_error\n", - "import math\n", - "from IPython.display import Image, display" - ] - }, - { - "cell_type": "markdown", - "id": "4aa6564c-97f2-4fb9-8a30-9805c9d01697", - "metadata": {}, - "source": [ - "## Getting the dataset\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c52d3e5c-f5ee-4650-9816-4632bc6e260b", - "metadata": {}, - "outputs": [], - "source": [ - "# User needs to download the dataset of their choice. 
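Since the notebook leaves the dataset choice to the user, one possible way to stage COCO val2017 (which the later preprocessing cells read from a `val2017` folder) is sketched below. The annotations URL is the one listed in this model's README references; the image zip URL is an assumption about the standard COCO download location:

```python
# Hedged sketch: fetch and unpack a COCO val2017 set for the accuracy analysis
# below. The annotations URL comes from the README references; the val2017.zip
# URL is assumed (standard COCO download location). Any dataset of choice works.
import pathlib
import urllib.request
import zipfile

downloads = {
    "annotations_trainval2017.zip": "http://images.cocodataset.org/annotations/annotations_trainval2017.zip",
    "val2017.zip": "http://images.cocodataset.org/zips/val2017.zip",  # assumed URL
}
for name, url in downloads.items():
    if not pathlib.Path(name).exists():
        urllib.request.urlretrieve(url, name)
    with zipfile.ZipFile(name) as zf:
        zf.extractall(".")  # yields ./annotations and ./val2017
```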
" - ] - }, - { - "cell_type": "markdown", - "id": "619b2ab6-2994-42ab-85b7-501a582dc913", - "metadata": {}, - "source": [ - "## Getting The Model" - ] - }, - { - "cell_type": "markdown", - "id": "ed303842-9d2c-49af-bbe8-cfb15ff57211", - "metadata": {}, - "source": [ - "### Clonning the repo and getting the onnx model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8e547f22-fdca-4fbc-924d-025065f0b070", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "git clone https://github.com/lufficc/SSD.git\n", - "cd SSD/\n", - "git reset --hard 68dc0a20efaf3997e58b616afaaaa21bf8ca3c05\n", - "wget https://github.com/lufficc/SSD/releases/download/1.2/mobilenet_v2_ssd320_voc0712_v2.pth" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "de134e1a-594b-4ad9-9df8-a3c23044b463", - "metadata": {}, - "outputs": [], - "source": [ - "command1=\"cp utils/changes_on_top_without_ABP-NMS.patch SSD/;cd SSD;patch -p1 < ./changes_on_top_without_ABP-NMS.patch;python demo.py --config-file configs/mobilenet_v2_ssd320_voc0712.yaml --images_dir demo --ckpt mobilenet_v2_ssd320_voc0712_v2.pth\"\n", - "os.system(command1)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed2436e1-5790-4f46-8598-fd2235ab1221", - "metadata": {}, - "outputs": [], - "source": [ - "command2=\"cd SSD;cp ssd_mobilenetV2_without_ABP-NMS.onnx ../\"\n", - "os.system(command2)" - ] - }, - { - "cell_type": "markdown", - "id": "82da456c-d748-4374-ad82-1da014d87954", - "metadata": {}, - "source": [ - "### Getting the DLC Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "61b04f79-c0f4-41a5-9f96-2a7b886628c6", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "snpe-onnx-to-dlc -i ssd_mobilenetV2_without_ABP-NMS.onnx -o ssd_mobilenetV2_without_ABP-NMS_fp32.dlc --quantization_overrides encoding_format.encodings " - ] - }, - { - "cell_type": "markdown", - "id": "f54e040a-6837-4301-a5af-4c2a2bbab0a7", - "metadata": {}, - "source": [ - "#### Architecture of The SSD-MobileNet-V2 Model\n", - "*Please Check From the Below File Path the Architecture*" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "290312f8-6c96-45f4-92a8-5a3a989e3589", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "snpe-dlc-viewer -i ssd_mobilenetV2_without_ABP-NMS_fp32.dlc -s ssd_mobilenetV2_without_ABP-NMS_fp32.html" - ] - }, - { - "cell_type": "markdown", - "id": "8464804a-d310-4396-af43-90972e4bebe6", - "metadata": {}, - "source": [ - "### Preprocessing the image" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4c92e3b9-93cb-46f7-9456-007451f6d379", - "metadata": {}, - "outputs": [], - "source": [ - "class Compose(object):\n", - " def __init__(self, transforms):\n", - " self.transforms = transforms\n", - "\n", - " def __call__(self, img, boxes=None, labels=None):\n", - " for t in self.transforms:\n", - " img, boxes, labels = t(img, boxes, labels)\n", - " if boxes is not None:\n", - " boxes, labels = remove_empty_boxes(boxes, labels)\n", - " return img, boxes, labels\n", - "\n", - "class SubtractMeans(object):\n", - " def __init__(self, mean):\n", - " self.mean = np.array(mean, dtype=np.float32)\n", - "\n", - " def __call__(self, image, boxes=None, labels=None):\n", - " image = image.astype(np.float32)\n", - " image -= self.mean\n", - " return image.astype(np.float32), boxes, labels\n", - "\n", - "class Resize(object):\n", - " def __init__(self, size=300):\n", - " self.size = size\n", - " def 
__call__(self, image, boxes=None, labels=None):\n", - " image = cv2.resize(image, (self.size,\n", - " self.size))\n", - " return image, boxes, labels\n", - "\n", - "def build_transforms(IMAGE_SIZE = 300, PIXEL_MEAN = [123, 117, 104]):\n", - " transform = [\n", - " Resize(IMAGE_SIZE), #300\n", - " SubtractMeans(PIXEL_MEAN), #[123, 117, 104]\n", - " ]\n", - " transform = Compose(transform)\n", - " return transform\n", - "\n", - "def load_img(img):\n", - " image_bgr = cv2.imread(img, cv2.IMREAD_COLOR)\n", - " image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)\n", - " height, width, _ = image_rgb.shape\n", - " transforms = build_transforms(IMAGE_SIZE = 320, PIXEL_MEAN = [123, 117, 104])\n", - " images = transforms(image_rgb)[0]\n", - " images = np.ascontiguousarray(images)\n", - " return images" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "478dff7f-7004-4414-a87e-ad72d37f0129", - "metadata": {}, - "outputs": [], - "source": [ - "import cv2\n", - "import numpy as np\n", - "import os\n", - "def detect(imgfile,i):\n", - " img= load_img(imgfile)\n", - " img.tofile(\"raw/\"+filenames[i].split(\".\")[0]+\".raw\")\n", - " \n", - "filenames = os.listdir(\"val2017\")\n", - "for i in range(0,len(filenames)):\n", - " if \"jpg\" in filenames[i].lower():\n", - " detect(\"val2017/\"+filenames[i],i)\n", - " else:\n", - " print(\"Could not get this image pre-processed\")\n", - " print(\" ==> filename :: \",filenames[i].lower())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "acdfe576-a083-4581-a3dc-ff0e964ecba1", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "find ./raw -name *.raw > list.txt" - ] - }, - { - "cell_type": "markdown", - "id": "c2d1b62c-71c5-4915-8cfc-bc12080b4cdc", - "metadata": {}, - "source": [ - "#### Quantizing the DLC Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2ba82fb8-a27d-4f32-a93d-01c6f1a68b4d", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "snpe-dlc-quantize --input_dlc ssd_mobilenetV2_without_ABP-NMS_fp32.dlc --override_params --optimization cle --input_list list.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc ssd_mobilenetV2_without_ABP-NMS_a8w8.dlc --enable_htp --htp_socs sm8550" - ] - }, - { - "cell_type": "markdown", - "id": "5d1b147e-b32a-4a62-a5ce-259af3f39083", - "metadata": {}, - "source": [ - "## Running The DLC Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f7498cc-477f-4fd2-823c-19253feab4a9", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cd ../../../../snpe/qaisw-v2.15.1.230926150623_62883\n", - "adb devices" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "673c235c-cea9-42f3-a130-4fddd496dcf8", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"../../../../snpe/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"#rawfiles\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"SSD_MobileNetV2\"\n", - "os.environ['DLC32']=\"ssd_mobilenetV2_without_ABP-NMS_fp32.dlc\"\n", - "os.environ['DLC8']=\"ssd_mobilenetV2_without_ABP-NMS_a8w8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"SSD_MobileNetV2\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"389c94f8\" #fill your 
device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "markdown", - "id": "d86717c3-7f51-4f94-84a4-090359d08f49", - "metadata": {}, - "source": [ - "#### Creating Bin and Lib Folder on Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e52ea798-55ce-4af7-966b-14cc22b12318", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "9cc00ffe-a95f-4c8a-add4-f8405dad3bf6", - "metadata": {}, - "source": [ - "### Pushing all Lib and Bin files onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "60e63e06-fac3-4d1f-856d-a08da815dc8b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v73/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "markdown", - "id": "f05ef4ec-1a1d-484e-a223-3b579b9d1b19", - "metadata": {}, - "source": [ - "### Pushing Artifacts on to Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3265be37-824e-44b0-8435-fdd83366cf85", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "742b3952-e121-4164-8a1f-cdddb68dfa86", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push raw /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "455048aa-b8f5-4893-a5b2-d6b502fc2503", - "metadata": {}, - "source": [ - "### Inferencing 8 bit DLC on DSP Runtime\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "93745257-4f3c-411a-98ed-c2af07755ad4", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=ssd_mobilenetV2_without_ABP-NMS_a8w8.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - 
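The DSP invocation just below selects output tensors by name (`--set_output_tensors 935,986`), and the post-processing later reads `935.raw`/`986.raw`. Those names come from the exported ONNX graph, so it can be worth confirming them up front. A minimal sketch, assuming `onnxruntime` is installed (it is not used anywhere else in this notebook):

```python
# Sketch: confirm the input/output tensor names of the exported ONNX graph.
# onnxruntime is an assumption here; any ONNX graph inspector would do.
import onnxruntime as ort

sess = ort.InferenceSession("ssd_mobilenetV2_without_ABP-NMS.onnx",
                            providers=["CPUExecutionProvider"])

for inp in sess.get_inputs():
    print("input :", inp.name, inp.shape, inp.type)

# The names printed here are what snpe-net-run --set_output_tensors refers to
# (935 and 986 in this walkthrough) and what the output .raw files are named after.
for out in sess.get_outputs():
    print("output:", out.name, out.shape, out.type)
```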
"export ONDEVICE_FOLDER=\"SSD_MobileNetV2\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list list.txt --set_output_tensors 935,986 --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "014320fc-e2f2-4509-9e54-f52d63e600eb", - "metadata": {}, - "source": [ - "### Inferencing 32b DLC on CPU Runtime\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d5c2722d-a74b-449e-8129-2f3a2be03528", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=ssd_mobilenetV2_without_ABP-NMS_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"SSD_MobileNetV2\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list list.txt --set_unconsumed_as_output --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "b3656548-f544-4fa7-9d1d-8b4f002df283", - "metadata": {}, - "source": [ - "### Pulling the Output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1961c0be-5bb4-4a44-9c79-4aaecb18d93b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "rm -rf OUTPUT_8b_DSP/\n", - "rm -rf OUTPUT_32b_CPU/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d277c31a-de1a-4333-a244-97f7bd32a87c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "562a4e7d-fa79-49b6-bf8f-c1e5ac59548b", - "metadata": {}, - "source": [ - "## Post Processing the Output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1dfec26b-e9ea-4092-af07-e0a1393c034e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "mkdir -p \"detection_results_32b_CPU\"\n", - "mkdir -p \"detection_results_8b_DSP\"\n", - "mkdir -p \"prediction_results_32b_CPU\"\n", - "mkdir -p \"prediction_results_8b_DSP\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7f08e880-4da7-42bb-9472-59a0e4eb8d10", - "metadata": {}, - "outputs": [], - "source": [ - "class_name = [\"BACKGROUND\",\n", - " \"aeroplane\",\n", - " \"bicycle\",\n", - " \"bird\",\n", - " \"boat\",\n", - " \"bottle\",\n", - " \"bus\",\n", - " \"car\",\n", - " \"cat\",\n", - " \"chair\",\n", - " \"cow\",\n", - " \"diningtable\",\n", - " \"dog\",\n", - " \"horse\",\n", - " \"motorbike\",\n", - " \"person\",\n", - " \"pottedplant\",\n", - " \"sheep\",\n", - " \"sofa\",\n", - " \"train\",\n", - " \"tvmonitor\"]\n", - "\n", - "label2class={str(i):x for i,x in enumerate(class_name)}\n", - "print(label2class)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "735a7276-cdb3-4d99-b8ad-7254f88d152a", - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "\n", - "#Computing the areas of rectangle given two corners\n", - "def area_of(left_top, right_bottom) -> torch.Tensor:\n", - " hw = torch.clamp(right_bottom - left_top, min=0.0)\n", - " return hw[..., 0] * 
hw[..., 1]\n", - "\n", - "#Returning intersection-over-union (Jaccard index) of boxes.\n", - "def iou_of(boxes0, boxes1, eps=1e-5):\n", - " overlap_left_top = torch.max(boxes0[..., :2], boxes1[..., :2])\n", - " overlap_right_bottom = torch.min(boxes0[..., 2:], boxes1[..., 2:])\n", - "\n", - " overlap_area = area_of(overlap_left_top, overlap_right_bottom)\n", - " area0 = area_of(boxes0[..., :2], boxes0[..., 2:])\n", - " area1 = area_of(boxes1[..., :2], boxes1[..., 2:])\n", - " return overlap_area / (area0 + area1 - overlap_area + eps)\n", - "\n", - "\n", - "\n", - "#Returning the Bounding Boxes\n", - "def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):\n", - " scores = box_scores[:, -1]\n", - " boxes = box_scores[:, :-1]\n", - " picked = []\n", - " _, indexes = scores.sort(descending=True)\n", - " indexes = indexes[:candidate_size]\n", - " while len(indexes) > 0:\n", - " current = indexes[0]\n", - " picked.append(current.item())\n", - " if 0 < top_k == len(picked) or len(indexes) == 1:\n", - " break\n", - " current_box = boxes[current, :]\n", - " indexes = indexes[1:]\n", - " rest_boxes = boxes[indexes, :]\n", - " iou = iou_of(\n", - " rest_boxes,\n", - " current_box.unsqueeze(0),\n", - " )\n", - " indexes = indexes[iou <= iou_threshold]\n", - "\n", - " return box_scores[picked, :]\n", - "\n", - "\n", - "\n", - "def nms(box_scores, nms_method=None, score_threshold=None, iou_threshold=None,\n", - " sigma=0.5, top_k=-1, candidate_size=200):\n", - " if nms_method == \"soft\":\n", - " return soft_nms(box_scores, score_threshold, sigma, top_k)\n", - " else:\n", - " return hard_nms(box_scores, iou_threshold, top_k, candidate_size=candidate_size)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cd981ebf-f6c4-4b8c-bc48-15a8027507cc", - "metadata": {}, - "outputs": [], - "source": [ - "colors = np.random.uniform(0, 255, size=(len(list(label2class.values())), 3))\n", - "\n", - "import cv2.dnn\n", - "import torch\n", - "def postProcessing(scores,boxes,original_image_path,save_image_path):\n", - " original_image=cv2.imread(original_image_path, cv2.IMREAD_COLOR)\n", - " height,width,_=original_image.shape\n", - " prob_threshold = 0.4\n", - " picked_box_probs = []\n", - " picked_labels = []\n", - " for class_index in range(1, scores.shape[1]):\n", - " \n", - " probs = scores[:, class_index]\n", - " \n", - " mask = probs > prob_threshold\n", - " probs = probs[mask]\n", - " \n", - " if probs.shape[0] == 0:\n", - " continue\n", - " subset_boxes = boxes[mask, :]\n", - " subset_boxes = torch.from_numpy(subset_boxes)\n", - " probs = torch.from_numpy(probs)\n", - " box_probs = torch.cat([subset_boxes, probs.reshape(-1, 1)], dim=1)\n", - " box_probs = nms(box_probs, None,\n", - " score_threshold=prob_threshold,\n", - " iou_threshold=0.2,\n", - " sigma=0.2,\n", - " top_k=-1,\n", - " candidate_size=200)\n", - " picked_box_probs.extend([box_probs])\n", - " picked_labels.extend([class_index] * box_probs.size(0))\n", - " picked_box_probs = torch.cat(picked_box_probs)\n", - " picked_box_probs[:, 0] *= width\n", - " picked_box_probs[:, 1] *= height\n", - " picked_box_probs[:, 2] *= width\n", - " picked_box_probs[:, 3] *= height\n", - " label = class_name[picked_labels[0]]\n", - " \n", - " for i in range(0,len(picked_box_probs)):\n", - " x,y=int(picked_box_probs[i, 0].numpy()),int(picked_box_probs[i, 1].numpy())\n", - " x_plus_w,y_plus_h=int(picked_box_probs[i, 2].numpy()),int(picked_box_probs[i, 3].numpy())\n", - " original_image = cv2.rectangle(original_image,(x, y), 
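Before relying on these helpers inside `postProcessing`, a tiny hand-checkable run makes their behaviour concrete. A sketch using three toy boxes in normalized corner form, reusing the `iou_of` and `hard_nms` definitions above:

```python
# Sketch: hand-checkable run of the IoU / hard-NMS helpers defined above.
import torch

box_a = torch.tensor([0.0, 0.0, 1.0, 1.0])   # full frame
box_b = torch.tensor([0.0, 0.0, 0.5, 1.0])   # left half  -> IoU with A = 0.5
box_c = torch.tensor([0.6, 0.6, 0.9, 0.9])   # small box  -> IoU with A = 0.09

print("IoU(A, B):", iou_of(box_a, box_b).item())   # ~0.5
print("IoU(A, C):", iou_of(box_a, box_c).item())   # ~0.09

# Attach confidences as the last column, the same layout postProcessing() builds.
box_scores = torch.tensor([
    [0.0, 0.0, 1.0, 1.0, 0.9],
    [0.0, 0.0, 0.5, 1.0, 0.8],
    [0.6, 0.6, 0.9, 0.9, 0.7],
])
kept = hard_nms(box_scores, iou_threshold=0.4)
# B overlaps A above the 0.4 threshold and is suppressed; A and C survive.
print(kept)
```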
(x_plus_w,y_plus_h),colors[class_index],2)\n", - " original_image=cv2.putText(original_image, label,(int(picked_box_probs[i, 0])+9, int(picked_box_probs[i, 1])-10),cv2.FONT_HERSHEY_SIMPLEX,1,(255, 40, 255),2) # line type\n", - " \n", - " \n", - " picked_box_probs = []\n", - " picked_labels = []\n", - " cv2.imwrite(save_image_path,original_image)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "662f1d71-3df5-4113-9190-32760e107697", - "metadata": {}, - "outputs": [], - "source": [ - "folder = [\"OUTPUT_32b_CPU\",\"OUTPUT_8b_DSP\"]\n", - "detection_results = [\"detection_results_32b_CPU\",\"detection_results_8b_DSP\"]\n", - "results = [\"prediction_results_32b_CPU\",\"prediction_results_8b_DSP\"]\n", - "f=open(\"list.txt\",\"r\")\n", - "input_list = f.readlines()\n", - "f.close()\n", - "for j in range(0,2):\n", - " for result_path in glob.glob(os.path.join(folder[j], '*')):\n", - " if \".log\" not in result_path :\n", - " #using raw file to traverse output folders and result folder will be in sequence as per list.txt\n", - " raw_path = input_list[int(result_path.split(\"_\")[-1])].replace(\"\\n\",\"\") # rawfile path from list.txt\n", - " if os.path.exists(raw_path):\n", - " scores = np.fromfile(result_path+'/935.raw', dtype=\"float32\")\n", - " boxes=np.fromfile(result_path+'/986.raw', dtype=\"float32\")\n", - "\n", - " boxes=boxes.reshape((3234,4))\n", - " scores=scores.reshape((3234,21))\n", - " originalImagePath = raw_path.replace(\".raw\",\".jpg\").replace(\"raw\",\"val2017\")\n", - "\n", - " runtime = folder[j].split(\"_\")[-1]\n", - " save_img_path = detection_results[j]+\"/\"+raw_path.split(\"/\")[-1].split(\".\")[0]+\"_\"+runtime+\".jpg\"\n", - "\n", - " postProcessing(scores,boxes,originalImagePath,save_img_path)\n", - " " - ] - }, - { - "cell_type": "markdown", - "id": "413b58d4-4827-4228-876a-f96964003910", - "metadata": {}, - "source": [ - "## Visualization" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "039bb5d8-66fe-47bf-87be-851ce61bce87", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "detection_results = [\"detection_results_32b_CPU\",\"detection_results_8b_DSP\"]\n", - "\n", - "for CPU_path in glob.glob(os.path.join(detection_results[0], '*')):\n", - " CPU_img_name=CPU_path.split(\"/\")[1].split(\"_\")[0]\n", - " for DSP_path in glob.glob(os.path.join(detection_results[1], '*')):\n", - " DSP_img_name=DSP_path.split(\"/\")[1].split(\"_\")[0]\n", - " \n", - " if CPU_img_name!=DSP_img_name:continue\n", - " \n", - " CPU_img=cv2.imread(CPU_path)\n", - " DSP_img=cv2.imread(DSP_path)\n", - " fing,ax=plt.subplots(1,2,figsize=(10,5))\n", - " ax[0].imshow(CPU_img)\n", - " ax[0].set_title(\"CPU 32 Inference\")\n", - "\n", - " ax[1].imshow(DSP_img)\n", - " ax[1].set_title(\"DSP 8 Inference\")\n", - " plt.tight_layout()\n", - " plt.show()\n" - ] - }, - { - "cell_type": "markdown", - "id": "9a61ca57-cbc8-4d3b-8d36-e62e37804fc8", - "metadata": {}, - "source": [ - "# Mixed precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e87e13ee-34d9-4d21-88be-0fb0de77952a", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"../../../../../../snpe/snpe-2.10.0.4541\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"#rawfiles\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"SSD_MobileNetV2\"\n", - 
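The reshapes to `(3234, 4)` and `(3234, 21)` in the post-processing loop above correspond to the number of SSD prior (anchor) boxes. Assuming the upstream `mobilenet_v2_ssd320` configuration of six feature maps (20, 10, 5, 3, 2, 1) with 6 boxes per location — values recalled from the SSD repo, so treat them as an assumption — the count works out as:

```python
# Sketch: where 3234 comes from (feature-map sizes and 6 priors per cell are
# assumed from the upstream mobilenet_v2_ssd320 configuration).
feature_maps = [20, 10, 5, 3, 2, 1]
boxes_per_location = 6

num_priors = sum(boxes_per_location * f * f for f in feature_maps)
print(num_priors)   # 6 * (400 + 100 + 25 + 9 + 4 + 1) = 3234
```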
"os.environ['DLC32']=\"dlc/ssd_mobilenetV2_without_ABP-NMS.dlc\"\n", - "os.environ['DLC8']=\"dlc/ssd_mobilenetV2_without_ABP-NMS_Q.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"SSD_MobileNetV2\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"e4149b7f\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android-clang8.0\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "67031ad4-d118-4504-a8b2-49bedefd6f76", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a4433134-442c-4b40-a10d-3751c90728f6", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/dsp/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7e279429-1ebc-41f2-870b-ee05b7d2978b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "16419832-b66c-4625-b25d-a0d51d33ba74", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push raw /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d30f04b3-1d9a-4aa0-b90a-283825a7eefa", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android-clang8.0/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android-clang8.0/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=ssd_mobilenetV2_without_ABP-NMS_Q.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"SSD_MobileNetV2\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list list.txt --output_dir $OUTPUT_FOLDER --use_dsp --debug" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": 
"54df402c-1cf8-4ad4-b729-8a0fde9799ee", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android-clang8.0/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android-clang8.0/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=ssd_mobilenetV2_without_ABP-NMS.dlc\n", - "export ONDEVICE_FOLDER=\"SSD_MobileNetV2\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list list.txt --output_dir $OUTPUT_FOLDER --debug" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7412fb50-3b79-4bc2-952a-d95548940515", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "rm -rf OUTPUT_32b_CPU\n", - "rm -rf OUTPUT_8b_DSP\n", - "ls" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7bd1afae-541a-4da9-857a-ed96e16a8405", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "78ea4e5a-e45c-4c38-afd6-1bc4992e834f", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "snpe-dlc-info -i dlc/ssd_mobilenetV2_without_ABP-NMS_Q.dlc >ssd_mobilenetV2_without_ABP-NMS_Q.txt\n", - "snpe-dlc-viewer -i dlc/ssd_mobilenetV2_without_ABP-NMS_Q.dlc -s ssd_mobilenetV2_without_ABP-NMS_Q.html" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b879a087-fcd6-44be-bcd1-5f2b4f857dfc", - "metadata": {}, - "outputs": [], - "source": [ - "from sklearn.metrics.pairwise import cosine_similarity\n", - "def cosine_sim(out_a, out_b):\n", - " \"\"\"\n", - " This function computes the cosine similarity of two outputs.\n", - " :param out_a: onnx output tensor\n", - " :param out_b: system output tensor\n", - " :return: cosine similarity\n", - " \"\"\"\n", - " out_a = out_a.reshape(1,len(out_a))\n", - " out_b = out_b.reshape(1,len(out_b))\n", - " return cosine_similarity(out_a,out_b)[0][0]\n", - "\n", - "\n", - "def plot_layer_by_layer(INT8_DSP, FP32_CPU, output_folder):\n", - " list_htp_nodes = [os.path.join(path, name) for path, subdirs, files in os.walk(INT8_DSP) for name in files if name.endswith(\"raw\")]\n", - " list_htp_nodes_mse = []\n", - " final_node_names = []\n", - " list_htp_nodes_no_raw = [ k.split('/')[-1].split('.')[0] for k in list_htp_nodes]\n", - " for i in range(len(list_htp_nodes)):\n", - " node_name = str(list_htp_nodes[i])\n", - " htp_node_data = np.fromfile(node_name,dtype=np.float32)\n", - " if os.path.exists(node_name.replace(INT8_DSP.split(\"/\")[1],FP32_CPU.split(\"/\")[1])):\n", - " cpu_node_data = np.fromfile(node_name.replace(INT8_DSP.split(\"/\")[1],FP32_CPU.split(\"/\")[1]),dtype=np.float32)\n", - " else:\n", - " layer_new_name = node_name.replace(INT8_DSP.split(\"/\")[1],FP32_CPU.split(\"/\")[1])\n", - " layer_new_name = layer_new_name.replace(\"_converted_UFIXED_POINT_16\",\"\")\n", - " cpu_node_data = np.fromfile(layer_new_name,dtype=np.float32)\n", - " print(\"new_path :: \",layer_new_name)\n", - "# cpu_node_data = np.fromfile(node_name.replace(\"OUTPUT_8b_DSP\",\"OUTPUT_FP32_CPU\"),dtype=np.float32)\n", - " error = 
cosine_sim(htp_node_data,cpu_node_data)\n", - " list_htp_nodes_mse.append(error) # saving cosine similarity scores\n", - " final_node_names.append(node_name.split(\"Result_0\")[-1]) # saving layer names\n", - " plt.figure(figsize=(30, 32))\n", - " plt.plot(final_node_names,list_htp_nodes_mse)\n", - " plt.savefig(f\"{output_folder}/cosine_plot.png\")\n", - " plotting.output_file(f\"{output_folder}/cosine_plot.html\")\n", - " plot = plotting.figure(x_range=final_node_names, height=200, title=f\"rmse_plot_htp_vs_cpu\", x_axis_label=\"Layers\", y_axis_label=\"cosine\") \n", - " plot.x(final_node_names,list_htp_nodes_mse)\n", - " plot.line(final_node_names,list_htp_nodes_mse)\n", - " plot.xaxis.major_label_orientation = \"vertical\"\n", - " plot.sizing_mode = \"scale_width\"\n", - " plotting.save(plot)\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "23c27573-a821-4ee6-81c5-868a61edcedd", - "metadata": {}, - "outputs": [], - "source": [ - "output_folder = 'plots_fp32_vs_int8'\n", - "os.makedirs(output_folder,exist_ok=True)\n", - "INT8_DSP = \"./OUTPUT_8b_DSP/Result_0\" # int8 dsp layer dump dir path\n", - "FP32_CPU = \"./OUTPUT_32b_CPU/Result_0\" # fp32 arm apu dump dir path\n", - "plot_layer_by_layer(INT8_DSP, FP32_CPU, output_folder)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "101527ad-faf7-45e5-9692-f55b0a8b332a", - "metadata": {}, - "outputs": [], - "source": [ - "def get_scale_offset(min_value,max_value,bw=8):\n", - " qmin = 0\n", - " qmax = (2**bw)-1\n", - " scale = (max_value - min_value) / (qmax - qmin)\n", - " initial_zero_point = qmin - min_value / scale\n", - " final_zero_point = 0;\n", - " if (initial_zero_point < qmin):\n", - " final_zero_point = qmin\n", - " elif (initial_zero_point > qmax):\n", - " final_zero_point = qmax\n", - " else:\n", - " final_zero_point = round(initial_zero_point)\n", - " return scale,-final_zero_point" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5434e5ae-69b9-4997-9215-51b5479df94a", - "metadata": {}, - "outputs": [], - "source": [ - "# output-685 (1131)\n", - "scale, offset = get_scale_offset(0.0,0.984115421772, 16)\n", - "print(\"Sigmoid layer : scale:\",scale,\" || offset:\",offset )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f43125ad-43f0-4790-80dd-212f1919fcc8", - "metadata": {}, - "outputs": [], - "source": [ - "#Output-647(1104)\n", - "scale, offset = get_scale_offset(0.0,1.380036115646, 16)\n", - "print(\"Sigmoid layer : scale:\",scale,\" || offset:\",offset )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e4b1553d-ce6f-47f7-a5ec-90e7c89266e5", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "snpe-onnx-to-dlc -i onnx-models/ssd_mobilenetV2_without_ABP-NMS.onnx -o dlc/ssd_mobilenetV2_without_ABP-NMS.dlc --quantization_overrides encoding_format.encodings \n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6518f5c8-0b78-4892-b5dc-ac2fdf8a7d0c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "snpe-dlc-quantize --input_dlc dlc/ssd_mobilenetV2_without_ABP-NMS.dlc --override_params --output_dlc dlc/ssd_mobilenetV2_without_ABP-NMS_Q.dlc --input_list list.txt --enable_htp\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5e04033d-dcef-4d4f-a4e5-7fad45da0b57", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"../../../../../../snpe/snpe-2.12.0.230626\"#set up your snpe path here.\n", - 
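The scale/offset pairs computed above are intended for the `encoding_format.encodings` file passed to `snpe-onnx-to-dlc --quantization_overrides`. The exact schema depends on the SDK version; the sketch below assumes the AIMET-style JSON layout, and the tensor names `685`/`647` are taken from the cell comments above, so verify both against your own graph and the SDK documentation:

```python
# Sketch: write a quantization-overrides file for the two sigmoid outputs.
# The JSON schema (activation_encodings / param_encodings with bitwidth, min,
# max, scale, offset) is assumed from the AIMET-style format; check it against
# the documentation of the SDK version in use.
import json

overrides = {
    "activation_encodings": {
        # 16-bit encodings: scale = max / 65535, offset = 0 when min is 0,
        # matching get_scale_offset() defined above.
        "685": [{"bitwidth": 16, "min": 0.0, "max": 0.984115421772,
                 "scale": 0.984115421772 / 65535, "offset": 0}],
        "647": [{"bitwidth": 16, "min": 0.0, "max": 1.380036115646,
                 "scale": 1.380036115646 / 65535, "offset": 0}],
    },
    "param_encodings": {},
}

with open("encoding_format.encodings", "w") as f:
    json.dump(overrides, f, indent=2)
```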
"os.environ['RAW_FILE_FOLDER']=\"raw\"#rawfiles\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"SSD_MobileNetV2\"\n", - "os.environ['DLC32']=\"dlc/ssd_mobilenetV2_without_ABP-NMS.dlc\"\n", - "os.environ['DLC8']=\"dlc/ssd_mobilenetV2_without_ABP-NMS_Q.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"SSD_MobileNetV2\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"e4149b7f\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "39326516-f4ea-409c-80f7-4055d0b200a7", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e55d862e-87df-4c59-927b-8deb819d7e88", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v73/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2e48c62f-ef35-4781-a74a-ef7a16ef19fc", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f7b3c29-7fe1-4761-896f-b4075edf0918", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push raw /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "99dc112d-19a4-4eb6-9da6-4ffcfab58849", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=ssd_mobilenetV2_without_ABP-NMS_Q.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"SSD_MobileNetV2\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list list.txt --set_output_tensors 935,986 --output_dir 
$OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d9693667-b0f2-40c6-b76a-c7e710f50dfe", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=ssd_mobilenetV2_without_ABP-NMS.dlc\n", - "export ONDEVICE_FOLDER=\"SSD_MobileNetV2\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list list.txt --set_unconsumed_as_output --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2030a6c6-e04f-4d01-99e2-942559fb5c78", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "rm -rf OUTPUT_32b_CPU\n", - "rm -rf OUTPUT_8b_DSP\n", - "ls" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "42bf6d11-781a-45d2-a229-a8d29612a35d", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d0131747-a910-44bf-bd6e-bf17ccdedb9a", - "metadata": {}, - "outputs": [], - "source": [ - "#000000329219\n", - "folder = [\"OUTPUT_32b_CPU\",\"OUTPUT_8b_DSP\"]\n", - "detection_results = [\"detection_results_32b_CPU\",\"detection_results_8b_DSP\"]\n", - "results = [\"prediction_results_32b_CPU\",\"prediction_results_8b_DSP\"]\n", - "f=open(\"list.txt\",\"r\")\n", - "r=0.31\n", - "input_shape = tuple(map(int,[320,320]))\n", - "input_list = f.readlines()\n", - "f.close()\n", - "for j in range(0,2):\n", - " for result_path in glob.glob(os.path.join(folder[j], '*')):\n", - " if \".log\" not in result_path and int(result_path.split(\"_\")[-1]) score_threshold -- boxes = boxes[indices] -- labels = labels[indices] -- scores = scores[indices] -- meters = ' | '.join( -- [ -- 'objects {:02d}'.format(len(boxes)), -- 'load {:03d}ms'.format(round(load_time * 1000)), -- 'inference {:03d}ms'.format(round(inference_time * 1000)), -- 'FPS {}'.format(round(1.0 / inference_time)) -- ] -- ) -- print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths), image_name, meters)) -- -- drawn_image = draw_boxes(image, boxes, labels, scores, class_names).astype(np.uint8) -- Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name)) -+ #result = model(images.to(device))[0] -+ boxes,scores = model(images.to(device)) -+ torch.onnx.export(model, (images), "ssd_mobilenetV2_without_ABP-NMS.onnx", verbose=True,do_constant_folding=True,opset_version=11,export_params=True) -+ with torch.no_grad(): -+ with torch.jit.optimized_execution(True): -+ traced = torch.jit.trace(model, (images)) -+ traced.save("ssd_mobilenetV2_without_ABP-NMS.pt") -+ print("Torch Trace file generated") -+ # image_bgr = cv2.imread(image_path, cv2.IMREAD_COLOR) -+ # for i in range(boxes.shape[0]): -+ # # print(scores[i][0], 0.0, scores[i][0] == 0.0) -+ # if scores[i] != 0.0: -+ # # print(i) -+ # label = str(int(labels[i])) -+ # cv2.rectangle(image_bgr, (boxes[i, 0], boxes[i, 1]), (boxes[i, 2], boxes[i, 3]), (255, 255, 0), 4) -+ # 
cv2.putText(image_bgr, label, -+ # (int(boxes[i, 0])+20, int(boxes[i, 1])+40), -+ # cv2.FONT_HERSHEY_SIMPLEX, -+ # 1, # font scale -+ # (255, 0, 255), -+ # 2) # line type -+ # img_file = os.path.join('ssd_mobilenetv2_'+str(j)+'.jpg') -+ # cv2.imwrite(img_file, image_bgr) -+ # inference_time = time.time() - start -+ -+ # result = result.resize((width, height)).to(cpu_device).numpy() -+ # boxes, labels, scores = result['boxes'], result['labels'], result['scores'] -+ -+ # indices = scores > score_threshold -+ # boxes = boxes[indices] -+ # labels = labels[indices] -+ # scores = scores[indices] -+ # meters = ' | '.join( -+ # [ -+ # 'objects {:02d}'.format(len(boxes)), -+ # 'load {:03d}ms'.format(round(load_time * 1000)), -+ # 'inference {:03d}ms'.format(round(inference_time * 1000)), -+ # 'FPS {}'.format(round(1.0 / inference_time)) -+ # ] -+ # ) -+ # print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths), image_name, meters)) -+ -+ # drawn_image = draw_boxes(image, boxes, labels, scores, class_names).astype(np.uint8) -+ # Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name)) - - - def main(): -diff --git a/demo/000342.jpg b/demo/000342.jpg -deleted file mode 100644 -index ecc5d74..0000000 -Binary files a/demo/000342.jpg and /dev/null differ -diff --git a/demo/000542.jpg b/demo/000542.jpg -deleted file mode 100644 -index d87e919..0000000 -Binary files a/demo/000542.jpg and /dev/null differ -diff --git a/demo/004101.jpg b/demo/004101.jpg -deleted file mode 100644 -index 9d79ca9..0000000 -Binary files a/demo/004101.jpg and /dev/null differ -diff --git a/demo/008591.jpg b/demo/008591.jpg -deleted file mode 100644 -index 4273f87..0000000 -Binary files a/demo/008591.jpg and /dev/null differ -diff --git a/ssd/config/defaults.py b/ssd/config/defaults.py -index 9b99438..6ef7398 100644 ---- a/ssd/config/defaults.py -+++ b/ssd/config/defaults.py -@@ -4,7 +4,7 @@ _C = CN() - - _C.MODEL = CN() - _C.MODEL.META_ARCHITECTURE = 'SSDDetector' --_C.MODEL.DEVICE = "cuda" -+_C.MODEL.DEVICE = "cpu" - # match default boxes to any ground truth with jaccard overlap higher than a threshold (0.5) - _C.MODEL.THRESHOLD = 0.5 - _C.MODEL.NUM_CLASSES = 21 -diff --git a/ssd/modeling/box_head/box_head.py b/ssd/modeling/box_head/box_head.py -index 582a570..55db632 100644 ---- a/ssd/modeling/box_head/box_head.py -+++ b/ssd/modeling/box_head/box_head.py -@@ -7,6 +7,7 @@ from ssd.modeling.box_head.box_predictor import make_box_predictor - from ssd.utils import box_utils - from .inference import PostProcessor - from .loss import MultiBoxLoss -+import torch - - - @registry.BOX_HEADS.register('SSDBoxHead') -@@ -43,7 +44,25 @@ class SSDBoxHead(nn.Module): - boxes = box_utils.convert_locations_to_boxes( - bbox_pred, self.priors, self.cfg.MODEL.CENTER_VARIANCE, self.cfg.MODEL.SIZE_VARIANCE - ) -+ device = torch.device("cpu") - boxes = box_utils.center_form_to_corner_form(boxes) -- detections = (scores, boxes) -- detections = self.post_processor(detections) -- return detections, {} -+ #detections = (scores, boxes) -+ # print("------SCORES------",scores.shape) -+ # num_boxes = scores.shape[1] -+ # num_classes = scores.shape[2] -+ -+ # boxes = boxes.view(num_boxes, 1, 4).expand(num_boxes, num_classes, 4) -+ # labels = torch.arange(num_classes, device=device) -+ # labels = labels.view(1, num_classes).expand_as(scores) -+ -+ # # remove predictions with the background label -+ # boxes = boxes[:, 1:] -+ # scores = scores[:, 1:] -+ # labels = labels[:, 1:] -+ -+ # # batch everything, by making every class prediction be 
a separate instance -+ # boxes = boxes.reshape(-1, 4) -+ # scores = scores.reshape(-1) -+ # labels = labels.reshape(-1) -+ #detections = self.post_processor(detections) -+ return boxes,scores#,labels -diff --git a/ssd/modeling/detector/ssd_detector.py b/ssd/modeling/detector/ssd_detector.py -index c43a4a6..6ae35e8 100644 ---- a/ssd/modeling/detector/ssd_detector.py -+++ b/ssd/modeling/detector/ssd_detector.py -@@ -13,7 +13,7 @@ class SSDDetector(nn.Module): - - def forward(self, images, targets=None): - features = self.backbone(images) -- detections, detector_losses = self.box_head(features, targets) -- if self.training: -- return detector_losses -- return detections -+ boxes,scores = self.box_head(features, targets) -+ # if self.training: -+ # return detector_losses -+ return boxes,scores#,labels diff --git a/models-for-solutions/03-object-detection/YOLOX/Accuracy_analyzer.ipynb b/models-for-solutions/03-object-detection/YOLOX/Accuracy_analyzer.ipynb deleted file mode 100644 index a76bcbd0..00000000 --- a/models-for-solutions/03-object-detection/YOLOX/Accuracy_analyzer.ipynb +++ /dev/null @@ -1,646 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "56eda6a2-9de6-412f-98db-22220e648202", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"#rawfiles\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"YoloX_updated\"\n", - "os.environ['DLC32']=\"yolox_FP32_2_15_1.dlc\"\n", - "os.environ['DLC8']=\"yolox_a8w8_2_15_1.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"YoloX_updated\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"728b7a92\" #fill your device-id. Use command \"adb devices\" to get devices names. 
example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "67877ee1-1f86-4e1f-921b-4b11fb93c698", - "metadata": {}, - "outputs": [], - "source": [ - "import cv2\n", - "import glob\n", - "import os\n", - "import numpy as np\n", - "from PIL import Image\n", - "import re\n", - "import matplotlib.image as mpimg\n", - "import matplotlib.pyplot as plt" - ] - }, - { - "cell_type": "markdown", - "id": "c41dbb8f-1be9-4851-8c01-6650deead467", - "metadata": {}, - "source": [ - "### ONNX Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "89808597-57d1-4ea7-bf1f-6c9fd8af009b", - "metadata": {}, - "outputs": [], - "source": [ - "!wget https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_x.onnx" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9ea9e7a1-b75c-4cbf-83ba-8dd95a9cd900", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc --input_network yolox_x.onnx --output_path yolox_FP32_2_15_1.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "fc33f572-c7a6-4afb-bb7c-637720ef7e69", - "metadata": {}, - "source": [ - "### DLC Conversion\n", - "\n", - "- Please create the dlc models(FP32) from the generating_model.ipynb file\n", - "- After Creating the Raw file here we'll generate the INT8 Model" - ] - }, - { - "cell_type": "markdown", - "id": "563d7e6c-c86e-4d6b-87ef-d5b4b4847b8a", - "metadata": {}, - "source": [ - "# Preprocessing" - ] - }, - { - "cell_type": "markdown", - "id": "ece73914-fadb-47a1-9d33-7e7979817969", - "metadata": {}, - "source": [ - "### Getting the dataset\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d7864346-c0fb-4df1-8307-6f45f07b8bd3", - "metadata": {}, - "outputs": [], - "source": [ - "# User needs to download the dataset of their choice here. 
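Any detection image set works; the cells below only assume a `val2017/` folder of `.jpg` files. As one example, the COCO 2017 validation images can be fetched and unpacked as in the sketch below (the URL follows the standard COCO hosting referenced elsewhere in this repository, and the archive is on the order of 1 GB, so both are assumptions worth double-checking):

```python
# Sketch: fetch COCO val2017 images into ./val2017 (URL and size are assumptions).
import urllib.request
import zipfile

url = "http://images.cocodataset.org/zips/val2017.zip"   # large download (~1 GB)
urllib.request.urlretrieve(url, "val2017.zip")

with zipfile.ZipFile("val2017.zip") as zf:
    zf.extractall(".")          # creates ./val2017/ with the validation .jpg files
```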
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "04699474-fc2c-434d-a6a9-9af58eaf7b4a", - "metadata": {}, - "outputs": [], - "source": [ - "# dataset is huge to run on device , taking only 15 dataset\n", - "files = os.listdir('val2017')\n", - "for file in files[15:]:\n", - " os.remove(\"val2017/\"+file)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "141a43b2-4e5f-41d6-92fb-e288b24f6df1", - "metadata": {}, - "outputs": [], - "source": [ - "def preproc(img, input_size, swap=(2, 0, 1)):\n", - " if len(img.shape) == 3:\n", - " padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114\n", - " else:\n", - " padded_img = np.ones(input_size, dtype=np.uint8) * 114\n", - "\n", - " r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])\n", - " resized_img = cv2.resize(\n", - " img,\n", - " (int(img.shape[1] * r), int(img.shape[0] * r)),\n", - " interpolation=cv2.INTER_LINEAR,\n", - " ).astype(np.uint8)\n", - " padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img\n", - "\n", - " padded_img = padded_img.transpose(swap)\n", - " padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)\n", - " return padded_img, r" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8c50d53a-8e67-4b3e-b0f4-9c679de4f42c", - "metadata": {}, - "outputs": [], - "source": [ - "import cv2\n", - "import numpy as np\n", - "import os\n", - "dict={}\n", - "input_shape = tuple(map(int,[640,640])) \n", - "def detect(imgfile,i):\n", - " origimg = cv2.imread(imgfile)\n", - " img, ratio = preproc(origimg, input_shape)\n", - " index=imgfile.split(\"/\")[1].split(\".\")[0]\n", - " dict[index]=ratio\n", - " img=np.transpose(img,(1,2,0))\n", - " img.tofile(\"raw/\"+filenames[i].split(\".\")[0]+\".raw\")\n", - " \n", - "filenames = os.listdir(\"val2017\")\n", - "for i in range(0,len(filenames)):\n", - " if \"jpg\" in filenames[i].lower():\n", - " detect(\"val2017/\"+filenames[i],i)" - ] - }, - { - "cell_type": "markdown", - "id": "a9393573-8aa3-440e-b96d-4e3261e53499", - "metadata": {}, - "source": [ - "### Creating the list file" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "445bcf68-0697-4146-960b-5511028b0c62", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "find ./raw -name *.raw > list.txt" - ] - }, - { - "cell_type": "markdown", - "id": "da8890e6-e898-416a-ab91-e681fef2d874", - "metadata": {}, - "source": [ - "#### Creating the INT 8 Model After preparing the raw files" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8dff13d1-49d8-438b-84c2-07c0565d8088", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize --input_dlc yolox_FP32_2_15_1.dlc --input_list list.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc yolox_a8w8_2_15_1.dlc --enable_htp --htp_socs sm8550" - ] - }, - { - "cell_type": "markdown", - "id": "080f67f8-17a0-48be-9d84-6848de68c3e3", - "metadata": {}, - "source": [ - "#### Creating Bin and Lib Folder on Device " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4e900c4c-1377-4dd0-897e-def4c093e52b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && 
$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "5965274a-afee-4b38-8d50-513f5fab97ad", - "metadata": {}, - "source": [ - "### Pushing all Lib and Bin files onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15822914-56dd-419e-b2e6-eaf19f349122", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "markdown", - "id": "69944d70-98ff-43c2-96cf-e1314607be8b", - "metadata": {}, - "source": [ - "### Pushing Artifacts on to Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6b3879d7-7ffb-4975-9971-4638467657ca", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c1a39528-3dde-4643-a7de-6f7431c820f1", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push raw /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "35977ce5-4f74-4fd2-a7a8-bbf210431b7e", - "metadata": {}, - "source": [ - "### Inferencing 8 bit DLC on DSP Runtime\n", - "* Give name of DLC in OUTPUT_DLC_QUANTIZED8 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5696a818-d3e5-470f-ad8a-119136ee9d4b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=yolox_a8w8_2_15_1.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"YoloX_updated\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list list.txt --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "e7fceca6-2436-4db7-8a92-b1dce996f34d", - "metadata": {}, - "source": [ - "### Inferencing 32b DLC on CPU Runtime\n", - "Give name of DLC in OUTPUT_DLC_32 and ondevice folder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "42fee380-0c73-4579-9d81-4569c273918d", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export 
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=yolox_FP32_2_15_1.dlc\n", - "export ONDEVICE_FOLDER=\"YoloX_updated\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list list.txt --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "7e1ab034-c136-42b5-a8b0-5fbfe09efacd", - "metadata": {}, - "source": [ - "### Pulling Output folder generated on different precision and cores" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5aa423e3-22ab-43e0-b2b1-9a19987ca5e0", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "058f2ad2-528e-4c9f-b95d-0c1547713c71", - "metadata": {}, - "source": [ - "# Post Processing\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1087e359-6c8b-4eb2-b9c2-36dac61347f1", - "metadata": {}, - "outputs": [], - "source": [ - "label2class = {'0': 'person', '1': 'bicycle', '2': 'car', '3': 'motorcycle', '4': 'airplane', '5': 'bus', \n", - " '6': 'train', '7': 'truck', '8': 'boat', '9': 'traffic', '10': 'fire', '11': 'stop', '12': 'parking', \n", - " '13': 'bench', '14': 'bird', '15': 'cat', '16': 'dog', '17': 'horse', '18': 'sheep', '19': 'cow', \n", - " '20': 'elephant', '21': 'bear', '22': 'zebra', '23': 'giraffe', '24': 'backpack', '25': 'umbrella', \n", - " '26': 'handbag', '27': 'tie', '28': 'suitcase', '29': 'frisbee', '30': 'skis', '31': 'snowboard', \n", - " '32': 'sports', '33': 'kite', '34': 'baseball', '35': 'baseball', '36': 'skateboard', '37': 'surfboard', \n", - " '38': 'tennis', '39': 'bottle', '40': 'wine', '41': 'cup', '42': 'fork', '43': 'knife', '44': 'spoon', \n", - " '45': 'bowl', '46': 'banana', '47': 'apple', '48': 'sandwich', '49': 'orange', '50': 'broccoli', \n", - " '51': 'carrot', '52': 'hot', '53': 'pizza', '54': 'donut', '55': 'cake', '56': 'chair', '57': 'couch', \n", - " '58': 'potted', '59': 'bed', '60': 'dining', '61': 'toilet', '62': 'tv', '63': 'laptop', '64': 'mouse', \n", - " '65': 'remote', '66': 'keyboard', '67': 'cell', '68': 'microwave', '69': 'oven', '70': 'toaster', \n", - " '71': 'sink', '72': 'refrigerator', '73': 'book', '74': 'clock', '75': 'vase', '76': 'scissors', \n", - " '77': 'teddy', '78': 'hair', '79': 'toothbrush'}\n", - "\n", - "print(list(label2class.values()))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "13f91a41-f0eb-47cd-8186-31b1748f95c0", - "metadata": {}, - "outputs": [], - "source": [ - "detection_results = [\"detection_results_32b_CPU\",\"detection_results_8b_DSP\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0568f23c-88dc-4e87-95b8-08bea67f7853", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "mkdir -p \"detection_results_32b_CPU\"\n", - "mkdir -p \"detection_results_8b_DSP\"\n", - "mkdir -p \"prediction_results_32b_CPU\"\n", - "mkdir -p \"prediction_results_8b_DSP\"" - ] - }, - { - "cell_type": "markdown", - "id": "a0f11dd4-8fdb-4eae-9861-1dfba1a95cb1", - "metadata": {}, - "source": [ - "### Function to draw the bounding boxes" - ] - }, - { - 
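Note that `label2class` above keeps only the first word of each multi-word COCO name, so drawn boxes read "traffic" rather than "traffic light". If full labels are preferred, the affected entries can be patched after the dictionary is defined; the replacements below follow the standard COCO-80 class names:

```python
# Restore the multi-word COCO class names that were truncated to one word above.
full_names = {
    '9': 'traffic light',   '10': 'fire hydrant',   '11': 'stop sign',
    '12': 'parking meter',  '32': 'sports ball',    '34': 'baseball bat',
    '35': 'baseball glove', '38': 'tennis racket',  '40': 'wine glass',
    '52': 'hot dog',        '58': 'potted plant',   '60': 'dining table',
    '67': 'cell phone',     '77': 'teddy bear',     '78': 'hair drier',
}
label2class.update(full_names)
```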
"cell_type": "code", - "execution_count": null, - "id": "654feb24-55b9-4788-8758-1622295eb899", - "metadata": {}, - "outputs": [], - "source": [ - "from matplotlib import pyplot as plt\n", - "import cv2.dnn\n", - "import numpy as np\n", - "colors = np.random.uniform(0, 255, size=(len(list(label2class.values())), 3))\n", - "def draw_bounding_box(img, class_id, confidence, x, y, x_plus_w, y_plus_h):\n", - " label = f'{label2class[str(class_id)]}'\n", - " color = colors[class_id]\n", - " img = cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)\n", - " img = cv2.putText(img, label, (x +2, y -10), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color, 2)\n", - " return img" - ] - }, - { - "cell_type": "markdown", - "id": "54166c06-e7bb-45c2-bcde-4cd20fe97d71", - "metadata": {}, - "source": [ - "#### Actual Post Processing Code" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "72277211-77c5-4119-8182-6c68f9006049", - "metadata": {}, - "outputs": [], - "source": [ - "def post_proc(output, originalImagePath, save_img_path,ratio):\n", - " #Initializing the lists\n", - " boxes_updated = []\n", - " scores_updated = []\n", - " class_ids = []\n", - " #Reading the actual image\n", - " original_image: np.ndarray = cv2.imread(originalImagePath)\n", - "\n", - " # Preprocessing the boxes and scores\n", - " #format of output is first 4 is the bounding boxes, 5th one is objectness score, last 80 column is score of each classes\n", - " boxes = output[:, :4]\n", - " scores = output[:, 4:5] * output[:, 5:]\n", - "\n", - " #Processing of bounding boxes\n", - " boxes_xyxy = np.ones_like(boxes)\n", - " boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2.\n", - " boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2.\n", - " boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2.\n", - " boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2.\n", - " boxes_xyxy /= ratio\n", - "\n", - " #For each prediction from 8400 predictions finding the results\n", - " for i in range(0, output.shape[0]):\n", - " (minScore, maxScore, minClassLoc, (x, maxClassIndex)) = cv2.minMaxLoc(scores[i])\n", - " if maxScore >= 0.2:\n", - " boxes_updated.append(boxes_xyxy[i])\n", - " scores_updated.append(float(maxScore))\n", - " class_ids.append(maxClassIndex)\n", - "\n", - " # Removing Overlapping predictions\n", - " result_boxes = cv2.dnn.NMSBoxes(boxes_updated, scores_updated, 0.20, 0.5, 0.5) #32b CPU\n", - " detections = []\n", - " img = original_image\n", - "\n", - " #For each prediction showing drawing the bounding boxes\n", - " for i in range(len(result_boxes)):\n", - " index = result_boxes[i]\n", - " box = boxes_updated[index]\n", - " detection = {\n", - " 'class_id': class_ids[index],\n", - " 'class_name': label2class[str(class_ids[index])],\n", - " 'confidence': scores_updated[index],\n", - " 'box': box\n", - " }\n", - " detections.append(detection)\n", - " img = draw_bounding_box(original_image, class_ids[index],detection['confidence'], int(box[0]), int(box[1]), int(box[2]), int(box[3])) \n", - " \n", - " cv2.imwrite(save_img_path, img)\n", - "\n", - "\n", - " str_pred = \"\"\n", - " for dic in detections:\n", - " str_pred+= dic['class_name']+\" \"+str(dic['confidence'])+\" \"+str(dic['box'][0])+\" \"+str(dic['box'][1])+\" \"+str(dic['box'][2])+\" \"+str(dic['box'][3])+\"\\n\"\n", - " return str_pred.strip()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "24addea8-1ab7-46a6-a01b-3537e82abd5a", - "metadata": {}, - "outputs": [], - "source": [ - "def postprocess_helper(outputs, img_size, p6=False):\n", - " grids = []\n", - 
" expanded_strides = []\n", - " strides = [8, 16, 32] if not p6 else [8, 16, 32, 64]\n", - "\n", - " hsizes = [img_size[0] // stride for stride in strides]\n", - " wsizes = [img_size[1] // stride for stride in strides]\n", - "\n", - " for hsize, wsize, stride in zip(hsizes, wsizes, strides):\n", - " xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))\n", - " grid = np.stack((xv, yv), 2).reshape(1, -1, 2)\n", - " grids.append(grid)\n", - " shape = grid.shape[:2]\n", - " expanded_strides.append(np.full((*shape, 1), stride))\n", - "\n", - " grids = np.concatenate(grids, 1)\n", - " expanded_strides = np.concatenate(expanded_strides, 1)\n", - " outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides\n", - " outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides\n", - "\n", - " return outputs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10396ea1-83d0-4dca-92af-2823de7ce749", - "metadata": {}, - "outputs": [], - "source": [ - "#000000329219\n", - "folder = [\"OUTPUT_32b_CPU\",\"OUTPUT_8b_DSP\"]\n", - "detection_results = [\"detection_results_32b_CPU\",\"detection_results_8b_DSP\"]\n", - "results = [\"prediction_results_32b_CPU\",\"prediction_results_8b_DSP\"]\n", - "f=open(\"list.txt\",\"r\")\n", - "input_list = f.readlines()\n", - "input_shape=tuple([640,640])\n", - "f.close()\n", - "for j in range(0,len(folder)):\n", - " for result_path in glob.glob(os.path.join(folder[j], '*')):\n", - " if \".log\" not in result_path and int(result_path.split(\"_\")[-1]) Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - -## How to get the onnx model from opensource ? - -```python -!wget https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_x.onnx -``` - -# Accuracy analysis - -- To check accuracy please run "Accuracy_analyzer.ipynb" jupyter notebook. -- To run any jupyter notebook, run below command. It will generate few links on the screen, pick the link with your machine name on it (host-name) and paste it in any browser. -- Navigate to the notebook ".ipynb" file and simply click that file. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - -# References - -1. YOLOX Model paper: https://arxiv.org/abs/2107.08430 -2. https://github.com/Megvii-BaseDetection/YOLOX/ diff --git a/models-for-solutions/03-object-detection/detr_resnet101/README.md b/models-for-solutions/03-object-detection/detr_resnet101/README.md deleted file mode 100644 index 97a0d83a..00000000 --- a/models-for-solutions/03-object-detection/detr_resnet101/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# Object Detection with DETR Model - -| Field | Description | -| --- | --- | -| Model Name | DETR | -| DNN Framwork | ONNX | -| Public Repo | https://github.com/facebookresearch/detr | -| Paper | NA | -| Accuracy Metric | box ap | - -## Pre-requisites - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - -## How to get the model ? - -For this demo, a ONNX version of DETR was used, execute this Python Script (generateModels.py). Once executed it will create models folder containing ONNX Model, -Quantized and Non-Quantized DLC. - - -# Accuracy analysis - -- To check accuracy please run "detr_resnet101-accuracy-analysis.ipynb" jupyter notebook. -- To run any jupyter notebook, run below command. It will generate few links on the screen, pick the link with your machine name on it (host-name) and paste it in any browser. 
-- Navigate to the notebook ".ipynb" file and simply click that file. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - -# References - -1. DETR Model paper: https://arxiv.org/pdf/2104.01318.pdf -2. https://huggingface.co/docs/transformers/model_doc/detr -3. 2017 Train Val dataset: http://images.cocodataset.org/annotations/annotations_trainval2017.zip diff --git a/models-for-solutions/03-object-detection/detr_resnet101/detr_resnet101-optimized-accuracy-analysis.ipynb b/models-for-solutions/03-object-detection/detr_resnet101/detr_resnet101-optimized-accuracy-analysis.ipynb deleted file mode 100644 index adaed924..00000000 --- a/models-for-solutions/03-object-detection/detr_resnet101/detr_resnet101-optimized-accuracy-analysis.ipynb +++ /dev/null @@ -1,572 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "b94b4143-7c12-4f89-9b4f-459f053b6050", - "metadata": {}, - "source": [ - "# Setting Up SDK Artifacts" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3f1748bf-bc9a-4a9f-9ece-55dcfa54705f", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"../../../../../snpe/2.15.1.230926/\" #set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw/\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"multiple\"\n", - "os.environ['DLC32']=\"models/detr_resnet101_fp32.dlc\" # Use the path to your non-quantized dlc\n", - "os.environ['DLC8']=\"models/detr_resnet101_w8a8.dlc\" # Use the path to your Quantized dlc\n", - "os.environ['TARGET_INPUT_LIST']=\"list.txt\" # Use the name of the input file\n", - "os.environ['ONDEVICE_FOLDER']=\"aditya\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. 
example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"\n", - "os.environ['SNPE_TARGET_DSPARCH']=\"hexagon-v75\"" - ] - }, - { - "cell_type": "markdown", - "id": "f140a00d-4a8e-419b-975a-a6a6810ac76e", - "metadata": {}, - "source": [ - "## Generate model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a6cd0a42-b02b-4013-88f3-8e417729c3f5", - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import os\n", - "import shutil\n", - "import torch.nn as nn\n", - "model = torch.hub.load('facebookresearch/detr', 'detr_resnet101', pretrained=True)\n", - "model.eval()\n", - "dummy_input=torch.randn(1, 3, 800, 1066)\n", - "output = model(dummy_input)\n", - "print(output['pred_logits'].shape)\n", - "\n", - "class ModifiedModel(nn.Module):\n", - " def __init__(self):\n", - " super(ModifiedModel,self).__init__()\n", - " self.model = model\n", - " self.model.eval()\n", - " def forward(self,pixel_values):\n", - " output = self.model(pixel_values)\n", - " output['pred_logits'] = output['pred_logits'].softmax(-1)[0,:,:-1]\n", - " return output\n", - "customModel = ModifiedModel()\n", - "customModel.eval()\n", - "dummy_input=torch.randn(1, 3, 800, 1066)\n", - "output = customModel(dummy_input)\n", - "print(output['pred_logits'].shape)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "450758bd-a367-4d2c-9bb0-d2ea60704f04", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('models', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "05945e74-19fc-4f96-8fc3-a938769db6c0", - "metadata": {}, - "outputs": [], - "source": [ - "dummy_input=torch.randn(1, 3, 800, 1066)\n", - "\n", - "torch.onnx.export(customModel, dummy_input, \"models/detr_resnet101.onnx\", opset_version=11\n", - " , verbose=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0ef0d26e-d0b2-413f-99d9-fb9488edf021", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc --input_network models/detr_resnet101.onnx --output_path models/detr_resnet101_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "ae6259dc-4d98-4358-8456-1ea9e353f192", - "metadata": {}, - "source": [ - "## import libraries" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ac5270dc-c0c9-4e3f-8403-5ed4f92b66f1", - "metadata": {}, - "outputs": [], - "source": [ - "import math\n", - "import os\n", - "from PIL import Image\n", - "import requests\n", - "import matplotlib.pyplot as plt\n", - "import ipywidgets as widgets\n", - "from IPython.display import display, clear_output\n", - "import torch\n", - "import shutil\n", - "import numpy as np\n", - "from torch import nn\n", - "from torchvision.models import resnet50\n", - "import torchvision.transforms as T\n", - "torch.set_grad_enabled(False);\n", - "import os\n", - "import cv2\n", - "import numpy as np\n", - "from numpy import asarray\n", - "from PIL import Image\n", - "import glob\n", - "import torch.nn.functional as nnf\n", - "import subprocess\n", - "!pip3 install ipywidgets" - ] - }, - { - "cell_type": "markdown", - "id": "289295b0-7a9b-4de6-a02b-9435acf52897", - "metadata": {}, - "source": [ - "## Getting the Dataset and Preparation" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d54d07d1-8210-480d-bec3-746e21c4c40d", - "metadata": {}, - "outputs": [], - "source": [ - "# User can download 
dataset of their choice for accuracy validation. \n", - "# User needs to follow the pre/post processing steps prescribed in dataset (or) given below. " - ] - }, - { - "cell_type": "markdown", - "id": "2e581c6c-3fe2-493a-a99f-26feda92c17b", - "metadata": {}, - "source": [ - "### Pre-Processing Steps of DETR Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5ddb6093-42b4-45c2-8c3e-d12d22d6381a", - "metadata": {}, - "outputs": [], - "source": [ - "# standard PyTorch mean-std input image normalization\n", - "transform = T.Compose([\n", - " T.Resize(800),\n", - " T.ToTensor(),\n", - " T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n", - "])\n", - "\n", - "# for output bounding box post-processing\n", - "def box_cxcywh_to_xyxy(x):\n", - " x_c, y_c, w, h = x.unbind(1)\n", - " b = [(x_c - 0.5 * w), (y_c - 0.5 * h),\n", - " (x_c + 0.5 * w), (y_c + 0.5 * h)]\n", - " return torch.stack(b, dim=1)\n", - "\n", - "def rescale_bboxes(out_bbox, size):\n", - " img_w, img_h = size\n", - " b = box_cxcywh_to_xyxy(out_bbox)\n", - " b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)\n", - " return b\n", - "\n", - "def plot_results(pil_img, prob, boxes,Image_count):\n", - " fig=plt.figure(figsize=(8,8))\n", - " ax1=fig.add_subplot(2,2,3)\n", - " ax1.imshow(pil_img)\n", - " ax = plt.gca()\n", - " colors = COLORS * 100\n", - " for p, (xmin, ymin, xmax, ymax), c in zip(prob, boxes.tolist(), colors):\n", - " ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,\n", - " fill=False, color=c, linewidth=1))\n", - " cl = p.argmax()\n", - " text = f'{CLASSES[cl]}: {p[cl]:0.2f}'\n", - " ax.text(xmin, ymin, text, fontsize=10,\n", - " bbox=dict(alpha=0.5))\n", - " plt.savefig(str(Image_count)+\".jpg\")\n", - " if Image_count%2==0:\n", - " shutil.move(str(Image_count)+\".jpg\",\"output/CPU\")\n", - " else:\n", - " shutil.move(str(Image_count)+\".jpg\",\"output/DSP\")\n", - " plt.show()" - ] - }, - { - "cell_type": "markdown", - "id": "dfc8c1b1-55b7-4cd5-b26a-cf17d737e1ae", - "metadata": {}, - "source": [ - "### Steps to create raw images" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d14b0aba-9d06-4f91-98d5-4a17b2127c7e", - "metadata": {}, - "outputs": [], - "source": [ - "name=\"raw\"\n", - "os.system('mkdir ' + name)\n", - "def detect(imgfile,i):\n", - " #getting the actual image\n", - " origimg = Image.open(imgfile)\n", - " #Transforming the image\n", - " img = transform(origimg).unsqueeze(0)\n", - "\n", - " img= nnf.interpolate(img, size=(800, 1066), mode='bicubic', align_corners=False)\n", - " \n", - " img_to_save=img.numpy().transpose(0,2,3,1).astype(np.float32)\n", - " \n", - " img_to_save.tofile(\"raw/\"+filenames[i].split(\".\")[0]+\".raw\")\n", - " \n", - "filenames = os.listdir(\"val2017\")\n", - "for i in range(0,len(filenames)):\n", - " if \"jpg\" in filenames[i].lower():\n", - " detect(\"val2017/\"+filenames[i],i)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5992e8ce-c6df-4169-979e-b000e7943fe1", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "find ./raw -name *.raw > list.txt" - ] - }, - { - "cell_type": "markdown", - "id": "57404ad6-9bcf-4254-9ae7-902e34b80196", - "metadata": {}, - "source": [ - "### Getting the Quantized Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d2e6d39d-06ce-4c5b-a5c5-1a08a51fd491", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize 
--input_dlc models/detr_resnet101_fp32.dlc --input_list list.txt --output_dlc models/detr_resnet101_w8a8.dlc --enable_htp" - ] - }, - { - "cell_type": "markdown", - "id": "d8561ebf-c06a-4602-9136-84dfde6813d7", - "metadata": {}, - "source": [ - "## Creating Bin and Lib Folder On Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6ed7817b-0574-48b6-8c8e-3184f1ca93e0", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "#source throughput.sh >>dump.txt\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "55bb8d16-46a7-46fc-835b-0bb53b1a5a66", - "metadata": {}, - "source": [ - "# Pusing All Bin and Lib Files on to Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "747c5228-fb41-4ba7-b1bf-b1b258dadcba", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin" - ] - }, - { - "cell_type": "markdown", - "id": "a97c7322-6293-45ba-84b0-f48ff22edb67", - "metadata": {}, - "source": [ - "# Pushing Artifacts onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6b44ca30-4fd9-48a8-a3c5-a735a90defd2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "b45da446-097d-452e-b9bf-289128391982", - "metadata": {}, - "source": [ - "# Inferencing 8-bit DLC onto DSP Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "354d6786-6fd5-4860-8159-e5729d405e19", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export SNPE_TARGET_ARCH=aarch64-android\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export DLC8=detr_resnet101_w8a8.dlc\n", - "export ONDEVICE_FOLDER=\"aditya\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $DLC8 --input_list list.txt --set_output_tensors 5863,5870 --output_dir=OUTPUT_8b_DSP --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "742e8fed-d3af-4938-b3fb-79d7ffe93b79", - "metadata": {}, - "source": [ - 
"# Inferencing 32-bit DLC onto CPU Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4670f413-3cb2-4fd6-8592-ab5b787b47ef", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export SNPE_TARGET_ARCH=aarch64-android\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export DLC32=detr_resnet101_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"aditya\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $DLC32 --input_list list.txt --set_output_tensors 5863,5870 --output_dir=OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "352f272d-4641-4f89-b622-6ab769af29d8", - "metadata": {}, - "source": [ - "# Pulling output folder generated on different Precision and Cores" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9744d5da-3c50-47c6-b354-ff878cc58288", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "cb6a41e6-cb53-4a48-ad3f-847432495928", - "metadata": {}, - "source": [ - "## Post Processing the Inferenced data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bad9b2db-7b39-4a2c-8dda-b3ac55206e50", - "metadata": {}, - "outputs": [], - "source": [ - "# Sample list of classes\n", - "CLASSES = [\n", - " 'N/A', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\n", - " 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A',\n", - " 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n", - " 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack',\n", - " 'umbrella', 'N/A', 'N/A', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n", - " 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n", - " 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'N/A', 'wine glass',\n", - " 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n", - " 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n", - " 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table', 'N/A',\n", - " 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',\n", - " 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A',\n", - " 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',\n", - " 'toothbrush'\n", - "]\n", - "\n", - "# colors for visualization\n", - "COLORS = [[0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125],\n", - " [0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933]]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4f7ba131-f92b-4538-a884-e7d7029283d8", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "Image_Paths=[]\n", - "\n", - "with open('list.txt', 'r') as f:\n", - " for line in f:\n", - " Image_Paths.append(line.strip().split(\"/\")[-1].split(\".\")[0])\n", - "\n", - "\n", - "count=Image_count=0\n", - "if os.path.exists(\"output\")==False:\n", - " os.mkdir(\"output\")\n", - "if 
os.path.exists(\"output/CPU\")==False:\n", - " os.mkdir(\"output/CPU\")\n", - "if os.path.exists(\"output/DSP\")==False:\n", - " os.mkdir(\"output/DSP\")\n", - "for image in Image_Paths:\n", - " image_path = 'val2017/'+image+\".jpg\"\n", - " im = Image.open(image_path)\n", - " file1 = 'OUTPUT_32b_CPU/Result_' + str(count) + '/5870.raw'\n", - " file2 = 'OUTPUT_32b_CPU/Result_' + str(count) + '/5863.raw'\n", - " file3 = 'OUTPUT_8b_DSP/Result_' + str(count) + '/5870.raw'\n", - " file4 = 'OUTPUT_8b_DSP/Result_' + str(count) + '/5863.raw'\n", - " a=np.fromfile(file1,np.float32)\n", - " a=a.reshape(100,91)\n", - " tensor_a = torch.from_numpy(a)\n", - " b=np.fromfile(file2,np.float32)\n", - " b=b.reshape(1,100,4)\n", - " tensor_b = torch.from_numpy(b)\n", - "\n", - " c=np.fromfile(file3,np.float32)\n", - " c=c.reshape(100,91)\n", - " tensor_c = torch.from_numpy(c)\n", - " d=np.fromfile(file4,np.float32)\n", - " d=d.reshape(1,100,4)\n", - " tensor_d = torch.from_numpy(d)\n", - "\n", - "\n", - " \n", - " probas = tensor_a\n", - " keep = probas.max(-1).values > 0.9\n", - " bboxes_scaled = rescale_bboxes(tensor_b[0, keep], im.size)\n", - " print(\"CPU FP32 Inference Result\")\n", - " plot_results(im, probas[keep], bboxes_scaled,Image_count)\n", - " Image_count=Image_count+1\n", - "\n", - " probas = tensor_c\n", - " keep = probas.max(-1).values > 0.9\n", - " bboxes_scaled = rescale_bboxes(tensor_d[0, keep], im.size)\n", - " print(\"DSP INT8 Inference Result\")\n", - " plot_results(im, probas[keep], bboxes_scaled,Image_count)\n", - " Image_count=Image_count+1\n", - " count=count+1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "11e0e3a6-c33d-4320-b61d-e224c85e6438", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "83386c6f-7c23-4bfb-af4e-8e91d90e129d", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/03-object-detection/detr_resnet101/detr_resnet101-original-accuracy-analysis.ipynb b/models-for-solutions/03-object-detection/detr_resnet101/detr_resnet101-original-accuracy-analysis.ipynb deleted file mode 100644 index e759bd40..00000000 --- a/models-for-solutions/03-object-detection/detr_resnet101/detr_resnet101-original-accuracy-analysis.ipynb +++ /dev/null @@ -1,472 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "b94b4143-7c12-4f89-9b4f-459f053b6050", - "metadata": {}, - "source": [ - "# Setting Up SDK Artifacts" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3f1748bf-bc9a-4a9f-9ece-55dcfa54705f", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\" #set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw/\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"multiple\"\n", - "os.environ['DLC32']=\"models/detr_resnet101_fp32.dlc\" # Use the path to your non-quantized dlc\n", - "os.environ['DLC8']=\"models/detr_resnet101_w8a8.dlc\" # Use the path to your Quantized dlc\n", - 
"os.environ['TARGET_INPUT_LIST']=\"list.txt\" # Use the name of the input file\n", - "os.environ['ONDEVICE_FOLDER']=\"aditya\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"\n", - "os.environ['SNPE_TARGET_DSPARCH']=\"hexagon-v75\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ac5270dc-c0c9-4e3f-8403-5ed4f92b66f1", - "metadata": {}, - "outputs": [], - "source": [ - "import math\n", - "import os\n", - "from PIL import Image\n", - "import requests\n", - "import matplotlib.pyplot as plt\n", - "import ipywidgets as widgets\n", - "from IPython.display import display, clear_output\n", - "import torch\n", - "import shutil\n", - "import numpy as np\n", - "from torch import nn\n", - "from torchvision.models import resnet50\n", - "import torchvision.transforms as T\n", - "torch.set_grad_enabled(False);\n", - "import os\n", - "import cv2\n", - "import numpy as np\n", - "from numpy import asarray\n", - "from PIL import Image\n", - "import glob\n", - "import torch.nn.functional as nnf\n", - "import subprocess\n", - "!pip3 install ipywidgets" - ] - }, - { - "cell_type": "markdown", - "id": "289295b0-7a9b-4de6-a02b-9435acf52897", - "metadata": {}, - "source": [ - "## Getting the Dataset and Preparation" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d54d07d1-8210-480d-bec3-746e21c4c40d", - "metadata": {}, - "outputs": [], - "source": [ - "# User needs to download the dataset of their choice, \n", - "# and follow the pre/post processing steps prescribed for that dataset. 
" - ] - }, - { - "cell_type": "markdown", - "id": "2e581c6c-3fe2-493a-a99f-26feda92c17b", - "metadata": {}, - "source": [ - "### Pre-Processing Steps of DETR Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5ddb6093-42b4-45c2-8c3e-d12d22d6381a", - "metadata": {}, - "outputs": [], - "source": [ - "# standard PyTorch mean-std input image normalization\n", - "transform = T.Compose([\n", - " T.Resize(800),\n", - " T.ToTensor(),\n", - " T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n", - "])\n", - "\n", - "# for output bounding box post-processing\n", - "def box_cxcywh_to_xyxy(x):\n", - " x_c, y_c, w, h = x.unbind(1)\n", - " b = [(x_c - 0.5 * w), (y_c - 0.5 * h),\n", - " (x_c + 0.5 * w), (y_c + 0.5 * h)]\n", - " return torch.stack(b, dim=1)\n", - "\n", - "def rescale_bboxes(out_bbox, size):\n", - " img_w, img_h = size\n", - " b = box_cxcywh_to_xyxy(out_bbox)\n", - " b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)\n", - " return b\n", - "\n", - "def plot_results(pil_img, prob, boxes,Image_count):\n", - " fig=plt.figure(figsize=(8,8))\n", - " ax1=fig.add_subplot(2,2,3)\n", - " ax1.imshow(pil_img)\n", - " ax = plt.gca()\n", - " colors = COLORS * 100\n", - " for p, (xmin, ymin, xmax, ymax), c in zip(prob, boxes.tolist(), colors):\n", - " ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,\n", - " fill=False, color=c, linewidth=1))\n", - " cl = p.argmax()\n", - " text = f'{CLASSES[cl]}: {p[cl]:0.2f}'\n", - " ax.text(xmin, ymin, text, fontsize=10,\n", - " bbox=dict(alpha=0.5))\n", - " plt.savefig(str(Image_count)+\".jpg\")\n", - " if Image_count%2==0:\n", - " shutil.move(str(Image_count)+\".jpg\",\"output/CPU\")\n", - " else:\n", - " shutil.move(str(Image_count)+\".jpg\",\"output/DSP\")\n", - " plt.show()" - ] - }, - { - "cell_type": "markdown", - "id": "dfc8c1b1-55b7-4cd5-b26a-cf17d737e1ae", - "metadata": {}, - "source": [ - "### Steps to create raw images" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d14b0aba-9d06-4f91-98d5-4a17b2127c7e", - "metadata": {}, - "outputs": [], - "source": [ - "name=\"raw\"\n", - "os.system('mkdir ' + name)\n", - "def detect(imgfile,i):\n", - " #getting the actual image\n", - " origimg = Image.open(imgfile)\n", - " #Transforming the image\n", - " img = transform(origimg).unsqueeze(0)\n", - "\n", - " img= nnf.interpolate(img, size=(800, 1066), mode='bicubic', align_corners=False)\n", - " \n", - " img_to_save=img.numpy().transpose(0,2,3,1).astype(np.float32)\n", - " \n", - " img_to_save.tofile(\"raw/\"+filenames[i].split(\".\")[0]+\".raw\")\n", - " \n", - "filenames = os.listdir(\"val2017\")\n", - "for i in range(0,len(filenames)):\n", - " if \"jpg\" in filenames[i].lower():\n", - " detect(\"val2017/\"+filenames[i],i)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5992e8ce-c6df-4169-979e-b000e7943fe1", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "find ./raw -name *.raw > list.txt" - ] - }, - { - "cell_type": "markdown", - "id": "57404ad6-9bcf-4254-9ae7-902e34b80196", - "metadata": {}, - "source": [ - "### Getting the Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d2e6d39d-06ce-4c5b-a5c5-1a08a51fd491", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "python generateModels.py" - ] - }, - { - "cell_type": "markdown", - "id": "d8561ebf-c06a-4602-9136-84dfde6813d7", - "metadata": {}, - "source": [ - "## Creating Bin and Lib Folder On 
Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6ed7817b-0574-48b6-8c8e-3184f1ca93e0", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "#source throughput.sh >>dump.txt\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "55bb8d16-46a7-46fc-835b-0bb53b1a5a66", - "metadata": {}, - "source": [ - "# Pusing All Bin and Lib Files on to Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "747c5228-fb41-4ba7-b1bf-b1b258dadcba", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin" - ] - }, - { - "cell_type": "markdown", - "id": "a97c7322-6293-45ba-84b0-f48ff22edb67", - "metadata": {}, - "source": [ - "# Pushing Artifacts onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6b44ca30-4fd9-48a8-a3c5-a735a90defd2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "b45da446-097d-452e-b9bf-289128391982", - "metadata": {}, - "source": [ - "# Inferencing 8-bit DLC onto DSP Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "354d6786-6fd5-4860-8159-e5729d405e19", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export SNPE_TARGET_ARCH=aarch64-android\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export DLC8=detr_resnet101_w8a8.dlc\n", - "export ONDEVICE_FOLDER=\"aditya\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $DLC8 --input_list list.txt --set_output_tensors 5863,5862 --output_dir=OUTPUT_8b_DSP --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "742e8fed-d3af-4938-b3fb-79d7ffe93b79", - "metadata": {}, - "source": [ - "# Inferencing 32-bit DLC onto CPU Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4670f413-3cb2-4fd6-8592-ab5b787b47ef", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s 
$DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export SNPE_TARGET_ARCH=aarch64-android\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export DLC32=detr_resnet101_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"aditya\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $DLC32 --input_list list.txt --set_unconsumed_as_output --output_dir=OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "352f272d-4641-4f89-b622-6ab769af29d8", - "metadata": {}, - "source": [ - "# Pulling output folder generated on different Precision and Cores" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9744d5da-3c50-47c6-b354-ff878cc58288", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "cb6a41e6-cb53-4a48-ad3f-847432495928", - "metadata": {}, - "source": [ - "## Post Processing the Inferenced data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bad9b2db-7b39-4a2c-8dda-b3ac55206e50", - "metadata": {}, - "outputs": [], - "source": [ - "# Sample list of classes in a dataset\n", - "CLASSES = [\n", - " 'N/A', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\n", - " 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A',\n", - " 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n", - " 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack',\n", - " 'umbrella', 'N/A', 'N/A', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n", - " 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n", - " 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'N/A', 'wine glass',\n", - " 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n", - " 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n", - " 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table', 'N/A',\n", - " 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',\n", - " 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A',\n", - " 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',\n", - " 'toothbrush'\n", - "]\n", - "\n", - "# colors for visualization\n", - "COLORS = [[0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125],\n", - " [0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933]]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4f7ba131-f92b-4538-a884-e7d7029283d8", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "Image_Paths=[]\n", - "\n", - "with open('list.txt', 'r') as f:\n", - " for line in f:\n", - " Image_Paths.append(line.strip().split(\"/\")[-1].split(\".\")[0])\n", - "\n", - "\n", - "count=Image_count=0\n", - "if os.path.exists(\"output\")==False:\n", - " os.mkdir(\"output\")\n", - "if os.path.exists(\"output/CPU\")==False:\n", - " os.mkdir(\"output/CPU\")\n", - "if os.path.exists(\"output/DSP\")==False:\n", - " os.mkdir(\"output/DSP\")\n", - "for image in Image_Paths:\n", - " image_path = 'val2017/'+image+\".jpg\"\n", - " im = 
Image.open(image_path)\n", - " file1 = 'OUTPUT_32b_CPU/Result_' + str(count) + '/5862.raw'\n", - " file2 = 'OUTPUT_32b_CPU/Result_' + str(count) + '/5863.raw'\n", - " file3 = 'OUTPUT_8b_DSP/Result_' + str(count) + '/5862.raw'\n", - " file4 = 'OUTPUT_8b_DSP/Result_' + str(count) + '/5863.raw'\n", - " a=np.fromfile(file1,np.float32)\n", - " a=a.reshape(1,100,92)\n", - " tensor_a = torch.from_numpy(a)\n", - " b=np.fromfile(file2,np.float32)\n", - " b=b.reshape(1,100,4)\n", - " tensor_b = torch.from_numpy(b)\n", - "\n", - " c=np.fromfile(file3,np.float32)\n", - " c=c.reshape(1,100,92)\n", - " tensor_c = torch.from_numpy(c)\n", - " d=np.fromfile(file4,np.float32)\n", - " d=d.reshape(1,100,4)\n", - " tensor_d = torch.from_numpy(d)\n", - "\n", - "\n", - " \n", - " probas = tensor_a.softmax(-1)[0, :, :-1]\n", - " keep = probas.max(-1).values > 0.9\n", - " bboxes_scaled = rescale_bboxes(tensor_b[0, keep], im.size)\n", - " print(\"CPU FP32 Inference Result\")\n", - " plot_results(im, probas[keep], bboxes_scaled,Image_count)\n", - " Image_count=Image_count+1\n", - "\n", - " probas = tensor_c.softmax(-1)[0, :, :-1]\n", - " keep = probas.max(-1).values > 0.9\n", - " bboxes_scaled = rescale_bboxes(tensor_d[0, keep], im.size)\n", - " print(\"DSP INT8 Inference Result\")\n", - " plot_results(im, probas[keep], bboxes_scaled,Image_count)\n", - " Image_count=Image_count+1\n", - " count=count+1" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/03-object-detection/detr_resnet101/generateModels.py b/models-for-solutions/03-object-detection/detr_resnet101/generateModels.py deleted file mode 100644 index 45326547..00000000 --- a/models-for-solutions/03-object-detection/detr_resnet101/generateModels.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- mode: python -*- -# ============================================================================= -# @@-COPYRIGHT-START-@@ -# -# Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved. 
-# SPDX-License-Identifier: BSD-3-Clause -# -# @@-COPYRIGHT-END-@@ -# ============================================================================= - -import torch -import os -import shutil -#print(torch.hub.list('facebookresearch/detr')) -model = torch.hub.load('facebookresearch/detr', 'detr_resnet101', pretrained=True) -dummy_input=torch.randn(1, 3, 800, 1066) -if os.path.exists('models')==False: - os.mkdir('models') -else: - shutil.rmtree('models') - os.mkdir('models') - print("Folder Already exists") -torch.onnx.export(model, dummy_input, "models/detr_resnet101.onnx", opset_version=11, verbose=False) -print("ONNX Model saved Successfully") -command1="snpe-onnx-to-dlc --input_network models/detr_resnet101.onnx --output_path models/detr_resnet101_fp32.dlc" -os.system(command1) -command2="snpe-dlc-quantize --input_dlc models/detr_resnet101_fp32.dlc --input_list list.txt --output_dlc models/detr_resnet101_w8a8.dlc --enable_htp" -os.system(command2) - diff --git a/models-for-solutions/03-object-detection/yolonas/README.md b/models-for-solutions/03-object-detection/yolonas/README.md deleted file mode 100644 index 58fff93b..00000000 --- a/models-for-solutions/03-object-detection/yolonas/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# Object Detection with YOLO-NAS Model - -| Field | Description | -| --- | --- | -| Model Name | YOLO-NAS | -| DNN Framework | ONNX | -| Public Repo | https://github.com/Deci-AI/super-gradients/ | -| Paper | https://arxiv.org/abs/2304.00501 | -| Accuracy Metric | mAP | - -## Pre-requisites - -- Set up the AI SDK: Qualcomm® Neural Processing SDK (Linux). - -- Follow the instructions given in the SDK to set up the SDK - -## How to get the ONNX model from open source? -- Install super-gradients version 3.1.2 -```python -from super_gradients.training import models -from super_gradients.common.object_names import Models -model = models.get(Models.YOLO_NAS_S, pretrained_weights="coco") -``` - -# Accuracy analysis - -- To check accuracy, run the "Accuracy_analyzer.ipynb" Jupyter notebook. -- To run any Jupyter notebook, run the command below. It will print a few links on the screen; pick the link with your machine name (host-name) on it and paste it into any browser. -- Navigate to the notebook ".ipynb" file and click it. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - -# References - -1. YOLO-NAS model paper: https://arxiv.org/abs/2304.00501 -2. https://github.com/Megvii-BaseDetection/YOLOX/tree/main -3. 
https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md diff --git a/models-for-solutions/03-object-detection/yolonas/object_detection_yolonas.ipynb b/models-for-solutions/03-object-detection/yolonas/object_detection_yolonas.ipynb deleted file mode 100644 index 42047a66..00000000 --- a/models-for-solutions/03-object-detection/yolonas/object_detection_yolonas.ipynb +++ /dev/null @@ -1,603 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "039733ac-47fb-4ab3-a842-1013a2e1dea3", - "metadata": {}, - "source": [ - "# Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ca28e659-dd04-4f14-9a04-c8fbf3f0e33a", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"\n", - "os.environ['DLC32']=\"models/yolo_nas_fp32.dlc\"\n", - "os.environ['DLC8']=\"models/yolo_nas_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"input.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"yolonas\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #change with your device-id. Use command \"adb devices\" to get devices names.\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1ea7c3bb-9f35-4d1c-84b7-c5de30ec895e", - "metadata": {}, - "outputs": [], - "source": [ - "## Note- Use python3.8 or above for generating onnx\n", - "!pip install super-gradients==3.1.2\n", - "import torch\n", - "from super_gradients.training import models\n", - "from super_gradients.common.object_names import Models\n", - "import cv2\n", - "import numpy as np\n", - "import os" - ] - }, - { - "cell_type": "markdown", - "id": "85db567b-85d4-4565-9eb5-c29cde94996a", - "metadata": {}, - "source": [ - "## Getting The dataset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fceaa07b-a67f-41df-85f4-f5b0c29effde", - "metadata": {}, - "outputs": [], - "source": [ - "# User needs to download the dataset of their choice. 
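> The YOLO-NAS notebook above leaves the dataset download to the user, while its preprocessing cell expects a local `val2017/` folder. A minimal sketch of one way to fetch it, assuming the public COCO val2017 archive (the URL and the roughly 1 GB download size are assumptions; substitute any dataset you are licensed to use):

```python
# Sketch only: fetch and unpack COCO val2017 so that a ./val2017 folder exists,
# matching the dataset_path used by the preprocessing cell in this notebook.
import os
import urllib.request
import zipfile

COCO_VAL_URL = "http://images.cocodataset.org/zips/val2017.zip"  # assumed public mirror

def fetch_val2017(dest_dir: str = ".") -> str:
    """Download and extract COCO val2017 images into <dest_dir>/val2017 if missing."""
    target = os.path.join(dest_dir, "val2017")
    if not os.path.isdir(target):
        archive = os.path.join(dest_dir, "val2017.zip")
        urllib.request.urlretrieve(COCO_VAL_URL, archive)   # ~1 GB, 5000 images
        with zipfile.ZipFile(archive) as zf:
            zf.extractall(dest_dir)                          # creates val2017/
        os.remove(archive)
    return target

# fetch_val2017()  # uncomment to download before running the preprocessing cell
```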
" - ] - }, - { - "cell_type": "markdown", - "id": "1228d5e0-beb1-4033-be62-165dcdd3bd02", - "metadata": {}, - "source": [ - "## Getting the ONNX Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dadb32d9-064e-428e-bb3e-270765df79da", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('models', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c6522d80-61d2-47df-a84a-69cf802dc1f3", - "metadata": {}, - "outputs": [], - "source": [ - "model = models.get(Models.YOLO_NAS_S, pretrained_weights=\"coco\")\n", - "# Prpare model for conversion\n", - "# Input size is in format of [Batch x Channels x Width x Height] where 640 is the standard dataset dimensions\n", - "model.eval()\n", - "model.prep_model_for_conversion(input_size=[1, 3, 320, 320])\n", - "# Create dummy_input\n", - "dummy_input = torch.randn([1, 3, 320, 320], device=\"cpu\")\n", - "# Convert model to onnx\n", - "torch.onnx.export(model, dummy_input, \"models/yolo_nas_s.onnx\", opset_version=11)" - ] - }, - { - "cell_type": "markdown", - "id": "c672610a-f036-4fab-8837-2d83dbe051b7", - "metadata": {}, - "source": [ - "#### Getting the FP32 Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "09ce77df-5e7c-4bcc-896c-023eec610993", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc -i models/yolo_nas_s.onnx -o models/yolo_nas_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "5a9e015e-d724-4178-9630-c74c263d0a7a", - "metadata": {}, - "source": [ - "## Preprocessing" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7339803b-968a-403e-b914-0c3825c81605", - "metadata": {}, - "outputs": [], - "source": [ - "def preprocess(original_image):\n", - " resized_image = cv2.resize(original_image, (320, 320))\n", - " resized_image = resized_image/255\n", - " return resized_image\n", - "##Please test download and give the path here\n", - "dataset_path = \"val2017/\"\n", - "!mkdir -p rawYoloNAS\n", - "filenames=[]\n", - "for path in os.listdir(dataset_path):\n", - " # check if current path is a file\n", - " if os.path.isfile(os.path.join(dataset_path, path)):\n", - " filenames.append(os.path.join(dataset_path, path))\n", - "for filename in filenames:\n", - " original_image = cv2.imread(filename)\n", - " img = preprocess(original_image)\n", - " img = img.astype(np.float32)\n", - " img.tofile(\"raw/\"+filename.split(\"/\")[-1].split(\".\")[0]+\".raw\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9acc7241-3049-48b0-bdc3-80df6049d28c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "find raw -name *.raw > input.txt" - ] - }, - { - "cell_type": "markdown", - "id": "09838622-c04b-4357-b5a6-361f955b4f65", - "metadata": {}, - "source": [ - "## Quantize the DLC" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "918fe137-ea15-461c-9cd0-69ce0480ffb5", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize --input_dlc models/yolo_nas_fp32.dlc --input_list input.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc models/yolo_nas_w8a8.dlc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "928a7554-64c1-4000-be34-80e20e8cef22", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "adb devices" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"id": "752cc7dd-1fea-4204-a3d6-2921b4b4f956", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b3e61748-bbf7-4eb7-b4bb-55dd104b4a13", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6abb9a55-b105-4c54-ae1e-fc26e38cb2c7", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2ec4921c-3092-4759-8154-268744fa8979", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fe662871-0e92-4b8a-b6db-2c837af60b75", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=yolo_nas_w8a8.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"yolonas\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list input.txt --set_output_tensors 885,877 --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7fd0a0d3-7d37-43d3-9913-a42979e89ba2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=yolo_nas_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"yolonas\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list input.txt --set_output_tensors 885,877 
--output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8a55bac6-d466-4e93-b890-8e8819a2873a", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output',exist_ok=True)" - ] - }, - { - "cell_type": "markdown", - "id": "1b73bf4a-e8d3-4205-b722-fa7a3bc3733d", - "metadata": {}, - "source": [ - "## Pull output from device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fe1ffbb7-63e9-4072-a9cf-b334b1f27573", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP output/OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f33badd2-2ac5-435e-9796-853b74deb570", - "metadata": {}, - "outputs": [], - "source": [ - "def ImageNames():\n", - " inputlist = open('input.txt', 'r')\n", - " Lines = inputlist.readlines()\n", - " count = 0\n", - " imageList = []\n", - " for line in Lines:\n", - " name = line.split(\"/\",1)[1]\n", - " name = name.split('.')[0]\n", - " imageList.append(name)\n", - " count += 1\n", - " return imageList\n", - "imageList = ImageNames()\n", - "print((imageList))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4323ab83-b567-4237-9583-a2226c78737e", - "metadata": {}, - "outputs": [], - "source": [ - "label2class = {'0': 'person', '1': 'bicycle', '2': 'car', '3': 'motorcycle', '4': 'airplane', '5': 'bus', \n", - " '6': 'train', '7': 'truck', '8': 'boat', '9': 'traffic', '10': 'fire', '11': 'stop', '12': 'parking', \n", - " '13': 'bench', '14': 'bird', '15': 'cat', '16': 'dog', '17': 'horse', '18': 'sheep', '19': 'cow', \n", - " '20': 'elephant', '21': 'bear', '22': 'zebra', '23': 'giraffe', '24': 'backpack', '25': 'umbrella', \n", - " '26': 'handbag', '27': 'tie', '28': 'suitcase', '29': 'frisbee', '30': 'skis', '31': 'snowboard', \n", - " '32': 'sports', '33': 'kite', '34': 'baseball', '35': 'baseball', '36': 'skateboard', '37': 'surfboard', \n", - " '38': 'tennis', '39': 'bottle', '40': 'wine', '41': 'cup', '42': 'fork', '43': 'knife', '44': 'spoon', \n", - " '45': 'bowl', '46': 'banana', '47': 'apple', '48': 'sandwich', '49': 'orange', '50': 'broccoli', \n", - " '51': 'carrot', '52': 'hot', '53': 'pizza', '54': 'donut', '55': 'cake', '56': 'chair', '57': 'couch', \n", - " '58': 'potted', '59': 'bed', '60': 'dining', '61': 'toilet', '62': 'tv', '63': 'laptop', '64': 'mouse', \n", - " '65': 'remote', '66': 'keyboard', '67': 'cell', '68': 'microwave', '69': 'oven', '70': 'toaster', \n", - " '71': 'sink', '72': 'refrigerator', '73': 'book', '74': 'clock', '75': 'vase', '76': 'scissors', \n", - " '77': 'teddy', '78': 'hair', '79': 'toothbrush'}" - ] - }, - { - "cell_type": "markdown", - "id": "a53dfe36-f8ca-4c2a-ab50-e2fbeeb1e06d", - "metadata": {}, - "source": [ - "## Post Processing " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "97cad2e6-0a63-48bf-b257-45631ab6b06e", - "metadata": {}, - "outputs": [], - "source": [ - "from matplotlib import pyplot as plt\n", - "import cv2.dnn\n", - "import numpy as np\n", - "colors = np.random.uniform(0, 255, size=(len(list(label2class.values())), 3))\n", - "def draw_bounding_box(img, class_id, confidence, x, y, x_plus_w, y_plus_h):\n", - " label = f'{label2class[str(class_id)]} ({confidence:.2f})'\n", - " color = colors[class_id]\n", - " img = 
cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 1)\n", - " img = cv2.putText(img, label, (x +2, y -10), cv2.FONT_HERSHEY_TRIPLEX, 1, color, 4)\n", - " return img" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "935cbdd2-6a66-4ede-bb83-0cfd5347aee7", - "metadata": {}, - "outputs": [], - "source": [ - "def postProc(filename, output1, output2, out_path):\n", - "\n", - " output1_reshape = output1.reshape(2100,4)\n", - " output2_reshape = output2.reshape(2100,80)\n", - " output = output2_reshape\n", - " \n", - " original_image: np.ndarray = cv2.imread(filename)\n", - " ratio_1 = original_image.shape[0]/320\n", - " ratio_2 = original_image.shape[1]/320\n", - " \n", - " boxes = []\n", - " scores = []\n", - " class_ids = []\n", - " \n", - " for i in range(0, output.shape[0]):\n", - " classes_scores = output[i]\n", - " (minScore, maxScore, minClassLoc, (x, maxClassIndex)) = cv2.minMaxLoc(classes_scores)\n", - " if maxScore >= 0.05:\n", - " x = round(output1_reshape[i][0]) ; y = round(output1_reshape[i][1]); \n", - " w = round(output1_reshape[i][2]) ; h = round(output1_reshape[i][3]);\n", - " \n", - " x1, y1 = x, y\n", - " x2, y2 = w, h\n", - " box = [x1, y1, x2, y2]\n", - " boxes.append(box)\n", - " scores.append(float(maxScore))\n", - " class_ids.append(maxClassIndex)\n", - " if(len(boxes)==704) or len(boxes)== 693:\n", - " print(\"i = \",i)\n", - " print(\"x = \",x)#x1\n", - " print(\"y = \",y)#y1\n", - " print(\"w = \",w)#x2\n", - " print(\"h = \",h)#y2\n", - " print(box)\n", - " result_boxes = cv2.dnn.NMSBoxes(boxes, scores, 0.20, 0.5, 0.5) #32b CPU\n", - " \n", - " print(\"result_boxes :: \",result_boxes)\n", - " detections = []\n", - " img = original_image\n", - " for i in range(len(result_boxes)):\n", - " index = result_boxes[i]\n", - " box = boxes[index]\n", - " detection = {\n", - " 'class_id': class_ids[index],\n", - " 'class_name': label2class[str(class_ids[index])],\n", - " 'confidence': scores[index],\n", - " 'box': box\n", - " }\n", - " detections.append(detection)\n", - " img = draw_bounding_box(original_image, class_ids[index], scores[index], int(box[0]*ratio_2), int(box[1]*ratio_1), int(box[2]*ratio_2), int(box[3]*ratio_1))\n", - " print(detection)\n", - " print(\"boxcords::\",int(box[0]), int(box[1]), int(box[2]), int(box[3]))\n", - " print(\"boxcords::\",int(box[0]*ratio_2), int(box[1]*ratio_1), int(box[2]*ratio_2), int(box[3]*ratio_1))\n", - " # cv2.imwrite(\"test.jpg\", img)\n", - " plt.imsave(out_path,img)\n", - " # plt.show()\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "eb0c5470-5072-4b2b-af3c-49f012cb98a0", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output/32b_arm', exist_ok=True)\n", - "os.makedirs('output/8b_dsp', exist_ok=True)" - ] - }, - { - "cell_type": "markdown", - "id": "e994c713-b29a-4eb9-b601-d8c4ec07030e", - "metadata": {}, - "source": [ - "## Save results on CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8678f9ad-d575-41bb-9f6a-24901586c908", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"output/OUTPUT_32b_CPU/\"\n", - "image_dir = 'val2017/'\n", - "import cv2\n", - "import os\n", - "import numpy as np\n", - "import torch\n", - "from PIL import Image\n", - "for i in range(0,len(imageList)):\n", - " img_path = image_dir+imageList[i]+'.jpg'\n", - " raw_path = os.path.join(test_images_dir, 'Result_')\n", - " output1 = np.fromfile(raw_path+str(i)+'/885.raw',dtype=\"float32\")\n", - " output2 = 
np.fromfile(raw_path+str(i)+'/877.raw',dtype=\"float32\")\n", - " print(output1.shape)\n", - " print(output2.shape)\n", - " out_path = 'output/32b_arm/'+imageList[i]+'_prediction_32b_arm.png'\n", - " postProc(img_path,output1,output2,out_path)\n", - " i = i +1" - ] - }, - { - "cell_type": "markdown", - "id": "a2ff6cbc-ee34-4f46-999d-f23700671b93", - "metadata": {}, - "source": [ - "## Save results on DSP" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b08a3c30-1868-4b57-bbe1-1d7d2c42cb73", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"output/OUTPUT_8b_DSP/\"\n", - "image_dir = 'val2017/'\n", - "import cv2\n", - "import os\n", - "import numpy as np\n", - "import torch\n", - "from PIL import Image\n", - "for i in range(0,len(imageList)):\n", - " img_path = image_dir+imageList[i]+'.jpg'\n", - " raw_path = os.path.join(test_images_dir, 'Result_')\n", - " output1 = np.fromfile(raw_path+str(i)+'/885.raw',dtype=\"float32\")\n", - " output2 = np.fromfile(raw_path+str(i)+'/877.raw',dtype=\"float32\")\n", - " print(output1.shape)\n", - " print(output2.shape)\n", - " out_path = 'output/8b_dsp/'+imageList[i]+'_prediction_8b_dsp.png'\n", - " postProc(img_path,output1,output2,out_path)\n", - " i = i +1" - ] - }, - { - "cell_type": "markdown", - "id": "d904078b-2052-4bf3-94f8-7dc5aa4f5a39", - "metadata": {}, - "source": [ - "## Output on CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d3a78d6b-fcae-4d9a-bcb7-0e35755ceb21", - "metadata": {}, - "outputs": [], - "source": [ - "for i in range(5):\n", - " img = plt.imread('output/32b_arm/'+imageList[i]+'_prediction_32b_arm.png')\n", - " plt.imshow(img)\n", - " plt.show()" - ] - }, - { - "cell_type": "markdown", - "id": "6a2d7734-2887-4022-a040-a752c1cc8794", - "metadata": {}, - "source": [ - "## Output on DSP" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8fc8bbe3-448f-4c2c-ae39-a8c424da441d", - "metadata": {}, - "outputs": [], - "source": [ - "for i in range(5):\n", - " img = plt.imread('output/8b_dsp/'+imageList[i]+'_prediction_8b_dsp.png')\n", - " plt.imshow(img)\n", - " plt.show()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/04-image-segmentation/FFNet/ffnet.patch b/models-for-solutions/04-image-segmentation/FFNet/ffnet.patch deleted file mode 100644 index a79e90a4..00000000 --- a/models-for-solutions/04-image-segmentation/FFNet/ffnet.patch +++ /dev/null @@ -1,16 +0,0 @@ -diff --git a/aimet_zoo_torch/ffnet/model/ffnet_S_mobile.py b/aimet_zoo_torch/ffnet/model/ffnet_S_mobile.py -index 8935a5d..ef05033 100644 ---- a/aimet_zoo_torch/ffnet/model/ffnet_S_mobile.py -+++ b/aimet_zoo_torch/ffnet/model/ffnet_S_mobile.py -@@ -88,10 +88,7 @@ def segmentation_ffnet40S_dBBB_mobile(): - model_name="ffnnet40S_dBBB_mobile", - backbone=resnet.Resnet40S_D, - pre_downsampling=False, -- pretrained_weights_path=os.path.join( -- model_weights_base_path, -- "ffnet40S/ffnet40S_dBBB_cityscapes_state_dict_quarts.pth", -- ), -+ pretrained_weights_path="ffnet40S_dBBB_cityscapes_state_dict_quarts.pth", - strict_loading=True, - ) - diff --git 
a/models-for-solutions/04-image-segmentation/FFNet/ffnet_seg.ipynb b/models-for-solutions/04-image-segmentation/FFNet/ffnet_seg.ipynb deleted file mode 100644 index 95e48c55..00000000 --- a/models-for-solutions/04-image-segmentation/FFNet/ffnet_seg.ipynb +++ /dev/null @@ -1,674 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "587deb17-42d7-4f23-888b-37654da89a27", - "metadata": {}, - "source": [ - "# Setting up SDK artifacts" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7a5eca17-7cab-4821-8571-2e8e96528f44", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"input/raw\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"../FFNet/\"\n", - "os.environ['DLC32']=\"models/ffnet_fp32.dlc\"\n", - "os.environ['DLC8']=\"models/ffnet_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"input/input.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"ffnetseg\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #change with your device-id. Use command \"adb devices\" to get devices names.\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4043915c-c99a-4bd9-9b99-e9ceb7a83d65", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "git clone https://github.com/quic/aimet-model-zoo/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2adc4d0a-e124-46e1-93ed-ec82e7deae3e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp -r ffnet.patch aimet-model-zoo/\n", - "cd aimet-model-zoo/\n", - "git apply ffnet.patch" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4d7856e6-22e8-43df-9ebe-cecd2419bd96", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.makedirs('utils', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "76d048e5-124e-46e4-81f5-d281da5b6197", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "cp -r aimet-model-zoo/aimet_zoo_torch/ffnet/model/* ./utils/\n", - "rm -rf aimet-model-zoo/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "565e2274-6866-4da8-a7dc-f08dd077360a", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "wget https://github.com/quic/aimet-model-zoo/releases/download/torch_segmentation_ffnet/ffnet40S_dBBB_cityscapes_state_dict_quarts.pth" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "75a2deae-a540-41d1-8845-ae30aec2b8b9", - "metadata": {}, - "outputs": [], - "source": [ - "from utils.model_registry import model_entrypoint\n", - "net = model_entrypoint(\"segmentation_ffnet40S_dBBB_mobile\")()\n", - "net.eval()" - ] - }, - { - "cell_type": "markdown", - "id": "36363a57-ceb8-4ec9-a1e9-08729d062142", - "metadata": {}, - "source": [ - "# Getting The ONNX Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b553e278-0969-45f0-ad3d-1432453b0e29", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('models',exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c6c9ed3f-ac94-4b46-b9ac-359d100e26b1", - "metadata": {}, - "outputs": [], - 
"source": [ - "import torch\n", - "import os\n", - "import cv2\n", - "import glob\n", - "import numpy as np\n", - "import torch\n", - "from os.path import isfile, join\n", - "import matplotlib.pyplot as plt \n", - "from PIL import Image\n", - "from torchvision import transforms as T" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4b85c7b4-9883-4e34-ba2e-02d53e242413", - "metadata": {}, - "outputs": [], - "source": [ - "dummy_input = torch.randn(1,3, 512, 512).type(torch.FloatTensor).to('cpu')\n", - "torch.onnx.export(net, dummy_input, \"./models/ffnet.onnx\",opset_version=11)" - ] - }, - { - "cell_type": "markdown", - "id": "52423e4c-9b9c-4dd5-ab67-0946cbb6c26f", - "metadata": {}, - "source": [ - "### Generate DLC from ONNX" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c07d08d3-10b4-484e-9c83-22e22989b8e8", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc --input_network models/ffnet.onnx --output_path models/ffnet_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "e20656ce-9ef8-4ee5-863c-6680da58dcbb", - "metadata": {}, - "source": [ - "## Pre-Process the data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6112ba04-0650-4682-b0bf-615de5bfbfed", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('input',exist_ok=True)\n", - "os.makedirs('input/dataset',exist_ok=True)\n", - "os.makedirs('input/dataset/image/',exist_ok=True)\n", - "os.makedirs('input/dataset/ground_truth/',exist_ok=True)" - ] - }, - { - "cell_type": "markdown", - "id": "d48e9c55-0274-443d-8e92-0cf57a6858ee", - "metadata": {}, - "source": [ - "## Steps to set Dataset path\n" - ] - }, - { - "cell_type": "markdown", - "id": "fd363e51-c76f-4acb-9a68-8ff0b823086f", - "metadata": {}, - "source": [ - "User needs to download the dataset of their choice" - ] - }, - { - "cell_type": "markdown", - "id": "138a9c32-84e0-4f33-8333-90c6e74b415e", - "metadata": {}, - "source": [ - "## Generate raw files" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "78feec60-df67-4712-b299-d522f2cb83fb", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('input/raw', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "13a04af4-90fc-4058-a8d4-33851446d771", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = 'input/dataset/image/'\n", - "all_files = os.listdir(test_images_dir)\n", - "img_paths = []\n", - "image_names = []\n", - "for file in all_files:\n", - " img_paths.append(test_images_dir+file)\n", - " name = file.replace(\".png\", \"\")\n", - " image_names.append(name)\n", - "img_paths = sorted(img_paths)\n", - "image_names.sort()\n", - "i=0\n", - "for img in img_paths:\n", - " raw_data = preProcess(img)\n", - " type(raw_data)\n", - " raw_data.tofile('input/raw/'+image_names[i]+'.raw')\n", - " i = i+1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bb898b9b-6fe1-43d7-9fab-172a48c9ef5f", - "metadata": {}, - "outputs": [], - "source": [ - "directory_path = 'input/raw/'\n", - "output_file_path = 'input/input.txt' # The file where the output will be saved\n", - "all_files = os.listdir(directory_path)\n", - "# Filter only the .raw files and create a list of their names\n", - "raw_files = [file for file in all_files if file.endswith('.raw')]\n", - "raw_files = sorted(raw_files)\n", - "# Write the file names to the output file\n", - "with open(output_file_path, 'w') as 
f:\n", - " c=0\n", - " for raw_file in raw_files:\n", - " f.write(f\"./raw/{raw_file}\\n\")\n", - " c=c+1\n", - "print(f\"File names written to {output_file_path}.\")" - ] - }, - { - "cell_type": "markdown", - "id": "577b0def-bb1a-4091-afe7-d09547f589d4", - "metadata": {}, - "source": [ - "## generate Quantized dlc from dlc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fb764da8-e98e-4055-8e13-54006699d4f3", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input\n", - "snpe-dlc-quantize --input_dlc ../models/ffnet_fp32.dlc --input_list input.txt --output_dlc ../models/ffnet_w8a8.dlc " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c5c51892-bde0-4054-a9cc-17367ffc86c0", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input\n", - "snpe-dlc-quantize --input_dlc ../models/ffnet_fp32.dlc --input_list input.txt --output_dlc ../models/ffnet_w8a16.dlc --act_bitwidth 16 " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a5aa5ab3-5b44-492c-b178-34ace144a351", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "adb devices" - ] - }, - { - "cell_type": "markdown", - "id": "46c0c6ae-558d-4a52-9b8f-a19e35dfd158", - "metadata": {}, - "source": [ - "## Model Inference on Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8f959b54-9a7c-41b3-8304-5679894f9465", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f16cdc34-20a5-497b-9e43-023b7530e4e6", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "453d7fd8-8538-44e8-83cc-a8839c169bcc", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "markdown", - "id": "b4a687ab-bf73-479e-9984-a8440c484cec", - "metadata": {}, - "source": [ - "## Push files on device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2964cbb8-6f34-48f7-ac0b-0be3d40e6db5", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST 
/data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "a2853869-ffc2-40e1-80a8-97604912927e", - "metadata": {}, - "source": [ - "## Inferencing the FP32 Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cf7b5647-877c-4233-9a1b-6280a55f58e3", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=ffnet_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"ffnetseg\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list input.txt --output_dir $OUTPUT_FOLDER " - ] - }, - { - "cell_type": "markdown", - "id": "e292374c-1cfd-4497-92b3-296a007a2a37", - "metadata": {}, - "source": [ - "## Inferencing the INT 8 Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b9c7283e-6518-4656-b8e6-adfb6f5f7f43", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=ffnet_w8a8.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"ffnetseg\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list input.txt --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "a852c434-9d77-404b-930d-334e2ed08c14", - "metadata": {}, - "source": [ - "## Pull output raw files " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "120a71eb-c087-4435-85ae-03e791b59fb3", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output',exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5c09a1d5-0b56-43f0-8ec8-f626d706f040", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP output/OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "markdown", - "id": "c60cae78-94d6-4e1c-b5c6-08020ff923ad", - "metadata": {}, - "source": [ - "# Post Processing" - ] - }, - { - "cell_type": "markdown", - "id": "ba243f58-597f-40d1-aae1-99fab467c3f5", - "metadata": {}, - "source": [ - "#### Creating the Necessary Path Needed" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7d8c41dc-0f81-4b80-a80a-14c8a1882f96", - "metadata": {}, - "outputs": [], - "source": [ - "if not os.path.exists('output/model_prediction'):\n", - " os.makedirs('output/model_prediction')\n", - " \n", - "if not os.path.exists('output/test_results/'):\n", - " os.makedirs('output/test_results/')\n", - " \n", - "if not os.path.exists('output/test_results/8b_dsp'):\n", - " os.makedirs('output/test_results/8b_dsp')\n", - " \n", - "if not os.path.exists('output/test_results/32b_arm'):\n", - " 
os.makedirs('output/test_results/32b_arm')\n" - ] - }, - { - "cell_type": "markdown", - "id": "79681d37-a176-40d5-aa23-11ee754c1e95", - "metadata": {}, - "source": [ - "## function for post processing raw outputs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1d961306-3563-4419-861c-1d6693b563b7", - "metadata": {}, - "outputs": [], - "source": [ - "def postProcessing(img_path,out_path):\n", - " res = np.fromfile(img_path, dtype=\"float32\")\n", - " #print(\"res\", res.shape)\n", - " res_reshape = res.reshape((1,64,64,19)).astype(np.float32)\n", - " res_reshape = np.transpose(res_reshape,(0,3,1,2))\n", - " res_reshape = torch.from_numpy(res_reshape)\n", - " size = [1,3,512,1024]\n", - " pred = F.interpolate(\n", - " input=res_reshape, size=size[-2:],\n", - " mode='bilinear', align_corners=False\n", - " )\n", - " model_img = pred\n", - " pred = model_img.max(1)[1].cpu().numpy()[0] # HW\n", - " pred.shape\n", - "\n", - " plt.imsave(out_path,pred)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e11f8d9d-ecfb-4c63-a318-866b126d242d", - "metadata": {}, - "outputs": [], - "source": [ - "from torch.nn import functional as F\n", - "import matplotlib.pyplot as plt\n", - "import os\n", - "test_images_dir = \"output/OUTPUT_8b_DSP/\"\n", - "\n", - "for i in range(0,5):\n", - " img_path = os.path.join(test_images_dir, 'Result_')\n", - " img_path = img_path+str(i)+'/617.raw'\n", - " out_path = 'output/test_results/8b_dsp/'+image_names[i]+'.png'\n", - " postProcessing(img_path, out_path)\n", - " i = i +1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8eb6272f-7c1e-42d6-981c-2909c742616d", - "metadata": {}, - "outputs": [], - "source": [ - "from torch.nn import functional as F\n", - "import matplotlib.pyplot as plt\n", - "\n", - "test_images_dir = \"output/OUTPUT_32b_CPU/\"\n", - "\n", - "for i in range(0,5):\n", - " img_path = os.path.join(test_images_dir, 'Result_')\n", - " img_path = img_path+str(i)+'/617.raw'\n", - " out_path = 'output/test_results/32b_arm/'+image_names[i]+'.png'\n", - " postProcessing(img_path, out_path)\n", - " i = i +1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e03893f1-e42a-4804-8446-34e60e76077a", - "metadata": {}, - "outputs": [], - "source": [ - "def image_overlay(image, segmented_image):\n", - " \n", - " alpha = 0.6 # how much transparency to apply\n", - " beta = 1 - alpha # alpha + beta should equal 1\n", - " gamma = 0 # scalar added to each sum\n", - " image = np.array(image)\n", - " image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n", - " segmented_image = cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR)\n", - " cv2.addWeighted(segmented_image, alpha, image, beta, gamma, image)\n", - " return image" - ] - }, - { - "cell_type": "markdown", - "id": "f8ee966f-cfc9-4d08-81be-1a9793dd22a8", - "metadata": {}, - "source": [ - "## Visualize the outputs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ce896cea-18ba-4cca-ba3f-cf01f3282a64", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "fig = plt.figure(figsize=(30, 100));\n", - "import cv2\n", - "for i in range(0, 5):\n", - " \n", - " original = cv2.imread('input/dataset/image/'+image_names[i]+'.png')\n", - " original = cv2.cvtColor(original, cv2.COLOR_BGR2RGB)\n", - " ax = fig.add_subplot(28,4,4*i+1);\n", - " plt.imshow(original,cmap='gray');\n", - " ax.set_title('original image\\n');\n", - " ax.axis('off');\n", - "\n", - " temp_name = 
image_names[i].replace(\"_leftImg8bit\",\"_gtFine_color\")\n", - " pth_inf = cv2.imread('input/dataset/ground_truth/'+temp_name+'.png')\n", - "\n", - " ax = fig.add_subplot(28,4,4*i+2);\n", - " plt.imshow(pth_inf,cmap='gray');\n", - " ax.set_title('pth output\\n');\n", - " ax.axis('off');\n", - "\n", - "\n", - " arm_fp32= cv2.imread('output/test_results/32b_arm/'+image_names[i]+'.png')\n", - "\n", - " ax = fig.add_subplot(28,4,4*i+3);\n", - " plt.imshow(arm_fp32,cmap='gray');\n", - " ax.set_title('fp32 on ARM\\n');\n", - " ax.axis('off');\n", - "\n", - " dsp= cv2.imread('output/test_results/8b_dsp/'+image_names[i]+'.png')\n", - " ax = fig.add_subplot(28,4,4*i+4);\n", - " plt.imshow(dsp,cmap='gray');\n", - " ax.set_title('int8 on DSP\\n');\n", - " ax.axis('off');" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/04-image-segmentation/FFNet/readme.md b/models-for-solutions/04-image-segmentation/FFNet/readme.md deleted file mode 100644 index 803b8e37..00000000 --- a/models-for-solutions/04-image-segmentation/FFNet/readme.md +++ /dev/null @@ -1,55 +0,0 @@ -# Semantic segmentation FFNet - -## Pre-requisites - -* Please follow the instructions for setting up the Qualcomm Neural Processing SDK using the [link](https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) provided. -- Install onnx v1.6.0. Installation instructions can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. -- Install OpenCV ```pip install opencv-python``` -- Install mxnet ```pip install mxnet``` - - -## How to get the model? -Download the pretrained weights from [link](https://github.com/quic/aimet-model-zoo/releases/tag/torch_segmentation_ffnet) - - -For the ONNX model, follow the attached notebook: - -``` -import os -dummy_input = torch.randn(1,3, 512, 512).type(torch.FloatTensor).to('cpu') -torch.onnx.export(net, dummy_input, "./models/ffnet.onnx",opset_version=11) - - -``` -## Convert model to DLC - -For the FP32 and FP16 DLC models, follow the attached notebook: - -``` -snpe-onnx-to-dlc --input_network models/ffnet.onnx --output_path models/ffnet.dlc - - -``` -## Quantization of DLC -For the quantized INT8 and INT16 DLC models, follow the attached notebook: -``` -cd input -snpe-dlc-quantize --input_dlc ../models/ffnet.dlc --input_list input.txt --output_dlc ../models/ffnet_quantized.dlc - -``` - - - -## Accuracy Analysis -- To check the results, please run "[FFNet](ffnet_seg.ipynb)". -- To run any Jupyter notebook, run the command below. It will print a few links on the screen; pick the link with your machine's host name in it and paste it into any browser. -- Navigate to the notebook ".ipynb" file and simply click that file. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - - - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. 
and/or its subsidiaries.* diff --git a/models-for-solutions/04-image-segmentation/deeplabv3_resnet101/deeplabv3_resnet101.ipynb b/models-for-solutions/04-image-segmentation/deeplabv3_resnet101/deeplabv3_resnet101.ipynb deleted file mode 100644 index c922ca36..00000000 --- a/models-for-solutions/04-image-segmentation/deeplabv3_resnet101/deeplabv3_resnet101.ipynb +++ /dev/null @@ -1,635 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "c1ba280a-ea16-443e-8dcf-d2447f49f61e", - "metadata": {}, - "source": [ - "## Setting UP SNPE Artifacts" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2ed8c6a7-69c6-47ac-9993-832a693de610", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"input/raw\"\n", - "os.environ['DLC32']=\"models/deeplabv3_resnet101_fp32.dlc\"\n", - "os.environ['DLC8']=\"models/deeplabv3_resnet101_quant_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"input/input.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"deeplabv3_resnet101\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #change with your device-id. Use command \"adb devices\" to get devices names.\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "97e0ed81-13cb-4ad7-a14a-a9b842da8765", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import torchvision.models.segmentation as models\n", - "import torch\n", - "import torch.nn as nn" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "62ad7983-0084-47d1-b1fe-1813bd6651e5", - "metadata": {}, - "outputs": [], - "source": [ - "pretrained_model = models.deeplabv3_resnet101(pretrained=True)\n", - "pretrained_model.eval()\n", - "class CustomModel(nn.Module):\n", - " def __init__(self,pretrained_model):\n", - " super(CustomModel,self).__init__()\n", - " self.pretrained_model = pretrained_model\n", - " self.argmax = nn.LogSoftmax(dim=1)\n", - "\n", - " def forward(self,x):\n", - " output_dict = self.pretrained_model(x)\n", - " output = output_dict['out']\n", - " argmax_output = torch.argmax(output,dim=1,keepdim=False)\n", - " return argmax_output\n", - "\n", - "model = CustomModel(pretrained_model)\n", - "input = torch.randn(1,3,400,400)\n", - "output = model(input)\n", - "output.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fcbd56f3-7f0a-4404-b769-f888fa2f39f1", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('models',exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "482d4180-e2ef-496d-8d23-e875ca8a3fae", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "dummy_input = torch.randn(1,3, 400, 400).type(torch.FloatTensor).to('cpu')\n", - "torch.onnx.export(model, dummy_input, \"./models/deeplabv3_resnet101.onnx\",opset_version=11)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cf0f7170-1db0-4597-841e-6506547adabc", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import cv2\n", - "import glob\n", - "import numpy as np\n", - "import torch\n", - "from os.path import isfile, join\n", - "import matplotlib.pyplot as plt 
\n", - "from PIL import Image\n", - "from torchvision import transforms as T" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "56fda866-c58d-42d7-bdca-a80cf2e72208", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('input',exist_ok=True)\n", - "os.makedirs('input/dataset',exist_ok=True)" - ] - }, - { - "cell_type": "markdown", - "id": "9473a262-3448-41dc-9985-636c0dc34a03", - "metadata": {}, - "source": [ - "## Download dataset in input/dataset/ directory\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "40abcc72-c8c6-4038-b5be-b7f27653e2fa", - "metadata": {}, - "outputs": [], - "source": [ - "# User needs to download test dataset and set the path accordingly\n", - "# Below steps will work, only when the test dataset is properly setup" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d34cac77-80a9-4df8-8fe5-d4dcff12fb51", - "metadata": {}, - "outputs": [], - "source": [ - "directory_path = 'input/raw/'\n", - "output_file_path = 'input/input.txt' # The file where the output will be saved\n", - "all_files = os.listdir(directory_path)\n", - "# Filter only the .raw files and create a list of their names\n", - "raw_files = [file for file in all_files if file.endswith('.raw')]\n", - "raw_files = sorted(raw_files)\n", - "# Write the file names to the output file\n", - "with open(output_file_path, 'w') as f:\n", - " c=0\n", - " for raw_file in raw_files:\n", - " f.write(f\"./raw/{raw_file}\\n\")\n", - " c=c+1\n", - "print(f\"File names written to {output_file_path}.\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb1bb5a9-166a-4687-a3cd-62355137da5d", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc --input_network models/deeplabv3_resnet101.onnx --output_path models/deeplabv3_resnet101_fp32.dlc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "18c383a5-f64d-44c3-8208-a1d5b65e013e", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input/\n", - "snpe-dlc-quantize --input_dlc ../models/deeplabv3_resnet101_fp32.dlc --input_list input.txt --axis_quant --output_dlc ../models/deeplabv3_resnet101_quant_w8a8.dlc --enable_htp --htp_socs sm8550" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19fedfcf-9c0d-4021-91fa-996e7507313e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aa0005f2-de81-42c5-bfa5-3c2f607ec761", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v73/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - 
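The cells above only document how `input.txt` is assembled from whatever `.raw` files already exist; the preprocessing that populates `input/raw/` is left to the user (see the dataset note above). The sketch below is one possible way to generate those files. It is not part of the original notebook: it assumes 400x400 ImageNet-normalized float32 inputs (matching the `transform` defined later in this notebook), NHWC ordering for the SNPE raw tensors, a set of `.jpg` images under `input/dataset/`, and a `preproc_` filename prefix so the `ImageNames()` helper can recover the image names.

```python
# Illustrative preprocessing sketch (assumed, not from the original notebook):
# converts each .jpg under input/dataset/ into a float32 .raw tensor in input/raw/.
import os
import numpy as np
from PIL import Image
import torchvision.transforms as transforms

crop_size = 400
transform = transforms.Compose([
    transforms.Resize((crop_size, crop_size)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

dataset_dir = 'input/dataset/'   # assumed image location
raw_dir = 'input/raw/'
os.makedirs(raw_dir, exist_ok=True)

for name in sorted(os.listdir(dataset_dir)):
    if not name.endswith('.jpg'):
        continue
    img = Image.open(os.path.join(dataset_dir, name)).convert('RGB')
    chw = transform(img).numpy()                     # 3 x 400 x 400, float32
    # SNPE raw inputs are commonly laid out as NHWC for ONNX-converted models;
    # adjust or drop the transpose if your converted DLC expects otherwise.
    hwc = np.transpose(chw, (1, 2, 0)).astype(np.float32)
    hwc.tofile(os.path.join(raw_dir, 'preproc_' + os.path.splitext(name)[0] + '.raw'))
```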
{ - "cell_type": "code", - "execution_count": null, - "id": "549a9abe-c0b8-4982-872b-1713b1d9200e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "321c10d4-ef2d-43d1-adbb-6418f77a47db", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "#find ./raw -name *.raw > list.txt\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e0d07758-dd3b-45e7-ae57-02eb06110537", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=deeplabv3_resnet101_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"deeplabv3_resnet101\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list input.txt --output_dir $OUTPUT_FOLDER " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f85ce847-db91-4282-bb0d-1bd1819cbd7b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=deeplabv3_resnet101_quant_w8a8.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"deeplabv3_resnet101\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list input.txt --output_dir $OUTPUT_FOLDER --use_dsp " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e701b246-fe6f-4287-bbd6-f4734d08581c", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "247650d9-d56e-497a-a665-03d94c86ddbf", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP output/OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "879f4305-618f-4bf6-8478-1439beb79e1e", - "metadata": {}, - "outputs": [], - "source": [ - "def image_overlay(image, segmented_image):\n", - " alpha = 1 # transparency for the original image\n", - " beta = 0.8 # transparency for the segmentation map\n", - " gamma = 0 # scalar added to each sum\n", - " # print(image.size)\n", - " # 
print(segmented_image.shape)\n", - " segmented_image = cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR)\n", - " image = np.array(image)\n", - " image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n", - " cv2.addWeighted(image, alpha, segmented_image, beta, gamma, image)\n", - " return image" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6344c5b3-2523-4889-8086-dd6ee81c83ff", - "metadata": {}, - "outputs": [], - "source": [ - "import torchvision.transforms as transforms\n", - "crop_size = 400\n", - "transform = transforms.Compose([\n", - " transforms.Resize((crop_size,crop_size)),\n", - " transforms.ToTensor(),\n", - " transforms.Normalize(mean=[0.485, 0.456, 0.406],\n", - " std=[0.229, 0.224, 0.225])\n", - "])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2383f20d-3197-4658-83d1-5c30550a9020", - "metadata": {}, - "outputs": [], - "source": [ - "label_map = [\n", - " (0, 0, 0), # background\n", - " (128, 0, 0), # aeroplane\n", - " (0, 128, 0), # bicycle\n", - " (128, 128, 0), # bird\n", - " (0, 0, 128), # boat\n", - " (128, 0, 128), # bottle\n", - " (0, 128, 128), # bus \n", - " (128, 128, 128), # car\n", - " (64, 0, 0), # cat\n", - " (192, 0, 0), # chair\n", - " (64, 128, 0), # cow\n", - " (192, 128, 0), # dining table\n", - " (64, 0, 128), # dog\n", - " (192, 0, 128), # horse\n", - " (64, 128, 128), # motorbike\n", - " (192, 128, 128), # person\n", - " (0, 64, 0), # potted plant\n", - " (128, 64, 0), # sheep\n", - " (0, 192, 0), # sofa\n", - " (128, 192, 0), # train\n", - " (0, 64, 128) # tv/monitor\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7f4e32bc-8a32-4eb5-ad3b-ff3e4a45c9ae", - "metadata": {}, - "outputs": [], - "source": [ - "def get_segment_labels(image, model, device):\n", - " # transform the image to tensor and load into computation device\n", - " image = transform(image).to(device)\n", - " image = image.unsqueeze(0) # add a batch dimension\n", - " outputs = model(image)\n", - " return outputs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b3c6fbbc-fe9b-46f1-a09d-66677b658c21", - "metadata": {}, - "outputs": [], - "source": [ - "def draw_segmentation_map(outputs):\n", - " labels = outputs.detach().cpu().numpy()\n", - " # create Numpy arrays containing zeros\n", - " # later to be used to fill them with respective red, green, and blue pixels\n", - " red_map = np.zeros_like(labels).astype(np.uint8)\n", - " green_map = np.zeros_like(labels).astype(np.uint8)\n", - " blue_map = np.zeros_like(labels).astype(np.uint8)\n", - " \n", - " for label_num in range(0, len(label_map)):\n", - " index = labels == label_num\n", - " red_map[index] = np.array(label_map)[label_num, 0]\n", - " green_map[index] = np.array(label_map)[label_num, 1]\n", - " blue_map[index] = np.array(label_map)[label_num, 2]\n", - " \n", - " segmentation_map = np.stack([red_map, green_map, blue_map], axis=2)\n", - " return segmentation_map" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15c52e4b-25bc-4023-8719-1ef497c0d60a", - "metadata": {}, - "outputs": [], - "source": [ - "def ImageNames():\n", - " inputlist = open('input/input.txt', 'r')\n", - " Lines = inputlist.readlines()\n", - " count = 0\n", - " imageList = []\n", - " for line in Lines:\n", - " name = line.split(\"preproc_\",1)[1]\n", - " name = name.split('.')[0]\n", - " imageList.append(name)\n", - " count += 1\n", - " return imageList\n", - "imageList = ImageNames()\n", - "print((imageList))" - ] - }, - { - "cell_type": "code", 
- "execution_count": null, - "id": "3ba800e6-d036-4889-b5f1-917a34a2c642", - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import numpy as np\n", - "device = torch.device('cpu')\n", - "model.eval()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0e40e33e-e46b-4291-b1b7-8f9ae603b701", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output/model_prediction', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1ad558bd-54b8-4e57-82fb-cafc49da4fc9", - "metadata": {}, - "outputs": [], - "source": [ - "from PIL import Image\n", - "import cv2\n", - "for i in range(0,len(imageList)):\n", - "\n", - " image = Image.open(image_dir+imageList[i]+'.jpg')\n", - " # do forward pass and get the output dictionary\n", - " image = image.resize((crop_size,crop_size))\n", - " # print(image.size)\n", - " outputs = get_segment_labels(image, model, device)\n", - " # get the data from the `out` key\n", - " # outputs = outputs['out']\n", - " # print(type(outputs))\n", - " # print(outputs.shape)\n", - " segmented_image = draw_segmentation_map(outputs[0])\n", - " print(image.size)\n", - " print(segmented_image.shape)\n", - " final_image = image_overlay(image, segmented_image)\n", - " # show the segmented image and save to disk\n", - " # cv2.imshow('Segmented image', final_image)\n", - " # cv2.waitKey(0)\n", - " cv2.imwrite(f\"output/model_prediction/{imageList[i]}.jpg\", final_image)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3fe377f7-b9a8-4d48-8a3e-4a4c0b537edf", - "metadata": {}, - "outputs": [], - "source": [ - "def PostProc(img_path, out_path,i):\n", - " res = np.fromfile(img_path, dtype=\"float32\")\n", - " res_reshape = res.reshape((1,400,400)).astype(np.float32)\n", - " model_img = torch.from_numpy(res_reshape)\n", - " segmented_image = draw_segmentation_map(model_img[0])\n", - " image = Image.open(image_dir+imageList[i]+'.jpg')\n", - " # do forward pass and get the output dictionary\n", - " image = image.resize((crop_size,crop_size))\n", - " print(image.size)\n", - " final_image = image_overlay(image, segmented_image)\n", - " cv2.imwrite(f\"{out_path}\", final_image)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5d407324-addc-4d5b-b4c6-601b4f155a1b", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output/test_results', exist_ok=True)\n", - "os.makedirs('output/test_results/32b_arm', exist_ok=True)\n", - "os.makedirs('output/test_results/8b_dsp', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb5d05b3-dd99-4a58-8c11-108b2c58abab", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"output/OUTPUT_32b_CPU/\"\n", - "image_dir = ''\n", - "import cv2\n", - "import os\n", - "import numpy as np\n", - "import torch\n", - "from PIL import Image\n", - "for i in range(0,len(imageList)):\n", - " img_path = os.path.join(test_images_dir, 'Result_')\n", - " img_path = img_path+str(i)+'/1073.raw'\n", - " out_path = 'output/test_results/32b_arm/'+imageList[i]+'_prediction_32b_arm.png'\n", - " PostProc(img_path, out_path,i)\n", - " i = i +1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f0cf6d4-841d-4ed7-bfce-4b3b75be0bd1", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"output/OUTPUT_8b_DSP/\"\n", - "for i in range(0,len(imageList)):\n", - " img_path = os.path.join(test_images_dir, 'Result_')\n", - " img_path = img_path+str(i)+'/1073.raw'\n", - 
" out_path = 'output/test_results/8b_dsp/'+imageList[i]+'_prediction_8b_dsp.png'\n", - " PostProc(img_path, out_path,i)\n", - " i = i +1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "05011964-ce39-4fcd-800e-99cac01554f5", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "fig = plt.figure(figsize=(30, 100));\n", - "import cv2\n", - "for i in range(0, 5):\n", - " \n", - " original = cv2.imread(''+imageList[i]+'.jpg')\n", - " original = cv2.resize(original, (513,513))\n", - " original = cv2.cvtColor(original, cv2.COLOR_BGR2RGB)\n", - " ax = fig.add_subplot(28,4,4*i+1);\n", - " plt.imshow(original,cmap='gray');\n", - " ax.set_title('original image\\n');\n", - " ax.axis('off');\n", - " \n", - " pth_inf = cv2.imread('output/model_prediction/'+imageList[i]+'.jpg')\n", - " pth_inf = cv2.resize(pth_inf, (513,513))\n", - " # pth_inf = cv2.cvtColor(pth_inf, cv2.COLOR_BGR2RGB)\n", - " # pth_overlay = image_overlay(original, pth_inf)\n", - " ax = fig.add_subplot(28,4,4*i+2);\n", - " plt.imshow(pth_inf,cmap='gray');\n", - " ax.set_title('pth output\\n');\n", - " ax.axis('off');\n", - "\n", - "\n", - " arm_fp32= cv2.imread('output/test_results/32b_arm/'+imageList[i]+'_prediction_32b_arm.png')\n", - " # arm_fp32 = cv2.cvtColor(arm_fp32, cv2.COLOR_BGR2RGB)\n", - " # fp32_overlay = image_overlay(original, arm_fp32)\n", - " arm_fp32 = cv2.resize(arm_fp32, (513,513))\n", - " ax = fig.add_subplot(28,4,4*i+3);\n", - " plt.imshow(arm_fp32,cmap='gray');\n", - " ax.set_title('fp32 on ARM\\n');\n", - " ax.axis('off');\n", - "\n", - " dsp= cv2.imread('output/test_results/8b_dsp/'+imageList[i]+'_prediction_8b_dsp.png')\n", - " # dsp_int8 = cv2.cvtColor(dsp_int8, cv2.COLOR_BGR2RGB)\n", - " # int8_overlay = image_overlay(original, dsp_int8)\n", - " # print(dsp)\n", - " ax = fig.add_subplot(28,4,4*i+4);\n", - " plt.imshow(dsp,cmap='gray');\n", - " ax.set_title('int8 on DSP\\n');\n", - " ax.axis('off');" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/04-image-segmentation/deeplabv3_resnet101/readme.md b/models-for-solutions/04-image-segmentation/deeplabv3_resnet101/readme.md deleted file mode 100644 index 96dd234d..00000000 --- a/models-for-solutions/04-image-segmentation/deeplabv3_resnet101/readme.md +++ /dev/null @@ -1,49 +0,0 @@ -# Semantic segmentation deeplabv3_resnet101 - - -## Pre-requisites - -* Please follow the instructions for setting up Qualcomm Neural Processing SDK using the [link] (https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) provided. -- Install onnx v1.6.0. Installation instruction can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. -- Install OpenCV ```pip install cv2``` -- Install mxnet ```pip install mxnet``` - -## How to get the model ? 
- -For the ONNX model, follow the attached notebook: - -``` -import os -dummy_input = torch.randn(1,3, 400, 400).type(torch.FloatTensor).to('cpu') -torch.onnx.export(model, dummy_input, "./models/deeplabv3_resnet101.onnx",opset_version=11) - -``` -## Convert model to DLC - -For the FP32 and FP16 DLC models, follow the attached notebook: - -``` -snpe-onnx-to-dlc --input_network models/deeplabv3_resnet101.onnx --output_path models/deeplabv3_resnet101.dlc - -``` - -## Quantization of DLC -For the quantized INT8 and INT16 DLC models, follow the attached notebook: -``` -cd input/ -snpe-dlc-quantize --input_dlc ../models/deeplabv3_resnet101.dlc --input_list input.txt --axis_quant --output_dlc ../models/deeplabv3_resnet101_quant.dlc -``` - - - -## Accuracy Analysis -- To check the results, please run "[deeplabv3_resnet101](deeplabv3_resnet101.ipynb)". -- To run any Jupyter notebook, run the command below. It will print a few links on the screen; pick the link with your machine's host name in it and paste it into any browser. -- Navigate to the notebook ".ipynb" file and simply click that file. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. and/or its subsidiaries.* diff --git a/models-for-solutions/04-image-segmentation/deeplabv3_resnet50/deeplabv3_resnet50.ipynb b/models-for-solutions/04-image-segmentation/deeplabv3_resnet50/deeplabv3_resnet50.ipynb deleted file mode 100644 index d0b2e5d1..00000000 --- a/models-for-solutions/04-image-segmentation/deeplabv3_resnet50/deeplabv3_resnet50.ipynb +++ /dev/null @@ -1,628 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "c1ba280a-ea16-443e-8dcf-d2447f49f61e", - "metadata": {}, - "source": [ - "# Setting UP SDK Artifacts" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "211b3837-b991-41b7-b990-82d4244a9ffd", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"input/raw\"\n", - "os.environ['DLC32']=\"models/deeplabv3_resnet50_fp32.dlc\"\n", - "os.environ['DLC8']=\"models/deeplabv3_resnet50_quant_w8a8.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"input/input.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"deeplabv3_resnet50\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #change with your device-id. 
Use command \"adb devices\" to get devices names.\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "97e0ed81-13cb-4ad7-a14a-a9b842da8765", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import torchvision.models.segmentation as models\n", - "import torch\n", - "import torch.nn as nn" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "62ad7983-0084-47d1-b1fe-1813bd6651e5", - "metadata": {}, - "outputs": [], - "source": [ - "pretrained_model = models.deeplabv3_resnet50(pretrained=True)\n", - "pretrained_model.eval()\n", - "class CustomModel(nn.Module):\n", - " def __init__(self,pretrained_model):\n", - " super(CustomModel,self).__init__()\n", - " self.pretrained_model = pretrained_model\n", - " self.argmax = nn.LogSoftmax(dim=1)\n", - " def forward(self,x):\n", - " output_dict = self.pretrained_model(x)\n", - " output = output_dict['out']\n", - " argmax_output = torch.argmax(output,dim=1,keepdim=False)\n", - " return argmax_output\n", - "model = CustomModel(pretrained_model)\n", - "input = torch.randn(1,3,400,400)\n", - "output = model(input)\n", - "output.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fcbd56f3-7f0a-4404-b769-f888fa2f39f1", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('models',exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "482d4180-e2ef-496d-8d23-e875ca8a3fae", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "dummy_input = torch.randn(1,3, 400, 400).type(torch.FloatTensor).to('cpu')\n", - "torch.onnx.export(model, dummy_input, \"./models/deeplabv3_resnet50.onnx\",opset_version=11)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cf0f7170-1db0-4597-841e-6506547adabc", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import cv2\n", - "import glob\n", - "import numpy as np\n", - "import torch\n", - "from os.path import isfile, join\n", - "import matplotlib.pyplot as plt \n", - "from PIL import Image\n", - "from torchvision import transforms as T" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "56fda866-c58d-42d7-bdca-a80cf2e72208", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('input',exist_ok=True)\n", - "os.makedirs('input/dataset',exist_ok=True)" - ] - }, - { - "cell_type": "markdown", - "id": "9473a262-3448-41dc-9985-636c0dc34a03", - "metadata": {}, - "source": [ - "## Download dataset in input/dataset/ directory\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d34cac77-80a9-4df8-8fe5-d4dcff12fb51", - "metadata": {}, - "outputs": [], - "source": [ - "directory_path = 'input/raw/'\n", - "output_file_path = 'input/input.txt' # The file where the output will be saved\n", - "\n", - "all_files = os.listdir(directory_path)\n", - "\n", - "# Filter only the .raw files and create a list of their names\n", - "raw_files = [file for file in all_files if file.endswith('.raw')]\n", - "raw_files = sorted(raw_files)\n", - "# Write the file names to the output file\n", - "with open(output_file_path, 'w') as f:\n", - " c=0\n", - " for raw_file in raw_files:\n", - " f.write(f\"./raw/{raw_file}\\n\")\n", - " c=c+1\n", - " \n", - "\n", - "print(f\"File names written to {output_file_path}.\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": 
"cb1bb5a9-166a-4687-a3cd-62355137da5d", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc --input_network models/deeplabv3_resnet50.onnx --output_path models/deeplabv3_resnet50_fp32.dlc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "18c383a5-f64d-44c3-8208-a1d5b65e013e", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input/\n", - "snpe-dlc-quantize --input_dlc ../models/deeplabv3_resnet50_fp32.dlc --input_list input.txt --axis_quant --output_dlc ../models/deeplabv3_resnet50_quant_w8a8.dlc --enable_htp --htp_socs sm8550" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19fedfcf-9c0d-4021-91fa-996e7507313e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aa0005f2-de81-42c5-bfa5-3c2f607ec761", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v73/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "549a9abe-c0b8-4982-872b-1713b1d9200e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "321c10d4-ef2d-43d1-adbb-6418f77a47db", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "#find ./raw -name *.raw > list.txt\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e0d07758-dd3b-45e7-ae57-02eb06110537", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=deeplabv3_resnet50_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"deeplabv3_resnet50\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list input.txt --output_dir $OUTPUT_FOLDER " - ] - }, - { - "cell_type": "code", 
- "execution_count": null, - "id": "f85ce847-db91-4282-bb0d-1bd1819cbd7b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=deeplabv3_resnet50_quant_w8a8.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"deeplabv3_resnet50\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list input.txt --output_dir $OUTPUT_FOLDER --use_dsp " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e701b246-fe6f-4287-bbd6-f4734d08581c", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "247650d9-d56e-497a-a665-03d94c86ddbf", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP output/OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "879f4305-618f-4bf6-8478-1439beb79e1e", - "metadata": {}, - "outputs": [], - "source": [ - "def image_overlay(image, segmented_image):\n", - " alpha = 1 # transparency for the original image\n", - " beta = 0.8 # transparency for the segmentation map\n", - " gamma = 0 # scalar added to each sum\n", - " # print(image.size)\n", - " # print(segmented_image.shape)\n", - " segmented_image = cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR)\n", - " image = np.array(image)\n", - " image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n", - " cv2.addWeighted(image, alpha, segmented_image, beta, gamma, image)\n", - " return image" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6344c5b3-2523-4889-8086-dd6ee81c83ff", - "metadata": {}, - "outputs": [], - "source": [ - "import torchvision.transforms as transforms\n", - "crop_size = 400\n", - "transform = transforms.Compose([\n", - " transforms.Resize((crop_size,crop_size)),\n", - " transforms.ToTensor(),\n", - " transforms.Normalize(mean=[0.485, 0.456, 0.406],\n", - " std=[0.229, 0.224, 0.225])\n", - "])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2383f20d-3197-4658-83d1-5c30550a9020", - "metadata": {}, - "outputs": [], - "source": [ - "label_map = [\n", - " (0, 0, 0), # background\n", - " (128, 0, 0), # aeroplane\n", - " (0, 128, 0), # bicycle\n", - " (128, 128, 0), # bird\n", - " (0, 0, 128), # boat\n", - " (128, 0, 128), # bottle\n", - " (0, 128, 128), # bus \n", - " (128, 128, 128), # car\n", - " (64, 0, 0), # cat\n", - " (192, 0, 0), # chair\n", - " (64, 128, 0), # cow\n", - " (192, 128, 0), # dining table\n", - " (64, 0, 128), # dog\n", - " (192, 0, 128), # horse\n", - " (64, 128, 128), # motorbike\n", - " (192, 128, 128), # person\n", - " (0, 64, 0), # potted plant\n", - " (128, 64, 0), # sheep\n", - " (0, 192, 0), # sofa\n", - " (128, 192, 0), # train\n", - " (0, 64, 128) # tv/monitor\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": 
"7f4e32bc-8a32-4eb5-ad3b-ff3e4a45c9ae", - "metadata": {}, - "outputs": [], - "source": [ - "def get_segment_labels(image, model, device):\n", - " # transform the image to tensor and load into computation device\n", - " image = transform(image).to(device)\n", - " image = image.unsqueeze(0) # add a batch dimension\n", - " outputs = model(image)\n", - " return outputs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b3c6fbbc-fe9b-46f1-a09d-66677b658c21", - "metadata": {}, - "outputs": [], - "source": [ - "def draw_segmentation_map(outputs):\n", - " labels = outputs.detach().cpu().numpy()\n", - " # create Numpy arrays containing zeros\n", - " # later to be used to fill them with respective red, green, and blue pixels\n", - " red_map = np.zeros_like(labels).astype(np.uint8)\n", - " green_map = np.zeros_like(labels).astype(np.uint8)\n", - " blue_map = np.zeros_like(labels).astype(np.uint8)\n", - " \n", - " for label_num in range(0, len(label_map)):\n", - " index = labels == label_num\n", - " red_map[index] = np.array(label_map)[label_num, 0]\n", - " green_map[index] = np.array(label_map)[label_num, 1]\n", - " blue_map[index] = np.array(label_map)[label_num, 2]\n", - " \n", - " segmentation_map = np.stack([red_map, green_map, blue_map], axis=2)\n", - " return segmentation_map" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15c52e4b-25bc-4023-8719-1ef497c0d60a", - "metadata": {}, - "outputs": [], - "source": [ - "def ImageNames():\n", - " inputlist = open('input/input.txt', 'r')\n", - " Lines = inputlist.readlines()\n", - " count = 0\n", - " imageList = []\n", - " for line in Lines:\n", - " name = line.split(\"preproc_\",1)[1]\n", - " name = name.split('.')[0]\n", - " imageList.append(name)\n", - " count += 1\n", - " return imageList\n", - "imageList = ImageNames()\n", - "print((imageList))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3ba800e6-d036-4889-b5f1-917a34a2c642", - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import numpy as np\n", - "device = torch.device('cpu')\n", - "model.eval()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0e40e33e-e46b-4291-b1b7-8f9ae603b701", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output/model_prediction', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1ad558bd-54b8-4e57-82fb-cafc49da4fc9", - "metadata": {}, - "outputs": [], - "source": [ - "image_dir = #define this variable appropriately to reflect dataset path\n", - "from PIL import Image\n", - "import cv2\n", - "for i in range(0,len(imageList)):\n", - "\n", - " image = Image.open(image_dir+imageList[i]+'.jpg')\n", - " # do forward pass and get the output dictionary\n", - " image = image.resize((crop_size,crop_size))\n", - " # print(image.size)\n", - " outputs = get_segment_labels(image, model, device)\n", - " # get the data from the `out` key\n", - " # outputs = outputs['out']\n", - " # print(type(outputs))\n", - " # print(outputs.shape)\n", - " segmented_image = draw_segmentation_map(outputs[0])\n", - " print(image.size)\n", - " print(segmented_image.shape)\n", - " final_image = image_overlay(image, segmented_image)\n", - " # show the segmented image and save to disk\n", - " # cv2.imshow('Segmented image', final_image)\n", - " # cv2.waitKey(0)\n", - " cv2.imwrite(f\"output/model_prediction/{imageList[i]}.jpg\", final_image)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": 
"3fe377f7-b9a8-4d48-8a3e-4a4c0b537edf", - "metadata": {}, - "outputs": [], - "source": [ - "def PostProc(img_path, out_path,i):\n", - "\n", - " res = np.fromfile(img_path, dtype=\"float32\")\n", - " res_reshape = res.reshape((1,400,400)).astype(np.float32)\n", - " model_img = torch.from_numpy(res_reshape)\n", - " segmented_image = draw_segmentation_map(model_img[0])\n", - " image = Image.open(image_dir+imageList[i]+'.jpg')\n", - " # do forward pass and get the output dictionary\n", - " image = image.resize((crop_size,crop_size))\n", - " print(image.size)\n", - " final_image = image_overlay(image, segmented_image) \n", - " cv2.imwrite(f\"{out_path}\", final_image) " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5d407324-addc-4d5b-b4c6-601b4f155a1b", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output/test_results', exist_ok=True)\n", - "os.makedirs('output/test_results/32b_arm', exist_ok=True)\n", - "os.makedirs('output/test_results/8b_dsp', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb5d05b3-dd99-4a58-8c11-108b2c58abab", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"output/OUTPUT_32b_CPU/\"\n", - "image_dir = #define as mentioned aboves\n", - "import cv2\n", - "import os\n", - "import numpy as np\n", - "import torch\n", - "from PIL import Image\n", - "for i in range(0,len(imageList)):\n", - " img_path = os.path.join(test_images_dir, 'Result_')\n", - " img_path = img_path+str(i)+'/597.raw'\n", - " out_path = 'output/test_results/32b_arm/'+imageList[i]+'_prediction_32b_arm.png'\n", - " PostProc(img_path, out_path,i)\n", - " i = i +1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f0cf6d4-841d-4ed7-bfce-4b3b75be0bd1", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"output/OUTPUT_8b_DSP/\"\n", - "for i in range(0,len(imageList)):\n", - " img_path = os.path.join(test_images_dir, 'Result_')\n", - " img_path = img_path+str(i)+'/597.raw'\n", - " out_path = 'output/test_results/8b_dsp/'+imageList[i]+'_prediction_8b_dsp.png'\n", - " PostProc(img_path, out_path,i)\n", - " i = i +1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "05011964-ce39-4fcd-800e-99cac01554f5", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "fig = plt.figure(figsize=(30, 100));\n", - "import cv2\n", - "for i in range(0, 5):\n", - " \n", - " original = cv2.imread(''+imageList[i]+'.jpg')\n", - " original = cv2.resize(original, (400,400))\n", - " original = cv2.cvtColor(original, cv2.COLOR_BGR2RGB)\n", - " ax = fig.add_subplot(28,4,4*i+1);\n", - " plt.imshow(original,cmap='gray');\n", - " ax.set_title('original image\\n');\n", - " ax.axis('off');\n", - " \n", - " pth_inf = cv2.imread('output/model_prediction/'+imageList[i]+'.jpg')\n", - " pth_inf = cv2.resize(pth_inf, (400,400))\n", - " # pth_inf = cv2.cvtColor(pth_inf, cv2.COLOR_BGR2RGB)\n", - " # pth_overlay = image_overlay(original, pth_inf)\n", - " ax = fig.add_subplot(28,4,4*i+2);\n", - " plt.imshow(pth_inf,cmap='gray');\n", - " ax.set_title('pth output\\n');\n", - " ax.axis('off');\n", - "\n", - "\n", - " arm_fp32= cv2.imread('output/test_results/32b_arm/'+imageList[i]+'_prediction_32b_arm.png')\n", - " # arm_fp32 = cv2.cvtColor(arm_fp32, cv2.COLOR_BGR2RGB)\n", - " # fp32_overlay = image_overlay(original, arm_fp32)\n", - " arm_fp32 = cv2.resize(arm_fp32, (400,400))\n", - " ax = fig.add_subplot(28,4,4*i+3);\n", - " 
plt.imshow(arm_fp32,cmap='gray');\n", - " ax.set_title('fp32 on ARM\\n');\n", - " ax.axis('off');\n", - "\n", - " dsp= cv2.imread('output/test_results/8b_dsp/'+imageList[i]+'_prediction_8b_dsp.png')\n", - " # dsp_int8 = cv2.cvtColor(dsp_int8, cv2.COLOR_BGR2RGB)\n", - " # int8_overlay = image_overlay(original, dsp_int8)\n", - " # print(dsp)\n", - " ax = fig.add_subplot(28,4,4*i+4);\n", - " plt.imshow(dsp,cmap='gray');\n", - " ax.set_title('int8 on DSP\\n');\n", - " ax.axis('off');" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/04-image-segmentation/deeplabv3_resnet50/readme.md b/models-for-solutions/04-image-segmentation/deeplabv3_resnet50/readme.md deleted file mode 100644 index baf3a1f9..00000000 --- a/models-for-solutions/04-image-segmentation/deeplabv3_resnet50/readme.md +++ /dev/null @@ -1,49 +0,0 @@ -# Semantic segmentation deeplabv3_resnet50 - - -## Pre-requisites - -* Please follow the instructions for setting up Qualcomm Neural Processing SDK using the [link] (https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) provided. -- Install onnx v1.6.0. Installation instruction can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. -- Install OpenCV ```pip install cv2``` -- Install mxnet ```pip install mxnet``` - -## How to get the model ? - -for ONNX model follow attached notebook - -``` -import os -dummy_input = torch.randn(1,3, 400, 400).type(torch.FloatTensor).to('cpu') -torch.onnx.export(model, dummy_input, "./models/deeplabv3_resnet50.onnx",opset_version=11) - -``` -## Convert model to DLC - -for fp32_DLC and FP16_DLC model follow attached notebook - -``` -snpe-onnx-to-dlc --input_network models/deeplabv3_resnet50.onnx --output_path models/deeplabv3_resnet50.dlc - -``` - -## Quantization of DLC -for quantized INT8_DLC, INT16_DLC model follow attached notebook - -``` -cd input/ -snpe-dlc-quantize --input_dlc ../models/deeplabv3_resnet50.dlc --input_list input.txt --axis_quant --output_dlc ../models/deeplabv3_resnet50_quant.dlc -``` - -## Accuracy Analysis - -- To check results please run "[deeplabv3_resnet50](deeplabv3_resnet50.ipynb)". -- To run any jupyter notebook, run below command. It will generate few links on the screen, pick the link with your machine name on it (host-name) and paste it in any browser. -- Navigate to the notebook ".ipynb" file and simply click that file. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. 
and/or its subsidiaries.* diff --git a/models-for-solutions/04-image-segmentation/fcn_resnet101/fcn_resnet101.ipynb b/models-for-solutions/04-image-segmentation/fcn_resnet101/fcn_resnet101.ipynb deleted file mode 100644 index c741bff2..00000000 --- a/models-for-solutions/04-image-segmentation/fcn_resnet101/fcn_resnet101.ipynb +++ /dev/null @@ -1,647 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "33e1ee76-7229-4154-8f68-8ec75ba5edd9", - "metadata": {}, - "source": [ - "# Setting up SDK artifacts" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "97e0ed81-13cb-4ad7-a14a-a9b842da8765", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"input/raw\"\n", - "os.environ['DLC32']=\"models/fcn_resnet101_fp32.dlc\"\n", - "os.environ['DLC8']=\"models/fcn_resnet101_quant16_w8a16.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"input/input.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"fcn_resnet101\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #change with your device-id. Use command \"adb devices\" to get devices names.\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ea09199c-b80d-4f3d-a277-31625e4b987d", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import torchvision.models.segmentation as models\n", - "import torch\n", - "import torch.nn as nn" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4beb6dbe-8a77-4826-9d44-c25f09362bcf", - "metadata": {}, - "outputs": [], - "source": [ - "pretrained_model = models.fcn_resnet101(pretrained=True)\n", - "class CustomModel(nn.Module):\n", - " def __init__(self,pretrained_model):\n", - " super(CustomModel,self).__init__()\n", - " self.pretrained_model = pretrained_model\n", - " self.argmax = nn.LogSoftmax(dim=1)\n", - " def forward(self,x):\n", - " output_dict = self.pretrained_model(x)\n", - " output = output_dict['out']\n", - " argmax_output = torch.argmax(output,dim=1,keepdim=False)\n", - " return argmax_output\n", - "model = CustomModel(pretrained_model)\n", - "input = torch.randn(1,3,400,400)\n", - "output = model(input)\n", - "output.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12c13449-3ee0-413e-a424-2640d81ca997", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('models',exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "482d4180-e2ef-496d-8d23-e875ca8a3fae", - "metadata": {}, - "outputs": [], - "source": [ - "dummy_input = torch.randn(1,3, 400, 400).type(torch.FloatTensor).to('cpu')\n", - "torch.onnx.export(model, dummy_input, \"./models/fcn_resnet101.onnx\",opset_version=11)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cf0f7170-1db0-4597-841e-6506547adabc", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import cv2\n", - "import glob\n", - "import numpy as np\n", - "import torch\n", - "from os.path import isfile, join\n", - "import matplotlib.pyplot as plt \n", - "from PIL import Image\n", - "from torchvision import transforms as T" - ] - }, - { - "cell_type": "code", - 
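The deleted readmes list onnxruntime and onnxsim as prerequisites, yet the notebooks in this diff go straight from `torch.onnx.export` to `snpe-onnx-to-dlc` without checking the exported graph. A minimal sketch of such a check is below; it assumes the `model` object and the 1x3x400x400 input from the export cell above are still in scope, and the path and print-out are illustrative only.

```python
# Hypothetical sanity check (not part of the deleted notebooks): run the exported ONNX
# graph with onnxruntime and compare it against the in-memory PyTorch model before
# handing the file to snpe-onnx-to-dlc.
import numpy as np
import onnxruntime as ort
import torch

x = np.random.randn(1, 3, 400, 400).astype(np.float32)
sess = ort.InferenceSession("models/fcn_resnet101.onnx", providers=["CPUExecutionProvider"])
(onnx_out,) = sess.run(None, {sess.get_inputs()[0].name: x})

with torch.no_grad():
    torch_out = model(torch.from_numpy(x)).numpy()  # assumes `model` from the export cell

# Rough agreement check; argmax outputs may differ on tie/borderline pixels.
print("mismatching pixels:", int((onnx_out != torch_out).sum()))
```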
"execution_count": null, - "id": "a07173ba-596b-492a-a5cc-c258348c6fe9", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('input',exist_ok=True)\n", - "os.makedirs('input/dataset',exist_ok=True)" - ] - }, - { - "cell_type": "markdown", - "id": "b17c35fb-5c8b-4e26-9899-2247de1bd3aa", - "metadata": {}, - "source": [ - "## Download dataset in input/dataset/ directory\n", - "#### Try in terminal if notebook getting freezed" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d34cac77-80a9-4df8-8fe5-d4dcff12fb51", - "metadata": {}, - "outputs": [], - "source": [ - "directory_path = 'input/raw/'\n", - "output_file_path = 'input/input.txt' # The file where the output will be saved\n", - "all_files = os.listdir(directory_path)\n", - "# Filter only the .raw files and create a list of their names\n", - "raw_files = [file for file in all_files if file.endswith('.raw')]\n", - "raw_files = sorted(raw_files)\n", - "# Write the file names to the output file\n", - "with open(output_file_path, 'w') as f:\n", - " c=0\n", - " for raw_file in raw_files:\n", - " f.write(f\"./raw/{raw_file}\\n\")\n", - " c=c+1\n", - "print(f\"File names written to {output_file_path}.\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb1bb5a9-166a-4687-a3cd-62355137da5d", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc --input_network models/fcn_resnet101.onnx --output_path models/fcn_resnet101_fp32.dlc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "18c383a5-f64d-44c3-8208-a1d5b65e013e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input/\n", - "snpe-dlc-quantize --input_dlc ../models/fcn_resnet101_fp32.dlc --input_list input.txt --axis_quant --output_dlc ../models/fcn_resnet101_quant_w8a8.dlc --enable_htp --htp_socs sm8550" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "58792696-c7c2-419e-9bb3-56e5213a47ea", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input/\n", - "snpe-dlc-quantize --input_dlc ../models/fcn_resnet101_fp32.dlc --input_list input.txt --axis_quant --output_dlc ../models/fcn_resnet101_quant16_w8a16.dlc --act_bitwidth 16 --enable_htp --htp_socs sm8550" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c75dbc88-89b1-4269-8476-afaebf54a8aa", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc models/fcn_resnet101_fp32.dlc --output_dlc models/fcn_resnet101_fp16.dlc --use_float_io " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19fedfcf-9c0d-4021-91fa-996e7507313e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aa0005f2-de81-42c5-bfa5-3c2f607ec761", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL 
/data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "549a9abe-c0b8-4982-872b-1713b1d9200e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "321c10d4-ef2d-43d1-adbb-6418f77a47db", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "#find ./raw -name *.raw > list.txt\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e0d07758-dd3b-45e7-ae57-02eb06110537", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=fcn_resnet101_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"fcn_resnet101\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list input.txt --output_dir $OUTPUT_FOLDER " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f85ce847-db91-4282-bb0d-1bd1819cbd7b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=fcn_resnet101_quant16_w8a16.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"fcn_resnet101\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "# modified the inputlist.txt. 
That's the only change\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list input.txt --output_dir $OUTPUT_FOLDER --use_dsp " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed45d308-20d9-473a-9b5c-e30fa926eb5b", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "247650d9-d56e-497a-a665-03d94c86ddbf", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP output/OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "879f4305-618f-4bf6-8478-1439beb79e1e", - "metadata": {}, - "outputs": [], - "source": [ - "def image_overlay(image, segmented_image):\n", - " alpha = 1 # transparency for the original image\n", - " beta = 0.8 # transparency for the segmentation map\n", - " gamma = 0 # scalar added to each sum\n", - " # print(image.size)\n", - " # print(segmented_image.shape)\n", - " segmented_image = cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR)\n", - " image = np.array(image)\n", - " image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n", - " cv2.addWeighted(image, alpha, segmented_image, beta, gamma, image)\n", - " return image" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6344c5b3-2523-4889-8086-dd6ee81c83ff", - "metadata": {}, - "outputs": [], - "source": [ - "import torchvision.transforms as transforms\n", - "crop_size = 400\n", - "transform = transforms.Compose([\n", - " transforms.Resize((crop_size,crop_size)),\n", - " transforms.ToTensor(),\n", - " transforms.Normalize(mean=[0.485, 0.456, 0.406],\n", - " std=[0.229, 0.224, 0.225])\n", - "])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2383f20d-3197-4658-83d1-5c30550a9020", - "metadata": {}, - "outputs": [], - "source": [ - "label_map = [\n", - " (0, 0, 0), # background\n", - " (128, 0, 0), # aeroplane\n", - " (0, 128, 0), # bicycle\n", - " (128, 128, 0), # bird\n", - " (0, 0, 128), # boat\n", - " (128, 0, 128), # bottle\n", - " (0, 128, 128), # bus \n", - " (128, 128, 128), # car\n", - " (64, 0, 0), # cat\n", - " (192, 0, 0), # chair\n", - " (64, 128, 0), # cow\n", - " (192, 128, 0), # dining table\n", - " (64, 0, 128), # dog\n", - " (192, 0, 128), # horse\n", - " (64, 128, 128), # motorbike\n", - " (192, 128, 128), # person\n", - " (0, 64, 0), # potted plant\n", - " (128, 64, 0), # sheep\n", - " (0, 192, 0), # sofa\n", - " (128, 192, 0), # train\n", - " (0, 64, 128) # tv/monitor\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7f4e32bc-8a32-4eb5-ad3b-ff3e4a45c9ae", - "metadata": {}, - "outputs": [], - "source": [ - "def get_segment_labels(image, model, device):\n", - " # transform the image to tensor and load into computation device\n", - " image = transform(image).to(device)\n", - " image = image.unsqueeze(0) # add a batch dimension\n", - " outputs = model(image)\n", - " return outputs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b3c6fbbc-fe9b-46f1-a09d-66677b658c21", - "metadata": {}, - "outputs": [], - "source": [ - "def draw_segmentation_map(outputs):\n", - " labels = outputs.detach().cpu().numpy()\n", - " # create Numpy arrays containing zeros\n", - " # later to be used to fill them with respective red, 
green, and blue pixels\n", - " red_map = np.zeros_like(labels).astype(np.uint8)\n", - " green_map = np.zeros_like(labels).astype(np.uint8)\n", - " blue_map = np.zeros_like(labels).astype(np.uint8)\n", - " \n", - " for label_num in range(0, len(label_map)):\n", - " index = labels == label_num\n", - " red_map[index] = np.array(label_map)[label_num, 0]\n", - " green_map[index] = np.array(label_map)[label_num, 1]\n", - " blue_map[index] = np.array(label_map)[label_num, 2]\n", - " \n", - " segmentation_map = np.stack([red_map, green_map, blue_map], axis=2)\n", - " return segmentation_map" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15c52e4b-25bc-4023-8719-1ef497c0d60a", - "metadata": {}, - "outputs": [], - "source": [ - "def ImageNames():\n", - " inputlist = open('input/input.txt', 'r')\n", - " Lines = inputlist.readlines()\n", - " count = 0\n", - " imageList = []\n", - " for line in Lines:\n", - " name = line.split(\"preproc_\",1)[1]\n", - " name = name.split('.')[0]\n", - " imageList.append(name)\n", - " count += 1\n", - " return imageList\n", - "imageList = ImageNames()\n", - "print((imageList))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3ba800e6-d036-4889-b5f1-917a34a2c642", - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import numpy as np\n", - "device = torch.device('cpu')\n", - "model.eval()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0e40e33e-e46b-4291-b1b7-8f9ae603b701", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output/model_prediction', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1ad558bd-54b8-4e57-82fb-cafc49da4fc9", - "metadata": {}, - "outputs": [], - "source": [ - "image_dir = # set this path to dataset path\n", - "from PIL import Image\n", - "import cv2\n", - "for i in range(0,len(imageList)):\n", - "\n", - " image = Image.open(image_dir+imageList[i]+'.jpg')\n", - " # do forward pass and get the output dictionary\n", - " image = image.resize((crop_size,crop_size))\n", - " # print(image.size)\n", - " outputs = get_segment_labels(image, model, device)\n", - " # get the data from the `out` key\n", - " # outputs = outputs['out']\n", - " # print(type(outputs))\n", - " # print(outputs.shape)\n", - " segmented_image = draw_segmentation_map(outputs[0])\n", - " print(image.size)\n", - " print(segmented_image.shape)\n", - " final_image = image_overlay(image, segmented_image)\n", - " # show the segmented image and save to disk\n", - " # cv2.imshow('Segmented image', final_image)\n", - " # cv2.waitKey(0)\n", - " cv2.imwrite(f\"output/model_prediction/{imageList[i]}.jpg\", final_image)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3fe377f7-b9a8-4d48-8a3e-4a4c0b537edf", - "metadata": {}, - "outputs": [], - "source": [ - "def PostProc(img_path, out_path,i):\n", - " res = np.fromfile(img_path, dtype=\"float32\")\n", - " res_reshape = res.reshape((1,400,400)).astype(np.float32)\n", - " model_img = torch.from_numpy(res_reshape)\n", - " segmented_image = draw_segmentation_map(model_img[0])\n", - " image = Image.open(image_dir+imageList[i]+'.jpg')\n", - " # do forward pass and get the output dictionary\n", - " image = image.resize((crop_size,crop_size))\n", - " final_image = image_overlay(image, segmented_image)\n", - " cv2.imwrite(f\"{out_path}\", final_image)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5d407324-addc-4d5b-b4c6-601b4f155a1b", - "metadata": {}, - 
"outputs": [], - "source": [ - "os.makedirs('output/test_results', exist_ok=True)\n", - "os.makedirs('output/test_results/32b_arm', exist_ok=True)\n", - "os.makedirs('output/test_results/8b_dsp', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb5d05b3-dd99-4a58-8c11-108b2c58abab", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"output/OUTPUT_32b_CPU/\"\n", - "image_dir = # set this path to dataset path\n", - "import cv2\n", - "import os\n", - "import numpy as np\n", - "import torch\n", - "from PIL import Image\n", - "for i in range(0,len(imageList)):\n", - " img_path = os.path.join(test_images_dir, 'Result_')\n", - " img_path = img_path+str(i)+'/1002.raw'\n", - " out_path = 'output/test_results/32b_arm/'+imageList[i]+'_prediction_32b_arm.png'\n", - " PostProc(img_path, out_path,i)\n", - " i = i +1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f0cf6d4-841d-4ed7-bfce-4b3b75be0bd1", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"output/OUTPUT_8b_DSP/\"\n", - "for i in range(0,len(imageList)):\n", - " img_path = os.path.join(test_images_dir, 'Result_')\n", - " img_path = img_path+str(i)+'/1002.raw'\n", - " out_path = 'output/test_results/8b_dsp/'+imageList[i]+'_prediction_8b_dsp.png'\n", - " PostProc(img_path, out_path,i)\n", - " i = i +1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "467eb305-e30a-4792-8d04-518cdf7cb6dc", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "fig = plt.figure(figsize=(30, 100));\n", - "import cv2\n", - "for i in range(0, 5):\n", - " \n", - " original = cv2.imread(''+imageList[i]+'.jpg')\n", - " original = cv2.resize(original, (400,400))\n", - " original = cv2.cvtColor(original, cv2.COLOR_BGR2RGB)\n", - " ax = fig.add_subplot(28,4,4*i+1);\n", - " plt.imshow(original,cmap='gray');\n", - " ax.set_title('original image\\n');\n", - " ax.axis('off');\n", - " \n", - " pth_inf = cv2.imread('output/model_prediction/'+imageList[i]+'.jpg')\n", - " pth_inf = cv2.resize(pth_inf, (400,400))\n", - " # pth_inf = cv2.cvtColor(pth_inf, cv2.COLOR_BGR2RGB)\n", - " # pth_overlay = image_overlay(original, pth_inf)\n", - " ax = fig.add_subplot(28,4,4*i+2);\n", - " plt.imshow(pth_inf,cmap='gray');\n", - " ax.set_title('pth output\\n');\n", - " ax.axis('off');\n", - "\n", - "\n", - " arm_fp32= cv2.imread('output/test_results/32b_arm/'+imageList[i]+'_prediction_32b_arm.png')\n", - " # arm_fp32 = cv2.cvtColor(arm_fp32, cv2.COLOR_BGR2RGB)\n", - " # fp32_overlay = image_overlay(original, arm_fp32)\n", - " arm_fp32 = cv2.resize(arm_fp32, (400,400))\n", - " ax = fig.add_subplot(28,4,4*i+3);\n", - " plt.imshow(arm_fp32,cmap='gray');\n", - " ax.set_title('fp32 on ARM\\n');\n", - " ax.axis('off');\n", - "\n", - " dsp= cv2.imread('output/test_results/8b_dsp/'+imageList[i]+'_prediction_8b_dsp.png')\n", - " # dsp_int8 = cv2.cvtColor(dsp_int8, cv2.COLOR_BGR2RGB)\n", - " # int8_overlay = image_overlay(original, dsp_int8)\n", - " # print(dsp)\n", - " ax = fig.add_subplot(28,4,4*i+4);\n", - " plt.imshow(dsp,cmap='gray');\n", - " ax.set_title('int8 on DSP\\n');\n", - " ax.axis('off');" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - 
"pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/04-image-segmentation/fcn_resnet101/readme.md b/models-for-solutions/04-image-segmentation/fcn_resnet101/readme.md deleted file mode 100644 index 2c33ee30..00000000 --- a/models-for-solutions/04-image-segmentation/fcn_resnet101/readme.md +++ /dev/null @@ -1,48 +0,0 @@ -# Semantic segmentation fcn_resnet101 - - -## Pre-requisites - -* Please follow the instructions for setting up Qualcomm Neural Processing SDK using the [link] (https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) provided. -- Install onnx v1.6.0. Installation instruction can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. -- Install OpenCV ```pip install cv2``` -- Install mxnet ```pip install mxnet``` - - -## How to get the model ? - -for ONNX model follow attached notebook - -``` -import os -dummy_input = torch.randn(1,3, 400, 400).type(torch.FloatTensor).to('cpu') -torch.onnx.export(model, dummy_input, "./models/fcn_resnet101.onnx",opset_version=11) - -``` -## Convert model to DLC - -for fp32_DLC and FP16_DLC model follow attached notebook - -``` -snpe-onnx-to-dlc --input_network models/fcn_resnet101.onnx --output_path models/fcn_resnet101.dlc - -``` - -## Quantization of DLC -for quantized INT8_DLC, INT16_DLC model follow attached notebook -``` -cd input/ -snpe-dlc-quantize --input_dlc ../models/fcn_resnet101.dlc --input_list input.txt --axis_quant --output_dlc ../models/fcn_resnet101_quant.dlc -``` - -## Accuracy Analysis -- To check results please run "[fcn_resnet101](fcn_resnet101.ipynb)". -- To run any jupyter notebook, run below command. It will generate few links on the screen, pick the link with your machine name on it (host-name) and paste it in any browser. -- Navigate to the notebook ".ipynb" file and simply click that file. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. 
and/or its subsidiaries.* diff --git a/models-for-solutions/04-image-segmentation/fcn_resnet50/fcn_resnet50_optimized.ipynb b/models-for-solutions/04-image-segmentation/fcn_resnet50/fcn_resnet50_optimized.ipynb deleted file mode 100644 index 1f25ad70..00000000 --- a/models-for-solutions/04-image-segmentation/fcn_resnet50/fcn_resnet50_optimized.ipynb +++ /dev/null @@ -1,659 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "33e1ee76-7229-4154-8f68-8ec75ba5edd9", - "metadata": {}, - "source": [ - "# Setting up SDK artifacts" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b2957050-87cc-4b0b-89a5-d05f60b20afb", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"input/raw\"\n", - "os.environ['DLC32']=\"models/fcn_resnet50_fp32.dlc\"\n", - "os.environ['DLC8']=\"models/fcn_resnet50_quant16_w8a16.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"input/input.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"fcn_resnet50\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #change with your device-id. Use command \"adb devices\" to get devices names.\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "28ee22fe-0a59-4e60-89a3-798d87a440df", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import torchvision.models.segmentation as models\n", - "import torch\n", - "import torch.nn as nn" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "62ad7983-0084-47d1-b1fe-1813bd6651e5", - "metadata": {}, - "outputs": [], - "source": [ - "pretrained_model = models.fcn_resnet50(pretrained=True)\n", - "class CustomModel(nn.Module):\n", - " def __init__(self,pretrained_model):\n", - " super(CustomModel,self).__init__()\n", - " self.pretrained_model = pretrained_model\n", - " self.argmax = nn.LogSoftmax(dim=1)\n", - "\n", - " def forward(self,x):\n", - " output_dict = self.pretrained_model(x)\n", - " output = output_dict['out']\n", - " argmax_output = torch.argmax(output,dim=1,keepdim=False)\n", - " return argmax_output\n", - "\n", - "model = CustomModel(pretrained_model)\n", - "input = torch.randn(1,3,400,400)\n", - "output = model(input)\n", - "output.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12c13449-3ee0-413e-a424-2640d81ca997", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('models',exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "482d4180-e2ef-496d-8d23-e875ca8a3fae", - "metadata": {}, - "outputs": [], - "source": [ - "dummy_input = torch.randn(1,3, 400, 400).type(torch.FloatTensor).to('cpu')\n", - "torch.onnx.export(model, dummy_input, \"./models/fcn_resnet50.onnx\",opset_version=11)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cf0f7170-1db0-4597-841e-6506547adabc", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import cv2\n", - "import glob\n", - "import numpy as np\n", - "import torch\n", - "from os.path import isfile, join\n", - "import matplotlib.pyplot as plt \n", - "from PIL import Image\n", - "from torchvision import transforms as T" - ] - }, - { - 
"cell_type": "code", - "execution_count": null, - "id": "a07173ba-596b-492a-a5cc-c258348c6fe9", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('input',exist_ok=True)\n", - "os.makedirs('input/dataset',exist_ok=True)" - ] - }, - { - "cell_type": "markdown", - "id": "b17c35fb-5c8b-4e26-9899-2247de1bd3aa", - "metadata": {}, - "source": [ - "## Download dataset in input/dataset/ directory\n", - "\n", - "User needs to download the dataset of their choice to proceed with below accuracy validation scripts. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d34cac77-80a9-4df8-8fe5-d4dcff12fb51", - "metadata": {}, - "outputs": [], - "source": [ - "directory_path = 'input/raw/'\n", - "output_file_path = 'input/input.txt' # The file where the output will be saved\n", - "all_files = os.listdir(directory_path)\n", - "# Filter only the .raw files and create a list of their names\n", - "raw_files = [file for file in all_files if file.endswith('.raw')]\n", - "raw_files = sorted(raw_files)\n", - "# Write the file names to the output file\n", - "with open(output_file_path, 'w') as f:\n", - " c=0\n", - " for raw_file in raw_files:\n", - " f.write(f\"./raw/{raw_file}\\n\")\n", - " c=c+1\n", - "print(f\"File names written to {output_file_path}.\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb1bb5a9-166a-4687-a3cd-62355137da5d", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc --input_network models/fcn_resnet50.onnx --output_path models/fcn_resnet50_fp32.dlc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "18c383a5-f64d-44c3-8208-a1d5b65e013e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input/\n", - "snpe-dlc-quantize --input_dlc ../models/fcn_resnet50_fp32.dlc --input_list input.txt --axis_quant --output_dlc ../models/fcn_resnet50_quant_w8a8.dlc --enable_htp --htp_socs sm8550" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "58792696-c7c2-419e-9bb3-56e5213a47ea", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input/\n", - "snpe-dlc-quantize --input_dlc ../models/fcn_resnet50_fp32.dlc --input_list input.txt --axis_quant --output_dlc ../models/fcn_resnet50_quant16_w8a16.dlc --act_bitwidth 16 --enable_htp --htp_socs sm8550" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c75dbc88-89b1-4269-8476-afaebf54a8aa", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc models/fcn_resnet50_fp32.dlc --output_dlc models/fcn_resnet50_fp16.dlc --use_float_io " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19fedfcf-9c0d-4021-91fa-996e7507313e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aa0005f2-de81-42c5-bfa5-3c2f607ec761", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - 
"$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "549a9abe-c0b8-4982-872b-1713b1d9200e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "321c10d4-ef2d-43d1-adbb-6418f77a47db", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "#find ./raw -name *.raw > list.txt\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e0d07758-dd3b-45e7-ae57-02eb06110537", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=fcn_resnet50_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"fcn_resnet50\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list input.txt --output_dir $OUTPUT_FOLDER " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f85ce847-db91-4282-bb0d-1bd1819cbd7b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=fcn_resnet50_quant16_w8a16.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"fcn_resnet50\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "# modified the inputlist.txt. 
That's the only change\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list input.txt --output_dir $OUTPUT_FOLDER --use_dsp " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed45d308-20d9-473a-9b5c-e30fa926eb5b", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "247650d9-d56e-497a-a665-03d94c86ddbf", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP output/OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "879f4305-618f-4bf6-8478-1439beb79e1e", - "metadata": {}, - "outputs": [], - "source": [ - "def image_overlay(image, segmented_image):\n", - " alpha = 1 # transparency for the original image\n", - " beta = 0.8 # transparency for the segmentation map\n", - " gamma = 0 # scalar added to each sum\n", - " # print(image.size)\n", - " # print(segmented_image.shape)\n", - " segmented_image = cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR)\n", - " image = np.array(image)\n", - " image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n", - " cv2.addWeighted(image, alpha, segmented_image, beta, gamma, image)\n", - " return image" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6344c5b3-2523-4889-8086-dd6ee81c83ff", - "metadata": {}, - "outputs": [], - "source": [ - "import torchvision.transforms as transforms\n", - "crop_size = 400\n", - "transform = transforms.Compose([\n", - " transforms.Resize((crop_size,crop_size)),\n", - " transforms.ToTensor(),\n", - " transforms.Normalize(mean=[0.485, 0.456, 0.406],\n", - " std=[0.229, 0.224, 0.225])\n", - "])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2383f20d-3197-4658-83d1-5c30550a9020", - "metadata": {}, - "outputs": [], - "source": [ - "label_map = [\n", - " (0, 0, 0), # background\n", - " (128, 0, 0), # aeroplane\n", - " (0, 128, 0), # bicycle\n", - " (128, 128, 0), # bird\n", - " (0, 0, 128), # boat\n", - " (128, 0, 128), # bottle\n", - " (0, 128, 128), # bus \n", - " (128, 128, 128), # car\n", - " (64, 0, 0), # cat\n", - " (192, 0, 0), # chair\n", - " (64, 128, 0), # cow\n", - " (192, 128, 0), # dining table\n", - " (64, 0, 128), # dog\n", - " (192, 0, 128), # horse\n", - " (64, 128, 128), # motorbike\n", - " (192, 128, 128), # person\n", - " (0, 64, 0), # potted plant\n", - " (128, 64, 0), # sheep\n", - " (0, 192, 0), # sofa\n", - " (128, 192, 0), # train\n", - " (0, 64, 128) # tv/monitor\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7f4e32bc-8a32-4eb5-ad3b-ff3e4a45c9ae", - "metadata": {}, - "outputs": [], - "source": [ - "def get_segment_labels(image, model, device):\n", - " # transform the image to tensor and load into computation device\n", - " image = transform(image).to(device)\n", - " image = image.unsqueeze(0) # add a batch dimension\n", - " outputs = model(image)\n", - " return outputs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b3c6fbbc-fe9b-46f1-a09d-66677b658c21", - "metadata": {}, - "outputs": [], - "source": [ - "def draw_segmentation_map(outputs):\n", - " labels = outputs.detach().cpu().numpy()\n", - " # create Numpy arrays containing zeros\n", - " # later to be used to fill them with respective red, 
green, and blue pixels\n", - " red_map = np.zeros_like(labels).astype(np.uint8)\n", - " green_map = np.zeros_like(labels).astype(np.uint8)\n", - " blue_map = np.zeros_like(labels).astype(np.uint8)\n", - " \n", - " for label_num in range(0, len(label_map)):\n", - " index = labels == label_num\n", - " red_map[index] = np.array(label_map)[label_num, 0]\n", - " green_map[index] = np.array(label_map)[label_num, 1]\n", - " blue_map[index] = np.array(label_map)[label_num, 2]\n", - " \n", - " segmentation_map = np.stack([red_map, green_map, blue_map], axis=2)\n", - " return segmentation_map" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15c52e4b-25bc-4023-8719-1ef497c0d60a", - "metadata": {}, - "outputs": [], - "source": [ - "def ImageNames():\n", - " inputlist = open('input/input.txt', 'r')\n", - " Lines = inputlist.readlines()\n", - " count = 0\n", - " imageList = []\n", - " for line in Lines:\n", - " name = line.split(\"preproc_\",1)[1]\n", - " name = name.split('.')[0]\n", - " imageList.append(name)\n", - " count += 1\n", - " return imageList\n", - "imageList = ImageNames()\n", - "print((imageList))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3ba800e6-d036-4889-b5f1-917a34a2c642", - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import numpy as np\n", - "device = torch.device('cpu')\n", - "model.eval()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0e40e33e-e46b-4291-b1b7-8f9ae603b701", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output/model_prediction', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1ad558bd-54b8-4e57-82fb-cafc49da4fc9", - "metadata": {}, - "outputs": [], - "source": [ - "image_dir = # setup dataset path here. 
\n", - "from PIL import Image\n", - "import cv2\n", - "for i in range(0,len(imageList)):\n", - "\n", - " image = Image.open(image_dir+imageList[i]+'.jpg')\n", - " # do forward pass and get the output dictionary\n", - " image = image.resize((crop_size,crop_size))\n", - " # print(image.size)\n", - " outputs = get_segment_labels(image, model, device)\n", - " # get the data from the `out` key\n", - " # outputs = outputs['out']\n", - " # print(type(outputs))\n", - " # print(outputs.shape)\n", - " segmented_image = draw_segmentation_map(outputs[0])\n", - " print(image.size)\n", - " print(segmented_image.shape)\n", - " final_image = image_overlay(image, segmented_image)\n", - " # show the segmented image and save to disk\n", - " # cv2.imshow('Segmented image', final_image)\n", - " # cv2.waitKey(0)\n", - " cv2.imwrite(f\"output/model_prediction/{imageList[i]}.jpg\", final_image)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3fe377f7-b9a8-4d48-8a3e-4a4c0b537edf", - "metadata": {}, - "outputs": [], - "source": [ - "def PostProc(img_path, out_path,i):\n", - " res = np.fromfile(img_path, dtype=\"float32\")\n", - " res_reshape = res.reshape((1,400,400)).astype(np.float32)\n", - " model_img = torch.from_numpy(res_reshape)\n", - " segmented_image = draw_segmentation_map(model_img[0])\n", - " image = Image.open(image_dir+imageList[i]+'.jpg')\n", - " # do forward pass and get the output dictionary\n", - " image = image.resize((crop_size,crop_size))\n", - " final_image = image_overlay(image, segmented_image)\n", - " cv2.imwrite(f\"{out_path}\", final_image)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5d407324-addc-4d5b-b4c6-601b4f155a1b", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output/test_results', exist_ok=True)\n", - "os.makedirs('output/test_results/32b_arm', exist_ok=True)\n", - "os.makedirs('output/test_results/8b_dsp', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb5d05b3-dd99-4a58-8c11-108b2c58abab", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"output/OUTPUT_32b_CPU/\"\n", - "image_dir = # mention dataset path here. 
\n", - "import cv2\n", - "import os\n", - "import numpy as np\n", - "import torch\n", - "from PIL import Image\n", - "for i in range(0,len(imageList)):\n", - " img_path = os.path.join(test_images_dir, 'Result_')\n", - " img_path = img_path+str(i)+'/526.raw'\n", - " out_path = 'output/test_results/32b_arm/'+imageList[i]+'_prediction_32b_arm.png'\n", - " PostProc(img_path, out_path,i)\n", - " i = i +1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f0cf6d4-841d-4ed7-bfce-4b3b75be0bd1", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"output/OUTPUT_8b_DSP/\"\n", - "\n", - "for i in range(0,len(imageList)):\n", - " img_path = os.path.join(test_images_dir, 'Result_')\n", - " img_path = img_path+str(i)+'/526.raw'\n", - " out_path = 'output/test_results/8b_dsp/'+imageList[i]+'_prediction_8b_dsp.png'\n", - " PostProc(img_path, out_path,i)\n", - " i = i +1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "467eb305-e30a-4792-8d04-518cdf7cb6dc", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "fig = plt.figure(figsize=(30, 100));\n", - "import cv2\n", - "for i in range(0, 5):\n", - " \n", - " original = cv2.imread(''+imageList[i]+'.jpg')\n", - " original = cv2.resize(original, (513,513))\n", - " original = cv2.cvtColor(original, cv2.COLOR_BGR2RGB)\n", - " ax = fig.add_subplot(28,4,4*i+1);\n", - " plt.imshow(original,cmap='gray');\n", - " ax.set_title('original image\\n');\n", - " ax.axis('off');\n", - " \n", - " pth_inf = cv2.imread('output/model_prediction/'+imageList[i]+'.jpg')\n", - " pth_inf = cv2.resize(pth_inf, (513,513))\n", - " # pth_inf = cv2.cvtColor(pth_inf, cv2.COLOR_BGR2RGB)\n", - " # pth_overlay = image_overlay(original, pth_inf)\n", - " ax = fig.add_subplot(28,4,4*i+2);\n", - " plt.imshow(pth_inf,cmap='gray');\n", - " ax.set_title('pth output\\n');\n", - " ax.axis('off');\n", - "\n", - "\n", - " arm_fp32= cv2.imread('output/test_results/32b_arm/'+imageList[i]+'_prediction_32b_arm.png')\n", - " # arm_fp32 = cv2.cvtColor(arm_fp32, cv2.COLOR_BGR2RGB)\n", - " # fp32_overlay = image_overlay(original, arm_fp32)\n", - " arm_fp32 = cv2.resize(arm_fp32, (513,513))\n", - " ax = fig.add_subplot(28,4,4*i+3);\n", - " plt.imshow(arm_fp32,cmap='gray');\n", - " ax.set_title('fp32 on ARM\\n');\n", - " ax.axis('off');\n", - "\n", - " dsp= cv2.imread('output/test_results/8b_dsp/'+imageList[i]+'_prediction_8b_dsp.png')\n", - " # dsp_int8 = cv2.cvtColor(dsp_int8, cv2.COLOR_BGR2RGB)\n", - " # int8_overlay = image_overlay(original, dsp_int8)\n", - " # print(dsp)\n", - " ax = fig.add_subplot(28,4,4*i+4);\n", - " plt.imshow(dsp,cmap='gray');\n", - " ax.set_title('int8 on DSP\\n');\n", - " ax.axis('off');" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b1b7af3c-e399-449c-a28c-4ba568dcab0d", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/04-image-segmentation/fcn_resnet50/fcn_resnet50_original.ipynb b/models-for-solutions/04-image-segmentation/fcn_resnet50/fcn_resnet50_original.ipynb deleted file mode 100644 
index f97e536a..00000000 --- a/models-for-solutions/04-image-segmentation/fcn_resnet50/fcn_resnet50_original.ipynb +++ /dev/null @@ -1,641 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "33e1ee76-7229-4154-8f68-8ec75ba5edd9", - "metadata": {}, - "source": [ - "# Setting up SDK artifacts" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b2957050-87cc-4b0b-89a5-d05f60b20afb", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/snpe/2.15.1.230926/\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"input/raw\"\n", - "os.environ['DLC32']=\"models/fcn_resnet50_fp32.dlc\"\n", - "os.environ['DLC8']=\"models/fcn_resnet50_quant16_w8a16.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"input/input.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"fcn_resnet50\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #change with your device-id. Use command \"adb devices\" to get devices names.\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "28ee22fe-0a59-4e60-89a3-798d87a440df", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import torchvision.models.segmentation as models\n", - "import torch\n", - "import torch.nn as nn" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "62ad7983-0084-47d1-b1fe-1813bd6651e5", - "metadata": {}, - "outputs": [], - "source": [ - "model = models.fcn_resnet50(pretrained=True)\n", - "input = torch.randn(1,3,400,400)\n", - "output = model(input)\n", - "output['out'].shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12c13449-3ee0-413e-a424-2640d81ca997", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('models',exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "482d4180-e2ef-496d-8d23-e875ca8a3fae", - "metadata": {}, - "outputs": [], - "source": [ - "dummy_input = torch.randn(1,3, 400, 400).type(torch.FloatTensor).to('cpu')\n", - "torch.onnx.export(model, dummy_input, \"./models/fcn_resnet50.onnx\",opset_version=11)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cf0f7170-1db0-4597-841e-6506547adabc", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import cv2\n", - "import glob\n", - "import numpy as np\n", - "import torch\n", - "from os.path import isfile, join\n", - "import matplotlib.pyplot as plt \n", - "from PIL import Image\n", - "from torchvision import transforms as T" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a07173ba-596b-492a-a5cc-c258348c6fe9", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('input',exist_ok=True)\n", - "os.makedirs('input/dataset',exist_ok=True)" - ] - }, - { - "cell_type": "markdown", - "id": "b17c35fb-5c8b-4e26-9899-2247de1bd3aa", - "metadata": {}, - "source": [ - "## Download dataset in input/dataset/ directory\n", - "\n", - "User needs to download dataset of their choice to verify accuracy steps given below. 
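The markdown cell above leaves dataset download to the user, and the following cells list input/raw files whose names start with preproc_ (the prefix that ImageNames() splits on), but the preprocessing step itself is not part of this diff. A hypothetical sketch of how such files could be produced, assuming SNPE's usual float32 NHWC raw layout and the 400x400 resize plus ImageNet normalization used by the notebooks' transform:

```python
# Hypothetical preprocessing sketch: turn a dataset JPEG into input/raw/preproc_<name>.raw.
# Assumptions: float32 NHWC raw tensor, 400x400 resize, ImageNet mean/std normalization.
import os
import numpy as np
from PIL import Image

MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)

def to_raw(jpg_path, out_dir="input/raw", size=400):
    os.makedirs(out_dir, exist_ok=True)
    img = Image.open(jpg_path).convert("RGB").resize((size, size))
    arr = np.asarray(img, dtype=np.float32) / 255.0   # HWC, values in [0, 1]
    arr = (arr - MEAN) / STD                          # ImageNet normalization
    name = os.path.splitext(os.path.basename(jpg_path))[0]
    out_path = os.path.join(out_dir, f"preproc_{name}.raw")
    arr.astype(np.float32).tofile(out_path)           # float32 raw consumed by snpe-net-run
    return out_path
```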
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d34cac77-80a9-4df8-8fe5-d4dcff12fb51", - "metadata": {}, - "outputs": [], - "source": [ - "directory_path = 'input/raw/'\n", - "output_file_path = 'input/input.txt' # The file where the output will be saved\n", - "all_files = os.listdir(directory_path)\n", - "# Filter only the .raw files and create a list of their names\n", - "raw_files = [file for file in all_files if file.endswith('.raw')]\n", - "raw_files = sorted(raw_files)\n", - "# Write the file names to the output file\n", - "with open(output_file_path, 'w') as f:\n", - " c=0\n", - " for raw_file in raw_files:\n", - " f.write(f\"./raw/{raw_file}\\n\")\n", - " c=c+1\n", - "print(f\"File names written to {output_file_path}.\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb1bb5a9-166a-4687-a3cd-62355137da5d", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc --input_network models/fcn_resnet50.onnx --output_path models/fcn_resnet50_fp32.dlc" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "18c383a5-f64d-44c3-8208-a1d5b65e013e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input/\n", - "snpe-dlc-quantize --input_dlc ../models/fcn_resnet50_fp32.dlc --input_list input.txt --axis_quant --output_dlc ../models/fcn_resnet50_quant_w8a8.dlc --enable_htp --htp_socs sm8550" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "58792696-c7c2-419e-9bb3-56e5213a47ea", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input/\n", - "snpe-dlc-quantize --input_dlc ../models/fcn_resnet50_fp32.dlc --input_list input.txt --axis_quant --output_dlc ../models/fcn_resnet50_quant16_w8a16.dlc --act_bitwidth 16 --enable_htp --htp_socs sm8550" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c75dbc88-89b1-4269-8476-afaebf54a8aa", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc models/fcn_resnet50_fp32.dlc --output_dlc models/fcn_resnet50_fp16.dlc --use_float_io " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19fedfcf-9c0d-4021-91fa-996e7507313e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aa0005f2-de81-42c5-bfa5-3c2f607ec761", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"id": "549a9abe-c0b8-4982-872b-1713b1d9200e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "321c10d4-ef2d-43d1-adbb-6418f77a47db", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "#find ./raw -name *.raw > list.txt\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e0d07758-dd3b-45e7-ae57-02eb06110537", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=fcn_resnet50_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"fcn_resnet50\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list input.txt --output_dir $OUTPUT_FOLDER " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f85ce847-db91-4282-bb0d-1bd1819cbd7b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=fcn_resnet50_quant16_w8a16.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"fcn_resnet50\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "# modified the inputlist.txt. 
That's the only change\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list input.txt --output_dir $OUTPUT_FOLDER --use_dsp " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed45d308-20d9-473a-9b5c-e30fa926eb5b", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "247650d9-d56e-497a-a665-03d94c86ddbf", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP output/OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "879f4305-618f-4bf6-8478-1439beb79e1e", - "metadata": {}, - "outputs": [], - "source": [ - "def image_overlay(image, segmented_image):\n", - " alpha = 1 # transparency for the original image\n", - " beta = 0.8 # transparency for the segmentation map\n", - " gamma = 0 # scalar added to each sum\n", - " # print(image.size)\n", - " # print(segmented_image.shape)\n", - " segmented_image = cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR)\n", - " image = np.array(image)\n", - " image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n", - " cv2.addWeighted(image, alpha, segmented_image, beta, gamma, image)\n", - " return image" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6344c5b3-2523-4889-8086-dd6ee81c83ff", - "metadata": {}, - "outputs": [], - "source": [ - "import torchvision.transforms as transforms\n", - "crop_size = 400\n", - "transform = transforms.Compose([\n", - " transforms.Resize((crop_size,crop_size)),\n", - " transforms.ToTensor(),\n", - " transforms.Normalize(mean=[0.485, 0.456, 0.406],\n", - " std=[0.229, 0.224, 0.225])\n", - "])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2383f20d-3197-4658-83d1-5c30550a9020", - "metadata": {}, - "outputs": [], - "source": [ - "label_map = [\n", - " (0, 0, 0), # background\n", - " (128, 0, 0), # aeroplane\n", - " (0, 128, 0), # bicycle\n", - " (128, 128, 0), # bird\n", - " (0, 0, 128), # boat\n", - " (128, 0, 128), # bottle\n", - " (0, 128, 128), # bus \n", - " (128, 128, 128), # car\n", - " (64, 0, 0), # cat\n", - " (192, 0, 0), # chair\n", - " (64, 128, 0), # cow\n", - " (192, 128, 0), # dining table\n", - " (64, 0, 128), # dog\n", - " (192, 0, 128), # horse\n", - " (64, 128, 128), # motorbike\n", - " (192, 128, 128), # person\n", - " (0, 64, 0), # potted plant\n", - " (128, 64, 0), # sheep\n", - " (0, 192, 0), # sofa\n", - " (128, 192, 0), # train\n", - " (0, 64, 128) # tv/monitor\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7f4e32bc-8a32-4eb5-ad3b-ff3e4a45c9ae", - "metadata": {}, - "outputs": [], - "source": [ - "def get_segment_labels(image, model, device):\n", - " # transform the image to tensor and load into computation device\n", - " image = transform(image).to(device)\n", - " image = image.unsqueeze(0) # add a batch dimension\n", - " outputs = model(image)\n", - " return outputs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b3c6fbbc-fe9b-46f1-a09d-66677b658c21", - "metadata": {}, - "outputs": [], - "source": [ - "def draw_segmentation_map(outputs):\n", - " labels = outputs.detach().cpu().numpy()\n", - " # create Numpy arrays containing zeros\n", - " # later to be used to fill them with respective red, 
green, and blue pixels\n", - " red_map = np.zeros_like(labels).astype(np.uint8)\n", - " green_map = np.zeros_like(labels).astype(np.uint8)\n", - " blue_map = np.zeros_like(labels).astype(np.uint8)\n", - " \n", - " for label_num in range(0, len(label_map)):\n", - " index = labels == label_num\n", - " red_map[index] = np.array(label_map)[label_num, 0]\n", - " green_map[index] = np.array(label_map)[label_num, 1]\n", - " blue_map[index] = np.array(label_map)[label_num, 2]\n", - " \n", - " segmentation_map = np.stack([red_map, green_map, blue_map], axis=2)\n", - " return segmentation_map" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15c52e4b-25bc-4023-8719-1ef497c0d60a", - "metadata": {}, - "outputs": [], - "source": [ - "def ImageNames():\n", - " inputlist = open('input/input.txt', 'r')\n", - " Lines = inputlist.readlines()\n", - " count = 0\n", - " imageList = []\n", - " for line in Lines:\n", - " name = line.split(\"preproc_\",1)[1]\n", - " name = name.split('.')[0]\n", - " imageList.append(name)\n", - " count += 1\n", - " return imageList\n", - "imageList = ImageNames()\n", - "print((imageList))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3ba800e6-d036-4889-b5f1-917a34a2c642", - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import numpy as np\n", - "device = torch.device('cpu')\n", - "model.eval()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0e40e33e-e46b-4291-b1b7-8f9ae603b701", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output/model_prediction', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1ad558bd-54b8-4e57-82fb-cafc49da4fc9", - "metadata": {}, - "outputs": [], - "source": [ - "image_dir = # set this path to dataset path\n", - "from PIL import Image\n", - "import cv2\n", - "for i in range(0,len(imageList)):\n", - "\n", - " image = Image.open(image_dir+imageList[i]+'.jpg')\n", - " # do forward pass and get the output dictionary\n", - " image = image.resize((crop_size,crop_size))\n", - " # print(image.size)\n", - " outputs = get_segment_labels(image, model, device)\n", - " # get the data from the `out` key\n", - " outputs = outputs['out']\n", - " # print(type(outputs))\n", - " # print(outputs.shape)\n", - " argmax_output = torch.argmax(outputs,dim=1,keepdim=False)\n", - " segmented_image = draw_segmentation_map(argmax_output[0])\n", - " print(image.size)\n", - " print(segmented_image.shape)\n", - " final_image = image_overlay(image, segmented_image)\n", - " # show the segmented image and save to disk\n", - " # cv2.imshow('Segmented image', final_image)\n", - " # cv2.waitKey(0)\n", - " cv2.imwrite(f\"output/model_prediction/{imageList[i]}.jpg\", final_image)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3fe377f7-b9a8-4d48-8a3e-4a4c0b537edf", - "metadata": {}, - "outputs": [], - "source": [ - "def PostProc(img_path, out_path,i):\n", - " res = np.fromfile(img_path, dtype=\"float32\")\n", - " res_reshape = res.reshape((1,400,400,21)).astype(np.float32)\n", - " model_img = torch.from_numpy(res_reshape)\n", - " argmax_output = torch.argmax(model_img,dim=3,keepdim=False)\n", - " \n", - " segmented_image = draw_segmentation_map(argmax_output[0])\n", - " image = Image.open(image_dir+imageList[i]+'.jpg')\n", - " # do forward pass and get the output dictionary\n", - " image = image.resize((crop_size,crop_size))\n", - " final_image = image_overlay(image, segmented_image)\n", - " 
cv2.imwrite(f\"{out_path}\", final_image)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5d407324-addc-4d5b-b4c6-601b4f155a1b", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output/test_results', exist_ok=True)\n", - "os.makedirs('output/test_results/32b_arm', exist_ok=True)\n", - "os.makedirs('output/test_results/8b_dsp', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb5d05b3-dd99-4a58-8c11-108b2c58abab", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"output/OUTPUT_32b_CPU/\"\n", - "image_dir = # set this path to dataset path\n", - "import cv2\n", - "import os\n", - "import numpy as np\n", - "import torch\n", - "from PIL import Image\n", - "for i in range(0,len(imageList)):\n", - " img_path = os.path.join(test_images_dir, 'Result_')\n", - " img_path = img_path+str(i)+'/542.raw'\n", - " out_path = 'output/test_results/32b_arm/'+imageList[i]+'_prediction_32b_arm.png'\n", - " PostProc(img_path, out_path,i)\n", - " i = i +1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f0cf6d4-841d-4ed7-bfce-4b3b75be0bd1", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"output/OUTPUT_8b_DSP/\"\n", - "\n", - "for i in range(0,len(imageList)):\n", - " img_path = os.path.join(test_images_dir, 'Result_')\n", - " img_path = img_path+str(i)+'/542.raw'\n", - " out_path = 'output/test_results/8b_dsp/'+imageList[i]+'_prediction_8b_dsp.png'\n", - " PostProc(img_path, out_path,i)\n", - " i = i +1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "467eb305-e30a-4792-8d04-518cdf7cb6dc", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "fig = plt.figure(figsize=(30, 100));\n", - "import cv2\n", - "for i in range(0, 5):\n", - " \n", - " original = cv2.imread(''+imageList[i]+'.jpg')\n", - " original = cv2.resize(original, (513,513))\n", - " original = cv2.cvtColor(original, cv2.COLOR_BGR2RGB)\n", - " ax = fig.add_subplot(28,4,4*i+1);\n", - " plt.imshow(original,cmap='gray');\n", - " ax.set_title('original image\\n');\n", - " ax.axis('off');\n", - " \n", - " pth_inf = cv2.imread('output/model_prediction/'+imageList[i]+'.jpg')\n", - " pth_inf = cv2.resize(pth_inf, (513,513))\n", - " # pth_inf = cv2.cvtColor(pth_inf, cv2.COLOR_BGR2RGB)\n", - " # pth_overlay = image_overlay(original, pth_inf)\n", - " ax = fig.add_subplot(28,4,4*i+2);\n", - " plt.imshow(pth_inf,cmap='gray');\n", - " ax.set_title('pth output\\n');\n", - " ax.axis('off');\n", - "\n", - "\n", - " arm_fp32= cv2.imread('output/test_results/32b_arm/'+imageList[i]+'_prediction_32b_arm.png')\n", - " # arm_fp32 = cv2.cvtColor(arm_fp32, cv2.COLOR_BGR2RGB)\n", - " # fp32_overlay = image_overlay(original, arm_fp32)\n", - " arm_fp32 = cv2.resize(arm_fp32, (513,513))\n", - " ax = fig.add_subplot(28,4,4*i+3);\n", - " plt.imshow(arm_fp32,cmap='gray');\n", - " ax.set_title('fp32 on ARM\\n');\n", - " ax.axis('off');\n", - "\n", - " dsp= cv2.imread('output/test_results/8b_dsp/'+imageList[i]+'_prediction_8b_dsp.png')\n", - " # dsp_int8 = cv2.cvtColor(dsp_int8, cv2.COLOR_BGR2RGB)\n", - " # int8_overlay = image_overlay(original, dsp_int8)\n", - " # print(dsp)\n", - " ax = fig.add_subplot(28,4,4*i+4);\n", - " plt.imshow(dsp,cmap='gray');\n", - " ax.set_title('int8 on DSP\\n');\n", - " ax.axis('off');" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - 
"codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/04-image-segmentation/fcn_resnet50/readme.md b/models-for-solutions/04-image-segmentation/fcn_resnet50/readme.md deleted file mode 100644 index 3dbb9781..00000000 --- a/models-for-solutions/04-image-segmentation/fcn_resnet50/readme.md +++ /dev/null @@ -1,48 +0,0 @@ -# Semantic segmentation fcn_resnet50 - - -## Pre-requisites - -* Please follow the instructions for setting up Qualcomm Neural Processing SDK using the [link] (https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) provided. -- Install onnx v1.6.0. Installation instruction can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. -- Install OpenCV ```pip install cv2``` -- Install mxnet ```pip install mxnet``` - -## How to get the model ? -Here We have given [original-notebook](fcn_resnet50_original.ipynb) for original FCN_RESNET50 model from pytorch and We have optimized this model for better performance on DSP and you can see [optimized-notebook](fcn_resnet50_optimized.ipynb) of optimized model. -For demo we have used optimized-model. - -for ONNX model follow attached notebook - -``` -import os -dummy_input = torch.randn(1,3, 400, 400).type(torch.FloatTensor).to('cpu') -torch.onnx.export(model, dummy_input, "./models/fcn_resnet50.onnx",opset_version=11) - -``` -## Convert model to DLC - -for fp32_DLC and FP16_DLC model follow attached notebook - -``` -snpe-onnx-to-dlc --input_network models/fcn_resnet50.onnx --output_path models/fcn_resnet50.dlc - -``` - -## Quantization of DLC -for quantized INT8_DLC, INT16_DLC model follow attached notebook -``` -cd input/ -snpe-dlc-quantize --input_dlc ../models/fcn_resnet50.dlc --input_list input.txt --axis_quant --output_dlc ../models/fcn_resnet50_quant.dlc -``` -## Accuracy Analysis -- To check results please run "[notebook](fcn_resnet50_optimized.ipynb)". -- To run any jupyter notebook, run below command. It will generate few links on the screen, pick the link with your machine name on it (host-name) and paste it in any browser. -- Navigate to the notebook ".ipynb" file and simply click that file. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. 
and/or its subsidiaries.* diff --git a/models-for-solutions/04-image-segmentation/lraspp/lraspp_mobilenetv3.ipynb b/models-for-solutions/04-image-segmentation/lraspp/lraspp_mobilenetv3.ipynb deleted file mode 100644 index 35b7f365..00000000 --- a/models-for-solutions/04-image-segmentation/lraspp/lraspp_mobilenetv3.ipynb +++ /dev/null @@ -1,664 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "33e1ee76-7229-4154-8f68-8ec75ba5edd9", - "metadata": {}, - "source": [ - "## Import necessary libraries and load model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "97e0ed81-13cb-4ad7-a14a-a9b842da8765", - "metadata": {}, - "outputs": [], - "source": [ - "## Give appropriate permission to the directory \"FOLDER_WITH_ARTIFACTS\" you are working with\n", - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"input/raw\"\n", - "os.environ['DLC32']=\"models/lraspp_fp32.dlc\"\n", - "os.environ['DLC8']=\"models/lraspp_w8a16.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"input/input.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"lraspp\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #change with your device-id. Use command \"adb devices\" to get devices names.\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ae45ab72-b611-404b-8fd6-d7b640298e00", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import torchvision.models.segmentation as models\n", - "import torch\n", - "import torch.nn as nn" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "62ad7983-0084-47d1-b1fe-1813bd6651e5", - "metadata": {}, - "outputs": [], - "source": [ - "pretrained_model = models.lraspp_mobilenet_v3_large(pretrained=True)\n", - "class CustomModel(nn.Module):\n", - " def __init__(self,pretrained_model):\n", - " super(CustomModel,self).__init__()\n", - " self.pretrained_model = pretrained_model\n", - " self.argmax = nn.LogSoftmax(dim=1)\n", - "\n", - " def forward(self,x):\n", - " output_dict = self.pretrained_model(x)\n", - " output = output_dict['out']\n", - " argmax_output = torch.argmax(output,dim=1,keepdim=False)\n", - " return argmax_output\n", - "\n", - "model = CustomModel(pretrained_model)\n", - "input = torch.randn(1,3,400,400)\n", - "output = model(input)\n", - "output.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12c13449-3ee0-413e-a424-2640d81ca997", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('models',exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "482d4180-e2ef-496d-8d23-e875ca8a3fae", - "metadata": {}, - "outputs": [], - "source": [ - "dummy_input = torch.randn(1,3, 400, 400).type(torch.FloatTensor).to('cpu')\n", - "torch.onnx.export(model, dummy_input, \"./models/lraspp.onnx\",opset_version=11)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cf0f7170-1db0-4597-841e-6506547adabc", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import cv2\n", - "import glob\n", - "import numpy as np\n", - "import torch\n", - "from os.path import isfile, join\n", - "import matplotlib.pyplot as plt \n", - "from PIL import Image\n", - "from torchvision import transforms as T" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "id": "a07173ba-596b-492a-a5cc-c258348c6fe9", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('input',exist_ok=True)\n", - "os.makedirs('input/dataset',exist_ok=True)" - ] - }, - { - "cell_type": "markdown", - "id": "b17c35fb-5c8b-4e26-9899-2247de1bd3aa", - "metadata": {}, - "source": [ - "## Download dataset in input/dataset/ directory\n", - "\n", - "User needs to download dataset of their choice before proceeding further" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d34cac77-80a9-4df8-8fe5-d4dcff12fb51", - "metadata": {}, - "outputs": [], - "source": [ - "directory_path = 'input/raw/'\n", - "output_file_path = 'input/input.txt' # The file where the output will be saved\n", - "\n", - "all_files = os.listdir(directory_path)\n", - "\n", - "# Filter only the .raw files and create a list of their names\n", - "raw_files = [file for file in all_files if file.endswith('.raw')]\n", - "raw_files = sorted(raw_files)\n", - "# Write the file names to the output file\n", - "with open(output_file_path, 'w') as f:\n", - " c=0\n", - " for raw_file in raw_files:\n", - " f.write(f\"./raw/{raw_file}\\n\")\n", - " c=c+1\n", - " \n", - "\n", - "print(f\"File names written to {output_file_path}.\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb1bb5a9-166a-4687-a3cd-62355137da5d", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc --input_network models/lraspp.onnx --output_path models/lraspp_fp32.dlc\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "18c383a5-f64d-44c3-8208-a1d5b65e013e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input/\n", - "snpe-dlc-quantize --input_dlc ../models/lraspp_fp32.dlc --input_list input.txt --axis_quant --output_dlc ../models/lraspp_mobilenet_v3_large_quant_w8a8.dlc --enable_htp --htp_socs sm8550" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "58792696-c7c2-419e-9bb3-56e5213a47ea", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "cd input/\n", - "snpe-dlc-quantize --input_dlc ../models/lraspp_fp32.dlc --input_list input.txt --axis_quant --output_dlc ../models/lraspp_w8a16.dlc --act_bitwidth 16 --enable_htp " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c75dbc88-89b1-4269-8476-afaebf54a8aa", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc models/lraspp_fp32.dlc --output_dlc models/lraspp_fp16.dlc --use_float_io " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19fedfcf-9c0d-4021-91fa-996e7507313e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aa0005f2-de81-42c5-bfa5-3c2f607ec761", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL 
/data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "549a9abe-c0b8-4982-872b-1713b1d9200e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "321c10d4-ef2d-43d1-adbb-6418f77a47db", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "#find ./raw -name *.raw > list.txt\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLC32 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLC8 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $RAW_FILE_FOLDER /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e0d07758-dd3b-45e7-ae57-02eb06110537", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=lraspp_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"lraspp\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list input.txt --output_dir $OUTPUT_FOLDER " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f85ce847-db91-4282-bb0d-1bd1819cbd7b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_8b_DSP\n", - "export OUTPUT_DLC_QUANTIZED8=lraspp_w8a16.dlc\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export ONDEVICE_FOLDER=\"lraspp\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "# modified the inputlist.txt. 
That's the only change\n", - "snpe-net-run --container $OUTPUT_DLC_QUANTIZED8 --input_list input.txt --output_dir $OUTPUT_FOLDER --use_dsp " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8fe729b0-902e-4f41-b17a-f11dfeba0389", - "metadata": {}, - "outputs": [], - "source": [ - "rm -rf output/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ed45d308-20d9-473a-9b5c-e30fa926eb5b", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "247650d9-d56e-497a-a665-03d94c86ddbf", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_8b_DSP output/OUTPUT_8b_DSP\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU output/OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "879f4305-618f-4bf6-8478-1439beb79e1e", - "metadata": {}, - "outputs": [], - "source": [ - "def image_overlay(image, segmented_image):\n", - " alpha = 1 # transparency for the original image\n", - " beta = 0.8 # transparency for the segmentation map\n", - " gamma = 0 # scalar added to each sum\n", - " # print(image.size)\n", - " # print(segmented_image.shape)\n", - " segmented_image = cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR)\n", - " image = np.array(image)\n", - " image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n", - " cv2.addWeighted(image, alpha, segmented_image, beta, gamma, image)\n", - " return image" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6344c5b3-2523-4889-8086-dd6ee81c83ff", - "metadata": {}, - "outputs": [], - "source": [ - "import torchvision.transforms as transforms\n", - "crop_size = 400\n", - "transform = transforms.Compose([\n", - " transforms.Resize((crop_size,crop_size)),\n", - " transforms.ToTensor(),\n", - " transforms.Normalize(mean=[0.485, 0.456, 0.406],\n", - " std=[0.229, 0.224, 0.225])\n", - "])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2383f20d-3197-4658-83d1-5c30550a9020", - "metadata": {}, - "outputs": [], - "source": [ - "label_map = [\n", - " (0, 0, 0), # background\n", - " (128, 0, 0), # aeroplane\n", - " (0, 128, 0), # bicycle\n", - " (128, 128, 0), # bird\n", - " (0, 0, 128), # boat\n", - " (128, 0, 128), # bottle\n", - " (0, 128, 128), # bus \n", - " (128, 128, 128), # car\n", - " (64, 0, 0), # cat\n", - " (192, 0, 0), # chair\n", - " (64, 128, 0), # cow\n", - " (192, 128, 0), # dining table\n", - " (64, 0, 128), # dog\n", - " (192, 0, 128), # horse\n", - " (64, 128, 128), # motorbike\n", - " (192, 128, 128), # person\n", - " (0, 64, 0), # potted plant\n", - " (128, 64, 0), # sheep\n", - " (0, 192, 0), # sofa\n", - " (128, 192, 0), # train\n", - " (0, 64, 128) # tv/monitor\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7f4e32bc-8a32-4eb5-ad3b-ff3e4a45c9ae", - "metadata": {}, - "outputs": [], - "source": [ - "def get_segment_labels(image, model, device):\n", - " # transform the image to tensor and load into computation device\n", - " image = transform(image).to(device)\n", - " image = image.unsqueeze(0) # add a batch dimension\n", - " outputs = model(image)\n", - " return outputs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b3c6fbbc-fe9b-46f1-a09d-66677b658c21", - "metadata": {}, - "outputs": [], - "source": [ - "def 
draw_segmentation_map(outputs):\n", - " labels = outputs.detach().cpu().numpy()\n", - " # create Numpy arrays containing zeros\n", - " # later to be used to fill them with respective red, green, and blue pixels\n", - " red_map = np.zeros_like(labels).astype(np.uint8)\n", - " green_map = np.zeros_like(labels).astype(np.uint8)\n", - " blue_map = np.zeros_like(labels).astype(np.uint8)\n", - " \n", - " for label_num in range(0, len(label_map)):\n", - " index = labels == label_num\n", - " red_map[index] = np.array(label_map)[label_num, 0]\n", - " green_map[index] = np.array(label_map)[label_num, 1]\n", - " blue_map[index] = np.array(label_map)[label_num, 2]\n", - " \n", - " segmentation_map = np.stack([red_map, green_map, blue_map], axis=2)\n", - " return segmentation_map" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15c52e4b-25bc-4023-8719-1ef497c0d60a", - "metadata": {}, - "outputs": [], - "source": [ - "def ImageNames():\n", - " inputlist = open('input/input.txt', 'r')\n", - " Lines = inputlist.readlines()\n", - " count = 0\n", - " imageList = []\n", - " for line in Lines:\n", - " name = line.split(\"preproc_\",1)[1]\n", - " name = name.split('.')[0]\n", - " imageList.append(name)\n", - " count += 1\n", - " return imageList\n", - "imageList = ImageNames()\n", - "print((imageList))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3ba800e6-d036-4889-b5f1-917a34a2c642", - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import numpy as np\n", - "device = torch.device('cpu')\n", - "model.eval()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0e40e33e-e46b-4291-b1b7-8f9ae603b701", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output/model_prediction', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1ad558bd-54b8-4e57-82fb-cafc49da4fc9", - "metadata": {}, - "outputs": [], - "source": [ - "image_dir = # mention dataset path here\n", - "from PIL import Image\n", - "import cv2\n", - "for i in range(0,len(imageList)):\n", - "\n", - " image = Image.open(image_dir+imageList[i]+'.jpg')\n", - " # do forward pass and get the output dictionary\n", - " image = image.resize((crop_size,crop_size))\n", - " # print(image.size)\n", - " outputs = get_segment_labels(image, model, device)\n", - " # get the data from the `out` key\n", - " # outputs = outputs['out']\n", - " # print(type(outputs))\n", - " # print(outputs.shape)\n", - " segmented_image = draw_segmentation_map(outputs[0])\n", - " print(image.size)\n", - " print(segmented_image.shape)\n", - " final_image = image_overlay(image, segmented_image)\n", - " # show the segmented image and save to disk\n", - " # cv2.imshow('Segmented image', final_image)\n", - " # cv2.waitKey(0)\n", - " cv2.imwrite(f\"output/model_prediction/{imageList[i]}.jpg\", final_image)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3fe377f7-b9a8-4d48-8a3e-4a4c0b537edf", - "metadata": {}, - "outputs": [], - "source": [ - "def PostProc(img_path, out_path,i):\n", - " res = np.fromfile(img_path, dtype=\"float32\")\n", - " res_reshape = res.reshape((1,400,400)).astype(np.float32)\n", - " model_img = torch.from_numpy(res_reshape)\n", - " segmented_image = draw_segmentation_map(model_img[0])\n", - " image = Image.open(image_dir+imageList[i]+'.jpg')\n", - " # do forward pass and get the output dictionary\n", - " image = image.resize((crop_size,crop_size))\n", - " final_image = image_overlay(image, segmented_image)\n", 
- " cv2.imwrite(f\"{out_path}\", final_image) " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5d407324-addc-4d5b-b4c6-601b4f155a1b", - "metadata": {}, - "outputs": [], - "source": [ - "os.makedirs('output/test_results', exist_ok=True)\n", - "os.makedirs('output/test_results/32b_arm', exist_ok=True)\n", - "os.makedirs('output/test_results/8b_dsp', exist_ok=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb5d05b3-dd99-4a58-8c11-108b2c58abab", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"output/OUTPUT_32b_CPU/\"\n", - "image_dir = # mention dataset path here\n", - "import cv2\n", - "import os\n", - "import numpy as np\n", - "import torch\n", - "from PIL import Image\n", - "for i in range(0,len(imageList)):\n", - " img_path = os.path.join(test_images_dir, 'Result_')\n", - " img_path = img_path+str(i)+'/732.raw'\n", - " out_path = 'output/test_results/32b_arm/'+imageList[i]+'_prediction_32b_arm.png'\n", - " PostProc(img_path, out_path,i)\n", - " i = i +1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f0cf6d4-841d-4ed7-bfce-4b3b75be0bd1", - "metadata": {}, - "outputs": [], - "source": [ - "test_images_dir = \"output/OUTPUT_8b_DSP/\"\n", - "for i in range(0,len(imageList)):\n", - " img_path = os.path.join(test_images_dir, 'Result_')\n", - " img_path = img_path+str(i)+'/732.raw'\n", - " out_path = 'output/test_results/8b_dsp/'+imageList[i]+'_prediction_8b_dsp.png'\n", - " PostProc(img_path, out_path,i)\n", - " i = i +1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "467eb305-e30a-4792-8d04-518cdf7cb6dc", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "fig = plt.figure(figsize=(30, 100));\n", - "import cv2\n", - "for i in range(0, 5):\n", - " \n", - " original = cv2.imread(''+imageList[i]+'.jpg')\n", - " original = cv2.resize(original, (513,513))\n", - " original = cv2.cvtColor(original, cv2.COLOR_BGR2RGB)\n", - " ax = fig.add_subplot(28,4,4*i+1);\n", - " plt.imshow(original,cmap='gray');\n", - " ax.set_title('original image\\n');\n", - " ax.axis('off');\n", - " \n", - " pth_inf = cv2.imread('output/model_prediction/'+imageList[i]+'.jpg')\n", - " pth_inf = cv2.resize(pth_inf, (513,513))\n", - " # pth_inf = cv2.cvtColor(pth_inf, cv2.COLOR_BGR2RGB)\n", - " # pth_overlay = image_overlay(original, pth_inf)\n", - " ax = fig.add_subplot(28,4,4*i+2);\n", - " plt.imshow(pth_inf,cmap='gray');\n", - " ax.set_title('pth output\\n');\n", - " ax.axis('off');\n", - "\n", - "\n", - " arm_fp32= cv2.imread('output/test_results/32b_arm/'+imageList[i]+'_prediction_32b_arm.png')\n", - " # arm_fp32 = cv2.cvtColor(arm_fp32, cv2.COLOR_BGR2RGB)\n", - " # fp32_overlay = image_overlay(original, arm_fp32)\n", - " arm_fp32 = cv2.resize(arm_fp32, (513,513))\n", - " ax = fig.add_subplot(28,4,4*i+3);\n", - " plt.imshow(arm_fp32,cmap='gray');\n", - " ax.set_title('fp32 on ARM\\n');\n", - " ax.axis('off');\n", - "\n", - " dsp= cv2.imread('output/test_results/8b_dsp/'+imageList[i]+'_prediction_8b_dsp.png')\n", - " # dsp_int8 = cv2.cvtColor(dsp_int8, cv2.COLOR_BGR2RGB)\n", - " # int8_overlay = image_overlay(original, dsp_int8)\n", - " # print(dsp)\n", - " ax = fig.add_subplot(28,4,4*i+4);\n", - " plt.imshow(dsp,cmap='gray');\n", - " ax.set_title('int8 on DSP\\n');\n", - " ax.axis('off');" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - 
"codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/04-image-segmentation/lraspp/readme.md b/models-for-solutions/04-image-segmentation/lraspp/readme.md deleted file mode 100644 index 66653441..00000000 --- a/models-for-solutions/04-image-segmentation/lraspp/readme.md +++ /dev/null @@ -1,53 +0,0 @@ -# Semantic segmentation LRASPP_Mobilenetv3_large - - -## Pre-requisites - -* Please follow the instructions for setting up Qualcomm Neural Processing SDK using the [link] (https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) provided. -- Install onnx v1.6.0. Installation instruction can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. -- Install OpenCV ```pip install cv2``` -- Install mxnet ```pip install mxnet``` - - -## How to get the model ? - -for ONNX model follow attached notebook -- You Need to change 2 layer Hardsigmoid and hardswish Layer(It'll Take 10 Minutes) -- https://github.com/quic/qidk/tree/master/Model-Enablement/Model-Conversion-Layer-Replacement -- Follow The above Link to add CustomHardSigmoid and CustomHardswish Layer -``` -import os -dummy_input = torch.randn(1,3, 400, 400).type(torch.FloatTensor).to('cpu') -torch.onnx.export(model, dummy_input, "./models/lraspp.onnx",opset_version=11) - - -``` -## Convert model to DLC - -for fp32_DLC and FP16_DLC model follow attached notebook -``` -snpe-onnx-to-dlc --input_network models/lraspp.onnx --output_path models/lraspp_fp32.dlc - -``` - -## Quantization of DLC -for quantized INT8_DLC, INT16_DLC model follow attached notebook - -``` -cd input/ -snpe-dlc-quantize --input_dlc ../models/lraspp_fp32.dlc --input_list input.txt --axis_quant --output_dlc ../models/lraspp_quant_w8a8.dlc - -``` - -## Accuracy Analysis - -- To check results please run "[LRASPP_Mobilenetv3_large](lraspp_mobilenetv3.ipynb)". -- To run any jupyter notebook, run below command. It will generate few links on the screen, pick the link with your machine name on it (host-name) and paste it in any browser. -- Navigate to the notebook ".ipynb" file and simply click that file. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - - -###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. 
and/or its subsidiaries.* diff --git a/models-for-solutions/05-nlp-nlu/Albert/README.md b/models-for-solutions/05-nlp-nlu/Albert/README.md deleted file mode 100644 index 1cc69a31..00000000 --- a/models-for-solutions/05-nlp-nlu/Albert/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# "Question Answering" using Alberta - -| Field | Description | -| --- | --- | -| Model Name | Alberta | -| DNN Framwork | ONNX | -| Public Repo | https://huggingface.co/docs/transformers/model_doc/albert | -| Paper | https://arxiv.org/abs/1909.11942 | -| DLC Number of Inputs | 3 | -| DLC Input Ids Dimension | (1,384) | -| DLC Attention Mask Dimension | (1,384) | -| DLC Token Type Ids | (1,384) | -| Pre-Processing | Use Model Specific Tokenizer | -| Post-Processing | Again Use Model Specific Tokenizer to Post Process the Output | - -## Pre-Requisites - -- Qualcomm® Neural Processing SDK setup should be completed by following the guide [here](https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) -- Install onnx v1.6.0. Installation instruction can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. -- Install optimum ```pip install optimum```. - -## How to get the model ? - -For this demo, you can directly get the [onnx model](Models/onnx-model/model.onnx) from the directory onnx_model or you can generate it from this [jupyter_notebook](generating_model). - - -## Convert model to DLC - -- Convert the onnx model to DLC with below command. Below, command will also fix the input dimension for the dlc. - -```python -snpe-onnx-to-dlc -i Models/onnx-model/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o Models/dlc/alberta.dlc -``` - -## Quantization of DLC -- Quantization can improve model performance in terms of latency and make the model light weight. -- Before Running this command make sure you've created the raw file and the list.txt -```python -snpe-dlc-graph-prepare --input_dlc Models/dlc/alberta.dlc --input_list tf_raw_list.txt --output_dlc Models/dlc/aleberta_int.dlc -``` - -# Accuracy analysis -- To check accuracy please run "accuracy_analyzer.ipynb" a jupyter notebook present in accuracy folder. -- To run any jupyter notebook, run below command. It will generate few links on the screen, pick the link with your machine name on it (host-name) and paste it in any browser. -- Navigate to the notebook ".ipynb" file and simply click that file. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - - - -# References - -1. https://arxiv.org/abs/1909.11942 -2. https://huggingface.co/docs/transformers/model_doc/albert - - -###### *Qualcomm Neural Processing SDK and Snapdragon are products of Qualcomm Technologies, Inc. 
and/or its subsidiaries.* diff --git a/models-for-solutions/05-nlp-nlu/Albert/accuracy_analyzer.ipynb b/models-for-solutions/05-nlp-nlu/Albert/accuracy_analyzer.ipynb deleted file mode 100644 index 93564ee7..00000000 --- a/models-for-solutions/05-nlp-nlu/Albert/accuracy_analyzer.ipynb +++ /dev/null @@ -1,501 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "fc0c79e6-b8a4-4f65-98a6-8a440ee653f2", - "metadata": {}, - "source": [ - "## Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8b750fa4-3be5-4c10-97bb-98b0274f386d", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"alberta\"\n", - "os.environ['DLCFP16']=\"models/alberta_fp16.dlc\"\n", - "os.environ['DLCW16A16']=\"models/albertaw16a16_offline.dlc\"\n", - "os.environ['DLCFP32']=\"models/alberta_fp32.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"tf_raw_list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"alberta_device\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"\n", - "os.environ['SNPE_TARGET_DSPARCH']=\"hexagon-v73\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cf16c2ac-7942-49c7-a8ec-63d798f6da2d", - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "import pandas as pd\n", - "data_path=\"dev-v2.0.json\"\n", - "with open(data_path,\"r\") as f:\n", - " squad_data=json.load(f)\n", - "context_qa_triples=[]\n", - "for article in squad_data['data']:\n", - " for paragraph in article['paragraphs']:\n", - " context=paragraph['context']\n", - " for qa in paragraph['qas']:\n", - " question=qa['question']\n", - " if qa['answers']:\n", - " answer=qa['answers'][0]['text']\n", - " elif qa['plausible_answers']:\n", - " plausible_answers=qa['plausible_answers']\n", - " answer=plausible_answers[0]['text']\n", - " else:\n", - " answer=''\n", - "\n", - " context_qa_triples.append({'context':context,'question':question,'answers':answer})\n", - "\n", - "df=pd.DataFrame(context_qa_triples[:30])\n", - "df.head(3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cec9f9db-cbc7-460b-95ea-be33d1370e0c", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, AlbertForQuestionAnswering\n", - "import torch\n", - "\n", - "tokenizer = AutoTokenizer.from_pretrained(\"twmkn9/albert-base-v2-squad2\")\n", - "\n", - "question_token={}\n", - "\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as 
f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "markdown", - "id": "109a4d23-d4ee-4f41-bc58-409cd4227e5d", - "metadata": {}, - "source": [ - "#### F1 Score calculation custom code" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e4a36d51-2e05-49d4-a4ef-21474adc1eeb", - "metadata": {}, - "outputs": [], - "source": [ - "from sklearn.metrics import f1_score\n", - "def f1_scores_custom(prediction,ground_truth):\n", - " prediction_tokens=prediction.lower().split()\n", - " ground_truth_tokens=ground_truth.lower().split()\n", - " common_tokens=[token for token in prediction_tokens if token in ground_truth_tokens] \n", - " if (len(prediction_tokens)==0 and len(ground_truth_tokens)==0):\n", - " return [1.0,1.0,1.0]\n", - " elif len(prediction_tokens)==0 or len(ground_truth_tokens)==0:\n", - " return [0.0,0.0,0.0]\n", - " precision=len(common_tokens)/len(prediction_tokens)\n", - " recall=len(common_tokens)/len(ground_truth_tokens)\n", - " if precision+recall==0:\n", - " return [0.0,0.0,0.0]\n", - " f1= 2*(precision*recall)/(precision+recall) \n", - " return [f1,precision,recall]" - ] - }, - { - "cell_type": "markdown", - "id": "02f15be1-a4be-4348-b747-de4834850baa", - "metadata": {}, - "source": [ - "### Normal Model Inference" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4c23ddde-74ba-43a6-8f0b-8f887303fa83", - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow as tf\n", - "import numpy as np\n", - "from transformers import AutoTokenizer, AlbertForQuestionAnswering\n", - "import torch\n", - "tokenizer = AutoTokenizer.from_pretrained(\"twmkn9/albert-base-v2-squad2\")\n", - "model = AlbertForQuestionAnswering.from_pretrained(\"twmkn9/albert-base-v2-squad2\")\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "question_answer={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"pt\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " outputs = model(**inputs)\n", - " answer_start_index = int(tf.math.argmax(outputs.start_logits.detach().numpy(), axis=-1)[0])\n", - " answer_end_index = int(tf.math.argmax(outputs.end_logits.detach().numpy(), axis=-1)[0])\n", - " \n", - " predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]\n", - " predicted_answer=tokenizer.decode(predict_answer_tokens)\n", - " #print(question,tokenizer.decode(predict_answer_tokens, skip_special_tokens=True))\n", - " question_answer[question]=predicted_answer\n", - " f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " f1_scores.append(f1)\n", - " precision_scores.append(precision)\n", - " recall_scores.append(recall)\n", - "\n", - "mean_f1_score=np.mean(f1_scores)\n", - "mean_precision_score=np.mean(precision_scores)\n", - "mean_recall_score=np.mean(recall_scores)\n", - "\n", - "mean_f1_score,mean_recall_score,mean_precision_score" - ] - }, - { - "cell_type": "markdown", - "id": "3c3b41d6-f68a-40b8-9adf-11b9ea47bd5f", - "metadata": {}, - "source": [ - "## Creating Directory on Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "64dd2ea0-286f-4581-a5ca-5c33809fa7d6", - "metadata": {}, - 
"outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "f94a991b-7fb7-41c5-adb2-5f2de930f58e", - "metadata": {}, - "source": [ - "## Pushing All SNPE Lib and Bin folders onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f3104960-f5fd-4a4e-b837-70159d5a5201", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0dc14097-16a4-4fd3-9807-88f45dddbb9e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "markdown", - "id": "05319633-9abe-4390-8531-b15759c28cb4", - "metadata": {}, - "source": [ - "## Pushing all Model Artifacts onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "45e7e2ad-a521-45e1-bf41-cebd4140aa19", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLCFP16 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLCW16A16 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLCFP32 /data/local/tmp/$ONDEVICE_FOLDER \n", - "$DEVICE_SHELL push attention_mask /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push input_ids /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push token_type_ids /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "3986ff07-180c-4420-864a-1d7f2300e36d", - "metadata": {}, - "source": [ - "## Inferencing FP32 Model on CPU Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5ab0ae7f-a97c-4fd6-9595-eb099a0c8572", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=alberta_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"alberta_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list tf_raw_list.txt --set_unconsumed_as_output --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "8338c2cf-0d2d-4f0c-babd-a511dc030902", - "metadata": {}, - "source": [ - "## Inferencing FP16 on DSP Runtime" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "id": "7e328cf9-6193-4419-b25b-9abda8f1be30", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_DSP_FP16\n", - "export OUTPUT_FP_16=alberta_fp16.dlc\n", - "export ONDEVICE_FOLDER=\"alberta_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_FP_16 --input_list tf_raw_list.txt --set_output_tensors start_logits,end_logits --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "6188db51-9e65-4129-94eb-fe57de6ca50f", - "metadata": {}, - "source": [ - "## Inferencing W16A16 on DSP Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9e0bc4a5-3867-4473-aba7-bd395f7bca64", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_DSP_W16A16\n", - "export DLC_W16A16=albertaw16a16_offline.dlc\n", - "export ONDEVICE_FOLDER=\"alberta_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $DLC_W16A16 --input_list tf_raw_list.txt --set_output_tensors start_logits,end_logits --output_dir $OUTPUT_FOLDER --use_dsp --enable_cpu_fallback" - ] - }, - { - "cell_type": "markdown", - "id": "450ec666-2ef5-4538-8678-52c058edd974", - "metadata": {}, - "source": [ - "## Pulling the Output from Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ff04cfb4-e697-45d1-9b8d-1ca5a8472471", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_DSP_W16A16 OUTPUT_DSP_W16A16\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_DSP_FP16 OUTPUT_DSP_FP16\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4bee1f53-76e8-45f7-a64e-8885d32e8e8c", - "metadata": {}, - "outputs": [], - "source": [ - "def func(start_logits,end_logits,inputs):\n", - " answer_start_index = int(tf.math.argmax(start_logits, axis=-1)[0])\n", - " answer_end_index = int(tf.math.argmax(end_logits, axis=-1)[0])\n", - " predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]\n", - " return tokenizer.decode(predict_answer_tokens)" - ] - }, - { - "cell_type": "markdown", - "id": "176ecd52-b267-480b-8562-e0bd787cfaa5", - "metadata": {}, - "source": [ - "## Comparing Accuracy of FP32 Vs FP16" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4818f105-ba53-4565-9f09-d9a0b04b2619", - "metadata": {}, - "outputs": [], - "source": [ - "import glob\n", - "import tensorflow as tf\n", - "import os\n", - "folder = 
[\"OUTPUT_32b_CPU\",\"OUTPUT_DSP_FP16\"]\n", - "dlc_type = [\"fp32\",\"fp16\"]\n", - "data=[]\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "for j in range(0,2):\n", - " print(\"-----------------------\",folder[j],\"-----------------------------\")\n", - " for result_path in glob.glob(os.path.join(folder[j], '*')):\n", - " if \".log\" not in result_path:\n", - " start_logits = np.fromfile(result_path+'/start_logits.raw', dtype=\"float32\")\n", - " end_logits=np.fromfile(result_path+'/end_logits.raw', dtype=\"float32\")\n", - " start_logits=start_logits.reshape((1,384))\n", - " end_logits=end_logits.reshape((1,384))\n", - " question,inputs,answer,text=question_token[int(result_path.split(\"/\")[1].split(\"_\")[1])]\n", - " predicted_answer=func(start_logits,end_logits,inputs)\n", - " data.append({\"Model_Type\":dlc_type[j],\"question\":question,\"predicted_answer\":predicted_answer,\"Actual Model Answer\":question_answer[question],\"answer\":answer,\"context\":text})\n", - " #f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " #f1_scores.append(f1)\n", - " #precision_scores.append(precision)\n", - " #recall_scores.append(recall)\n", - " \n", - " mean_f1_score=np.mean(f1_scores)\n", - " mean_precision_score=np.mean(precision_scores)\n", - " mean_recall_score=np.mean(recall_scores)\n", - " \n", - " print(\"F1_Score:\",mean_f1_score,\"Recall:\",mean_recall_score,\"Precision:\",mean_precision_score)\n", - "data=pd.DataFrame(data)\n", - "data.head(40)" - ] - }, - { - "cell_type": "markdown", - "id": "2140d406-f585-4bcf-bd68-bb1829a7527e", - "metadata": {}, - "source": [ - "## Comparing Accuracy of FP32 Vs W16A16" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e2e0ddd4-38d4-4a04-9d40-82a50f2a3b0f", - "metadata": {}, - "outputs": [], - "source": [ - "import glob\n", - "import tensorflow as tf\n", - "import os\n", - "folder = [\"OUTPUT_32b_CPU\",\"OUTPUT_DSP_W16A16\"]\n", - "dlc_type = [\"fp32\",\"W16A16\"]\n", - "data=[]\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "for j in range(0,2):\n", - " print(\"-----------------------\",folder[j],\"-----------------------------\")\n", - " for result_path in glob.glob(os.path.join(folder[j], '*')):\n", - " if \".log\" not in result_path:\n", - " start_logits = np.fromfile(result_path+'/start_logits.raw', dtype=\"float32\")\n", - " end_logits=np.fromfile(result_path+'/end_logits.raw', dtype=\"float32\")\n", - " start_logits=start_logits.reshape((1,384))\n", - " end_logits=end_logits.reshape((1,384))\n", - " question,inputs,answer,text=question_token[int(result_path.split(\"/\")[1].split(\"_\")[1])]\n", - " predicted_answer=func(start_logits,end_logits,inputs)\n", - " data.append({\"Model_Type\":dlc_type[j],\"question\":question,\"predicted_answer\":predicted_answer,\"Actual Model Answer\":question_answer[question],\"answer\":answer,\"context\":text})\n", - " #f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " #f1_scores.append(f1)\n", - " #precision_scores.append(precision)\n", - " #recall_scores.append(recall)\n", - " \n", - " mean_f1_score=np.mean(f1_scores)\n", - " mean_precision_score=np.mean(precision_scores)\n", - " mean_recall_score=np.mean(recall_scores)\n", - " \n", - " print(\"F1_Score:\",mean_f1_score,\"Recall:\",mean_recall_score,\"Precision:\",mean_precision_score)\n", - "data=pd.DataFrame(data)\n", - "data.head(40)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - 
}, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/05-nlp-nlu/Albert/generating_model.ipynb b/models-for-solutions/05-nlp-nlu/Albert/generating_model.ipynb deleted file mode 100644 index 1aac1a63..00000000 --- a/models-for-solutions/05-nlp-nlu/Albert/generating_model.ipynb +++ /dev/null @@ -1,324 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "fcbf0297-5cff-4ece-a694-35c1c7d4b2a1", - "metadata": {}, - "source": [ - "## Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c8638d99-8a43-4be0-883f-abc09a618da5", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"alberta\"\n", - "os.environ['DLCFP16']=\"models/alberta_fp16.dlc\"\n", - "os.environ['DLCW16A16']=\"models/albertaw16a16_offline.dlc\"\n", - "os.environ['DLCFP32']=\"models/alberta_fp32.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"tf_raw_list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"alberta_device\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"\n", - "os.environ['SNPE_TARGET_DSPARCH']=\"hexagon-v73\"" - ] - }, - { - "cell_type": "markdown", - "id": "e880d159-b9d0-4c3b-8483-8fc9e89720f0", - "metadata": {}, - "source": [ - "## Downloading Data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3f36571c-eb78-4268-94e3-08fb9d8e3f51", - "metadata": {}, - "outputs": [], - "source": [ - "!wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "33f5ac7e-5579-4eb4-8a59-89695ea5205b", - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "import pandas as pd\n", - "data_path=\"dev-v2.0.json\"\n", - "with open(data_path,\"r\") as f:\n", - " squad_data=json.load(f)\n", - "context_qa_triples=[]\n", - "for article in squad_data['data']:\n", - " for paragraph in article['paragraphs']:\n", - " context=paragraph['context']\n", - " for qa in paragraph['qas']:\n", - " question=qa['question']\n", - " if qa['answers']:\n", - " answer=qa['answers'][0]['text']\n", - " elif qa['plausible_answers']:\n", - " plausible_answers=qa['plausible_answers']\n", - " answer=plausible_answers[0]['text']\n", - " else:\n", - " answer=''\n", - "\n", - " context_qa_triples.append({'context':context,'question':question,'answers':answer})\n", - "df=pd.DataFrame(context_qa_triples[:30])\n", - "df.head(3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ccf4f0fe-36ac-400e-867e-7ee69cf31577", - "metadata": {}, - "outputs": [], - "source": [ - "!pip install optimum\n", - "!pip install sentencepiece" - ] - }, - { - "cell_type": "markdown", - "id": "5009eaa9-7bca-4d97-a83c-6ea88df7543c", - "metadata": {}, - "source": [ - "## Converting the Model to ONNX format using optimum" - ] - }, - { - "cell_type": "code", 
- "execution_count": null, - "id": "e7d5ce14-dcd6-4cae-9daa-5f30aeff8cb2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "optimum-cli export onnx --model twmkn9/albert-base-v2-squad2 models/" - ] - }, - { - "cell_type": "markdown", - "id": "18948252-ef6b-4e64-8d18-82a004678976", - "metadata": {}, - "source": [ - "### Getting Model Input name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c1e7f307-d86e-4723-a0c8-9aea6cdad4bf", - "metadata": {}, - "outputs": [], - "source": [ - "import onnxruntime\n", - "model_path='models/model.onnx'\n", - "sess=onnxruntime.InferenceSession(model_path)\n", - "input_layer_names=sess.get_inputs()\n", - "for input_layer in input_layer_names:\n", - " print(input_layer)" - ] - }, - { - "cell_type": "markdown", - "id": "20fb9fb4-9e70-4ab2-bcaa-6f791b13e405", - "metadata": {}, - "source": [ - "## Converting ONNX to FP32 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "920e6c08-0650-4d4f-abb3-42c915d7b24b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc -i models/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o models/alberta_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "f5299ea7-e76d-4e66-8d7f-b5d755d10346", - "metadata": {}, - "source": [ - "### Creating the RAW Files " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b49e7a47-bef2-4ea4-9731-535afd3dc3cf", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "mkdir input_ids\n", - "mkdir attention_mask\n", - "mkdir token_type_ids" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d4b56a51-042f-4d91-857a-c764843e4446", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, AlbertForQuestionAnswering\n", - "import torch\n", - "tokenizer = AutoTokenizer.from_pretrained(\"twmkn9/albert-base-v2-squad2\")\n", - "question_token={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "markdown", - "id": "ceede093-2e38-4820-9c68-f44df5ac0880", - "metadata": {}, - "source": [ - "## Creating List Files" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b5b66906-8321-4765-a02d-c02832b3d143", - "metadata": {}, - "outputs": [], - "source": [ - "total_iter = 30\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "with open(\"tf_raw_list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"input_ids:=input_ids/inp_ids_{}.raw 
attention_mask:=attention_mask/attn_mask_{}.raw token_type_ids:=token_type_ids/token_type_id_{}.raw\\n\".format(i,i,i))" - ] - }, - { - "cell_type": "markdown", - "id": "e629e349-99d1-4d4f-8b9d-d7cecf6110cb", - "metadata": {}, - "source": [ - "## Creating W16A16 Precision Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0a1e2443-c411-4a7c-9dbc-9aae7f9c5f56", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize --input_dlc models/alberta_fp32.dlc --input_list tf_raw_list.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --output_dlc models/albertaw16a16.dlc --enable_htp --htp_socs sm8550 --weights_bitwidth 16 --act_bitwidth 16" - ] - }, - { - "cell_type": "markdown", - "id": "423d36b5-be38-45e4-ab7b-0cb3ce400851", - "metadata": {}, - "source": [ - "## Offline Preparation of W16A16 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2668c0d4-5f65-4722-93ea-9682c9f36c9e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc models/albertaw16a16.dlc --output_dlc models/albertaw16a16_offline.dlc --set_output_tensors start_logits,end_logits" - ] - }, - { - "cell_type": "markdown", - "id": "9ff03b19-3c5f-4e6f-a3ab-69df9bdf4835", - "metadata": {}, - "source": [ - "## Creating FP16 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f20d3288-658b-43af-86f9-8ce79fda9f3b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc models/alberta_fp32.dlc --use_float_io --output_dlc models/alberta_fp16.dlc --set_output_tensors start_logits,end_logits" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "43fd3afb-86f9-410f-ad90-206b43f2e627", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/05-nlp-nlu/BertBase/README.md b/models-for-solutions/05-nlp-nlu/BertBase/README.md deleted file mode 100644 index 9e6231db..00000000 --- a/models-for-solutions/05-nlp-nlu/BertBase/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# "Question Answering" using BertBase - -| Field | Description | -| --- | --- | -| Model Name | BertBase | -| DNN Framwork | ONNX | -| Public Repo | https://huggingface.co/bert-base-uncased | -| Paper | https://arxiv.org/abs/1810.04805 | -| DLC Number of Inputs | 3 | -| DLC Input Ids Dimension | (1,384) | -| DLC Attention Mask Dimension | (1,384) | -| DLC Token Type Ids | (1,384) | -| Pre-Processing | Use Model Specific Tokenizer | -| Post-Processing | Again Use Model Specific Tokenizer to Post Process the Output | - -## Pre-Requisites - -- Qualcomm® Neural Processing SDK setup should be completed by following the guide [here](https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) -- Install onnx v1.6.0. 
Installation instruction can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. -- Install optimum ```pip install optimum```. - -## How to get the model ? - -For this demo, you can directly get the [onnx model](Models/onnx-model/model.onnx) from the directory onnx_model or you can generate it from this [jupyter_notebook](generating_model). - - -## Convert model to DLC - -- Convert the onnx model to DLC with below command. Below, command will also fix the input dimension for the dlc. - -```python -snpe-onnx-to-dlc -i bertbase-onnx-2/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o bert_base2.dlc -``` - -## Quantization of DLC -- Quantization can improve model performance in terms of latency and make the model light weight. -- Before Running this command make sure you've created the raw file and the list.txt -```python -snpe-dlc-graph-prepare --input_dlc bert_base2.dlc --input_list snpe_raw_list.txt --output_dlc bert_base2_int.dlc -``` - -# Accuracy analysis -- To check accuracy please run "accuracy_analyzer.ipynb" a jupyter notebook present in accuracy folder. -- To run any jupyter notebook, run below command. It will generate few links on the screen, pick the link with your machine name on it (host-name) and paste it in any browser. -- Navigate to the notebook ".ipynb" file and simply click that file. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - - - -# References - -1. https://arxiv.org/abs/1810.04805 -2. https://huggingface.co/bert-base-uncased - - -###### *Qualcomm Neural Processing SDK and Snapdragon are products of Qualcomm Technologies, Inc. and/or its subsidiaries.* diff --git a/models-for-solutions/05-nlp-nlu/BertBase/accuracy_analyzer.ipynb b/models-for-solutions/05-nlp-nlu/BertBase/accuracy_analyzer.ipynb deleted file mode 100644 index 86466444..00000000 --- a/models-for-solutions/05-nlp-nlu/BertBase/accuracy_analyzer.ipynb +++ /dev/null @@ -1,512 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "7e06ad8d-9571-488e-a157-4d2d39a2e645", - "metadata": {}, - "source": [ - "## Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "832b37ec-bb78-400a-924a-15a263d55191", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"BertBase\"\n", - "os.environ['DLCFP16']=\"models/BertBase_fp16.dlc\"\n", - "os.environ['DLCW16A16']=\"models/BertBase_w16a16_offline.dlc\"\n", - "os.environ['DLCFP32']=\"models/BertBase_fp32.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"tf_raw_list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"BertBase_device\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. 
example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"\n", - "os.environ['SNPE_TARGET_DSPARCH']=\"hexagon-v73\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a7cfd8bf-128d-4253-9464-4a0591600d92", - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "import pandas as pd\n", - "data_path=\"dev-v2.0.json\"\n", - "with open(data_path,\"r\") as f:\n", - " squad_data=json.load(f)\n", - "context_qa_triples=[]\n", - "for article in squad_data['data']:\n", - " for paragraph in article['paragraphs']:\n", - " context=paragraph['context']\n", - " for qa in paragraph['qas']:\n", - " question=qa['question']\n", - " if qa['answers']:\n", - " answer=qa['answers'][0]['text']\n", - " elif qa['plausible_answers']:\n", - " plausible_answers=qa['plausible_answers']\n", - " answer=plausible_answers[0]['text']\n", - " else:\n", - " answer=''\n", - "\n", - " context_qa_triples.append({'context':context,'question':question,'answers':answer})\n", - "\n", - "df=pd.DataFrame(context_qa_triples[:30])\n", - "df.head(3)" - ] - }, - { - "cell_type": "markdown", - "id": "907bab2a-19b9-4ac5-89c1-c8d1f9157bcf", - "metadata": {}, - "source": [ - "#### F1 Score calculation custom code" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "338db109-9951-4aef-8c43-a712c53ae6b4", - "metadata": {}, - "outputs": [], - "source": [ - "from sklearn.metrics import f1_score\n", - "\n", - "def f1_scores_custom(prediction,ground_truth):\n", - "\n", - " prediction_tokens=prediction.lower().split()\n", - " ground_truth_tokens=ground_truth.lower().split()\n", - "\n", - " common_tokens=[token for token in prediction_tokens if token in ground_truth_tokens]\n", - " \n", - " if (len(prediction_tokens)==0 and len(ground_truth_tokens)==0):\n", - " return [1.0,1.0,1.0]\n", - " elif len(prediction_tokens)==0 or len(ground_truth_tokens)==0:\n", - " return [0.0,0.0,0.0]\n", - " precision=len(common_tokens)/len(prediction_tokens)\n", - " recall=len(common_tokens)/len(ground_truth_tokens)\n", - "\n", - " if precision+recall==0:\n", - " return [0.0,0.0,0.0]\n", - " f1= 2*(precision*recall)/(precision+recall)\n", - " \n", - " return [f1,precision,recall]" - ] - }, - { - "cell_type": "markdown", - "id": "63ee23d0-5106-487d-8d83-269ba2edb850", - "metadata": {}, - "source": [ - "## Normal Model Inference" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e6e7b5d0-fded-4f32-8a70-d56b00aad1a8", - "metadata": {}, - "outputs": [], - "source": [ - "from transformers import AutoTokenizer, BertForQuestionAnswering\n", - "import torch\n", - "import numpy as np\n", - "import tensorflow as tf\n", - "tokenizer = AutoTokenizer.from_pretrained(\"deepset/bert-base-cased-squad2\")\n", - "model = BertForQuestionAnswering.from_pretrained(\"deepset/bert-base-cased-squad2\")\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "question_answer={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"pt\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " outputs = model(**inputs)\n", - " answer_start_index = outputs.start_logits.argmax()\n", - " answer_end_index = outputs.end_logits.argmax()\n", - " \n", - " predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]\n", - " 
predicted_answer=tokenizer.decode(predict_answer_tokens)\n", - " question_answer[question]=predicted_answer\n", - " #print(\"predicted_answer\",predicted_answer,\"Actual Answer\",answer)\n", - " f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " f1_scores.append(f1)\n", - " precision_scores.append(precision)\n", - " recall_scores.append(recall)\n", - "\n", - "mean_f1_score=np.mean(f1_scores)\n", - "mean_precision_score=np.mean(precision_scores)\n", - "mean_recall_score=np.mean(recall_scores)\n", - "\n", - "mean_f1_score,mean_recall_score,mean_precision_score" - ] - }, - { - "cell_type": "markdown", - "id": "2115b4b1-2504-4ffa-aeff-2cd9c9a9cfbf", - "metadata": {}, - "source": [ - "## Creating Directory on Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d200f6e7-4e6d-4fca-a055-86f0b9b9225f", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "be9fcae6-b813-4d8d-9094-ffbc5f972c73", - "metadata": {}, - "source": [ - "## Pushing All SNPE Lib and Bin folders onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "848acaaa-e417-4ae1-afec-c27eae28b3d0", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "228966f0-8276-41f9-b48d-86710e4346b2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "markdown", - "id": "bf983696-3ac4-438b-b008-c5da4fa8b204", - "metadata": {}, - "source": [ - "## Pushing all Model Artifacts onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2879ad38-cd38-4636-9f08-fad1adc4713c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLCFP16 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLCW16A16 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLCFP32 /data/local/tmp/$ONDEVICE_FOLDER \n", - "$DEVICE_SHELL push attention_mask /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push input_ids /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push token_type_ids /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "d5786375-0eb7-4139-85c2-9a6d6b7f201e", - "metadata": {}, - "source": [ - "## Inferencing FP32 Model on CPU Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": 
"813f5df9-5e2c-4046-a5f4-7d4ce372b5ff", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=BertBase_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"BertBase_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list tf_raw_list.txt --set_unconsumed_as_output --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "fc1451fb-7276-4936-9053-37d8412919a9", - "metadata": {}, - "source": [ - "## Inferencing FP16 on DSP Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f8db3059-09d1-4478-9549-3c53da248b82", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_DSP_FP16\n", - "export OUTPUT_FP_16=BertBase_fp16.dlc\n", - "export ONDEVICE_FOLDER=\"BertBase_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_FP_16 --input_list tf_raw_list.txt --set_output_tensors start_logits,end_logits --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "82704c1d-88a0-45f6-a39f-0645267945b2", - "metadata": {}, - "source": [ - "## Inferencing W16A16 on DSP Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d79b4e1a-4a0b-4366-a8d4-3130f6df4246", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_DSP_W16A16\n", - "export DLC_W16A16=BertBase_w16a16_offline.dlc\n", - "export ONDEVICE_FOLDER=\"BertBase_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $DLC_W16A16 --input_list tf_raw_list.txt --set_output_tensors start_logits,end_logits --output_dir $OUTPUT_FOLDER --use_dsp --enable_cpu_fallback" - ] - }, - { - "cell_type": "markdown", - "id": "4f273711-6ca7-461f-9ea6-ee94bb57aac1", - "metadata": {}, - "source": [ - "## Pulling the Output from Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0b0e9875-1118-4d03-b93d-2d4c08b4b69f", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_DSP_W16A16 OUTPUT_DSP_W16A16\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_DSP_FP16 OUTPUT_DSP_FP16\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "id": "24af9313-f180-4e51-b0f3-0c16457ff8f6", - "metadata": {}, - "outputs": [], - "source": [ - "def func(start_logits,end_logits,inputs):\n", - " answer_start_index = int(tf.math.argmax(start_logits, axis=-1)[0])\n", - " answer_end_index = int(tf.math.argmax(end_logits, axis=-1)[0])\n", - " predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]\n", - " return tokenizer.decode(predict_answer_tokens)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fd6b21e9-960d-4476-87a2-ccc95d19f236", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, AlbertForQuestionAnswering\n", - "import torch\n", - "tokenizer = AutoTokenizer.from_pretrained(\"deepset/bert-base-cased-squad2\")\n", - "question_token={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "markdown", - "id": "b011316e-02bc-404c-a85a-f1ed5e77464b", - "metadata": {}, - "source": [ - "## Comparing Accuracy of FP32 Vs FP16" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0cc05a29-11c4-49bf-8094-f8b99f286923", - "metadata": {}, - "outputs": [], - "source": [ - "import glob\n", - "import tensorflow as tf\n", - "import os\n", - "folder = [\"OUTPUT_32b_CPU\",\"OUTPUT_DSP_FP16\"]\n", - "dlc_type = [\"fp32\",\"fp16\"]\n", - "data=[]\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "for j in range(0,2):\n", - " print(\"-----------------------\",folder[j],\"-----------------------------\")\n", - " for result_path in glob.glob(os.path.join(folder[j], '*')):\n", - " if \".log\" not in result_path:\n", - " start_logits = np.fromfile(result_path+'/start_logits.raw', dtype=\"float32\")\n", - " end_logits=np.fromfile(result_path+'/end_logits.raw', dtype=\"float32\")\n", - " start_logits=start_logits.reshape((1,384))\n", - " end_logits=end_logits.reshape((1,384))\n", - " question,inputs,answer,text=question_token[int(result_path.split(\"/\")[1].split(\"_\")[1])]\n", - " predicted_answer=func(start_logits,end_logits,inputs)\n", - " data.append({\"Model_Type\":dlc_type[j],\"question\":question,\"predicted_answer\":predicted_answer,\"Actual Model Answer\":question_answer[question],\"answer\":answer,\"context\":text})\n", - " #f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " #f1_scores.append(f1)\n", - " #precision_scores.append(precision)\n", - " #recall_scores.append(recall)\n", - " \n", - " mean_f1_score=np.mean(f1_scores)\n", - " mean_precision_score=np.mean(precision_scores)\n", - " mean_recall_score=np.mean(recall_scores)\n", - " \n", - " 
print(\"F1_Score:\",mean_f1_score,\"Recall:\",mean_recall_score,\"Precision:\",mean_precision_score)\n", - "data=pd.DataFrame(data)\n", - "data.head(40)" - ] - }, - { - "cell_type": "markdown", - "id": "3a9ac4c8-37aa-4de5-887f-63db1e073987", - "metadata": {}, - "source": [ - "## Comparing Accuracy of FP32 Vs W16A16" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0f980b59-da79-4265-b627-8694fc6ab764", - "metadata": {}, - "outputs": [], - "source": [ - "import glob\n", - "import tensorflow as tf\n", - "import os\n", - "folder = [\"OUTPUT_32b_CPU\",\"OUTPUT_DSP_W16A16\"]\n", - "dlc_type = [\"fp32\",\"W16A16\"]\n", - "data=[]\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "for j in range(0,2):\n", - " print(\"-----------------------\",folder[j],\"-----------------------------\")\n", - " for result_path in glob.glob(os.path.join(folder[j], '*')):\n", - " if \".log\" not in result_path:\n", - " start_logits = np.fromfile(result_path+'/start_logits.raw', dtype=\"float32\")\n", - " end_logits=np.fromfile(result_path+'/end_logits.raw', dtype=\"float32\")\n", - " start_logits=start_logits.reshape((1,384))\n", - " end_logits=end_logits.reshape((1,384))\n", - " question,inputs,answer,text=question_token[int(result_path.split(\"/\")[1].split(\"_\")[1])]\n", - " predicted_answer=func(start_logits,end_logits,inputs)\n", - " data.append({\"Model_Type\":dlc_type[j],\"question\":question,\"predicted_answer\":predicted_answer,\"Actual Model Answer\":question_answer[question],\"answer\":answer,\"context\":text})\n", - " #f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " #f1_scores.append(f1)\n", - " #precision_scores.append(precision)\n", - " #recall_scores.append(recall)\n", - " \n", - " mean_f1_score=np.mean(f1_scores)\n", - " mean_precision_score=np.mean(precision_scores)\n", - " mean_recall_score=np.mean(recall_scores)\n", - " \n", - " print(\"F1_Score:\",mean_f1_score,\"Recall:\",mean_recall_score,\"Precision:\",mean_precision_score)\n", - "data=pd.DataFrame(data)\n", - "data.head(40)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "38f4ab80-7dba-4eb1-b202-c1eb19112cab", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/05-nlp-nlu/BertBase/generating_model.ipynb b/models-for-solutions/05-nlp-nlu/BertBase/generating_model.ipynb deleted file mode 100644 index 3accc7d2..00000000 --- a/models-for-solutions/05-nlp-nlu/BertBase/generating_model.ipynb +++ /dev/null @@ -1,324 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "40e7c9d5-24c2-450e-b2e0-f4ce791d30e4", - "metadata": {}, - "source": [ - "## Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ab6f9afe-a5af-4630-870b-0b87c5897e04", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"BertBase\"\n", - 
"os.environ['DLCFP16']=\"models/BertBase_fp16.dlc\"\n", - "os.environ['DLCW16A16']=\"models/BertBasew16a16_offline.dlc\"\n", - "os.environ['DLCFP32']=\"models/BertBase_fp32.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"tf_raw_list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"BertBase_device\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"\n", - "os.environ['SNPE_TARGET_DSPARCH']=\"hexagon-v73\"" - ] - }, - { - "cell_type": "markdown", - "id": "f2beb07a-5168-4291-9f19-4e9da6988b26", - "metadata": {}, - "source": [ - "## Downloading Data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4dd44e48-e9e0-4173-99b1-06d0b03fdbe2", - "metadata": {}, - "outputs": [], - "source": [ - "!wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "37645480-a3b6-49ab-a4a5-8ded18611567", - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "import pandas as pd\n", - "data_path=\"dev-v2.0.json\"\n", - "with open(data_path,\"r\") as f:\n", - " squad_data=json.load(f)\n", - "context_qa_triples=[]\n", - "for article in squad_data['data']:\n", - " for paragraph in article['paragraphs']:\n", - " context=paragraph['context']\n", - " for qa in paragraph['qas']:\n", - " question=qa['question']\n", - " if qa['answers']:\n", - " answer=qa['answers'][0]['text']\n", - " elif qa['plausible_answers']:\n", - " plausible_answers=qa['plausible_answers']\n", - " answer=plausible_answers[0]['text']\n", - " else:\n", - " answer=''\n", - "\n", - " context_qa_triples.append({'context':context,'question':question,'answers':answer})\n", - "df=pd.DataFrame(context_qa_triples[:30])\n", - "df.head(3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "23ba1494-b4ea-4e4e-869d-e17dd157cb10", - "metadata": {}, - "outputs": [], - "source": [ - "!pip install optimum\n", - "!pip install sentencepiece" - ] - }, - { - "cell_type": "markdown", - "id": "f273656f-7fe6-4be5-9425-797d8424a948", - "metadata": {}, - "source": [ - "## Converting the Model to ONNX format using optimum" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b826be45-d109-452e-bf86-5cf2167de506", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "optimum-cli export onnx --model deepset/bert-base-cased-squad2 models/" - ] - }, - { - "cell_type": "markdown", - "id": "9e7b1699-1107-45ef-87a5-2aa7aff9b6bc", - "metadata": {}, - "source": [ - "## Getting the Model Input Names" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5de276c8-fe96-4b41-971f-07484e61bb8e", - "metadata": {}, - "outputs": [], - "source": [ - "import onnxruntime\n", - "model_path='models/model.onnx'\n", - "sess=onnxruntime.InferenceSession(model_path)\n", - "input_layer_names=sess.get_inputs()\n", - "for input_layer in input_layer_names:\n", - " print(input_layer)" - ] - }, - { - "cell_type": "markdown", - "id": "980d964c-c65f-4295-b69f-1b80a383d032", - "metadata": {}, - "source": [ - "## Converting ONNX to FP32 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2deb064b-4adf-4e48-b463-f32fe362f02f", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc -i 
models/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o models/BertBase_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "77e7dfd5-6d82-451a-8c50-2d547a04bf86", - "metadata": {}, - "source": [ - "### Creating the RAW Files " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d99f1e7f-e748-4571-86f8-557f75ac0e73", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "mkdir input_ids\n", - "mkdir attention_mask\n", - "mkdir token_type_ids" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "996d4098-24a3-4de0-937b-c6dc2388497e", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, AlbertForQuestionAnswering\n", - "import torch\n", - "tokenizer = AutoTokenizer.from_pretrained(\"deepset/bert-base-cased-squad2\")\n", - "question_token={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "markdown", - "id": "f6160f74-196d-4608-b739-b6cd7f6cdd01", - "metadata": {}, - "source": [ - "## Creating List Files" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "570454f0-a579-4413-be31-239626ec7f9e", - "metadata": {}, - "outputs": [], - "source": [ - "total_iter = 30\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "with open(\"tf_raw_list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"input_ids:=input_ids/inp_ids_{}.raw attention_mask:=attention_mask/attn_mask_{}.raw token_type_ids:=token_type_ids/token_type_id_{}.raw\\n\".format(i,i,i))" - ] - }, - { - "cell_type": "markdown", - "id": "564c4fb3-3c14-435d-9762-2f26b3a0634a", - "metadata": {}, - "source": [ - "## Creating W16A16 Precision Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "21de7b9f-ba44-4dde-8f3f-34fc7dac1c33", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize --input_dlc models/BertBase_fp32.dlc --input_list tf_raw_list.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --output_dlc models/BertBase_w16a16.dlc --enable_htp --htp_socs sm8550 --weights_bitwidth 16 --act_bitwidth 16" - ] - }, - { - "cell_type": "markdown", - "id": "be26a9a3-2aac-480d-9e24-a9bc413a3b54", - "metadata": {}, - "source": [ - "## Offline Preparation of W16A16 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d7a4324f-90db-432a-9cdd-eac1decbf35d", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc 
models/BertBase_w16a16.dlc --output_dlc models/BertBase_w16a16_offline.dlc --set_output_tensors start_logits,end_logits" - ] - }, - { - "cell_type": "markdown", - "id": "b07eb235-c10e-4cb2-9c24-f97d2a1483b3", - "metadata": {}, - "source": [ - "## Creating FP16 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4a04077d-ea0c-4ece-a54a-23de099a6522", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc models/BertBase_fp32.dlc --use_float_io --output_dlc models/BertBase_fp16.dlc --set_output_tensors start_logits,end_logits" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7f5c5a2a-2af0-47f7-af58-ad476f482861", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/05-nlp-nlu/BertLarge/README.md b/models-for-solutions/05-nlp-nlu/BertLarge/README.md deleted file mode 100644 index 53256a98..00000000 --- a/models-for-solutions/05-nlp-nlu/BertLarge/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# "Question Answering" using BertLarge - -| Field | Description | -| --- | --- | -| Model Name | BertLarge | -| DNN Framwork | ONNX | -| Public Repo | https://huggingface.co/bert-large-uncased | -| Paper | https://arxiv.org/abs/1810.04805 | -| DLC Number of Inputs | 3 | -| DLC Input Ids Dimension | (1,384) | -| DLC Attention Mask Dimension | (1,384) | -| DLC Token Type Ids | (1,384) | -| Pre-Processing | Use Model Specific Tokenizer | -| Post-Processing | Again Use Model Specific Tokenizer to Post Process the Output | - -## Pre-Requisites - -- Qualcomm® Neural Processing SDK setup should be completed by following the guide [here](https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) -- Install onnx v1.6.0. Installation instruction can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. -- Install optimum ```pip install optimum```. - -## How to get the model ? - -For this demo, you can directly get the [onnx model](Models/onnx-model/model.onnx) from the directory onnx_model or you can generate it from this [jupyter_notebook](generating_model). - - -## Convert model to DLC - -- Convert the onnx model to DLC with below command. Below, command will also fix the input dimension for the dlc. - -```python -snpe-onnx-to-dlc -i bertlarge-onnx-3/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o bert_large3.dlc -``` - -## Quantization of DLC -- Quantization can improve model performance in terms of latency and make the model light weight. -- Before Running this command make sure you've created the raw file and the list.txt -```python -snpe-dlc-graph-prepare --input_dlc bert_large3.dlc --input_list snpe_raw_list.txt --output_dlc bert_large3_int.dlc -``` - -# Accuracy analysis -- To check accuracy please run "accuracy_analyzer.ipynb" a jupyter notebook present in accuracy folder. 
-- To run any jupyter notebook, run below command. It will generate few links on the screen, pick the link with your machine name on it (host-name) and paste it in any browser. -- Navigate to the notebook ".ipynb" file and simply click that file. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - - - -# References - -1. https://arxiv.org/abs/1810.04805 -2. https://huggingface.co/bert-large-uncased - - -###### *Qualcomm Neural Processing SDK and Snapdragon are products of Qualcomm Technologies, Inc. and/or its subsidiaries.* diff --git a/models-for-solutions/05-nlp-nlu/BertLarge/accuracy_analyzer.ipynb b/models-for-solutions/05-nlp-nlu/BertLarge/accuracy_analyzer.ipynb deleted file mode 100644 index 366091bb..00000000 --- a/models-for-solutions/05-nlp-nlu/BertLarge/accuracy_analyzer.ipynb +++ /dev/null @@ -1,499 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "442bb43b-c36c-4db9-be5a-38ff4517c55a", - "metadata": {}, - "source": [ - "## Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0d700ca6-7af0-45e0-ad70-7bb06a9b13eb", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"BertLarge\"\n", - "os.environ['DLCFP16']=\"models/BertLarge_fp16.dlc\"\n", - "os.environ['DLCW16A16']=\"models/BertLarge_w16a16_offline.dlc\"\n", - "os.environ['DLCFP32']=\"models/BertLarge_fp32.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"tf_raw_list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"BertLarge_device\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. 
example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"\n", - "os.environ['SNPE_TARGET_DSPARCH']=\"hexagon-v73\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "74b7b340-ee40-46b2-9820-2ad431af38d3", - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "import pandas as pd\n", - "data_path=\"dev-v2.0.json\"\n", - "with open(data_path,\"r\") as f:\n", - " squad_data=json.load(f)\n", - "context_qa_triples=[]\n", - "for article in squad_data['data']:\n", - " for paragraph in article['paragraphs']:\n", - " context=paragraph['context']\n", - " for qa in paragraph['qas']:\n", - " question=qa['question']\n", - " if qa['answers']:\n", - " answer=qa['answers'][0]['text']\n", - " elif qa['plausible_answers']:\n", - " plausible_answers=qa['plausible_answers']\n", - " answer=plausible_answers[0]['text']\n", - " else:\n", - " answer=''\n", - "\n", - " context_qa_triples.append({'context':context,'question':question,'answers':answer})\n", - "\n", - "df=pd.DataFrame(context_qa_triples[:30])\n", - "df.head(3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "41a7b452-d0e8-40b4-811c-0036cbf8ecf3", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, AlbertForQuestionAnswering\n", - "import torch\n", - "tokenizer = AutoTokenizer.from_pretrained(\"deepset/bert-large-uncased-whole-word-masking-squad2\")\n", - "question_token={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "markdown", - "id": "0d847087-c1b5-4bb7-8e9d-bcd37e1c436e", - "metadata": {}, - "source": [ - "#### F1 Score calculation custom code" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a9bd30d2-99a6-45a6-a62e-ec1fdde9e03b", - "metadata": {}, - "outputs": [], - "source": [ - "from sklearn.metrics import f1_score\n", - "def f1_scores_custom(prediction,ground_truth):\n", - " prediction_tokens=prediction.lower().split()\n", - " ground_truth_tokens=ground_truth.lower().split()\n", - " common_tokens=[token for token in prediction_tokens if token in ground_truth_tokens] \n", - " if (len(prediction_tokens)==0 and len(ground_truth_tokens)==0):\n", - " return [1.0,1.0,1.0]\n", - " elif len(prediction_tokens)==0 or len(ground_truth_tokens)==0:\n", - " return [0.0,0.0,0.0]\n", - " precision=len(common_tokens)/len(prediction_tokens)\n", - " recall=len(common_tokens)/len(ground_truth_tokens)\n", - " if precision+recall==0:\n", - " return [0.0,0.0,0.0]\n", - " f1= 2*(precision*recall)/(precision+recall) \n", - " return [f1,precision,recall]" - ] - }, - { - 
"cell_type": "markdown", - "id": "c78f3828-72ec-498c-b1b2-c132dc97da4f", - "metadata": {}, - "source": [ - "### Normal Model Inference" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6a924423-5b09-4634-bc98-bd37e3e05494", - "metadata": {}, - "outputs": [], - "source": [ - "from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline\n", - "import numpy as np\n", - "\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "\n", - "model_name = \"deepset/bert-large-uncased-whole-word-masking-squad2\"\n", - "model = AutoModelForQuestionAnswering.from_pretrained(model_name)\n", - "tokenizer = AutoTokenizer.from_pretrained(model_name)\n", - "question_answer={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"pt\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " outputs = model(**inputs)\n", - " answer_start_index = outputs.start_logits.argmax()\n", - " answer_end_index = outputs.end_logits.argmax()\n", - " \n", - " predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]\n", - " predicted_answer=tokenizer.decode(predict_answer_tokens)\n", - " question_answer[question]=predicted_answer\n", - " #print(\"predicted_answer\",predicted_answer,\"Actual Answer\",answer)\n", - " f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " f1_scores.append(f1)\n", - " precision_scores.append(precision)\n", - " recall_scores.append(recall)\n", - "\n", - "mean_f1_score=np.mean(f1_scores)\n", - "mean_precision_score=np.mean(precision_scores)\n", - "mean_recall_score=np.mean(recall_scores)\n", - "\n", - "mean_f1_score,mean_recall_score,mean_precision_score" - ] - }, - { - "cell_type": "markdown", - "id": "0a7b3aef-7f81-46fa-afc0-6623418be5a1", - "metadata": {}, - "source": [ - "## Creating Directory on Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6e4f421a-c018-4b58-9ca0-d24a03617858", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "c8a40a57-88d0-4a4e-8357-3bd41765a3ec", - "metadata": {}, - "source": [ - "## Pushing All SNPE Lib and Bin folders onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "125b1307-0a37-4886-8460-ae839b091779", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a95950ca-1747-4562-b325-c2520f02f592", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export 
DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "markdown", - "id": "77bd867e-64ea-4f48-9ea3-61592bb3c20a", - "metadata": {}, - "source": [ - "## Pushing all Model Artifacts onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8fc7adef-b53b-4e26-94dd-3f054772e308", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLCFP16 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLCW16A16 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLCFP32 /data/local/tmp/$ONDEVICE_FOLDER \n", - "$DEVICE_SHELL push attention_mask /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push input_ids /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push token_type_ids /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "46509a5c-0c86-467b-8d86-595d04a4d8d9", - "metadata": {}, - "source": [ - "## Inferencing FP32 Model on CPU Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "04bedb9c-22a6-4ae3-8c28-9f0d8086f59f", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=BertLarge_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"BertLarge_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list tf_raw_list.txt --set_unconsumed_as_output --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "9481bba9-e4ed-4e2f-b539-fee6d047cc1f", - "metadata": {}, - "source": [ - "## Inferencing FP16 on DSP Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "037aea4c-73b6-4170-90f7-6072b27d0cd2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_DSP_FP16\n", - "export OUTPUT_FP_16=BertLarge_fp16.dlc\n", - "export ONDEVICE_FOLDER=\"BertLarge_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_FP_16 --input_list tf_raw_list.txt --set_output_tensors start_logits,end_logits --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "e73b9c0a-6efc-4499-a3bb-c8ebed097635", - "metadata": {}, - "source": [ - "## Inferencing W16A16 on DSP Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ba3feae2-fde4-44de-a997-876c88bb1ae1", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export 
PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_DSP_W16A16\n", - "export DLC_W16A16=BertLarge_w16a16_offline.dlc\n", - "export ONDEVICE_FOLDER=\"BertLarge_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $DLC_W16A16 --input_list tf_raw_list.txt --set_output_tensors start_logits,end_logits --output_dir $OUTPUT_FOLDER --use_dsp --enable_cpu_fallback" - ] - }, - { - "cell_type": "markdown", - "id": "cd227e6b-f70b-402f-99d6-f30ee3651f25", - "metadata": {}, - "source": [ - "## Pulling the Output from Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0b4510ae-1e31-4af2-9fb4-93984903e2d2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_DSP_W16A16 OUTPUT_DSP_W16A16\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_DSP_FP16 OUTPUT_DSP_FP16\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "55ca1266-8d89-4693-919e-51c0f5778e63", - "metadata": {}, - "outputs": [], - "source": [ - "def func(start_logits,end_logits,inputs):\n", - " answer_start_index = int(tf.math.argmax(start_logits, axis=-1)[0])\n", - " answer_end_index = int(tf.math.argmax(end_logits, axis=-1)[0])\n", - " predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]\n", - " return tokenizer.decode(predict_answer_tokens)" - ] - }, - { - "cell_type": "markdown", - "id": "fa138dd8-85b8-48dd-bd1b-915cc2f21a25", - "metadata": {}, - "source": [ - "## Comparing Accuracy of FP32 Vs FP16" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c01e7692-5850-410d-9889-e9d62cc22255", - "metadata": {}, - "outputs": [], - "source": [ - "import glob\n", - "import tensorflow as tf\n", - "import os\n", - "folder = [\"OUTPUT_32b_CPU\",\"OUTPUT_DSP_FP16\"]\n", - "dlc_type = [\"fp32\",\"fp16\"]\n", - "data=[]\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "for j in range(0,2):\n", - " print(\"-----------------------\",folder[j],\"-----------------------------\")\n", - " for result_path in glob.glob(os.path.join(folder[j], '*')):\n", - " if \".log\" not in result_path:\n", - " start_logits = np.fromfile(result_path+'/start_logits.raw', dtype=\"float32\")\n", - " end_logits=np.fromfile(result_path+'/end_logits.raw', dtype=\"float32\")\n", - " start_logits=start_logits.reshape((1,384))\n", - " end_logits=end_logits.reshape((1,384))\n", - " question,inputs,answer,text=question_token[int(result_path.split(\"/\")[1].split(\"_\")[1])]\n", - " predicted_answer=func(start_logits,end_logits,inputs)\n", - " data.append({\"Model_Type\":dlc_type[j],\"question\":question,\"predicted_answer\":predicted_answer,\"Actual Model Answer\":question_answer[question],\"answer\":answer,\"context\":text})\n", - " #f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " #f1_scores.append(f1)\n", - " #precision_scores.append(precision)\n", - " #recall_scores.append(recall)\n", - " \n", - " mean_f1_score=np.mean(f1_scores)\n", - " mean_precision_score=np.mean(precision_scores)\n", - " mean_recall_score=np.mean(recall_scores)\n", - " \n", - " 
print(\"F1_Score:\",mean_f1_score,\"Recall:\",mean_recall_score,\"Precision:\",mean_precision_score)\n", - "data=pd.DataFrame(data)\n", - "data.head(40)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "55c1c67b-9cb5-4048-86ca-aa73fc514abf", - "metadata": {}, - "outputs": [], - "source": [ - "import glob\n", - "import tensorflow as tf\n", - "import os\n", - "folder = [\"OUTPUT_32b_CPU\",\"OUTPUT_DSP_W16A16\"]\n", - "dlc_type = [\"fp32\",\"W16A16\"]\n", - "data=[]\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "for j in range(0,2):\n", - " print(\"-----------------------\",folder[j],\"-----------------------------\")\n", - " for result_path in glob.glob(os.path.join(folder[j], '*')):\n", - " if \".log\" not in result_path:\n", - " start_logits = np.fromfile(result_path+'/start_logits.raw', dtype=\"float32\")\n", - " end_logits=np.fromfile(result_path+'/end_logits.raw', dtype=\"float32\")\n", - " start_logits=start_logits.reshape((1,384))\n", - " end_logits=end_logits.reshape((1,384))\n", - " question,inputs,answer,text=question_token[int(result_path.split(\"/\")[1].split(\"_\")[1])]\n", - " predicted_answer=func(start_logits,end_logits,inputs)\n", - " data.append({\"Model_Type\":dlc_type[j],\"question\":question,\"predicted_answer\":predicted_answer,\"Actual Model Answer\":question_answer[question],\"answer\":answer,\"context\":text})\n", - " #f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " #f1_scores.append(f1)\n", - " #precision_scores.append(precision)\n", - " #recall_scores.append(recall)\n", - " \n", - " mean_f1_score=np.mean(f1_scores)\n", - " mean_precision_score=np.mean(precision_scores)\n", - " mean_recall_score=np.mean(recall_scores)\n", - " \n", - " print(\"F1_Score:\",mean_f1_score,\"Recall:\",mean_recall_score,\"Precision:\",mean_precision_score)\n", - "data=pd.DataFrame(data)\n", - "data.head(40)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c384b111-f3d5-415e-9905-92e8d0823d8e", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/05-nlp-nlu/BertLarge/generating_model.ipynb b/models-for-solutions/05-nlp-nlu/BertLarge/generating_model.ipynb deleted file mode 100644 index bcfa9269..00000000 --- a/models-for-solutions/05-nlp-nlu/BertLarge/generating_model.ipynb +++ /dev/null @@ -1,324 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "3f16e4cb-bf98-4d27-a438-cd1ebcc49fb3", - "metadata": {}, - "source": [ - "## Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d046e07a-02ac-4113-85cc-0b2a5544fd27", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"BertLarge\"\n", - "os.environ['DLCFP16']=\"models/BertLarge_fp16.dlc\"\n", - "os.environ['DLCW16A16']=\"models/BertLargew16a16_offline.dlc\"\n", - 
"os.environ['DLCFP32']=\"models/BertLarge_fp32.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"tf_raw_list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"BertLarge_device\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"\n", - "os.environ['SNPE_TARGET_DSPARCH']=\"hexagon-v73\"" - ] - }, - { - "cell_type": "markdown", - "id": "2ed8acb3-0f58-4ddd-8520-5ab33b8dc2e1", - "metadata": {}, - "source": [ - "## Downloading Data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d1cdd8d5-3fae-4797-b08e-2c940d06fffc", - "metadata": {}, - "outputs": [], - "source": [ - "!wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3f08da31-a614-4aef-8324-0b46dcd81e90", - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "import pandas as pd\n", - "data_path=\"dev-v2.0.json\"\n", - "with open(data_path,\"r\") as f:\n", - " squad_data=json.load(f)\n", - "context_qa_triples=[]\n", - "for article in squad_data['data']:\n", - " for paragraph in article['paragraphs']:\n", - " context=paragraph['context']\n", - " for qa in paragraph['qas']:\n", - " question=qa['question']\n", - " if qa['answers']:\n", - " answer=qa['answers'][0]['text']\n", - " elif qa['plausible_answers']:\n", - " plausible_answers=qa['plausible_answers']\n", - " answer=plausible_answers[0]['text']\n", - " else:\n", - " answer=''\n", - "\n", - " context_qa_triples.append({'context':context,'question':question,'answers':answer})\n", - "df=pd.DataFrame(context_qa_triples[:30])\n", - "df.head(3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "089ad56c-5058-4962-8964-c0308797c96f", - "metadata": {}, - "outputs": [], - "source": [ - "!pip install optimum\n", - "!pip install sentencepiece" - ] - }, - { - "cell_type": "markdown", - "id": "ca76068b-fe8a-4453-9c0b-09bbff0c37fb", - "metadata": {}, - "source": [ - "## Converting the Model to ONNX format using optimum" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7542d8f8-1110-43f6-a426-2073d81f227d", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "optimum-cli export onnx --model deepset/bert-large-uncased-whole-word-masking-squad2 models/" - ] - }, - { - "cell_type": "markdown", - "id": "261c9503-bc07-42ac-a886-d82c80d395f5", - "metadata": {}, - "source": [ - "### Getting Model Input name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "59a30768-b720-493e-84bf-ab4fd63cccc4", - "metadata": {}, - "outputs": [], - "source": [ - "import onnxruntime\n", - "model_path='models/model.onnx'\n", - "sess=onnxruntime.InferenceSession(model_path)\n", - "input_layer_names=sess.get_inputs()\n", - "for input_layer in input_layer_names:\n", - " print(input_layer)" - ] - }, - { - "cell_type": "markdown", - "id": "fe9800ea-d667-40bd-8417-2dfe0825737c", - "metadata": {}, - "source": [ - "## Converting ONNX to FP32 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e46c3925-5b1a-4a37-9ae0-d3bf0eba2012", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc -i models/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o 
models/BertLarge_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "90842225-710b-4f82-9e97-b26d620978f7", - "metadata": {}, - "source": [ - "### Creating the RAW Files " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d5cfa61a-7f58-49ac-92b6-cb60c9bb46ec", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "mkdir input_ids\n", - "mkdir attention_mask\n", - "mkdir token_type_ids" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1356759a-36da-40f3-8c51-bfffde7066ce", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, AlbertForQuestionAnswering\n", - "import torch\n", - "tokenizer = AutoTokenizer.from_pretrained(\"deepset/bert-large-uncased-whole-word-masking-squad2\")\n", - "question_token={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "markdown", - "id": "a0ce9ff0-9dc4-4147-9adc-08ce752640fd", - "metadata": {}, - "source": [ - "## Creating List Files" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c55fd398-3817-4ce3-8a02-c7e86e3d686a", - "metadata": {}, - "outputs": [], - "source": [ - "total_iter = 30\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "with open(\"tf_raw_list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"input_ids:=input_ids/inp_ids_{}.raw attention_mask:=attention_mask/attn_mask_{}.raw token_type_ids:=token_type_ids/token_type_id_{}.raw\\n\".format(i,i,i))" - ] - }, - { - "cell_type": "markdown", - "id": "7bf87ded-93d4-4158-81cd-1d2f7409fafc", - "metadata": {}, - "source": [ - "## Creating W16A16 Precision Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "431f750e-3299-4b79-af5b-7fcd5b4058cb", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize --input_dlc models/BertLarge_fp32.dlc --input_list tf_raw_list.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --output_dlc models/BertLarge_w16a16.dlc --enable_htp --htp_socs sm8550 --weights_bitwidth 16 --act_bitwidth 16" - ] - }, - { - "cell_type": "markdown", - "id": "f3d48d57-822b-46d7-a670-f474ae53285c", - "metadata": {}, - "source": [ - "## Offline Preparation of W16A16 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "79adc902-1826-4c29-a37a-ab38283d8c5a", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc models/BertLarge_w16a16.dlc --output_dlc 
models/BertLarge_w16a16_offline.dlc --set_output_tensors start_logits,end_logits" - ] - }, - { - "cell_type": "markdown", - "id": "8f6b70e6-5a40-4164-9fa5-372c2e0f972f", - "metadata": {}, - "source": [ - "## Creating FP16 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "78ba141b-dacc-4ac7-ac6b-3e07d78bf082", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc models/BertLarge_fp32.dlc --use_float_io --output_dlc models/BertLarge_fp16.dlc --set_output_tensors start_logits,end_logits" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "04f254d9-25db-44e0-b9e7-35f2fa93aa2a", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/05-nlp-nlu/DistillBert/README.md b/models-for-solutions/05-nlp-nlu/DistillBert/README.md deleted file mode 100644 index 7ba97365..00000000 --- a/models-for-solutions/05-nlp-nlu/DistillBert/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# "Question Answering" using DistillBert - -| Field | Description | -| --- | --- | -| Model Name | DistillBert | -| DNN Framwork | ONNX | -| Public Repo | https://huggingface.co/docs/transformers/model_doc/distilbert | -| Paper | https://arxiv.org/abs/1910.01108 | -| DLC Number of Inputs | 2 | -| DLC Input Ids Dimension | (1,384) | -| DLC Attention Mask Dimension | (1,384) | -| Pre-Processing | Use Model Specific Tokenizer | -| Post-Processing | Again Use Model Specific Tokenizer to Post Process the Output | - -## Pre-Requisites - -- Qualcomm® Neural Processing SDK setup should be completed by following the guide [here](https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) -- Install onnx v1.6.0. Installation instruction can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. -- Install optimum ```pip install optimum```. - -## How to get the model ? - -For this demo, you can directly get the [onnx model](Models/onnx-model/model.onnx) from the directory onnx_model or you can generate it from this [jupyter_notebook](generating_model). - - -## Convert model to DLC - -- Convert the onnx model to DLC with below command. Below, command will also fix the input dimension for the dlc. - -```python -snpe-onnx-to-dlc -i distilbert-uncased-onnx/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -o distilbert2.dlc -``` - -## Quantization of DLC -- Quantization can improve model performance in terms of latency and make the model light weight. 
-- Before Running this command make sure you've created the raw file and the list.txt -```python -snpe-dlc-quantize --input_dlc distilbert2.dlc --input_list tf_raw_list.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc distilbert2_Q.dlc --enable_htp --htp_socs sm8550 -``` - -# Accuracy analysis -- To check accuracy please run "accuracy_analyzer.ipynb" a jupyter notebook present in accuracy folder. -- To run any jupyter notebook, run below command. It will generate few links on the screen, pick the link with your machine name on it (host-name) and paste it in any browser. -- Navigate to the notebook ".ipynb" file and simply click that file. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - - - -# References - -1. https://arxiv.org/abs/1910.01108 -2. https://huggingface.co/docs/transformers/model_doc/distilbert - - -###### *Qualcomm Neural Processing SDK and Snapdragon are products of Qualcomm Technologies, Inc. and/or its subsidiaries.* diff --git a/models-for-solutions/05-nlp-nlu/DistillBert/accuracy_analyzer.ipynb b/models-for-solutions/05-nlp-nlu/DistillBert/accuracy_analyzer.ipynb deleted file mode 100644 index 20b28de9..00000000 --- a/models-for-solutions/05-nlp-nlu/DistillBert/accuracy_analyzer.ipynb +++ /dev/null @@ -1,497 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "ff16fcaa-8e97-44df-9a8e-7ca0064db049", - "metadata": {}, - "source": [ - "## Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "330fe9b7-1f53-4070-af89-eb287a36b468", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"Distilbert\"\n", - "os.environ['DLCFP16']=\"models/Distilbert_fp16.dlc\"\n", - "os.environ['DLCW16A16']=\"models/Distilbert_w16a16_offline.dlc\"\n", - "os.environ['DLCFP32']=\"models/Distilbert_fp32.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"tf_raw_list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"Distilbert_device\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. 
example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"\n", - "os.environ['SNPE_TARGET_DSPARCH']=\"hexagon-v73\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "980f04c3-80d6-4125-b6c7-2302cebfac07", - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "import pandas as pd\n", - "data_path=\"dev-v2.0.json\"\n", - "with open(data_path,\"r\") as f:\n", - " squad_data=json.load(f)\n", - "context_qa_triples=[]\n", - "for article in squad_data['data']:\n", - " for paragraph in article['paragraphs']:\n", - " context=paragraph['context']\n", - " for qa in paragraph['qas']:\n", - " question=qa['question']\n", - " if qa['answers']:\n", - " answer=qa['answers'][0]['text']\n", - " elif qa['plausible_answers']:\n", - " plausible_answers=qa['plausible_answers']\n", - " answer=plausible_answers[0]['text']\n", - " else:\n", - " answer=''\n", - "\n", - " context_qa_triples.append({'context':context,'question':question,'answers':answer})\n", - "\n", - "df=pd.DataFrame(context_qa_triples[:30])\n", - "df.head(3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "07e39486-bad5-41ee-8951-f04762cb227e", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import DistilBertTokenizer, TFDistilBertForQuestionAnswering\n", - "import tensorflow as tf\n", - "tokenizer = DistilBertTokenizer.from_pretrained(\"distilbert-base-uncased-distilled-squad\")\n", - "question_token={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)" - ] - }, - { - "cell_type": "markdown", - "id": "9a8d0cea-518e-40a0-bd23-e04361024055", - "metadata": {}, - "source": [ - "#### F1 Score calculation custom code" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cfb36f0d-cd77-4d5d-96d0-1b27604a9ff1", - "metadata": {}, - "outputs": [], - "source": [ - "from sklearn.metrics import f1_score\n", - "def f1_scores_custom(prediction,ground_truth):\n", - " prediction_tokens=prediction.lower().split()\n", - " ground_truth_tokens=ground_truth.lower().split()\n", - " common_tokens=[token for token in prediction_tokens if token in ground_truth_tokens] \n", - " if (len(prediction_tokens)==0 and len(ground_truth_tokens)==0):\n", - " return [1.0,1.0,1.0]\n", - " elif len(prediction_tokens)==0 or len(ground_truth_tokens)==0:\n", - " return [0.0,0.0,0.0]\n", - " precision=len(common_tokens)/len(prediction_tokens)\n", - " recall=len(common_tokens)/len(ground_truth_tokens)\n", - " if precision+recall==0:\n", - " return [0.0,0.0,0.0]\n", - " f1= 2*(precision*recall)/(precision+recall) \n", - " return [f1,precision,recall]" - ] - }, - { - "cell_type": "markdown", - "id": "b6e40cf2-49df-40d2-902d-37ae499724ec", - "metadata": {}, - "source": [ - "### Normal Model Inference" - ] - }, - { - "cell_type": "code", - "execution_count": 
null, - "id": "e97d8745-7e6b-447c-8200-c21c6944dc65", - "metadata": {}, - "outputs": [], - "source": [ - "from transformers import DistilBertTokenizer, TFDistilBertForQuestionAnswering\n", - "import tensorflow as tf\n", - "import numpy as np\n", - "tokenizer = DistilBertTokenizer.from_pretrained(\"distilbert-base-uncased-distilled-squad\")\n", - "model = TFDistilBertForQuestionAnswering.from_pretrained(\"distilbert-base-uncased-distilled-squad\")\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "question_answer={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " outputs = model(**inputs)\n", - " answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])\n", - " answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])\n", - " \n", - " predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]\n", - " predicted_answer=tokenizer.decode(predict_answer_tokens)\n", - " question_answer[question]=predicted_answer\n", - " f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " f1_scores.append(f1)\n", - " precision_scores.append(precision)\n", - " recall_scores.append(recall)\n", - "mean_f1_score=np.mean(f1_scores)\n", - "mean_precision_score=np.mean(precision_scores)\n", - "mean_recall_score=np.mean(recall_scores)\n", - "mean_f1_score,mean_recall_score,mean_precision_score" - ] - }, - { - "cell_type": "markdown", - "id": "d0f5e31c-9a56-4fbe-bac2-2d5c51b9c804", - "metadata": {}, - "source": [ - "## Creating Directory on Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "09aafd6a-4921-4031-b9f5-28c7495da9d5", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "7ca37c56-57fc-4a06-baf1-b521ef1d3696", - "metadata": {}, - "source": [ - "## Pushing All SNPE Lib and Bin folders onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c41c9acc-ee05-4378-9969-2763bd7286e1", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0f397227-0cf9-4894-b690-490c755e61e5", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "markdown", - "id": "ede54514-8a1c-4e6c-91f2-5695201bc378", - "metadata": {}, - 
"source": [ - "## Pushing all Model Artifacts onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cdb1bfbb-a479-4a48-b0e1-bd659be4569a", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLCFP16 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLCW16A16 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLCFP32 /data/local/tmp/$ONDEVICE_FOLDER \n", - "$DEVICE_SHELL push attention_mask /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push input_ids /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push token_type_ids /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "e1eb5339-d8ba-4c2d-b9a6-bcb864451579", - "metadata": {}, - "source": [ - "## Inferencing FP32 Model on CPU Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f30643a0-2051-45f3-99c7-a113202ed5d3", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=Distilbert_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"Distilbert_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list tf_raw_list.txt --set_unconsumed_as_output --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "9e4d692a-04b4-4481-8fec-08b73856073e", - "metadata": {}, - "source": [ - "## Inferencing FP16 on DSP Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1f9bfe56-3d19-4c7b-97fb-0935ef67f8d7", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_DSP_FP16\n", - "export OUTPUT_FP_16=Distilbert_fp16.dlc\n", - "export ONDEVICE_FOLDER=\"Distilbert_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_FP_16 --input_list tf_raw_list.txt --set_output_tensors start_logits,end_logits --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "bdff7599-dfc1-470a-baf1-c3d7561648a4", - "metadata": {}, - "source": [ - "## Inferencing W16A16 on DSP Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8d1f7442-a4c3-4184-a47a-1f50fa008c4c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_DSP_W16A16\n", - 
"export DLC_W16A16=Distilbert_w16a16_offline.dlc\n", - "export ONDEVICE_FOLDER=\"Distilbert_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $DLC_W16A16 --input_list tf_raw_list.txt --set_output_tensors start_logits,end_logits --output_dir $OUTPUT_FOLDER --use_dsp --enable_cpu_fallback" - ] - }, - { - "cell_type": "markdown", - "id": "b2e92b49-0fab-4da8-a82d-f8c7c09a996b", - "metadata": {}, - "source": [ - "## Pulling the Output from Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "21ed8bc8-d5d1-47f4-9425-d63fc410f48c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_DSP_W16A16 OUTPUT_DSP_W16A16\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_DSP_FP16 OUTPUT_DSP_FP16\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f67c53f5-8209-4758-a25e-e49629011ccd", - "metadata": {}, - "outputs": [], - "source": [ - "def func(start_logits,end_logits,inputs):\n", - " answer_start_index = int(tf.math.argmax(start_logits, axis=-1)[0])\n", - " answer_end_index = int(tf.math.argmax(end_logits, axis=-1)[0])\n", - " predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]\n", - " return tokenizer.decode(predict_answer_tokens)" - ] - }, - { - "cell_type": "markdown", - "id": "acf05cbe-af95-4661-a6e0-b7b271617f6e", - "metadata": {}, - "source": [ - "## Comparing Accuracy of FP32 Vs FP16" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "edbc82af-ac9b-48fb-9744-3f703bd07f99", - "metadata": {}, - "outputs": [], - "source": [ - "import glob\n", - "import tensorflow as tf\n", - "import os\n", - "folder = [\"OUTPUT_32b_CPU\",\"OUTPUT_DSP_FP16\"]\n", - "dlc_type = [\"fp32\",\"fp16\"]\n", - "data=[]\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "for j in range(0,2):\n", - " print(\"-----------------------\",folder[j],\"-----------------------------\")\n", - " for result_path in glob.glob(os.path.join(folder[j], '*')):\n", - " if \".log\" not in result_path:\n", - " start_logits = np.fromfile(result_path+'/start_logits.raw', dtype=\"float32\")\n", - " end_logits=np.fromfile(result_path+'/end_logits.raw', dtype=\"float32\")\n", - " start_logits=start_logits.reshape((1,384))\n", - " end_logits=end_logits.reshape((1,384))\n", - " question,inputs,answer,text=question_token[int(result_path.split(\"/\")[1].split(\"_\")[1])]\n", - " predicted_answer=func(start_logits,end_logits,inputs)\n", - " data.append({\"Model_Type\":dlc_type[j],\"question\":question,\"predicted_answer\":predicted_answer,\"Actual Model Answer\":question_answer[question],\"answer\":answer,\"context\":text})\n", - " #f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " #f1_scores.append(f1)\n", - " #precision_scores.append(precision)\n", - " #recall_scores.append(recall)\n", - " \n", - " mean_f1_score=np.mean(f1_scores)\n", - " mean_precision_score=np.mean(precision_scores)\n", - " mean_recall_score=np.mean(recall_scores)\n", - " \n", - " print(\"F1_Score:\",mean_f1_score,\"Recall:\",mean_recall_score,\"Precision:\",mean_precision_score)\n", - "data=pd.DataFrame(data)\n", - "data.head(40)" - ] - }, - { - "cell_type": "markdown", - "id": "7b00c99d-8773-40e6-9e8a-184f53737d62", - "metadata": {}, - "source": [ - "## Comparing Accuracy of 
FP32 Vs W16A16" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fb33f96a-01b2-4651-8755-9915329fdfb8", - "metadata": {}, - "outputs": [], - "source": [ - "import glob\n", - "import tensorflow as tf\n", - "import os\n", - "folder = [\"OUTPUT_32b_CPU\",\"OUTPUT_DSP_W16A16\"]\n", - "dlc_type = [\"fp32\",\"W16A16\"]\n", - "data=[]\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "for j in range(0,2):\n", - " print(\"-----------------------\",folder[j],\"-----------------------------\")\n", - " for result_path in glob.glob(os.path.join(folder[j], '*')):\n", - " if \".log\" not in result_path:\n", - " start_logits = np.fromfile(result_path+'/start_logits.raw', dtype=\"float32\")\n", - " end_logits=np.fromfile(result_path+'/end_logits.raw', dtype=\"float32\")\n", - " start_logits=start_logits.reshape((1,384))\n", - " end_logits=end_logits.reshape((1,384))\n", - " question,inputs,answer,text=question_token[int(result_path.split(\"/\")[1].split(\"_\")[1])]\n", - " predicted_answer=func(start_logits,end_logits,inputs)\n", - " data.append({\"Model_Type\":dlc_type[j],\"question\":question,\"predicted_answer\":predicted_answer,\"Actual Model Answer\":question_answer[question],\"answer\":answer,\"context\":text})\n", - " #f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " #f1_scores.append(f1)\n", - " #precision_scores.append(precision)\n", - " #recall_scores.append(recall)\n", - " \n", - " mean_f1_score=np.mean(f1_scores)\n", - " mean_precision_score=np.mean(precision_scores)\n", - " mean_recall_score=np.mean(recall_scores)\n", - " \n", - " print(\"F1_Score:\",mean_f1_score,\"Recall:\",mean_recall_score,\"Precision:\",mean_precision_score)\n", - "data=pd.DataFrame(data)\n", - "data.head(40)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "20fd5dab-38d7-4ac9-a05a-9d39a834a9ae", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/05-nlp-nlu/DistillBert/generating_model.ipynb b/models-for-solutions/05-nlp-nlu/DistillBert/generating_model.ipynb deleted file mode 100644 index b7265946..00000000 --- a/models-for-solutions/05-nlp-nlu/DistillBert/generating_model.ipynb +++ /dev/null @@ -1,382 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "c8fb0104-fcca-4c92-846f-15361e6d8bf5", - "metadata": {}, - "source": [ - "## Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "658edf27-a90f-4f43-9f81-2899f5ee50ba", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"Distilbert\"\n", - "os.environ['DLCFP16']=\"models/Distilbert_fp16.dlc\"\n", - "os.environ['DLCW16A16']=\"models/Distilbert_w16a16_offline.dlc\"\n", - "os.environ['DLCFP32']=\"models/Distilbert_fp32.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"tf_raw_list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"Distilbert_device\"\n", - 
"os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"\n", - "os.environ['SNPE_TARGET_DSPARCH']=\"hexagon-v73\"" - ] - }, - { - "cell_type": "markdown", - "id": "f8edebe3-603e-4ec6-961b-e83e5ff46bea", - "metadata": {}, - "source": [ - "## Downloading Data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "907a54b8-8e36-443a-8e59-7791f3a37fa0", - "metadata": {}, - "outputs": [], - "source": [ - "!wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "64a0739b-fa9b-4f5b-aaaf-c730c630f39b", - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "import pandas as pd\n", - "data_path=\"dev-v2.0.json\"\n", - "with open(data_path,\"r\") as f:\n", - " squad_data=json.load(f)\n", - "context_qa_triples=[]\n", - "for article in squad_data['data']:\n", - " for paragraph in article['paragraphs']:\n", - " context=paragraph['context']\n", - " for qa in paragraph['qas']:\n", - " question=qa['question']\n", - " if qa['answers']:\n", - " answer=qa['answers'][0]['text']\n", - " elif qa['plausible_answers']:\n", - " plausible_answers=qa['plausible_answers']\n", - " answer=plausible_answers[0]['text']\n", - " else:\n", - " answer=''\n", - "\n", - " context_qa_triples.append({'context':context,'question':question,'answers':answer})\n", - "df=pd.DataFrame(context_qa_triples[:30])\n", - "df.head(3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "73e3211a-edc6-4508-9b17-226baf03b3f5", - "metadata": {}, - "outputs": [], - "source": [ - "!pip install optimum\n", - "!pip install sentencepiece" - ] - }, - { - "cell_type": "markdown", - "id": "419d929b-d365-48d9-89f0-27e6dc5222f2", - "metadata": {}, - "source": [ - "## Converting the Model to ONNX format using optimum" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "921fd96d-2e98-4d47-a7a7-5ecb762a1694", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "optimum-cli export onnx --model distilbert-base-uncased-distilled-squad models/" - ] - }, - { - "cell_type": "markdown", - "id": "a43b5b15-cde0-4036-bda3-dfc2f8df4e4b", - "metadata": {}, - "source": [ - "### Getting Model Input name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e4c01ae3-8d82-441e-a88b-4ad837893370", - "metadata": {}, - "outputs": [], - "source": [ - "import onnxruntime\n", - "model_path='models/model.onnx'\n", - "sess=onnxruntime.InferenceSession(model_path)\n", - "input_layer_names=sess.get_inputs()\n", - "for input_layer in input_layer_names:\n", - " print(input_layer)" - ] - }, - { - "cell_type": "markdown", - "id": "74a16b9c-32e1-42f5-a6df-cc3b27d0c3cf", - "metadata": {}, - "source": [ - "## Converting ONNX to FP32 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2aa75b00-bcaf-4a58-89c5-b7f40bc388d2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc -i models/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -o models/Distilbert_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "b3dadb69-ede9-43ec-a520-c5c180c8d8ab", - "metadata": {}, - "source": [ - "### Creating the RAW Files" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "id": "25d39a2a-0f69-4298-90fc-9920b2e86dd7", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "mkdir input_ids\n", - "mkdir attention_mask" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2f4f3b1a-4161-4e8a-b498-78065ded28dc", - "metadata": {}, - "outputs": [], - "source": [ - "from sklearn.metrics import f1_score\n", - "def f1_scores_custom(prediction,ground_truth):\n", - " prediction_tokens=prediction.lower().split()\n", - " ground_truth_tokens=ground_truth.lower().split()\n", - " common_tokens=[token for token in prediction_tokens if token in ground_truth_tokens] \n", - " if (len(prediction_tokens)==0 and len(ground_truth_tokens)==0):\n", - " return [1.0,1.0,1.0]\n", - " elif len(prediction_tokens)==0 or len(ground_truth_tokens)==0:\n", - " return [0.0,0.0,0.0]\n", - " precision=len(common_tokens)/len(prediction_tokens)\n", - " recall=len(common_tokens)/len(ground_truth_tokens)\n", - "\n", - " if precision+recall==0:\n", - " return [0.0,0.0,0.0]\n", - " f1= 2*(precision*recall)/(precision+recall)\n", - " \n", - " return [f1,precision,recall]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5e3a980e-263a-4fee-93cc-855fc5622d85", - "metadata": {}, - "outputs": [], - "source": [ - "from transformers import DistilBertTokenizer, TFDistilBertForQuestionAnswering\n", - "import tensorflow as tf\n", - "import numpy as np\n", - "tokenizer = DistilBertTokenizer.from_pretrained(\"distilbert-base-uncased-distilled-squad\")\n", - "model = TFDistilBertForQuestionAnswering.from_pretrained(\"distilbert-base-uncased-distilled-squad\")\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "question_answer={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " outputs = model(**inputs)\n", - " answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])\n", - " answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])\n", - " \n", - " predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]\n", - " predicted_answer=tokenizer.decode(predict_answer_tokens)\n", - " question_answer[question]=predicted_answer\n", - " f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " f1_scores.append(f1)\n", - " precision_scores.append(precision)\n", - " recall_scores.append(recall)\n", - "mean_f1_score=np.mean(f1_scores)\n", - "mean_precision_score=np.mean(precision_scores)\n", - "mean_recall_score=np.mean(recall_scores)\n", - "mean_f1_score,mean_recall_score,mean_precision_score" - ] - }, - { - "cell_type": "markdown", - "id": "2a59af3d-a07c-444c-b486-2eb7330a21c6", - "metadata": {}, - "source": [ - "## Creating List Files" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4a4619e4-d836-4ab2-a7da-eb501abd4911", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import DistilBertTokenizer, TFDistilBertForQuestionAnswering\n", - "import tensorflow as tf\n", - "tokenizer = DistilBertTokenizer.from_pretrained(\"distilbert-base-uncased-distilled-squad\")\n", - "question_token={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, 
return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "261fff17-e4a1-4dda-ae4d-9960a880e5dd", - "metadata": {}, - "outputs": [], - "source": [ - "# :=[:=]\n", - "total_iter = 30\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "with open(\"tf_raw_list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"input_ids:=input_ids/inp_ids_{}.raw attention_mask:=attention_mask/attn_mask_{}.raw\\n\".format(i,i)) # add token mask if needed" - ] - }, - { - "cell_type": "markdown", - "id": "32f92ac8-18c9-4567-9e95-e33808b028e6", - "metadata": {}, - "source": [ - "## Creating W16A16 Precision Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "382733a6-ca6f-4997-bea5-5d74049eb41b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize --input_dlc models/Distilbert_fp32.dlc --input_list tf_raw_list.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --output_dlc models/Distilbert_w16a16.dlc --enable_htp --htp_socs sm8550 --weights_bitwidth 16 --act_bitwidth 16" - ] - }, - { - "cell_type": "markdown", - "id": "ee37858f-b527-4f3c-bb17-7c289cd00cc4", - "metadata": {}, - "source": [ - "## Offline Preparation of W16A16 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "54359fb6-c2c5-4ae2-9975-d0a708b1c9a5", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc models/Distilbert_w16a16.dlc --output_dlc models/Distilbert_w16a16_offline.dlc --set_output_tensors start_logits,end_logits" - ] - }, - { - "cell_type": "markdown", - "id": "3f39b1d4-740a-4e55-b156-1df0b374fefc", - "metadata": {}, - "source": [ - "## Creating FP16 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e9994394-b98e-4576-92fb-2f59edae892e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc models/Distilbert_fp32.dlc --use_float_io --output_dlc models/Distilbert_fp16.dlc --set_output_tensors start_logits,end_logits" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "89c7a960-2937-4946-a738-1dec31ca02e9", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/05-nlp-nlu/Electrabase/README.md b/models-for-solutions/05-nlp-nlu/Electrabase/README.md deleted file mode 100644 index 54d8b0cb..00000000 --- 
a/models-for-solutions/05-nlp-nlu/Electrabase/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# "Question Answering" using Electrabase - -| Field | Description | -| --- | --- | -| Model Name | Electrabase | -| DNN Framwork | ONNX | -| Public Repo | https://huggingface.co/google/electra-base-discriminator | -| Paper | https://www.arxiv-vanity.com/papers/2003.10555/ | -| DLC Number of Inputs | 3 | -| DLC Input Ids Dimension | (1,384) | -| DLC Attention Mask Dimension | (1,384) | -| DLC Token Type Ids | (1,384) | -| Pre-Processing | Use Model Specific Tokenizer | -| Post-Processing | Again Use Model Specific Tokenizer to Post Process the Output | - -## Pre-Requisites - -- Qualcomm® Neural Processing SDK setup should be completed by following the guide [here](https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) -- Install onnx v1.6.0. Installation instruction can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. -- Install optimum ```pip install optimum```. - -## How to get the model ? - -For this demo, you can directly get the [onnx model](Models/onnx-model/model.onnx) from the directory onnx_model or you can generate it from this [jupyter_notebook](generating_model). - - -## Convert model to DLC - -- Convert the onnx model to DLC with below command. Below, command will also fix the input dimension for the dlc. - -```python -snpe-onnx-to-dlc -i electrabase-onnx/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o electrabase.dlc -``` - -## Quantization of DLC -- Quantization can improve model performance in terms of latency and make the model light weight. -- Before Running this command make sure you've created the raw file and the list.txt -```python -snpe-dlc-graph-prepare --input_dlc electrabase.dlc --input_list tf_raw_list.txt --output_dlc electrabase_int.dlc -``` - -# Accuracy analysis -- To check accuracy please run "accuracy_analyzer.ipynb" a jupyter notebook present in accuracy folder. -- To run any jupyter notebook, run below command. It will generate few links on the screen, pick the link with your machine name on it (host-name) and paste it in any browser. -- Navigate to the notebook ".ipynb" file and simply click that file. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - - - -# References - -1. https://www.arxiv-vanity.com/papers/2003.10555/ -2. https://huggingface.co/google/electra-base-discriminator - - -###### *Qualcomm Neural Processing SDK and Snapdragon are products of Qualcomm Technologies, Inc. 
and/or its subsidiaries.* diff --git a/models-for-solutions/05-nlp-nlu/Electrabase/accuracy_analyzer.ipynb b/models-for-solutions/05-nlp-nlu/Electrabase/accuracy_analyzer.ipynb deleted file mode 100644 index 9d28403f..00000000 --- a/models-for-solutions/05-nlp-nlu/Electrabase/accuracy_analyzer.ipynb +++ /dev/null @@ -1,508 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "3d8b2366-147b-49b5-bfc5-bfa3948278e9", - "metadata": {}, - "source": [ - "## Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f87b61f2-a5f0-4f9e-a3dd-ed4a40db4a52", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"Electrabase\"\n", - "os.environ['DLCFP16']=\"models/Electrabase_fp16.dlc\"\n", - "os.environ['DLCW16A16']=\"models/Electrabase_w16a16_offline.dlc\"\n", - "os.environ['DLCFP32']=\"models/Electrabase_fp32.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"tf_raw_list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"Electrabase_device\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"\n", - "os.environ['SNPE_TARGET_DSPARCH']=\"hexagon-v73\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4a5842b0-0ed6-4c95-9c51-1b47debf3753", - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "import pandas as pd\n", - "data_path=\"dev-v2.0.json\"\n", - "with open(data_path,\"r\") as f:\n", - " squad_data=json.load(f)\n", - "context_qa_triples=[]\n", - "for article in squad_data['data']:\n", - " for paragraph in article['paragraphs']:\n", - " context=paragraph['context']\n", - " for qa in paragraph['qas']:\n", - " question=qa['question']\n", - " if qa['answers']:\n", - " answer=qa['answers'][0]['text']\n", - " elif qa['plausible_answers']:\n", - " plausible_answers=qa['plausible_answers']\n", - " answer=plausible_answers[0]['text']\n", - " else:\n", - " answer=''\n", - "\n", - " context_qa_triples.append({'context':context,'question':question,'answers':answer})\n", - "\n", - "df=pd.DataFrame(context_qa_triples[:30])\n", - "df.head(3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0dc4dac8-2200-4ac3-b859-df0dabe991c4", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, ElectraForQuestionAnswering\n", - "import torch\n", - "tokenizer = AutoTokenizer.from_pretrained(\"bhadresh-savani/electra-base-squad2\")\n", - "question_token={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with 
open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "markdown", - "id": "6b3d9896-63d8-491e-87e9-db2385cc4ebc", - "metadata": {}, - "source": [ - "#### F1 Score calculation custom code" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8f5373ca-8702-4268-b30f-a4ba2da5947b", - "metadata": {}, - "outputs": [], - "source": [ - "from sklearn.metrics import f1_score\n", - "def f1_scores_custom(prediction,ground_truth):\n", - " prediction_tokens=prediction.lower().split()\n", - " ground_truth_tokens=ground_truth.lower().split()\n", - " common_tokens=[token for token in prediction_tokens if token in ground_truth_tokens] \n", - " if (len(prediction_tokens)==0 and len(ground_truth_tokens)==0):\n", - " return [1.0,1.0,1.0]\n", - " elif len(prediction_tokens)==0 or len(ground_truth_tokens)==0:\n", - " return [0.0,0.0,0.0]\n", - " precision=len(common_tokens)/len(prediction_tokens)\n", - " recall=len(common_tokens)/len(ground_truth_tokens)\n", - " if precision+recall==0:\n", - " return [0.0,0.0,0.0]\n", - " f1= 2*(precision*recall)/(precision+recall) \n", - " return [f1,precision,recall]" - ] - }, - { - "cell_type": "markdown", - "id": "008b2a7b-a26e-4f4e-9b26-61012d0b0665", - "metadata": {}, - "source": [ - "### Normal Model Inference" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "27b5da09-852f-4054-8088-44262ddf910a", - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow as tf\n", - "import numpy as np\n", - "from transformers import AutoTokenizer, ElectraForQuestionAnswering\n", - "import torch\n", - "\n", - "tokenizer = AutoTokenizer.from_pretrained(\"bhadresh-savani/electra-base-squad2\")\n", - "model = ElectraForQuestionAnswering.from_pretrained(\"bhadresh-savani/electra-base-squad2\")\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "question_answer={}\n", - "\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"pt\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " outputs = model(**inputs)\n", - " answer_start_index = int(tf.math.argmax(outputs.start_logits.detach().numpy(), axis=-1)[0])\n", - " answer_end_index = int(tf.math.argmax(outputs.end_logits.detach().numpy(), axis=-1)[0])\n", - " \n", - " predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]\n", - " predicted_answer=tokenizer.decode(predict_answer_tokens)\n", - " print(question,tokenizer.decode(predict_answer_tokens, skip_special_tokens=True))\n", - " question_answer[question]=predicted_answer\n", - " f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " f1_scores.append(f1)\n", - " precision_scores.append(precision)\n", - " recall_scores.append(recall)\n", - "\n", - "mean_f1_score=np.mean(f1_scores)\n", - "mean_precision_score=np.mean(precision_scores)\n", - "mean_recall_score=np.mean(recall_scores)\n", - "\n", - "mean_f1_score,mean_recall_score,mean_precision_score" - ] - }, - { - "cell_type": "markdown", - "id": "15b88290-bd2f-40e4-9c08-60e9ebbac475", - "metadata": {}, - "source": [ - "## Creating Directory on Device" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "id": "7c3f8e93-4ad5-4fb5-8ddf-dd1e891eb1a6", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "5cb9d6ea-5f4d-412a-98e5-a3bc3bf5b710", - "metadata": {}, - "source": [ - "## Pushing All SNPE Lib and Bin folders onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "46b63e40-c2bb-4c47-96ba-88c12120be7e", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6a2766ba-acbd-4b9c-bbc7-abf94b94ce95", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "markdown", - "id": "995d591d-22b2-437f-9a26-d830d1aabb52", - "metadata": {}, - "source": [ - "## Pushing all Model Artifacts onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb28b07e-b128-4212-bd5d-691cb40d8f9b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLCFP16 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLCW16A16 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLCFP32 /data/local/tmp/$ONDEVICE_FOLDER \n", - "$DEVICE_SHELL push attention_mask /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push input_ids /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push token_type_ids /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "940510f0-3eea-47a6-8df3-7dbb3eded975", - "metadata": {}, - "source": [ - "## Inferencing FP32 Model on CPU Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "94531d9e-8171-4936-92aa-64f65781c195", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=Electrabase_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"Electrabase_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list tf_raw_list.txt --set_unconsumed_as_output --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "0c852c8d-4db9-474b-89c8-595ccc6f2b8d", - 
"metadata": {}, - "source": [ - "## Inferencing FP16 on DSP Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b218f493-b4e6-4fcc-b28a-3ba1afecc19a", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_DSP_FP16\n", - "export OUTPUT_FP_16=Electrabase_fp16.dlc\n", - "export ONDEVICE_FOLDER=\"Electrabase_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_FP_16 --input_list tf_raw_list.txt --set_output_tensors start_logits,end_logits --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "12f908ae-c353-45fc-a2d1-71e6890e46d4", - "metadata": {}, - "source": [ - "## Inferencing W16A16 on DSP Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ea114a76-cb81-499d-a798-fbef36397d0c", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_DSP_W16A16\n", - "export DLC_W16A16=Electrabase_w16a16_offline.dlc\n", - "export ONDEVICE_FOLDER=\"Electrabase_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $DLC_W16A16 --input_list tf_raw_list.txt --set_output_tensors start_logits,end_logits --output_dir $OUTPUT_FOLDER --use_dsp --enable_cpu_fallback" - ] - }, - { - "cell_type": "markdown", - "id": "3b6d8331-f071-40c1-8b51-1361e84f63a3", - "metadata": {}, - "source": [ - "## Pulling the Output from Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4d629db7-cb9c-407e-9d83-9a5591a55473", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_DSP_W16A16 OUTPUT_DSP_W16A16\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_DSP_FP16 OUTPUT_DSP_FP16\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "26f3cc73-0bf3-4490-ac18-568791cde837", - "metadata": {}, - "outputs": [], - "source": [ - "def func(start_logits,end_logits,inputs):\n", - " answer_start_index = int(tf.math.argmax(start_logits, axis=-1)[0])\n", - " answer_end_index = int(tf.math.argmax(end_logits, axis=-1)[0])\n", - " predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]\n", - " return tokenizer.decode(predict_answer_tokens)" - ] - }, - { - "cell_type": "markdown", - "id": "11462a34-bbdc-4779-ac78-833482cd2571", - "metadata": {}, - "source": [ - "## Comparing Accuracy of FP32 Vs FP16" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8d0278e6-3065-464f-ba8d-a191dfc4ed73", - "metadata": {}, - "outputs": [], 
- "source": [ - "import glob\n", - "import tensorflow as tf\n", - "import os\n", - "folder = [\"OUTPUT_32b_CPU\",\"OUTPUT_DSP_FP16\"]\n", - "dlc_type = [\"fp32\",\"fp16\"]\n", - "data=[]\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "for j in range(0,2):\n", - " print(\"-----------------------\",folder[j],\"-----------------------------\")\n", - " for result_path in glob.glob(os.path.join(folder[j], '*')):\n", - " if \".log\" not in result_path:\n", - " start_logits = np.fromfile(result_path+'/start_logits.raw', dtype=\"float32\")\n", - " end_logits=np.fromfile(result_path+'/end_logits.raw', dtype=\"float32\")\n", - " start_logits=start_logits.reshape((1,384))\n", - " end_logits=end_logits.reshape((1,384))\n", - " question,inputs,answer,text=question_token[int(result_path.split(\"/\")[1].split(\"_\")[1])]\n", - " predicted_answer=func(start_logits,end_logits,inputs)\n", - " data.append({\"Model_Type\":dlc_type[j],\"question\":question,\"predicted_answer\":predicted_answer,\"Actual Model Answer\":question_answer[question],\"answer\":answer,\"context\":text})\n", - " #f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " #f1_scores.append(f1)\n", - " #precision_scores.append(precision)\n", - " #recall_scores.append(recall)\n", - " \n", - " mean_f1_score=np.mean(f1_scores)\n", - " mean_precision_score=np.mean(precision_scores)\n", - " mean_recall_score=np.mean(recall_scores)\n", - " \n", - " print(\"F1_Score:\",mean_f1_score,\"Recall:\",mean_recall_score,\"Precision:\",mean_precision_score)\n", - "data=pd.DataFrame(data)\n", - "data.head(40)" - ] - }, - { - "cell_type": "markdown", - "id": "f48e450c-dcc9-424a-a513-b901ab3bf168", - "metadata": {}, - "source": [ - "## Comparing Accuracy of FP32 Vs W16A16" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7d5d6ced-670b-4dd4-9ee2-ff762ff3d954", - "metadata": {}, - "outputs": [], - "source": [ - "import glob\n", - "import tensorflow as tf\n", - "import os\n", - "folder = [\"OUTPUT_32b_CPU\",\"OUTPUT_DSP_W16A16\"]\n", - "dlc_type = [\"fp32\",\"W16A16\"]\n", - "data=[]\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "for j in range(0,2):\n", - " print(\"-----------------------\",folder[j],\"-----------------------------\")\n", - " for result_path in glob.glob(os.path.join(folder[j], '*')):\n", - " if \".log\" not in result_path:\n", - " start_logits = np.fromfile(result_path+'/start_logits.raw', dtype=\"float32\")\n", - " end_logits=np.fromfile(result_path+'/end_logits.raw', dtype=\"float32\")\n", - " start_logits=start_logits.reshape((1,384))\n", - " end_logits=end_logits.reshape((1,384))\n", - " question,inputs,answer,text=question_token[int(result_path.split(\"/\")[1].split(\"_\")[1])]\n", - " predicted_answer=func(start_logits,end_logits,inputs)\n", - " data.append({\"Model_Type\":dlc_type[j],\"question\":question,\"predicted_answer\":predicted_answer,\"Actual Model Answer\":question_answer[question],\"answer\":answer,\"context\":text})\n", - " #f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " #f1_scores.append(f1)\n", - " #precision_scores.append(precision)\n", - " #recall_scores.append(recall)\n", - " \n", - " mean_f1_score=np.mean(f1_scores)\n", - " mean_precision_score=np.mean(precision_scores)\n", - " mean_recall_score=np.mean(recall_scores)\n", - " \n", - " print(\"F1_Score:\",mean_f1_score,\"Recall:\",mean_recall_score,\"Precision:\",mean_precision_score)\n", - "data=pd.DataFrame(data)\n", - "data.head(40)" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "id": "64c73248-2c4b-4b4b-a23d-b416f8724293", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/05-nlp-nlu/Electrabase/generating_model.ipynb b/models-for-solutions/05-nlp-nlu/Electrabase/generating_model.ipynb deleted file mode 100644 index bca6a344..00000000 --- a/models-for-solutions/05-nlp-nlu/Electrabase/generating_model.ipynb +++ /dev/null @@ -1,324 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "23d3165c-c538-4c49-9b56-6752cc77ed41", - "metadata": {}, - "source": [ - "## Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "98158c6a-dd65-44f7-8476-704d94b8ef24", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"Electrabase\"\n", - "os.environ['DLCFP16']=\"models/Electrabase_fp16.dlc\"\n", - "os.environ['DLCW16A16']=\"models/Electrabase_w16a16_offline.dlc\"\n", - "os.environ['DLCFP32']=\"models/Electrabase_fp32.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"tf_raw_list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"Electrabase_device\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. 
example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"\n", - "os.environ['SNPE_TARGET_DSPARCH']=\"hexagon-v73\"" - ] - }, - { - "cell_type": "markdown", - "id": "778cffa6-1261-4aab-84e8-448dfe179d7d", - "metadata": {}, - "source": [ - "## Downloading Data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "72cb2698-0740-41af-8c25-aef098600460", - "metadata": {}, - "outputs": [], - "source": [ - "!wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "668e119e-3d26-452f-99a1-e2ee00b8ef3c", - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "import pandas as pd\n", - "data_path=\"dev-v2.0.json\"\n", - "with open(data_path,\"r\") as f:\n", - " squad_data=json.load(f)\n", - "context_qa_triples=[]\n", - "for article in squad_data['data']:\n", - " for paragraph in article['paragraphs']:\n", - " context=paragraph['context']\n", - " for qa in paragraph['qas']:\n", - " question=qa['question']\n", - " if qa['answers']:\n", - " answer=qa['answers'][0]['text']\n", - " elif qa['plausible_answers']:\n", - " plausible_answers=qa['plausible_answers']\n", - " answer=plausible_answers[0]['text']\n", - " else:\n", - " answer=''\n", - "\n", - " context_qa_triples.append({'context':context,'question':question,'answers':answer})\n", - "df=pd.DataFrame(context_qa_triples[:30])\n", - "df.head(3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8d1567e8-c878-4e3c-be6a-07d9563ab59d", - "metadata": {}, - "outputs": [], - "source": [ - "!pip install optimum\n", - "!pip install sentencepiece" - ] - }, - { - "cell_type": "markdown", - "id": "86494dd2-e890-4fd5-8ab4-5689140239b3", - "metadata": {}, - "source": [ - "## Converting the Model to ONNX format using optimum" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e4e36e87-cf5a-451f-92ae-73e966145c73", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "optimum-cli export onnx --model bhadresh-savani/electra-base-squad2 models/" - ] - }, - { - "cell_type": "markdown", - "id": "4f0533f3-d96a-40d3-b878-b80d8a29728d", - "metadata": {}, - "source": [ - "### Getting Model Input name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9a988492-bfa3-458c-a65b-60cff6025589", - "metadata": {}, - "outputs": [], - "source": [ - "import onnxruntime\n", - "model_path='models/model.onnx'\n", - "sess=onnxruntime.InferenceSession(model_path)\n", - "input_layer_names=sess.get_inputs()\n", - "for input_layer in input_layer_names:\n", - " print(input_layer)" - ] - }, - { - "cell_type": "markdown", - "id": "6254fad7-05f5-4532-ac89-74c64a755d48", - "metadata": {}, - "source": [ - "## Converting ONNX to FP32 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "edbacb31-2135-4b14-8dde-95e1a9a07a59", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc -i models/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o models/Electrabase_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "373426be-dff9-4bc0-a520-7500de348ae3", - "metadata": {}, - "source": [ - "### Creating the RAW Files " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "de4dd002-5f74-4796-8cda-0120b341e4ec", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "mkdir 
input_ids\n", - "mkdir attention_mask\n", - "mkdir token_type_ids" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e1a2ed11-4f86-43c6-97fc-47ecb77973ca", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, ElectraForQuestionAnswering\n", - "import torch\n", - "tokenizer = AutoTokenizer.from_pretrained(\"bhadresh-savani/electra-base-squad2\")\n", - "question_token={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "markdown", - "id": "93269c6f-1372-4c80-8ebe-8e7f656e5530", - "metadata": {}, - "source": [ - "## Creating List Files" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "77d5bf5d-8e37-4a1d-8e3d-3d25fd9a06ac", - "metadata": {}, - "outputs": [], - "source": [ - "total_iter = 30\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "with open(\"tf_raw_list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"input_ids:=input_ids/inp_ids_{}.raw attention_mask:=attention_mask/attn_mask_{}.raw token_type_ids:=token_type_ids/token_type_id_{}.raw\\n\".format(i,i,i))" - ] - }, - { - "cell_type": "markdown", - "id": "532c7fdc-3e00-4955-b07d-ddc306a7da48", - "metadata": {}, - "source": [ - "## Creating W16A16 Precision Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d7e306f4-35a2-4204-92b1-f05d9a5f05dd", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize --input_dlc models/Electrabase_fp32.dlc --input_list tf_raw_list.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --output_dlc models/Electrabase_w16a16.dlc --enable_htp --htp_socs sm8550 --weights_bitwidth 16 --act_bitwidth 16" - ] - }, - { - "cell_type": "markdown", - "id": "ab9706b5-d62e-4cdd-a6a9-1568ce806831", - "metadata": {}, - "source": [ - "## Offline Preparation of W16A16 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1bbe01bb-8ff3-4b79-aa64-35a76c8f48b2", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc models/Electrabase_w16a16.dlc --output_dlc models/Electrabase_w16a16_offline.dlc --set_output_tensors start_logits,end_logits" - ] - }, - { - "cell_type": "markdown", - "id": "5752915d-0432-4590-8d60-fd475d840cdf", - "metadata": {}, - "source": [ - "## Creating FP16 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fcd7cd1a-640a-40a1-8370-ad09e4a01bcf", - "metadata": {}, - "outputs": [], - "source": [ - 
"%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc models/Electrabase_fp32.dlc --use_float_io --output_dlc models/Electrabase_fp16.dlc --set_output_tensors start_logits,end_logits" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a8a9e745-6086-4263-835c-2afc9e78f279", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/05-nlp-nlu/MobileBert/README.md b/models-for-solutions/05-nlp-nlu/MobileBert/README.md deleted file mode 100644 index b9d02097..00000000 --- a/models-for-solutions/05-nlp-nlu/MobileBert/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# "Question Answering" using MobileBert - -| Field | Description | -| --- | --- | -| Model Name | MobileBert | -| DNN Framwork | ONNX | -| Public Repo | https://github.com/gemde001/MobileBERT | -| Paper | https://arxiv.org/abs/2004.02984 | -| DLC Number of Inputs | 3 | -| DLC Input Ids Dimension | (1,384) | -| DLC Attention Mask Dimension | (1,384) | -| DLC Token Type Ids | (1,384) | -| Pre-Processing | Use Model Specific Tokenizer | -| Post-Processing | Again Use Model Specific Tokenizer to Post Process the Output | - -## Pre-Requisites - -- Qualcomm® Neural Processing SDK setup should be completed by following the guide [here](https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html) -- Install onnx v1.6.0. Installation instruction can be found [here](https://qdn-drekartst.qualcomm.com/hardware/qualcomm-innovators-development-kit/frameworks-qualcomm-neural-processing-sdk-for-ai) -- Install onnxsim ```pip install onnxsim``` and onnxruntime ```pip install onnxruntime```. -- Install optimum ```pip install optimum```. - -## How to get the model ? - -For this demo, you can directly get the [onnx model](Models/onnx-model/model.onnx) from the directory onnx_model or you can generate it from this [jupyter_notebook](generating_model). - - -## Convert model to DLC - -- Convert the onnx model to DLC with below command. Below, command will also fix the input dimension for the dlc. - -```python -snpe-onnx-to-dlc -i mobilebert-onnx/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o mobile_bert.dlc -``` - -## Quantization of DLC -- Quantization can improve model performance in terms of latency and make the model light weight. -- Before Running this command make sure you've created the raw file and the list.txt -```python -snpe-dlc-graph-prepare --input_dlc mobile_bert.dlc --input_list tf_raw_list.txt --output_dlc mobile_bert_int.dlc -``` - -# Accuracy analysis -- To check accuracy please run "accuracy_analyzer.ipynb" a jupyter notebook present in accuracy folder. -- To run any jupyter notebook, run below command. It will generate few links on the screen, pick the link with your machine name on it (host-name) and paste it in any browser. -- Navigate to the notebook ".ipynb" file and simply click that file. -```python -jupyter notebook --no-browser --port=8080 --ip 0.0.0.0 --allow-root -``` - - - -# References - -1. https://arxiv.org/abs/2004.02984 -2. 
https://github.com/gemde001/MobileBERT - - -###### *Qualcomm Neural Processing SDK and Snapdragon are products of Qualcomm Technologies, Inc. and/or its subsidiaries.* diff --git a/models-for-solutions/05-nlp-nlu/MobileBert/accuracy_analyzer.ipynb b/models-for-solutions/05-nlp-nlu/MobileBert/accuracy_analyzer.ipynb deleted file mode 100644 index 86b689a8..00000000 --- a/models-for-solutions/05-nlp-nlu/MobileBert/accuracy_analyzer.ipynb +++ /dev/null @@ -1,504 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "44f101e2-127d-40de-9a41-9cc5833de9de", - "metadata": {}, - "source": [ - "## Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7a466adf-b59c-4a8d-b413-888254a429ff", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"Mobilebert\"\n", - "os.environ['DLCFP16']=\"models/Mobilebert_fp16.dlc\"\n", - "os.environ['DLCW16A16']=\"models/Mobilebert_w16a16_offline.dlc\"\n", - "os.environ['DLCFP32']=\"models/Mobilbert_fp32.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"tf_raw_list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"Mobilebert_device\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"\n", - "os.environ['SNPE_TARGET_DSPARCH']=\"hexagon-v73\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7289dc43-e83f-418d-9adb-d80fc256549f", - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "import pandas as pd\n", - "data_path=\"dev-v2.0.json\"\n", - "with open(data_path,\"r\") as f:\n", - " squad_data=json.load(f)\n", - "context_qa_triples=[]\n", - "for article in squad_data['data']:\n", - " for paragraph in article['paragraphs']:\n", - " context=paragraph['context']\n", - " for qa in paragraph['qas']:\n", - " question=qa['question']\n", - " if qa['answers']:\n", - " answer=qa['answers'][0]['text']\n", - " elif qa['plausible_answers']:\n", - " plausible_answers=qa['plausible_answers']\n", - " answer=plausible_answers[0]['text']\n", - " else:\n", - " answer=''\n", - "\n", - " context_qa_triples.append({'context':context,'question':question,'answers':answer})\n", - "\n", - "df=pd.DataFrame(context_qa_triples[:30])\n", - "df.head(3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4db52ae9-96ac-4300-b3e9-5e3ad98b13ad", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, MobileBertForQuestionAnswering\n", - "import torch\n", - "tokenizer = AutoTokenizer.from_pretrained(\"csarron/mobilebert-uncased-squad-v2\")\n", - "question_token={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", 
- " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "markdown", - "id": "98894402-8380-440a-977f-e358f0633329", - "metadata": {}, - "source": [ - "#### F1 Score calculation custom code" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6a8e94b0-7e7d-4b57-b65b-67ef6520284e", - "metadata": {}, - "outputs": [], - "source": [ - "from sklearn.metrics import f1_score\n", - "def f1_scores_custom(prediction,ground_truth):\n", - " prediction_tokens=prediction.lower().split()\n", - " ground_truth_tokens=ground_truth.lower().split()\n", - " common_tokens=[token for token in prediction_tokens if token in ground_truth_tokens] \n", - " if (len(prediction_tokens)==0 and len(ground_truth_tokens)==0):\n", - " return [1.0,1.0,1.0]\n", - " elif len(prediction_tokens)==0 or len(ground_truth_tokens)==0:\n", - " return [0.0,0.0,0.0]\n", - " precision=len(common_tokens)/len(prediction_tokens)\n", - " recall=len(common_tokens)/len(ground_truth_tokens)\n", - " if precision+recall==0:\n", - " return [0.0,0.0,0.0]\n", - " f1= 2*(precision*recall)/(precision+recall) \n", - " return [f1,precision,recall]" - ] - }, - { - "cell_type": "markdown", - "id": "9206f41a-c5f7-4931-9d42-e71c36c2bd30", - "metadata": {}, - "source": [ - "### Normal Model Inference" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "32b3b7b5-5913-4e07-9d39-c6a86350a6c8", - "metadata": {}, - "outputs": [], - "source": [ - "from transformers import AutoTokenizer, MobileBertForQuestionAnswering\n", - "import torch\n", - "import numpy as np\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "tokenizer = AutoTokenizer.from_pretrained(\"csarron/mobilebert-uncased-squad-v2\")\n", - "model = MobileBertForQuestionAnswering.from_pretrained(\"csarron/mobilebert-uncased-squad-v2\")\n", - "question_answer={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"pt\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " outputs = model(**inputs)\n", - " answer_start_index = outputs.start_logits.argmax()\n", - " answer_end_index = outputs.end_logits.argmax()\n", - " \n", - " predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]\n", - " predicted_answer=tokenizer.decode(predict_answer_tokens)\n", - " question_answer[question]=predicted_answer\n", - " #print(\"predicted_answer\",predicted_answer,\"Actual Answer\",answer)\n", - " f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " f1_scores.append(f1)\n", - " precision_scores.append(precision)\n", - " recall_scores.append(recall)\n", - "\n", - "mean_f1_score=np.mean(f1_scores)\n", - "mean_precision_score=np.mean(precision_scores)\n", - "mean_recall_score=np.mean(recall_scores)\n", - "mean_f1_score,mean_recall_score,mean_precision_score" - ] - }, - { - "cell_type": "markdown", - "id": "e364927f-cc45-4935-b759-54b99f0967d9", - "metadata": {}, - "source": [ - "## Creating Directory on Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": 
"1c18dbc6-581f-4c10-b696-25769765c8c7", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\" && $DEVICE_SHELL shell \"mkdir -p /data/local/tmp/snpeexample/dsp/lib\"" - ] - }, - { - "cell_type": "markdown", - "id": "9d2124e8-3f9a-4364-9087-22f679a5b2bb", - "metadata": {}, - "source": [ - "## Pushing All SNPE Lib and Bin folders onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "318e736b-73db-42e8-8dc0-19a4f2a84c8b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/$SNPE_TARGET_STL /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/bin/$SNPE_TARGET_ARCH/snpe-net-run /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/bin\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/hexagon-v75/unsigned/*.so /data/local/tmp/snpeexample/dsp/lib\n", - "$DEVICE_SHELL push $SNPE_ROOT/lib/$SNPE_TARGET_ARCH/*.so /data/local/tmp/snpeexample/$SNPE_TARGET_ARCH/lib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d39d7a50-e7d0-469e-8c51-1ef096dfc2b6", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell \"mkdir -p /data/local/tmp/$ONDEVICE_FOLDER\"" - ] - }, - { - "cell_type": "markdown", - "id": "c60013e4-cb63-40e5-a45d-da8aa9bb0c08", - "metadata": {}, - "source": [ - "## Pushing all Model Artifacts onto Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "babc4940-54c9-4464-9943-8eb0336e4a7f", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL push $DLCFP16 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLCW16A16 /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $DLCFP32 /data/local/tmp/$ONDEVICE_FOLDER \n", - "$DEVICE_SHELL push attention_mask /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push input_ids /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push token_type_ids /data/local/tmp/$ONDEVICE_FOLDER\n", - "$DEVICE_SHELL push $TARGET_INPUT_LIST /data/local/tmp/$ONDEVICE_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "a479f135-417c-445b-85e3-bc1fc5340873", - "metadata": {}, - "source": [ - "## Inferencing FP32 Model on CPU Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "32c51429-2569-4ef4-bde1-3d29350cc1d1", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export OUTPUT_FOLDER=OUTPUT_32b_CPU\n", - "export OUTPUT_DLC_32=Mobilbert_fp32.dlc\n", - "export ONDEVICE_FOLDER=\"Mobilebert_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_DLC_32 --input_list tf_raw_list.txt --set_unconsumed_as_output --output_dir $OUTPUT_FOLDER" - ] - }, - { - "cell_type": "markdown", - "id": "69ce3a30-3f65-4465-bff5-9fea8cbfa4df", - "metadata": {}, - "source": [ - "## 
Inferencing FP16 on DSP Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a07fd57f-9c52-431a-98d4-1b304a2dea99", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_DSP_FP16\n", - "export OUTPUT_FP_16=Mobilebert_fp16.dlc\n", - "export ONDEVICE_FOLDER=\"Mobilebert_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $OUTPUT_FP_16 --input_list tf_raw_list.txt --set_output_tensors start_logits,end_logits --output_dir $OUTPUT_FOLDER --use_dsp" - ] - }, - { - "cell_type": "markdown", - "id": "350d1f77-ff56-485f-8285-755b12d7c7a6", - "metadata": {}, - "source": [ - "## Inferencing W16A16 on DSP Runtime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d28aa354-f308-49af-bf6f-b4e796774aa7", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL shell\n", - "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/snpeexample/aarch64-android/lib\n", - "export PATH=$PATH:/data/local/tmp/snpeexample/aarch64-android/bin\n", - "export ADSP_LIBRARY_PATH=\"/data/local/tmp/snpeexample/dsp/lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp\"\n", - "export OUTPUT_FOLDER=OUTPUT_DSP_W16A16\n", - "export DLC_W16A16=Mobilebert_w16a16_offline.dlc\n", - "export ONDEVICE_FOLDER=\"Mobilebert_device\"\n", - "cd /data/local/tmp/$ONDEVICE_FOLDER &&\n", - "snpe-net-run --container $DLC_W16A16 --input_list tf_raw_list.txt --set_output_tensors start_logits,end_logits --output_dir $OUTPUT_FOLDER --use_dsp --enable_cpu_fallback" - ] - }, - { - "cell_type": "markdown", - "id": "09922e83-c25b-4d14-9e3a-fd97ace72f7c", - "metadata": {}, - "source": [ - "## Pulling the Output from Device" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3e2a8176-2718-474b-934a-185ce3314cd9", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "export DEVICE_SHELL=\"adb -H $DEVICE_HOST -s $DEVICE_ID\"\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_DSP_W16A16 OUTPUT_DSP_W16A16\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_DSP_FP16 OUTPUT_DSP_FP16\n", - "$DEVICE_SHELL pull /data/local/tmp/$ONDEVICE_FOLDER/OUTPUT_32b_CPU OUTPUT_32b_CPU" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ad1cf627-8d83-4a1e-8b9c-4cc42b1ef9de", - "metadata": {}, - "outputs": [], - "source": [ - "def func(start_logits,end_logits,inputs):\n", - " answer_start_index = int(tf.math.argmax(start_logits, axis=-1)[0])\n", - " answer_end_index = int(tf.math.argmax(end_logits, axis=-1)[0])\n", - " predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]\n", - " return tokenizer.decode(predict_answer_tokens)" - ] - }, - { - "cell_type": "markdown", - "id": "1bdbf8e6-cc32-4665-a754-1d53341f0ed8", - "metadata": {}, - "source": [ - "## Comparing Accuracy of FP32 Vs FP16" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c582a738-455c-4481-959d-7e1c77498419", - "metadata": {}, - "outputs": [], - "source": [ - "import glob\n", - 
"import tensorflow as tf\n", - "import os\n", - "folder = [\"OUTPUT_32b_CPU\",\"OUTPUT_DSP_FP16\"]\n", - "dlc_type = [\"fp32\",\"fp16\"]\n", - "data=[]\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "for j in range(0,2):\n", - " print(\"-----------------------\",folder[j],\"-----------------------------\")\n", - " for result_path in glob.glob(os.path.join(folder[j], '*')):\n", - " if \".log\" not in result_path:\n", - " start_logits = np.fromfile(result_path+'/start_logits.raw', dtype=\"float32\")\n", - " end_logits=np.fromfile(result_path+'/end_logits.raw', dtype=\"float32\")\n", - " start_logits=start_logits.reshape((1,384))\n", - " end_logits=end_logits.reshape((1,384))\n", - " question,inputs,answer,text=question_token[int(result_path.split(\"/\")[1].split(\"_\")[1])]\n", - " predicted_answer=func(start_logits,end_logits,inputs)\n", - " data.append({\"Model_Type\":dlc_type[j],\"question\":question,\"predicted_answer\":predicted_answer,\"Actual Model Answer\":question_answer[question],\"answer\":answer,\"context\":text})\n", - " #f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " #f1_scores.append(f1)\n", - " #precision_scores.append(precision)\n", - " #recall_scores.append(recall)\n", - " \n", - " mean_f1_score=np.mean(f1_scores)\n", - " mean_precision_score=np.mean(precision_scores)\n", - " mean_recall_score=np.mean(recall_scores)\n", - " \n", - " print(\"F1_Score:\",mean_f1_score,\"Recall:\",mean_recall_score,\"Precision:\",mean_precision_score)\n", - "data=pd.DataFrame(data)\n", - "data.head(40)" - ] - }, - { - "cell_type": "markdown", - "id": "6bd08ca0-941c-43e5-9a5c-64c183062db3", - "metadata": {}, - "source": [ - "## Comparing Accuracy of FP32 Vs W16A16" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5fe94285-6384-418c-bcbe-6cbe99c7cce4", - "metadata": {}, - "outputs": [], - "source": [ - "import glob\n", - "import tensorflow as tf\n", - "import os\n", - "folder = [\"OUTPUT_32b_CPU\",\"OUTPUT_DSP_W16A16\"]\n", - "dlc_type = [\"fp32\",\"W16A16\"]\n", - "data=[]\n", - "f1_scores,precision_scores,recall_scores=[],[],[]\n", - "for j in range(0,2):\n", - " print(\"-----------------------\",folder[j],\"-----------------------------\")\n", - " for result_path in glob.glob(os.path.join(folder[j], '*')):\n", - " if \".log\" not in result_path:\n", - " start_logits = np.fromfile(result_path+'/start_logits.raw', dtype=\"float32\")\n", - " end_logits=np.fromfile(result_path+'/end_logits.raw', dtype=\"float32\")\n", - " start_logits=start_logits.reshape((1,384))\n", - " end_logits=end_logits.reshape((1,384))\n", - " question,inputs,answer,text=question_token[int(result_path.split(\"/\")[1].split(\"_\")[1])]\n", - " predicted_answer=func(start_logits,end_logits,inputs)\n", - " data.append({\"Model_Type\":dlc_type[j],\"question\":question,\"predicted_answer\":predicted_answer,\"Actual Model Answer\":question_answer[question],\"answer\":answer,\"context\":text})\n", - " #f1,precision,recall=f1_scores_custom(predicted_answer,answer)\n", - " #f1_scores.append(f1)\n", - " #precision_scores.append(precision)\n", - " #recall_scores.append(recall)\n", - " \n", - " mean_f1_score=np.mean(f1_scores)\n", - " mean_precision_score=np.mean(precision_scores)\n", - " mean_recall_score=np.mean(recall_scores)\n", - " \n", - " print(\"F1_Score:\",mean_f1_score,\"Recall:\",mean_recall_score,\"Precision:\",mean_precision_score)\n", - "data=pd.DataFrame(data)\n", - "data.head(40)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": 
"1f6ee2c5-4a9e-4b00-8c52-1da73a637d21", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/05-nlp-nlu/MobileBert/generating_model.ipynb b/models-for-solutions/05-nlp-nlu/MobileBert/generating_model.ipynb deleted file mode 100644 index 59cbe963..00000000 --- a/models-for-solutions/05-nlp-nlu/MobileBert/generating_model.ipynb +++ /dev/null @@ -1,324 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "522236b9-b19b-45fa-ade6-267112b8dffb", - "metadata": {}, - "source": [ - "## Setting Up All Artifacts details" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fe934c66-aabc-4959-b5d1-395ab022297f", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['SNPE_ROOT']=\"/local/mnt/workspace/aditya/qaisw-v2.15.1.230926150623_62883\"#set up your snpe path here.\n", - "os.environ['RAW_FILE_FOLDER']=\"raw\"\n", - "os.environ['FOLDER_WITH_ARTIFACTS']=\"Mobilebert\"\n", - "os.environ['DLCFP16']=\"models/Mobilebert_fp16.dlc\"\n", - "os.environ['DLCW16A16']=\"models/Mobilebert_w16a16_offline.dlc\"\n", - "os.environ['DLCFP32']=\"models/Mobilebert_fp32.dlc\"\n", - "os.environ['TARGET_INPUT_LIST']=\"tf_raw_list.txt\"\n", - "os.environ['ONDEVICE_FOLDER']=\"Mobilebert_device\"\n", - "os.environ['DEVICE_HOST']=\"localhost\"\n", - "os.environ['DEVICE_ID']=\"2dce6316\" #fill your device-id. Use command \"adb devices\" to get devices names. 
example :\"e18d5d0\"\n", - "os.environ['SNPE_TARGET_ARCH']=\"aarch64-android\"\n", - "os.environ['SNPE_TARGET_STL']=\"libc++_shared.so\"\n", - "os.environ['SNPE_TARGET_DSPARCH']=\"hexagon-v73\"" - ] - }, - { - "cell_type": "markdown", - "id": "ad02680e-b6b4-4452-9dfb-ece189c11e44", - "metadata": {}, - "source": [ - "## Downloading Data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "643c6414-9ab9-4fbe-94d2-190935c553af", - "metadata": {}, - "outputs": [], - "source": [ - "!wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "924a1972-bbcc-4a64-a162-b74f54d876ec", - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "import pandas as pd\n", - "data_path=\"dev-v2.0.json\"\n", - "with open(data_path,\"r\") as f:\n", - " squad_data=json.load(f)\n", - "context_qa_triples=[]\n", - "for article in squad_data['data']:\n", - " for paragraph in article['paragraphs']:\n", - " context=paragraph['context']\n", - " for qa in paragraph['qas']:\n", - " question=qa['question']\n", - " if qa['answers']:\n", - " answer=qa['answers'][0]['text']\n", - " elif qa['plausible_answers']:\n", - " plausible_answers=qa['plausible_answers']\n", - " answer=plausible_answers[0]['text']\n", - " else:\n", - " answer=''\n", - "\n", - " context_qa_triples.append({'context':context,'question':question,'answers':answer})\n", - "df=pd.DataFrame(context_qa_triples[:30])\n", - "df.head(3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "28273d83-bd6c-423a-a956-cebf8b9fff0f", - "metadata": {}, - "outputs": [], - "source": [ - "!pip install optimum\n", - "!pip install sentencepiece" - ] - }, - { - "cell_type": "markdown", - "id": "cbe0b777-717c-49c2-b98a-f95cb4e23b28", - "metadata": {}, - "source": [ - "## Converting the Model to ONNX format using optimum" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7641bac4-3957-4cce-948a-17193ad0828b", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "optimum-cli export onnx --model csarron/mobilebert-uncased-squad-v2 models/" - ] - }, - { - "cell_type": "markdown", - "id": "3825042d-82b3-4660-a1dd-b9cb4e19d436", - "metadata": {}, - "source": [ - "### Getting Model Input name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3b28296d-7d1e-4912-8d8a-f8e312cf98e1", - "metadata": {}, - "outputs": [], - "source": [ - "import onnxruntime\n", - "model_path='models/model.onnx'\n", - "sess=onnxruntime.InferenceSession(model_path)\n", - "input_layer_names=sess.get_inputs()\n", - "for input_layer in input_layer_names:\n", - " print(input_layer)" - ] - }, - { - "cell_type": "markdown", - "id": "20ebe381-247d-4808-bbad-541e93fe0812", - "metadata": {}, - "source": [ - "## Converting ONNX to FP32 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "09290fc4-dce7-426f-96c3-123c9a55eb23", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-onnx-to-dlc -i models/model.onnx -d input_ids 1,384 -d attention_mask 1,384 -d token_type_ids 1,384 -o models/Mobilbert_fp32.dlc" - ] - }, - { - "cell_type": "markdown", - "id": "f2ddfc58-6eee-4375-a386-6c35dddfe8e8", - "metadata": {}, - "source": [ - "### Creating the RAW Files " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "abd155ed-ae29-411f-9dc2-f076ee83b5e3", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "mkdir 
input_ids\n", - "mkdir attention_mask\n", - "mkdir token_type_ids" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "eb3ba2fb-d023-45ec-be25-6119eaac0bef", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from transformers import AutoTokenizer, MobileBertForQuestionAnswering\n", - "import torch\n", - "tokenizer = AutoTokenizer.from_pretrained(\"csarron/mobilebert-uncased-squad-v2\")\n", - "question_token={}\n", - "for i in range(df.shape[0]):\n", - " question,text,answer=df.iloc[i].question,df.iloc[i].context,df.iloc[i].answers\n", - " inputs = tokenizer(question, text, return_tensors=\"np\",\n", - " padding='max_length',\n", - " truncation=\"longest_first\",\n", - " max_length=384)\n", - " question_token[i]=[question,inputs,answer,text]\n", - " inp_ids = inputs.input_ids\n", - " inp_ids=inp_ids.astype(np.float32)\n", - " with open(\"input_ids/inp_ids_\"+str(i)+\".raw\", 'wb') as f:\n", - " inp_ids.tofile(f)\n", - " \n", - " mask = inputs.attention_mask\n", - " mask=mask.astype(np.float32)\n", - " with open(\"attention_mask/attn_mask_\"+str(i)+\".raw\", 'wb') as f:\n", - " mask.tofile(f)\n", - "\n", - " token_type= inputs.token_type_ids\n", - " token_type=token_type.astype(np.float32)\n", - " with open(\"token_type_ids/token_type_id_\"+str(i)+\".raw\", 'wb') as f:\n", - " token_type.tofile(f)" - ] - }, - { - "cell_type": "markdown", - "id": "efc6ec65-3cda-431a-ba9e-9e5e68bb0956", - "metadata": {}, - "source": [ - "## Creating List Files" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7b793098-2aeb-4b84-a8bf-665b58469644", - "metadata": {}, - "outputs": [], - "source": [ - "total_iter = 30\n", - "print(\"Generating input_list \\\"small_raw_list.txt\\\" with {} iterations\".format(total_iter))\n", - "with open(\"tf_raw_list.txt\",'w') as f:\n", - " for i in range(total_iter):\n", - " f.write(\"input_ids:=input_ids/inp_ids_{}.raw attention_mask:=attention_mask/attn_mask_{}.raw token_type_ids:=token_type_ids/token_type_id_{}.raw\\n\".format(i,i,i))" - ] - }, - { - "cell_type": "markdown", - "id": "2e18840f-6c49-4965-8aac-da46dc2365a7", - "metadata": {}, - "source": [ - "## Creating W16A16 Precision Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bc68c085-8bdc-4270-ac72-c81e927e0eab", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-quantize --input_dlc models/Mobilbert_fp32.dlc --input_list tf_raw_list.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --output_dlc models/Mobilebert_w16a16.dlc --enable_htp --htp_socs sm8550 --weights_bitwidth 16 --act_bitwidth 16" - ] - }, - { - "cell_type": "markdown", - "id": "d8f6f79f-7d17-46da-a0d3-e23ae06459a8", - "metadata": {}, - "source": [ - "## Offline Preparation of W16A16 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5e68ca4a-d944-416b-8167-63561e12f9f9", - "metadata": {}, - "outputs": [], - "source": [ - "%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc models/Mobilebert_w16a16.dlc --output_dlc models/Mobilebert_w16a16_offline.dlc --set_output_tensors start_logits,end_logits" - ] - }, - { - "cell_type": "markdown", - "id": "3179275d-bb63-496b-947d-26389b1cef8f", - "metadata": {}, - "source": [ - "## Creating FP16 Precision" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2e4d4740-27bc-4b19-86c8-b27ccd06c367", - "metadata": {}, - "outputs": [], - "source": [ - 
"%%bash\n", - "source $SNPE_ROOT/bin/envsetup.sh\n", - "snpe-dlc-graph-prepare --input_dlc models/Mobilbert_fp32.dlc --use_float_io --output_dlc models/Mobilebert_fp16.dlc --set_output_tensors start_logits,end_logits" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "70fdde7c-4d95-4d07-930a-9ade707d4202", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/models-for-solutions/Readme.md b/models-for-solutions/Readme.md deleted file mode 100644 index 2d9ad0eb..00000000 --- a/models-for-solutions/Readme.md +++ /dev/null @@ -1,50 +0,0 @@ -# Qualcomm AI Stack Models - -## Models for Solutions - -Contains functionally verified models used in end-to-end solutions. - -Workflow to for model enablement, and AI solutions is given below. - -

                          - - -## Getting Started - Android - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - -- Proceed to model conversion, for each model - -- Host - Ubuntu (Version defined by SDK) - -- Target - Android (Snapdragon 8 Gen3, i.e. SM8650) - -## Getting Started - Windows - -- Setup AI SDK for Windows Qualcomm® Neural Processing SDK (Windows). - -- Proceed to model conversion, for each model - -- Platform for Model Conversion - Ubuntu X86 (Version defined by SDK) - -- Platform for Model Execution - Windows on Snapdragon ARM64 (SC8380X) - -- More details about setting up SDK, and convering first model as prototype are given at Qualcomm Windows-on-Snapdragon developer page Qualcomm® Windows on Snapdragon Developer Portal - -## Getting Started - RB5 - -- Setup AI SDK Qualcomm® Neural Processing SDK (Linux). - -- Follow the insturctions given in SDK to setup the SDK - -- Proceed to model conversion, for each model - -- Host - Ubuntu (Version defined by SDK) - -- Target - RB5 (QRB5165-embedded-linux, or RB5 with Linux Ubuntu) - -- More details about setting up SDK, and convering first model as prototype are given at Qualcomm RB5 developer page Qualcomm® RB5 Developer Portal - diff --git a/models-for-solutions/readme-assets/ai-solutions-workflow.jpg b/models-for-solutions/readme-assets/ai-solutions-workflow.jpg deleted file mode 100644 index 8efe107f..00000000 Binary files a/models-for-solutions/readme-assets/ai-solutions-workflow.jpg and /dev/null differ diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 00000000..bac83c81 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,78 @@ +[mypy] +# https://mypy.readthedocs.io/en/stable/config_file.html#import-discovery +exclude = "qai_hub_models/models" + +[mypy-huggingface_hub.*] +ignore_missing_imports = True + +[mypy-onnx.*] +ignore_missing_imports = True + +[mypy-onnxsim.*] +ignore_missing_imports = True + +[mypy-onnxruntime.*] +ignore_missing_imports = True + +[mypy-pytimeparse.*] +ignore_missing_imports = True + +[mypy-skimage.*] +ignore_missing_imports = True + +[mypy-setuptools.*] +ignore_missing_imports = True + +[mypy-tensorflow.*] +ignore_missing_imports = True + +[mypy-torchvision.*] +ignore_missing_imports = True + +[mypy-transformers.*] +ignore_missing_imports = True + +[mypy-tqdm.*] +ignore_missing_imports = True + +[mypy-tap.*] +ignore_missing_imports = True + +[mypy-h5py.*] +ignore_missing_imports = True + +[mypy-flatbuffers.*] +ignore_missing_imports = True + +[mypy-soundfile.*] +ignore_missing_imports = True + +[mypy-datasets.*] +ignore_missing_imports = True + +[mypy-keras.*] +ignore_missing_imports = True + +[mypy-rangefilter.filters.*] +ignore_missing_imports = True + +[mypy-schema.*] +ignore_missing_imports = True + +[mypy-gdown.*] +ignore_missing_imports = True + +[mypy-aimet_torch.*] +ignore_missing_imports = True + +[mypy-boto3.*] +ignore_missing_imports = True + +[mypy-botocore.*] +ignore_missing_imports = True + +[mypy-ruamel.*] +ignore_missing_imports = True + +[mypy-qai_hub_models.models.*] +ignore_errors = true diff --git a/pyrightconfig.json b/pyrightconfig.json new file mode 100644 index 00000000..0bb3304c --- /dev/null +++ b/pyrightconfig.json @@ -0,0 +1,26 @@ +{ + "exclude": [ + "**/.mypy_cache", + "**/.pytest_cache", + "**/__pycache__", + "**/node_modules", + "build/tungsten", + "src/public/staging_python/qai_hub_staging", + "src/tungsten", + "src/www/onnx-optimizer/third_party", + "src/www/onnx-simplifier/third_party", + ], + + 
"extraPaths": [ + "./build/proto" + ], + + "reportMissingModuleSource": "none", + "reportMissingImports": true, + "reportMissingTypeStubs": false, + "reportShadowedImports": false, + "verboseOutput": false, + + "venvPath": ".", + "venv": "qaism-dev" +} diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 00000000..6b6221ac --- /dev/null +++ b/pytest.ini @@ -0,0 +1,16 @@ +[pytest] +testpaths = qai_hub_models +norecursedirs = build +python_files = tests.py test_*.py test.py +filterwarnings = + ignore::DeprecationWarning:coremltools.*: + ignore::DeprecationWarning:torch.*: + ignore::DeprecationWarning:torchvision.*: + ignore::DeprecationWarning:tensorflow.*: + ignore::DeprecationWarning:tensorflow-macos.*: + ignore::DeprecationWarning:tensorflow-metal.*: + ignore::DeprecationWarning:tensorflow-probability.*: +markers = + serial: test must not be run in parallel + slow: marks tests as slow + slow_cloud: marks test as slow and cloud-dependent diff --git a/qai_hub_models/__init__.py b/qai_hub_models/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/_version.py b/qai_hub_models/_version.py new file mode 100644 index 00000000..2946e3c0 --- /dev/null +++ b/qai_hub_models/_version.py @@ -0,0 +1,5 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +__version__ = "0.3.0" diff --git a/qai_hub_models/asset_bases.yaml b/qai_hub_models/asset_bases.yaml new file mode 100644 index 00000000..1b110e24 --- /dev/null +++ b/qai_hub_models/asset_bases.yaml @@ -0,0 +1,12 @@ +store_url: https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models +web_asset_folder: models/{model_id}/web-assets +static_web_banner_filename: banner.png +animated_web_banner_filename: banner.mp4 +model_asset_folder: models/{model_id}/v{version} +dataset_asset_folder: datasets/{dataset_id}/v{version} +repo_url: https://github.com/quic/ai-hub-models/blob/main +qaihm_repo: qai_hub_models/models/{model_id} +example_use: qai_hub_models/models/{model_id}#example--usage +huggingface_path: qualcomm/{model_name} +models_website_url: https://aihub.qualcomm.com +models_website_relative_path: models/{model_id} diff --git a/qai_hub_models/conftest.py b/qai_hub_models/conftest.py new file mode 100644 index 00000000..6f57c5d0 --- /dev/null +++ b/qai_hub_models/conftest.py @@ -0,0 +1,8 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +def pytest_configure(config): + config.addinivalue_line("markers", "compile: Run compile tests.") + config.addinivalue_line("markers", "profile: Run profile tests.") + config.addinivalue_line("markers", "inference: Run inference tests.") diff --git a/qai_hub_models/datasets/__init__.py b/qai_hub_models/datasets/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/datasets/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/datasets/bsd300.py b/qai_hub_models/datasets/bsd300.py new file mode 100644 index 00000000..a6d534b7 --- /dev/null +++ b/qai_hub_models/datasets/bsd300.py @@ -0,0 +1,128 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os +from typing import Tuple + +import numpy as np +import torch +from PIL import Image + +from qai_hub_models.datasets.common import BaseDataset +from qai_hub_models.utils.asset_loaders import CachedWebDatasetAsset + +BSD300_URL = ( + "https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/BSDS300-images.tgz" +) +BSD300_FOLDER_NAME = "BSDS300" +BSD300_VERSION = 1 +BSD300_ASSET = CachedWebDatasetAsset( + BSD300_URL, BSD300_FOLDER_NAME, BSD300_VERSION, "BSDS300.tgz" +) +DATASET_LENGTH = 200 + + +class BSD300Dataset(BaseDataset): + """ + BSD300 published here: https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/ + """ + + def __init__(self, scaling_factor=4): + self.bsd_path = BSD300_ASSET.path(extracted=True) + self.images_path = os.path.join(self.bsd_path, "images/train") + BaseDataset.__init__(self, self.bsd_path) + self.scaling_factor = scaling_factor + + def _validate_data(self) -> bool: + images_path = os.path.join(self.dataset_path, "images/train") + + # Check image path exists + if not os.path.exists(images_path): + return False + + # Ensure the correct number of images are there + files = os.listdir(images_path) + images = [f for f in files if ".jpg" in f] + if len(images) != DATASET_LENGTH: + return False + + return True + + def _prepare_data(self): + # Rename images to be more friendly to enumeration + directory = os.path.join(self.dataset_path, "images/train") + files = os.listdir(directory) + for i, filename in enumerate(files): + if filename.endswith(".jpg"): + # Open the image and convert it to png + try: + with Image.open(os.path.join(directory, filename)) as img: + img.save(os.path.join(directory, f"img_{i + 1:03d}_HR.jpg")) + # delete the old image + os.remove(os.path.join(directory, filename)) + except ValueError: + print(f"File {filename} does not exist!") + + def __len__(self): + return DATASET_LENGTH + + def __getitem__(self, item) -> Tuple[torch.Tensor, torch.Tensor]: + # We use the super resolution GT-and-test image preparation from AIMET zoo: + # https://github.com/quic/aimet-model-zoo/blob/d09d2b0404d10f71a7640a87e9d5e5257b028802/aimet_zoo_torch/quicksrnet/dataloader/utils.py#L51 + + img = np.asarray( + 
Image.open(os.path.join(self.images_path, f"img_{item + 1:03d}_HR.jpg")) + ) + height, width = img.shape[0:2] + + # If portrait, transpose to landscape so that all tensors are equal size + if height > width: + img = np.transpose(img, (1, 0, 2)) + height, width = img.shape[0:2] + + # Take the largest possible center-crop of it such that its dimensions are perfectly divisible by the scaling factor + x_remainder = width % ( + 2 * self.scaling_factor + if self.scaling_factor == 1.5 + else self.scaling_factor + ) + y_remainder = height % ( + 2 * self.scaling_factor + if self.scaling_factor == 1.5 + else self.scaling_factor + ) + left = int(x_remainder // 2) + top = int(y_remainder // 2) + right = int(left + (width - x_remainder)) + bottom = int(top + (height - y_remainder)) + hr_img = img[top:bottom, left:right] + + hr_height, hr_width = hr_img.shape[0:2] + + hr_img = np.array(hr_img, dtype="uint8") + new_size = (int(width / self.scaling_factor), int(height / self.scaling_factor)) + lr_img = np.asarray(Image.fromarray(hr_img).resize(new_size)) + lr_img = np.clip(lr_img, 0.0, 255.0).astype(np.uint8) + + lr_height, lr_width = lr_img.shape[0:2] + + # Sanity check + assert ( + hr_width == lr_width * self.scaling_factor + and hr_height == lr_height * self.scaling_factor + ) + + lr_img_tensor = torch.from_numpy(lr_img.transpose((2, 0, 1))).contiguous() + lr_img_tensor = lr_img_tensor.to(dtype=torch.float32).div(255) + + hr_img_tensor = torch.from_numpy(hr_img.transpose((2, 0, 1))).contiguous() + hr_img_tensor = hr_img_tensor.to(dtype=torch.float32).div(255) + + return lr_img_tensor, hr_img_tensor + + def _download_data(self) -> None: + BSD300_ASSET.fetch(extract=True) + self._prepare_data() diff --git a/qai_hub_models/datasets/coco.py b/qai_hub_models/datasets/coco.py new file mode 100644 index 00000000..943d42a6 --- /dev/null +++ b/qai_hub_models/datasets/coco.py @@ -0,0 +1,121 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
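A minimal usage sketch for the `BSD300Dataset` defined above (it fetches the BSDS300 archive on first use): each item is a `(low_res, high_res)` pair of float tensors scaled to [0, 1], so it drops into a standard `DataLoader` for calibration or evaluation:

```python
# Hedged usage sketch for BSD300Dataset (downloads the BSDS300 archive on first use).
from torch.utils.data import DataLoader

from qai_hub_models.datasets.bsd300 import BSD300Dataset

dataset = BSD300Dataset(scaling_factor=4)
lr, hr = dataset[0]
print(lr.shape, hr.shape)  # HR spatial dims are 4x the LR dims; values are in [0, 1]

# Batched iteration, e.g. to collect calibration data for quantization
loader = DataLoader(dataset, batch_size=1, shuffle=False)
for lr_batch, hr_batch in loader:
    break
```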
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import os +from typing import Tuple, Union + +import torch +from torch.utils.data.dataloader import default_collate +from torchvision.datasets.coco import CocoDetection + +from qai_hub_models.datasets.common import BaseDataset +from qai_hub_models.utils.asset_loaders import CachedWebDatasetAsset +from qai_hub_models.utils.image_processing import app_to_net_image_inputs + +DATASET_ID = "coco" +DATASET_ASSET_VERSION = 1 +COCO_DATASET = CachedWebDatasetAsset( + "http://images.cocodataset.org/zips/val2017.zip", + DATASET_ID, + DATASET_ASSET_VERSION, + "val2017.zip", +) +COCO_ANNOTATIONS = CachedWebDatasetAsset( + "http://images.cocodataset.org/annotations/annotations_trainval2017.zip", + DATASET_ID, + DATASET_ASSET_VERSION, + "annotations_trainval2017.zip", +) + + +def collate_fn(batch): + try: + image, gt = batch[0][0], batch[0][1] + image_id, height, width, boxes, labels = gt + new_list = [] + new_list.append(default_collate([i for i in image if torch.is_tensor(i)])) + target = ( + torch.tensor(image_id), + torch.tensor(height), + torch.tensor(width), + default_collate([i for i in boxes if torch.is_tensor(i)]), + default_collate([i for i in labels if torch.is_tensor(i)]), + ) + new_list.append(target) + return new_list + except Exception: + return [], ([], [], [], [], []) + + +class CocoDataset(BaseDataset, CocoDetection): + """ + Class for using the COCODetection dataset published here: + + + Contains ~5k images spanning 80 classes. + """ + + def __init__(self, target_image_size: Union[int, Tuple[int, int]] = 640): + BaseDataset.__init__(self, str(COCO_DATASET.path(extracted=True))) + CocoDetection.__init__( + self, + root=COCO_DATASET.path() / "val2017", + annFile=COCO_ANNOTATIONS.path() / "annotations" / "instances_val2017.json", + ) + + categories = self.coco.loadCats(self.coco.getCatIds()) + categories.sort(key=lambda x: x["id"]) + self.label_map = {} + counter = 0 + for c in categories: + self.label_map[c["id"]] = counter + counter += 1 + self.target_image_size = ( + target_image_size + if isinstance(target_image_size, tuple) + else (target_image_size, target_image_size) + ) + + def __getitem__(self, item): + image, target = super(CocoDataset, self).__getitem__(item) + width, height = image.size + boxes = [] + labels = [] + if len(target) == 0: + return None, (None, None) + for annotation in target: + bbox = annotation.get("bbox") + boxes.append( + [ + bbox[0] / width, + bbox[1] / height, + (bbox[0] + bbox[2]) / width, + (bbox[1] + bbox[3]) / height, + ] + ) + labels.append(self.label_map[annotation.get("category_id")]) + boxes = torch.tensor(boxes) + labels = torch.tensor(labels) + image = image.resize(self.target_image_size) + image = app_to_net_image_inputs(image)[1] + return image, (target[0]["image_id"], height, width, boxes, labels) + + def _validate_data(self) -> bool: + # Check validation data exists + if not COCO_DATASET.path().exists(): + return False + + # Check annotations exist + if not COCO_ANNOTATIONS.path().exists(): + return False + + # Ensure there are 5000 samples + if len(os.listdir(COCO_DATASET.path() / "val2017")) < 5000: + return False + + return True + + def _download_data(self) -> None: + COCO_DATASET.fetch(extract=True) + COCO_ANNOTATIONS.fetch(extract=True) diff --git a/qai_hub_models/datasets/common.py b/qai_hub_models/datasets/common.py new file mode 100644 index 00000000..6b841830 --- /dev/null +++ b/qai_hub_models/datasets/common.py @@ -0,0 
+1,50 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os +import shutil +from abc import ABC, abstractmethod +from typing import final + +from torch.utils.data import Dataset + + +class BaseDataset(Dataset, ABC): + """ + Base class to be extended by Datasets used in this repo for quantizing models. + """ + + def __init__(self, dataset_path: str): + self.dataset_path = dataset_path + self.download_data() + + @final + def download_data(self) -> None: + if self._validate_data(): + return + if os.path.exists(self.dataset_path): + # Data is corrupted, delete and re-download + if os.path.isdir(self.dataset_path): + shutil.rmtree(self.dataset_path) + else: + os.remove(self.dataset_path) + + self._download_data() + if not self._validate_data(): + raise ValueError("Something went wrong during download.") + + @abstractmethod + def _download_data(self) -> None: + """ + Method to download necessary data to disk. To be implemented by subclass. + """ + pass + + def _validate_data(self) -> bool: + """ + Validates data downloaded on disk. By default just checks that folder exists. + """ + return os.path.exists(self.dataset_path) diff --git a/qai_hub_models/datasets/imagenette.py b/qai_hub_models/datasets/imagenette.py new file mode 100644 index 00000000..646bbb62 --- /dev/null +++ b/qai_hub_models/datasets/imagenette.py @@ -0,0 +1,100 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import os +import stat + +from torchvision.datasets import ImageNet + +from qai_hub_models.datasets.common import BaseDataset +from qai_hub_models.models._shared.imagenet_classifier.app import IMAGENET_TRANSFORM +from qai_hub_models.utils.asset_loaders import CachedWebDatasetAsset + +IMAGENETTE_FOLDER_NAME = "imagenette2-320" +IMAGENETTE_VERSION = 1 +DEVKIT_ASSET = CachedWebDatasetAsset( + "https://image-net.org/data/ILSVRC/2012/ILSVRC2012_devkit_t12.tar.gz", + IMAGENETTE_FOLDER_NAME, + IMAGENETTE_VERSION, + "ILSVRC2012_devkit_t12.tar.gz", +) +IMAGENETTE_ASSET = CachedWebDatasetAsset( + "https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-320.tgz", + IMAGENETTE_FOLDER_NAME, + IMAGENETTE_VERSION, + "imagenette2-320.tgz", +) + +# Imagenette data has 10 classes and are labeled 0-9. +# This maps the Imagenette class id to the actual Imagenet_1K class id. +IMAGENETTE_CLASS_MAP = { + 0: 0, + 1: 217, + 2: 482, + 3: 491, + 4: 497, + 5: 566, + 6: 569, + 7: 571, + 8: 574, + 9: 701, +} + + +class ImagenetteDataset(BaseDataset, ImageNet): + """ + Class for using the Imagenette dataset published here: + https://github.com/fastai/imagenette + + Contains ~4k images spanning 10 of the imagenet classes. 
+ """ + + def __init__(self): + self._download_data() + BaseDataset.__init__(self, str(IMAGENETTE_ASSET.path(extracted=True))) + ImageNet.__init__( + self, + root=IMAGENETTE_ASSET.path(), + split="val", + transform=IMAGENET_TRANSFORM, + target_transform=lambda val: IMAGENETTE_CLASS_MAP[val], + ) + + def _validate_data(self) -> bool: + devkit_path = DEVKIT_ASSET.path() + + # Check devkit exists + if not devkit_path.exists(): + return False + + # Check devkit permissions + devkit_permissions = os.stat(devkit_path).st_mode + if devkit_permissions & stat.S_IEXEC != stat.S_IEXEC: + return False + + # Check val data exists + val_data_path = os.path.join(self.dataset_path, "val") + if not os.path.exists(val_data_path): + return False + + # Ensure 10 classes + subdirs = os.listdir(val_data_path) + if len(subdirs) != 10: + return False + + # Ensure >= 300 samples per classes + for subdir in subdirs: + if len(os.listdir(os.path.join(val_data_path, subdir))) < 300: + return False + return True + + def _download_data(self) -> None: + IMAGENETTE_ASSET.fetch(extract=True) + devkit_path = DEVKIT_ASSET.fetch() + devkit_st = os.stat(devkit_path) + os.chmod(devkit_path, devkit_st.st_mode | stat.S_IEXEC) + os.symlink( + DEVKIT_ASSET.path(), + IMAGENETTE_ASSET.path() / os.path.basename(DEVKIT_ASSET.path()), + ) diff --git a/qai_hub_models/evaluators/__init__.py b/qai_hub_models/evaluators/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/evaluators/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/evaluators/base_evaluators.py b/qai_hub_models/evaluators/base_evaluators.py new file mode 100644 index 00000000..a9e7e4e0 --- /dev/null +++ b/qai_hub_models/evaluators/base_evaluators.py @@ -0,0 +1,178 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import Callable, Collection, Tuple, Union + +import torch +from torch.utils.data.dataloader import DataLoader +from tqdm import tqdm +from typing_extensions import TypeAlias + +_ModelIO: TypeAlias = Union[Collection[torch.Tensor], torch.Tensor] +# Typically is a torch DataLoader, but anything with the collection signature is acceptable. +_DataLoader: TypeAlias = Union[ + DataLoader, Collection[Union[_ModelIO, Tuple[_ModelIO, _ModelIO]]] +] + + +class BaseEvaluator(ABC): + """ + Evaluates one or more outputs of a model in comparison to a ground truth. + """ + + @abstractmethod + def add_batch( + self, + output, # torch.Tensor | Collection[torch.Tensor] + ground_truth, # torch.Tensor | Collection[torch.Tensor] + ) -> None: + """ + Add a batch of data to this evaluator. + + Parameters: + output: torch.Tensor | Collection[torch.Tensor] + Torch model output(s) for a single inference. + + If the model forward() function has 1 output, this is a tensor. + If the model forward() function outputs multiple tensors, this is a tuple of tensors. + + gt: torch.Tensor | Collection[torch.Tensor] + The ground truth(s) for this output. 
+ + Some evaluators may accept only a Collection. Others may accept only a tensor. + The meaning of the ground truth is dependent on this method's implementation. + """ + pass + + @abstractmethod + def reset(self) -> None: + """Reset the state of this evaluator.""" + pass + + @abstractmethod + def get_accuracy_score(self) -> float: + """Single float value representing model accuracy. Higher is better.""" + pass + + def add_from_dataset( + self, + model: torch.nn.Module, + data: _DataLoader, + eval_iterations: int | None = None, + device: str = "cpu", + ) -> None: + """ + Populates this evaluator with data from the provided data loader. + + Parameters: + model: torch.nn.Module + Model to use to compute model outputs. + + data: torch DataLoader | Collection + Data loader for the dataset to use for evaluation. Iterator should return: + tuple(inputs: Collection[torch.Tensor] | torch.Tensor, + ground_truth: Collection[torch.Tensor] | torch.Tensor) + + eval_iterations: int | None + Number of samples to use for evaluation. One sample is one iteration from iter(data). + If None, defaults to the number of samples in the dataset. + + device: str + Name of device on which inference should be run. + """ + + def _add_batch( + _: torch.Tensor, outputs: torch.Tensor, ground_truth: torch.Tensor + ): + self.add_batch(outputs, ground_truth) + + _for_each_batch(model, data, eval_iterations, device, True, _add_batch) + + +def _for_each_batch( + model: torch.nn.Module, + data: _DataLoader, + num_samples: int | None = None, + device: str = "cpu", + data_has_gt: bool = False, + callback: Callable | None = None, +) -> None: + """ + Run the model on each batch of data. + + Parameters: + model: torch.nn.Module + Model to use to compute model outputs. + + data: torch DataLoader | Collection + Data loader for the dataset. Iterator should return: + if data_has_gt: + tuple(inputs: Collection[torch.Tensor] | torch.Tensor, + ground_truth: Collection[torch.Tensor] | torch.Tensor) + else: + Collection[torch.Tensor] | torch.Tensor + + num_samples: int | None + Number of samples to use for evaluation. One sample is one iteration from iter(data). + If None, defaults to the number of samples in the dataset. + + device: str + Name of device on which inference should be run. + + data_has_gt: bool + If True, changes the type this function expects the dataloader to return. See `data` parameter. + + callback: Callable | None + The input, output, and (if provided) ground_truth will be passed to this function after each inference.
+ """ + torch_device = torch.device(device) + model.eval() + model.to(torch_device) + total_samples = 0 + num_samples = num_samples or len(data) + + if isinstance(data, DataLoader): + batch_size = data.batch_size or 1 + else: + batch_size = 1 + counting_obj = "batches" if batch_size != 1 else "samples" + + with tqdm( + total=batch_size * num_samples, + desc=f"Number of {counting_obj} completed", + ) as pbar: + for sample in data: + + if data_has_gt: + inputs, ground_truth, *_ = sample + else: + inputs, ground_truth = sample, None + + if len(inputs) > 0: + if isinstance(inputs, torch.Tensor): + inputs = inputs.to(torch_device) + outputs = model(inputs) + else: + inputs = [input.to(torch_device) for input in inputs] + outputs = model(*inputs) + + if data_has_gt: + if isinstance(ground_truth, torch.Tensor): + ground_truth = ground_truth.to("cpu") + else: + ground_truth = [gt.to("cpu") for gt in ground_truth] # type: ignore + + if callback: + if data_has_gt: + callback(inputs, outputs, ground_truth) + else: + callback(inputs, outputs) + + total_samples += 1 + pbar.update(batch_size) + if total_samples >= num_samples: + break diff --git a/qai_hub_models/evaluators/classification_evaluator.py b/qai_hub_models/evaluators/classification_evaluator.py new file mode 100644 index 00000000..a0286582 --- /dev/null +++ b/qai_hub_models/evaluators/classification_evaluator.py @@ -0,0 +1,35 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator + + +class ClassificationEvaluator(BaseEvaluator): + """Evaluator for tracking accuracy of a Classifier Model.""" + + def __init__(self, num_classes: int = 1000): + self.num_classes = num_classes + self.reset() + + def add_batch(self, output: torch.Tensor, gt: int | torch.Tensor): + # This evaluator supports only 1 output tensor at a time. + assert len(output.shape) == 2 and output.shape[-1] == self.num_classes + gt_tensor = torch.Tensor(gt) + assert len(gt_tensor.shape) == 1 and gt_tensor.shape[0] == output.shape[0] + batch_size = output.shape[0] + self.total_samples += batch_size + self.num_correct += sum(torch.argmax(output, dim=-1) == gt_tensor) + + def reset(self): + self.num_correct = 0 + self.total_samples = 0 + + def get_accuracy_score(self) -> float: + if self.total_samples == 0: + return 0 + return self.num_correct / self.total_samples diff --git a/qai_hub_models/evaluators/detection_evaluator.py b/qai_hub_models/evaluators/detection_evaluator.py new file mode 100644 index 00000000..aef6269d --- /dev/null +++ b/qai_hub_models/evaluators/detection_evaluator.py @@ -0,0 +1,111 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Collection + +import torch +from podm.metrics import ( # type: ignore + BoundingBox, + MetricPerClass, + get_pascal_voc_metrics, +) + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator +from qai_hub_models.utils.bounding_box_processing import batched_nms + + +class DetectionEvaluator(BaseEvaluator): + """Evaluator for tracking the mean average precision (mAP) of an object detection model.""" + + def __init__( + self, + image_height: int, + image_width: int, + nms_score_threshold: float = 0.45, + nms_iou_threshold: float = 0.7, + ): + self.reset() + self.nms_score_threshold = nms_score_threshold + self.nms_iou_threshold = nms_iou_threshold + self.scale_x = 1 / image_height + self.scale_y = 1 / image_width + + def add_batch(self, output: Collection[torch.Tensor], gt: Collection[torch.Tensor]): + # This evaluator supports 1 output tensor at a time. + image_id, _, _, bboxes, classes = gt + pred_boxes, pred_scores, pred_class_idx = output + + # Seeing memory issues, intentionally deleting these variables to free memory. + del gt + del output + + # Reuse NMS utility + ( + after_nms_pred_boxes, + after_nms_pred_scores, + after_nms_pred_class_idx, + ) = batched_nms( + self.nms_iou_threshold, + self.nms_score_threshold, + pred_boxes, + pred_scores, + pred_class_idx, + ) + + del pred_boxes + del pred_scores + del pred_class_idx + + # Collect GT and prediction boxes + gt_bb_entry = [ + BoundingBox.of_bbox(image_id, cat, *bbox, 1.0) + for cat, bbox in zip(classes.tolist(), bboxes.tolist()) + ] + del classes + del bboxes + + pd_bb_entry = [ + BoundingBox.of_bbox( + image_id, + pred_cat, + pred_bbox[0] * self.scale_x, + pred_bbox[1] * self.scale_y, + pred_bbox[2] * self.scale_x, + pred_bbox[3] * self.scale_y, + pred_score, + ) + for pred_cat, pred_score, pred_bbox in zip( + after_nms_pred_class_idx[0].tolist(), + after_nms_pred_scores[0].tolist(), + after_nms_pred_boxes[0].tolist(), + ) + ] + + del after_nms_pred_boxes + del after_nms_pred_scores + del after_nms_pred_class_idx + + # Compute mean average precision + self._update_mAP(gt_bb_entry, pd_bb_entry) + + def reset(self): + self.gt_bb = [] + self.pd_bb = [] + self.results = {} + + def _update_mAP(self, gt_bb_entry, pd_bb_entry): + self.gt_bb += gt_bb_entry + self.pd_bb += pd_bb_entry + + del gt_bb_entry + del pd_bb_entry + self.results = get_pascal_voc_metrics( + self.gt_bb, self.pd_bb, self.nms_iou_threshold + ) + self.mAP = MetricPerClass.mAP(self.results) + + def get_accuracy_score(self): + return self.mAP diff --git a/qai_hub_models/evaluators/image_evaluator.py b/qai_hub_models/evaluators/image_evaluator.py new file mode 100644 index 00000000..a5439a5d --- /dev/null +++ b/qai_hub_models/evaluators/image_evaluator.py @@ -0,0 +1,64 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator + + +class SegmentationOutputEvaluator(BaseEvaluator): + """Evaluator for comparing a batched image output.""" + + def __init__(self, num_classes): + self.num_classes = num_classes + self.reset() + + def add_batch(self, output: torch.Tensor, gt: torch.Tensor): + # This evaluator supports only 1 output tensor at a time. + assert gt.shape == output.shape + self.confusion_matrix += self._generate_matrix(gt, output) + + def reset(self): + self.confusion_matrix = torch.zeros((self.num_classes, self.num_classes)) + + def Pixel_Accuracy(self): + Acc = torch.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum() + return Acc + + def Pixel_Accuracy_Class(self): + Acc = torch.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1) + Acc = torch.nanmean(Acc) + return Acc + + def Intersection_over_Union(self): + return torch.diag(self.confusion_matrix) / ( + torch.sum(self.confusion_matrix, axis=1) + + torch.sum(self.confusion_matrix, axis=0) + - torch.diag(self.confusion_matrix) + ) + + def Mean_Intersection_over_Union(self): + return torch.nanmean(self.Intersection_over_Union()) + + def Frequency_Weighted_Intersection_over_Union(self): + freq = torch.sum(self.confusion_matrix, axis=1) / torch.sum( + self.confusion_matrix + ) + iu = torch.diag(self.confusion_matrix) / ( + torch.sum(self.confusion_matrix, axis=1) + + torch.sum(self.confusion_matrix, axis=0) + - torch.diag(self.confusion_matrix) + ) + + FWIoU = (freq[freq > 0] * iu[freq > 0]).sum() + return FWIoU + + def _generate_matrix(self, gt_image, pre_image): + mask = (gt_image >= 0) & (gt_image < self.num_classes) + label = self.num_classes * gt_image[mask].int() + pre_image[mask] + count = torch.bincount(label, minlength=self.num_classes**2) + confusion_matrix = count.reshape(self.num_classes, self.num_classes) + return confusion_matrix diff --git a/qai_hub_models/evaluators/superres_evaluator.py b/qai_hub_models/evaluators/superres_evaluator.py new file mode 100644 index 00000000..a8f3a03a --- /dev/null +++ b/qai_hub_models/evaluators/superres_evaluator.py @@ -0,0 +1,67 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
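To make the confusion-matrix bookkeeping in SegmentationOutputEvaluator concrete, here is a tiny hand-checkable sketch; the label maps are synthetic and purely illustrative:

    import torch

    from qai_hub_models.evaluators.image_evaluator import SegmentationOutputEvaluator

    evaluator = SegmentationOutputEvaluator(num_classes=3)
    gt = torch.tensor([[0, 0, 1, 1, 2, 2]])    # ground-truth class per pixel
    pred = torch.tensor([[0, 1, 1, 1, 2, 0]])  # predicted class per pixel
    evaluator.add_batch(pred, gt)

    print(evaluator.Pixel_Accuracy())                # 4 of 6 pixels correct -> ~0.667
    print(evaluator.Intersection_over_Union())       # per-class IoU: 1/3, 2/3, 1/2
    print(evaluator.Mean_Intersection_over_Union())  # 0.5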
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import numpy as np +import torch + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator + + +class SuperResolutionOutputEvaluator(BaseEvaluator): + """Evaluator for comparing a batched image output.""" + + def __init__(self): + self.psnr_list = [] + self.reset() + + def _rgb_to_yuv(self, img): + # Convert to YUV as this is closer to human perception, + # so PSNR will be more meaningful + # Source: + # https://github.com/quic/aimet-model-zoo/blob/main/aimet_zoo_torch/common/super_resolution/psnr.py#L18 + rgb_weights = np.array([65.481, 128.553, 24.966]) + img = np.matmul(img, rgb_weights) + 16.0 + + return img + + def _compute_psnr(self, img, gt): + # Compute PSNR between two images + # Assumed that they are in YUV format + diff = (img - gt) ** 2 + error = np.mean(diff) + eps = 1e-8 # a tiny amount to ensure no division by 0 + data_range = 255.0 # 8-bit data range + + return 10 * np.log10((data_range**2) / (error + eps)) + + def add_batch(self, output: torch.Tensor, gt: torch.Tensor): + assert gt.shape == output.shape + + output = output.detach() + gt = gt.detach() + + batch_size = gt.shape[0] + + for i in range(batch_size): + # Convert each to HWC and YUV for PSNR + pred = output[i].permute((1, 2, 0)).numpy() + truth = gt[i].permute((1, 2, 0)).numpy() + + pred = self._rgb_to_yuv(pred) + truth = self._rgb_to_yuv(truth) + + psnr = self._compute_psnr(pred, truth) + self.psnr_list.append(psnr.item()) + + def reset(self): + self.psnr_list = [] + + def compute_average_psnr(self): + average_psnr = np.mean(np.array(self.psnr_list)) + return average_psnr + + def get_accuracy_score(self) -> float: + return self.compute_average_psnr() diff --git a/qai_hub_models/models/__init__.py b/qai_hub_models/models/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/__init__.py b/qai_hub_models/models/_shared/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/_shared/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/cityscapes_segmentation/__init__.py b/qai_hub_models/models/_shared/cityscapes_segmentation/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/_shared/cityscapes_segmentation/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
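The PSNR definition above maps mean squared error in YUV space to decibels. A quick numeric sketch (values are illustrative) shows the scale of the scores the super-resolution evaluator reports:

    import numpy as np

    data_range, eps = 255.0, 1e-8  # same constants as _compute_psnr above
    for mse in (0.0, 1.0, 100.0):
        print(mse, 10 * np.log10(data_range**2 / (mse + eps)))
    # MSE 0.0   -> ~128 dB (identical images, bounded only by eps)
    # MSE 1.0   -> ~48 dB
    # MSE 100.0 -> ~28 dB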
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/cityscapes_segmentation/app.py b/qai_hub_models/models/_shared/cityscapes_segmentation/app.py new file mode 100644 index 00000000..ff51e7f7 --- /dev/null +++ b/qai_hub_models/models/_shared/cityscapes_segmentation/app.py @@ -0,0 +1,130 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os +from typing import Optional + +import numpy as np +import torch +import torch.nn.functional as F +import torchvision.transforms as standard_transforms +from PIL import Image as ImageModule +from PIL.Image import Image + +from qai_hub_models.models._shared.cityscapes_segmentation.model import ( + CITYSCAPES_MEAN, + CITYSCAPES_STD, + FFNET_SOURCE_PATCHES, + FFNET_SOURCE_REPO_COMMIT, + FFNET_SOURCE_REPOSITORY, + FFNET_SOURCE_VERSION, + MODEL_ASSET_VERSION, + MODEL_ID, +) +from qai_hub_models.utils.asset_loaders import ASSET_CONFIG, SourceAsRoot + + +def _load_cityscapes_loader(cityscapes_path: Optional[str] = None) -> object: + if cityscapes_path is None: + # Allow a loader without data. There are useful auxiliary functions. + cityscapes_path = ASSET_CONFIG.get_local_store_model_path( + MODEL_ID, + MODEL_ASSET_VERSION, + "cityscapes_dummy", + ) + + os.makedirs( + os.path.join(cityscapes_path, "leftImg8bit", "train"), exist_ok=True + ) + os.makedirs(os.path.join(cityscapes_path, "leftImg8bit", "val"), exist_ok=True) + + # Resolve absolute path outside SourceAsRoot, since cwd changes + cityscapes_path = os.path.abspath(cityscapes_path) + + with SourceAsRoot( + FFNET_SOURCE_REPOSITORY, + FFNET_SOURCE_REPO_COMMIT, + MODEL_ID, + FFNET_SOURCE_VERSION, + source_repo_patches=FFNET_SOURCE_PATCHES, + ): + import config + + config.cityscapes_base_path = cityscapes_path + from ffnet_datasets.cityscapes.dataloader.get_dataloaders import ( + return_dataloader, + ) + + dataloader = return_dataloader(num_workers=1, batch_size=1) + return dataloader + + +def preprocess_cityscapes_image(image: Image) -> torch.Tensor: + transform = standard_transforms.Compose( + [ + standard_transforms.ToTensor(), + standard_transforms.Normalize(CITYSCAPES_MEAN, CITYSCAPES_STD), + ] + ) + out_tensor: torch.Tensor = transform(image) # type: ignore + return out_tensor.unsqueeze(0) + + +class CityscapesSegmentationApp: + """ + This class consists of light-weight "app code" that is required to perform + end to end inference for single-view (left) semantic segmentation of the + Cityscapes (https://cityscapes-dataset.com/) dataset. + + The app uses 1 model: + * Cityscapes segmentation model + + For a given image input, the app will: + * Pre-process the image + * Run model inference + * Resize predictions to map image size + * Visualize results by super-imposing on input image + """ + + def __init__( + self, + model: torch.nn.Module, + ): + self.model = model + self.color_mapping = _load_cityscapes_loader().dataset.color_mapping + + def predict(self, image: Image, raw_output: bool = False) -> Image | np.ndarray: + """ + From the provided image or tensor, predict semantic segmentation over + the Cityscapes classes. + + Parameters: + image: A PIL Image in RGB format. 
+ + Returns: + If raw_output is False it will return an annotated image of the + same size as the input image. If True, it will return the raw class + logits as a numpy array of shape [1, CLASSES, HEIGHT, WIDTH], + resized to match the input image. + """ + + input_tensor = preprocess_cityscapes_image(image) + with torch.no_grad(): + small_res_output = self.model(input_tensor) + + output = F.interpolate( + small_res_output, (image.height, image.width), mode="bilinear" + ) + if raw_output: + return output.detach().numpy() + predictions = output[0].argmax(0).byte().cpu().numpy() + + color_mask = ImageModule.fromarray(predictions.astype(np.uint8)).convert("P") + color_mask.putpalette(self.color_mapping) + out = ImageModule.blend(image, color_mask.convert("RGB"), 0.5) + + return out diff --git a/qai_hub_models/models/_shared/cityscapes_segmentation/demo.py b/qai_hub_models/models/_shared/cityscapes_segmentation/demo.py new file mode 100644 index 00000000..25630f71 --- /dev/null +++ b/qai_hub_models/models/_shared/cityscapes_segmentation/demo.py @@ -0,0 +1,82 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import os +from typing import Type + +from qai_hub_models.models._shared.cityscapes_segmentation.app import ( + CityscapesSegmentationApp, +) +from qai_hub_models.models._shared.cityscapes_segmentation.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + CityscapesSegmentor, +) +from qai_hub_models.utils.args import ( + TargetRuntime, + demo_model_from_cli_args, + get_model_cli_parser, + get_on_device_demo_parser, + validate_on_device_demo_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.display import display_or_save_image +from qai_hub_models.utils.image_processing import pil_resize_pad, pil_undo_resize_pad + +# This image showcases the Cityscapes classes (but is not from the dataset) +TEST_CITYSCAPES_LIKE_IMAGE_NAME = "cityscapes_like_demo_2048x1024.jpg" +TEST_CITYSCAPES_LIKE_IMAGE_ASSET = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, TEST_CITYSCAPES_LIKE_IMAGE_NAME +) + + +# Run a Cityscapes segmentation model end-to-end on a sample image. +# The demo will display (or save) the annotated image.
+def cityscapes_segmentation_demo( + model_type: Type[CityscapesSegmentor], + model_id: str, + is_test: bool = False, +): + # Demo parameters + parser = get_model_cli_parser(model_type) + parser = get_on_device_demo_parser( + parser, available_target_runtimes=[TargetRuntime.TFLITE], add_output_dir=True + ) + parser.add_argument( + "--image", + type=str, + help="File path or URL to an input image to use for the demo.", + ) + args = parser.parse_args([] if is_test else None) + validate_on_device_demo_args(args, model_type.get_model_id()) + + if args.image is None: + image = TEST_CITYSCAPES_LIKE_IMAGE_ASSET.fetch() + image_name = TEST_CITYSCAPES_LIKE_IMAGE_NAME + else: + image = args.image + image_name = os.path.basename(image) + + input_spec = model_type.get_input_spec() + + inference_model = demo_model_from_cli_args(model_type, args) + app = CityscapesSegmentationApp(inference_model) + + (_, _, height, width) = input_spec["image"][0] + orig_image = load_image(image) + image, _, padding = pil_resize_pad(orig_image, (height, width)) + + # Run app + image_annotated = app.predict(image) + + # Resize / unpad annotated image + image_annotated = pil_undo_resize_pad(image_annotated, orig_image.size, padding) + + if not is_test: + display_or_save_image( + image_annotated, + args.output_dir, + "annotated_" + image_name, + "predicted image", + ) diff --git a/qai_hub_models/models/_shared/cityscapes_segmentation/evaluator.py b/qai_hub_models/models/_shared/cityscapes_segmentation/evaluator.py new file mode 100644 index 00000000..d1eb4a7c --- /dev/null +++ b/qai_hub_models/models/_shared/cityscapes_segmentation/evaluator.py @@ -0,0 +1,22 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import torch.nn.functional as F +from torch import Tensor + +from qai_hub_models.evaluators.image_evaluator import SegmentationOutputEvaluator + + +class CityscapesSegmentationEvaluator(SegmentationOutputEvaluator): + """ + Evaluates the output of Cityscapes semantics segmentation. + """ + + def add_batch(self, output: Tensor, gt: Tensor): + output_match_size = F.interpolate(output, gt.shape[1:3], mode="bilinear") + output_class = output_match_size.argmax(1).cpu() + return super().add_batch(output_class, gt) + + def get_accuracy_score(self) -> float: + return super().Mean_Intersection_over_Union() diff --git a/qai_hub_models/models/_shared/cityscapes_segmentation/model.py b/qai_hub_models/models/_shared/cityscapes_segmentation/model.py new file mode 100644 index 00000000..a64d4cab --- /dev/null +++ b/qai_hub_models/models/_shared/cityscapes_segmentation/model.py @@ -0,0 +1,92 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
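A minimal sketch of exercising the Cityscapes evaluator above with synthetic tensors; the shapes are illustrative only, since real logits come from a Cityscapes model and real labels from the dataset loader:

    import torch

    from qai_hub_models.models._shared.cityscapes_segmentation.evaluator import (
        CityscapesSegmentationEvaluator,
    )

    evaluator = CityscapesSegmentationEvaluator(num_classes=19)
    logits = torch.rand(1, 19, 64, 128)            # low-resolution model output
    labels = torch.randint(0, 19, (1, 512, 1024))  # full-resolution ground truth
    evaluator.add_batch(logits, labels)            # upsamples, argmaxes, accumulates
    print(evaluator.get_accuracy_score())          # mean IoU over batches so far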
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import os + +import torch +from torch import nn + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator +from qai_hub_models.models._shared.cityscapes_segmentation.evaluator import ( + CityscapesSegmentationEvaluator, +) +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +# The FFNet repo contains some utility functions for Cityscapes, so the +# repo source lives here +FFNET_SOURCE_REPOSITORY = "https://github.com/Qualcomm-AI-research/FFNet.git" +FFNET_SOURCE_REPO_COMMIT = "0887620d3d570b0848c40ce6db6f048a128ee58a" +FFNET_SOURCE_PATCHES = [ + os.path.abspath( + os.path.join(os.path.dirname(__file__), "patches", "move_datasets.diff") + ) +] +FFNET_SOURCE_VERSION = 2  # bump if repo/sha/patches are updated + +MODEL_ASSET_VERSION = 1 +MODEL_ID = __name__.split(".")[-2] +CITYSCAPES_NUM_CLASSES = 19 +CITYSCAPES_IGNORE_LABEL = 255 +# Cityscapes has 30 classes, but only 19 are in use +CITYSCAPES_LABELS = [ + "road", + "sidewalk", + "building", + "wall", + "fence", + "pole", + "traffic light", + "traffic sign", + "vegetation", + "terrain", + "sky", + "person", + "rider", + "car", + "truck", + "bus", + "train", + "motorcycle", + "bicycle", +] +CITYSCAPES_MEAN = [0.485, 0.456, 0.406] +CITYSCAPES_STD = [0.229, 0.224, 0.225] + + +class CityscapesSegmentor(BaseModel): + def __init__(self, model: nn.Module) -> None: + super().__init__() + self.model = model + + def get_evaluator(self) -> BaseEvaluator: + return CityscapesSegmentationEvaluator(CITYSCAPES_NUM_CLASSES) + + def forward(self, image: torch.Tensor): + """ + Predict semantic segmentation for an input `image`. + + Parameters: + image: A [1, 3, height, width] image. + Assumes image has been resized and normalized using the + Cityscapes preprocessor (in cityscapes_segmentation/app.py). + + Returns: + Per-class logits of shape [1, CITYSCAPES_NUM_CLASSES, H, W], where + H and W may be smaller than the input size; the app upsamples the + logits back to the input resolution. + """ + return self.model(image) + + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 1024, + width: int = 2048, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a compile job. + return {"image": ((batch_size, num_channels, height, width), "float32")} diff --git a/qai_hub_models/models/_shared/cityscapes_segmentation/patches/move_datasets.diff b/qai_hub_models/models/_shared/cityscapes_segmentation/patches/move_datasets.diff new file mode 100644 index 00000000..869a86b8 --- /dev/null +++ b/qai_hub_models/models/_shared/cityscapes_segmentation/patches/move_datasets.diff @@ -0,0 +1,182 @@ +commit 893641b40d95d14bc1da70e404b43754b6784ab8 +Author: Gustav Larsson +Date: Wed Feb 14 09:26:52 2024 -0800 + + datasets -> ffnet_datasets + + This prevents collision with popular HF package.
+ +diff --git a/datasets/.DS_Store b/ffnet_datasets/.DS_Store +similarity index 100% +rename from datasets/.DS_Store +rename to ffnet_datasets/.DS_Store +diff --git a/datasets/cityscapes/.DS_Store b/ffnet_datasets/cityscapes/.DS_Store +similarity index 100% +rename from datasets/cityscapes/.DS_Store +rename to ffnet_datasets/cityscapes/.DS_Store +diff --git a/datasets/cityscapes/attribution.txt b/ffnet_datasets/cityscapes/attribution.txt +similarity index 100% +rename from datasets/cityscapes/attribution.txt +rename to ffnet_datasets/cityscapes/attribution.txt +diff --git a/datasets/cityscapes/cityscapes.py b/ffnet_datasets/cityscapes/cityscapes.py +similarity index 86% +rename from datasets/cityscapes/cityscapes.py +rename to ffnet_datasets/cityscapes/cityscapes.py +index a441c91..f43c98c 100644 +--- a/datasets/cityscapes/cityscapes.py ++++ b/ffnet_datasets/cityscapes/cityscapes.py +@@ -1,6 +1,6 @@ + import os + import os.path as path +-import datasets.cityscapes.cityscapes_labels as cityscapes_labels ++import ffnet_datasets.cityscapes.cityscapes_labels as cityscapes_labels + + + def find_directories(root): +diff --git a/datasets/cityscapes/cityscapes_labels.py b/ffnet_datasets/cityscapes/cityscapes_labels.py +similarity index 100% +rename from datasets/cityscapes/cityscapes_labels.py +rename to ffnet_datasets/cityscapes/cityscapes_labels.py +diff --git a/datasets/cityscapes/dataloader/__init__.py b/ffnet_datasets/cityscapes/dataloader/__init__.py +similarity index 100% +rename from datasets/cityscapes/dataloader/__init__.py +rename to ffnet_datasets/cityscapes/dataloader/__init__.py +diff --git a/datasets/cityscapes/dataloader/base_loader.py b/ffnet_datasets/cityscapes/dataloader/base_loader.py +similarity index 98% +rename from datasets/cityscapes/dataloader/base_loader.py +rename to ffnet_datasets/cityscapes/dataloader/base_loader.py +index b503b8a..f1a1b37 100644 +--- a/datasets/cityscapes/dataloader/base_loader.py ++++ b/ffnet_datasets/cityscapes/dataloader/base_loader.py +@@ -32,9 +32,9 @@ import torch + from PIL import Image + from torch.utils import data + from config import CITYSCAPES_IGNORE_LABEL, CITYSCAPES_NUM_CLASSES, cityscapes_base_path +-from datasets.cityscapes.utils.misc import tensor_to_pil +-from datasets.cityscapes.cityscapes import find_directories +-import datasets.cityscapes.cityscapes_labels as cityscapes_labels ++from ffnet_datasets.cityscapes.utils.misc import tensor_to_pil ++from ffnet_datasets.cityscapes.cityscapes import find_directories ++import ffnet_datasets.cityscapes.cityscapes_labels as cityscapes_labels + from scipy.ndimage.morphology import distance_transform_edt + + +diff --git a/datasets/cityscapes/dataloader/get_dataloaders.py b/ffnet_datasets/cityscapes/dataloader/get_dataloaders.py +similarity index 84% +rename from datasets/cityscapes/dataloader/get_dataloaders.py +rename to ffnet_datasets/cityscapes/dataloader/get_dataloaders.py +index 347f7db..5596f5a 100644 +--- a/datasets/cityscapes/dataloader/get_dataloaders.py ++++ b/ffnet_datasets/cityscapes/dataloader/get_dataloaders.py +@@ -1,11 +1,11 @@ +-# import datasets.cityscapes.dataloader.joint_transforms as joint_transforms +-import datasets.cityscapes.dataloader.transforms as extended_transforms ++# import ffnet_datasets.cityscapes.dataloader.joint_transforms as joint_transforms ++import ffnet_datasets.cityscapes.dataloader.transforms as extended_transforms + from torch.utils.data import DataLoader + + import importlib + import torchvision.transforms as standard_transforms + from config import 
CITYSCAPES_MEAN, CITYSCAPES_STD +-from datasets.cityscapes.dataloader.base_loader import Cityscapes ++from ffnet_datasets.cityscapes.dataloader.base_loader import Cityscapes + + + def return_dataloader(num_workers, batch_size): +diff --git a/datasets/cityscapes/dataloader/sampler.py b/ffnet_datasets/cityscapes/dataloader/sampler.py +similarity index 100% +rename from datasets/cityscapes/dataloader/sampler.py +rename to ffnet_datasets/cityscapes/dataloader/sampler.py +diff --git a/datasets/cityscapes/dataloader/transforms.py b/ffnet_datasets/cityscapes/dataloader/transforms.py +similarity index 100% +rename from datasets/cityscapes/dataloader/transforms.py +rename to ffnet_datasets/cityscapes/dataloader/transforms.py +diff --git a/datasets/cityscapes/utils/__init__.py b/ffnet_datasets/cityscapes/utils/__init__.py +similarity index 100% +rename from datasets/cityscapes/utils/__init__.py +rename to ffnet_datasets/cityscapes/utils/__init__.py +diff --git a/datasets/cityscapes/utils/attr_dict.py b/ffnet_datasets/cityscapes/utils/attr_dict.py +similarity index 100% +rename from datasets/cityscapes/utils/attr_dict.py +rename to ffnet_datasets/cityscapes/utils/attr_dict.py +diff --git a/datasets/cityscapes/utils/misc.py b/ffnet_datasets/cityscapes/utils/misc.py +similarity index 99% +rename from datasets/cityscapes/utils/misc.py +rename to ffnet_datasets/cityscapes/utils/misc.py +index 26a4f59..df84db8 100644 +--- a/datasets/cityscapes/utils/misc.py ++++ b/ffnet_datasets/cityscapes/utils/misc.py +@@ -9,7 +9,7 @@ import numpy as np + + import torchvision.transforms as standard_transforms + import torchvision.utils as vutils +-from datasets.cityscapes import cityscapes_labels ++from ffnet_datasets.cityscapes import cityscapes_labels + + # from tabulate import tabulate + from PIL import Image +diff --git a/datasets/cityscapes/utils/my_data_parallel.py b/ffnet_datasets/cityscapes/utils/my_data_parallel.py +similarity index 100% +rename from datasets/cityscapes/utils/my_data_parallel.py +rename to ffnet_datasets/cityscapes/utils/my_data_parallel.py +diff --git a/datasets/cityscapes/utils/progress_bar.py b/ffnet_datasets/cityscapes/utils/progress_bar.py +similarity index 100% +rename from datasets/cityscapes/utils/progress_bar.py +rename to ffnet_datasets/cityscapes/utils/progress_bar.py +diff --git a/datasets/cityscapes/utils/trnval_utils.py b/ffnet_datasets/cityscapes/utils/trnval_utils.py +similarity index 96% +rename from datasets/cityscapes/utils/trnval_utils.py +rename to ffnet_datasets/cityscapes/utils/trnval_utils.py +index 7bff368..5da25e3 100644 +--- a/datasets/cityscapes/utils/trnval_utils.py ++++ b/ffnet_datasets/cityscapes/utils/trnval_utils.py +@@ -31,10 +31,10 @@ import os + import torch + + from config import CITYSCAPES_IGNORE_LABEL, CITYSCAPES_NUM_CLASSES +-from datasets.cityscapes.utils.misc import fast_hist, fmt_scale ++from ffnet_datasets.cityscapes.utils.misc import fast_hist, fmt_scale + +-# from datasets.cityscapes.utils.misc import AverageMeter, eval_metrics +-# from datasets.cityscapes.utils.misc import metrics_per_image ++# from ffnet_datasets.cityscapes.utils.misc import AverageMeter, eval_metrics ++# from ffnet_datasets.cityscapes.utils.misc import metrics_per_image + import numpy as np + + +diff --git a/datasets/imagenet/imagenet_data_loader.py b/ffnet_datasets/imagenet/imagenet_data_loader.py +similarity index 100% +rename from datasets/imagenet/imagenet_data_loader.py +rename to ffnet_datasets/imagenet/imagenet_data_loader.py +diff --git a/scripts/evaluate_cityscapes.py 
b/scripts/evaluate_cityscapes.py +index 158daa6..afcfd11 100644 +--- a/scripts/evaluate_cityscapes.py ++++ b/scripts/evaluate_cityscapes.py +@@ -11,10 +11,10 @@ import numpy as np + import torch + import os + import sys +-from datasets.cityscapes.utils.misc import AverageMeter, eval_metrics +-from datasets.cityscapes.utils.trnval_utils import eval_minibatch +-from datasets.cityscapes.utils.progress_bar import printProgressBar +-from datasets.cityscapes.dataloader.get_dataloaders import return_dataloader ++from ffnet_datasets.cityscapes.utils.misc import AverageMeter, eval_metrics ++from ffnet_datasets.cityscapes.utils.trnval_utils import eval_minibatch ++from ffnet_datasets.cityscapes.utils.progress_bar import printProgressBar ++from ffnet_datasets.cityscapes.dataloader.get_dataloaders import return_dataloader + import warnings + + # from config import cityscapes_base_path +diff --git a/scripts/evaluate_imagenet.py b/scripts/evaluate_imagenet.py +index 4de201f..a7fae7f 100644 +--- a/scripts/evaluate_imagenet.py ++++ b/scripts/evaluate_imagenet.py +@@ -27,7 +27,7 @@ import torch.nn.functional as F + + torch.backends.cudnn.benchmark = True + from config import imagenet_base_path +-from datasets.imagenet.imagenet_data_loader import get_data_loader ++from ffnet_datasets.imagenet.imagenet_data_loader import get_data_loader + from models.model_registry import model_entrypoint + + diff --git a/qai_hub_models/models/_shared/common.py b/qai_hub_models/models/_shared/common.py new file mode 100644 index 00000000..5e2038d4 --- /dev/null +++ b/qai_hub_models/models/_shared/common.py @@ -0,0 +1,26 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from typing import Type + +import torch + + +def replace_module_recursively( + module: torch.nn.Module, + tgt_cls: Type[torch.nn.Module], + new_cls: Type[torch.nn.Module], + parent_module: Type[torch.nn.Module] = None, +): + """ + Replace all instances of `tgt_cls` with `new_cls`. If `parent_module` is + specified, `tgt_cls` instance must be an immediate member of + `parent_module` (useful for limiting replacement scope) + """ + for name, child in module.named_children(): + if isinstance(child, tgt_cls): + if parent_module is None or isinstance(module, parent_module): + setattr(module, name, new_cls(child)) + else: + replace_module_recursively(child, tgt_cls, new_cls) diff --git a/qai_hub_models/models/_shared/deeplab/__init__.py b/qai_hub_models/models/_shared/deeplab/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/_shared/deeplab/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/deeplab/app.py b/qai_hub_models/models/_shared/deeplab/app.py new file mode 100644 index 00000000..5fed7629 --- /dev/null +++ b/qai_hub_models/models/_shared/deeplab/app.py @@ -0,0 +1,85 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
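A usage sketch for replace_module_recursively from _shared/common.py: note that the replacement class receives the module it replaces as its only constructor argument. The wrapper class and the ReLU-for-Hardswish swap below are hypothetical, purely to illustrate the call pattern:

    import torch

    from qai_hub_models.models._shared.common import replace_module_recursively


    class HardswishReplacement(torch.nn.Module):
        # Hypothetical replacement: constructed from the module being replaced.
        def __init__(self, old: torch.nn.Module):
            super().__init__()
            self.act = torch.nn.ReLU()

        def forward(self, x):
            return self.act(x)


    net = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Hardswish())
    replace_module_recursively(net, torch.nn.Hardswish, HardswishReplacement)
    print(net)  # the Hardswish child is now a HardswishReplacement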
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable + +import numpy as np +import PIL.Image +import torch +from PIL.Image import Image +from torchvision import transforms + +from qai_hub_models.utils.draw import create_color_map +from qai_hub_models.utils.image_processing import normalize_image_transform + + +def preprocess_image(image: Image) -> torch.Tensor: + """ + Preprocesses images to be run through torch DeepLabV3 segmenter + as prescribed here: + https://pytorch.org/hub/pytorch_vision_resnet/ + + Parameters: + image: Input image to be run through the classifier model. + + Returns: + torch tensor to be directly passed to the model. + """ + transform = transforms.Compose( + [ + transforms.ToTensor(), + normalize_image_transform(), + ] + ) + out_tensor: torch.Tensor = transform(image) # type: ignore + return out_tensor.unsqueeze(0) + + +class DeepLabV3App: + """ + This class consists of light-weight "app code" that is required to + perform end to end inference with DeepLabV3. + + For a given image input, the app will: + * Pre-process the image (normalize) + * Run image segmentation + * Convert the raw output into probabilities using softmax + """ + + def __init__(self, model: Callable[[torch.Tensor], torch.Tensor], num_classes: int): + self.model = model + self.num_classes = num_classes + + def predict(self, image: Image, raw_output: bool = False) -> Image | np.ndarray: + """ + From the provided image or tensor, segment the image + + Parameters: + image: A PIL Image in RGB format. + + Returns: + If raw_output is true, returns: + masks: np.ndarray + A list of predicted masks. + + Otherwise, returns: + segmented_images: List[PIL.Image] + Images with segmentation map overlaid with an alpha of 0.5. + """ + + input_tensor = preprocess_image(image) + with torch.no_grad(): + output = self.model(input_tensor) + output = output[0] + predictions = output.argmax(0).byte().cpu().numpy() + + if raw_output: + return predictions + + color_map = create_color_map(self.num_classes) + out = PIL.Image.blend(image, PIL.Image.fromarray(color_map[predictions]), 0.5) + + return out diff --git a/qai_hub_models/models/_shared/deeplab/demo.py b/qai_hub_models/models/_shared/deeplab/demo.py new file mode 100644 index 00000000..ccdc20c3 --- /dev/null +++ b/qai_hub_models/models/_shared/deeplab/demo.py @@ -0,0 +1,45 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
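As a hedged usage sketch for DeepLabV3App, the model below is a stand-in that returns random logits of the right shape; a real caller would pass a DeepLabV3 variant from this repo instead, and the class count depends on the checkpoint:

    import torch
    from PIL import Image

    from qai_hub_models.models._shared.deeplab.app import DeepLabV3App

    NUM_CLASSES = 21  # e.g. Pascal VOC-style label count; depends on the model


    class DummySegmenter(torch.nn.Module):
        # Stand-in that mimics a DeepLabV3 forward(): [B, NUM_CLASSES, H, W] logits.
        def forward(self, image: torch.Tensor) -> torch.Tensor:
            b, _, h, w = image.shape
            return torch.rand(b, NUM_CLASSES, h, w)


    app = DeepLabV3App(DummySegmenter(), num_classes=NUM_CLASSES)
    overlay = app.predict(Image.new("RGB", (64, 64), "gray"), raw_output=False)
    print(overlay.size)  # class map blended over the 64x64 input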
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Type + +from qai_hub_models.models._shared.deeplab.app import DeepLabV3App +from qai_hub_models.utils.args import ( + add_output_dir_arg, + get_model_cli_parser, + model_from_cli_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebAsset, load_image +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.display import display_or_save_image + + +def deeplabv3_demo( + model_type: Type[BaseModel], + default_image: str | CachedWebAsset, + num_classes: int, + is_test: bool, +): + # Demo parameters + parser = get_model_cli_parser(model_type) + parser.add_argument( + "--image", + type=str, + default=default_image, + help="image file path or URL.", + ) + add_output_dir_arg(parser) + args = parser.parse_args([] if is_test else None) + + # This DeepLabV3 ResNet 50 demo comes from + # https://pytorch.org/hub/pytorch_vision_deeplabv3_resnet101/ + # load image and model + image = load_image(args.image) + input_image = image.convert("RGB") + app = DeepLabV3App(model_from_cli_args(model_type, args), num_classes=num_classes) + output = app.predict(input_image, False) + if not is_test: + display_or_save_image(output, args.output_dir) diff --git a/qai_hub_models/models/_shared/deeplab/evaluator.py b/qai_hub_models/models/_shared/deeplab/evaluator.py new file mode 100644 index 00000000..32a836c1 --- /dev/null +++ b/qai_hub_models/models/_shared/deeplab/evaluator.py @@ -0,0 +1,24 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from torch import Tensor + +from qai_hub_models.evaluators.image_evaluator import SegmentationOutputEvaluator + + +class DeepLabV3Evaluator(SegmentationOutputEvaluator): + """ + Evaluates the output of DeepLabV3Plus + + Expected data format for this evaluator: + * output has the same shape & meaning as output of any deeplabV3 forward() function. + * gt is argmax'd on the first dimension (see add_batch). + """ + + def add_batch(self, output: Tensor, gt: Tensor): + output = output.argmax(1).cpu() + return super().add_batch(output, gt) + + def get_accuracy_score(self) -> float: + return super().Mean_Intersection_over_Union() diff --git a/qai_hub_models/models/_shared/detr/__init__.py b/qai_hub_models/models/_shared/detr/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/_shared/detr/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/detr/app.py b/qai_hub_models/models/_shared/detr/app.py new file mode 100644 index 00000000..64b03641 --- /dev/null +++ b/qai_hub_models/models/_shared/detr/app.py @@ -0,0 +1,111 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable, Tuple + +import numpy as np +import torch +from PIL import Image +from transformers import DetrImageProcessor + +from qai_hub_models.models._shared.detr.coco_label_map import LABEL_MAP +from qai_hub_models.utils.bounding_box_processing import box_xywh_to_xyxy +from qai_hub_models.utils.draw import draw_box_from_xyxy +from qai_hub_models.utils.image_processing import app_to_net_image_inputs + + +class DETRApp: + """ + This class consists of light-weight "app code" that is required to + perform end-to-end inference with DETR. + + For a given image input, the app will: + * Preprocess the image (normalize, resize, etc) and get encoding to pass to the model. + * Run DETR Inference + * Convert the raw output into box coordinates and corresponding label and confidence. + """ + + def __init__( + self, + model: Callable[[torch.Tensor], torch.Tensor], + model_image_input_size: Tuple[int, int] | None = None, + ): + self.model = model + self.model_image_input_size = model_image_input_size + + def predict( + self, + image: Image.Image, + default_weights: str, + threshold: float = 0.9, + ) -> np.ndarray: + """ + From the provided image, detect objects and draw the resulting bounding boxes. + + Parameters: + image: Image.Image + A PIL Image in RGB format. + default_weights: str + Default weights name for the model. + threshold: float + Prediction score threshold. + + + Returns: + numpy_array: Original image numpy array with the corresponding predictions. + score: Scores for every class per prediction where at least + one prediction was above the threshold. + Shape is [Number of predictions above threshold] + label: Labels (class number) for the predicted class.
+ Shape is [Number of predictions above threshold] + box: Box coordinates (top left and bottom right) + Shape is [Number of predictions above threshold x top_left_x, top_left_y, bottom_right_x, bottom_right_y] + + """ + size = ( + { + "width": self.model_image_input_size[1], + "height": self.model_image_input_size[0], + } + if self.model_image_input_size + else None + ) + + image_processor = DetrImageProcessor.from_pretrained(default_weights, size=size) + encoding = image_processor(image, return_tensors="pt") + outputs = self.model(encoding["pixel_values"], encoding["pixel_mask"].float()) + target_sizes = torch.tensor(image.size[::-1]).unsqueeze(0) + + out_logits, out_bbox = outputs[0], outputs[1] + prob = torch.nn.functional.softmax(out_logits, -1) + scores, labels = prob[..., :-1].max(-1) + + # Convert to [x0, y0, x1, y1] format + boxes = box_xywh_to_xyxy(out_bbox.view(-1, 2, 2)).view(-1, 4) + + # Convert from relative [0, 1] to absolute [0, height] coordinates + img_h, img_w = target_sizes.unbind(1) + + scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) + boxes = boxes * scale_fct[:, None, :] + + for s, l, b in zip(scores, labels, boxes): + score = s[s > threshold] + label = l[s > threshold] + box = b[s > threshold] + + NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs(image) + for p, (xmin, ymin, xmax, ymax), l in zip(score, box.tolist(), label): + draw_box_from_xyxy( + NHWC_int_numpy_frames[0], + (int(xmin), int(ymin)), + (int(xmax), int(ymax)), + color=(0, 255, 0), + size=2, + text=f"{LABEL_MAP[l.item()]}: {p.item():0.2f}", + ) + + return NHWC_int_numpy_frames, score, label, box diff --git a/qai_hub_models/models/_shared/detr/coco_label_map.py b/qai_hub_models/models/_shared/detr/coco_label_map.py new file mode 100644 index 00000000..8ae9bd42 --- /dev/null +++ b/qai_hub_models/models/_shared/detr/coco_label_map.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
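For reference, DETR's raw boxes are (center_x, center_y, width, height) normalized to [0, 1]; the predict() method above turns them into absolute corner coordinates using box_xywh_to_xyxy and the scale_fct tensor. The standalone helper below is hypothetical and independent of the repo utilities; it simply illustrates the standard DETR convention with a hand-checkable number:

    import torch


    def cxcywh_to_absolute_xyxy(boxes: torch.Tensor, img_h: int, img_w: int) -> torch.Tensor:
        # boxes: [N, 4] as (cx, cy, w, h) in relative [0, 1] coordinates.
        cx, cy, w, h = boxes.unbind(-1)
        corners = torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)
        return corners * torch.tensor([img_w, img_h, img_w, img_h], dtype=boxes.dtype)

    # A centered box covering 20% x 40% of a 640x480 image:
    print(cxcywh_to_absolute_xyxy(torch.tensor([[0.5, 0.5, 0.2, 0.4]]), 480, 640))
    # tensor([[256., 144., 384., 336.]])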
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +LABEL_MAP = { + 0: "unlabeled", + 1: "person", + 2: "bicycle", + 3: "car", + 4: "motorcycle", + 5: "airplane", + 6: "bus", + 7: "train", + 8: "truck", + 9: "boat", + 10: "traffic", + 11: "fire", + 12: "street", + 13: "stop", + 14: "parking", + 15: "bench", + 16: "bird", + 17: "cat", + 18: "dog", + 19: "horse", + 20: "sheep", + 21: "cow", + 22: "elephant", + 23: "bear", + 24: "zebra", + 25: "giraffe", + 26: "hat", + 27: "backpack", + 28: "umbrella", + 29: "shoe", + 30: "eye", + 31: "handbag", + 32: "tie", + 33: "suitcase", + 34: "frisbee", + 35: "skis", + 36: "snowboard", + 37: "sports", + 38: "kite", + 39: "baseball", + 40: "baseball", + 41: "skateboard", + 42: "surfboard", + 43: "tennis", + 44: "bottle", + 45: "plate", + 46: "wine", + 47: "cup", + 48: "fork", + 49: "knife", + 50: "spoon", + 51: "bowl", + 52: "banana", + 53: "apple", + 54: "sandwich", + 55: "orange", + 56: "broccoli", + 57: "carrot", + 58: "hot", + 59: "pizza", + 60: "donut", + 61: "cake", + 62: "chair", + 63: "couch", + 64: "potted", + 65: "bed", + 66: "mirror", + 67: "dining", + 68: "window", + 69: "desk", + 70: "toilet", + 71: "door", + 72: "tv", + 73: "laptop", + 74: "mouse", + 75: "remote", + 76: "keyboard", + 77: "cell", + 78: "microwave", + 79: "oven", + 80: "toaster", + 81: "sink", + 82: "refrigerator", + 83: "blender", + 84: "book", + 85: "clock", + 86: "vase", + 87: "scissors", + 88: "teddy", + 89: "hair", + 90: "toothbrush", + 91: "hair", + 92: "banner", + 93: "blanket", + 94: "branch", + 95: "bridge", + 96: "building", + 97: "bush", + 98: "cabinet", + 99: "cage", + 100: "cardboard", + 101: "carpet", + 102: "ceiling", + 103: "ceiling", + 104: "cloth", + 105: "clothes", + 106: "clouds", + 107: "counter", + 108: "cupboard", + 109: "curtain", + 110: "desk", + 111: "dirt", + 112: "door", + 113: "fence", + 114: "floor", + 115: "floor", + 116: "floor", + 117: "floor", + 118: "floor", + 119: "flower", + 120: "fog", + 121: "food", + 122: "fruit", + 123: "furniture", + 124: "grass", + 125: "gravel", + 126: "ground", + 127: "hill", + 128: "house", + 129: "leaves", + 130: "light", + 131: "mat", + 132: "metal", + 133: "mirror", + 134: "moss", + 135: "mountain", + 136: "mud", + 137: "napkin", + 138: "net", + 139: "paper", + 140: "pavement", + 141: "pillow", + 142: "plant", + 143: "plastic", + 144: "platform", + 145: "playingfield", + 146: "railing", + 147: "railroad", + 148: "river", + 149: "road", + 150: "rock", + 151: "roof", + 152: "rug", + 153: "salad", + 154: "sand", + 155: "sea", + 156: "shelf", + 157: "sky", + 158: "skyscraper", + 159: "snow", + 160: "solid", + 161: "stairs", + 162: "stone", + 163: "straw", + 164: "structural", + 165: "table", + 166: "tent", + 167: "textile", + 168: "towel", + 169: "tree", + 170: "vegetable", + 171: "wall", + 172: "wall", + 173: "wall", + 174: "wall", + 175: "wall", + 176: "wall", + 177: "wall", + 178: "water", + 179: "waterdrops", + 180: "window", + 181: "window", + 182: "wood", +} diff --git a/qai_hub_models/models/_shared/detr/demo.py b/qai_hub_models/models/_shared/detr/demo.py new file mode 100644 index 00000000..0a513b0a --- /dev/null +++ b/qai_hub_models/models/_shared/detr/demo.py @@ -0,0 +1,56 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Type + +from PIL import Image + +from qai_hub_models.models._shared.detr.app import DETRApp +from qai_hub_models.utils.args import ( + demo_model_from_cli_args, + get_model_cli_parser, + get_on_device_demo_parser, + validate_on_device_demo_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebAsset, load_image +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.display import display_or_save_image + + +# Run DETR app end-to-end on a sample image. +# The demo will display the predicted mask in a window. +def detr_demo( + model: Type[BaseModel], + default_weights: str, + default_image: str | CachedWebAsset, + is_test: bool = False, +): + # Demo parameters + parser = get_model_cli_parser(model) + parser = get_on_device_demo_parser(parser, add_output_dir=True) + parser.add_argument( + "--image", + type=str, + default=default_image, + help="test image file path or URL", + ) + args = parser.parse_args([] if is_test else None) + validate_on_device_demo_args(args, model.get_model_id()) + + # Load image & model + detr = demo_model_from_cli_args(model, args) + + # Run app to scores, labels and boxes + img = load_image(args.image) + app = DETRApp(detr, model_image_input_size=[img.height, img.width]) + pred_images, _, _, _ = app.predict(img, default_weights) + pred_image = Image.fromarray(pred_images[0]) + + # Show the predicted boxes, scores and class names on the image. + if is_test: + assert isinstance(pred_image, Image.Image) + else: + display_or_save_image(pred_image, args.output_dir) diff --git a/qai_hub_models/models/_shared/detr/model.py b/qai_hub_models/models/_shared/detr/model.py new file mode 100644 index 00000000..426186a1 --- /dev/null +++ b/qai_hub_models/models/_shared/detr/model.py @@ -0,0 +1,66 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Tuple + +import torch +import torch.nn as nn +from transformers import DetrForObjectDetection + +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 + + +class DETR(BaseModel): + """Exportable DETR model, end-to-end.""" + + def __init__(self, model: nn.Module) -> None: + super().__init__() + self.model = model + + @classmethod + def from_pretrained(cls, ckpt_name: str): + model = DetrForObjectDetection.from_pretrained(ckpt_name) + model.eval() + return cls(model) + + def forward( + self, image: torch.Tensor, mask: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Run DETR on `image` and `mask`, and produce high quality detection results. + + Parameters: + image: Image tensor to run detection on. + mask: This represents the padding mask. True if padding was applied on that pixel else False. 
+
+        Returns:
+            predictions: Tuple of tensors (logits and coordinates)
+                Shape of logit tensor: [1, 100 (number of predictions), 92 (number of classes)]
+                Shape of coordinates: [1, 100, 4]
+
+        """
+        predictions = self.model(image, mask, return_dict=False)
+        return predictions
+
+    def get_input_spec(
+        self,
+        batch_size: int = 1,
+        num_channels: int = 3,
+        height: int = 480,
+        width: int = 480,
+    ) -> InputSpec:
+        """
+        Returns the input specification (name -> (shape, type)). This can be
+        used to submit a profiling job on Qualcomm® AI Hub.
+        """
+        return {
+            "image": ((batch_size, num_channels, height, width), "float32"),
+            "mask": ((batch_size, height, width), "float32"),
+        }
diff --git a/qai_hub_models/models/_shared/fastsam/__init__.py b/qai_hub_models/models/_shared/fastsam/__init__.py
new file mode 100644
index 00000000..21a22b31
--- /dev/null
+++ b/qai_hub_models/models/_shared/fastsam/__init__.py
@@ -0,0 +1,4 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
diff --git a/qai_hub_models/models/_shared/fastsam/app.py b/qai_hub_models/models/_shared/fastsam/app.py
new file mode 100644
index 00000000..15daef1b
--- /dev/null
+++ b/qai_hub_models/models/_shared/fastsam/app.py
@@ -0,0 +1,137 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from __future__ import annotations
+
+from typing import Callable, Tuple
+
+import numpy as np
+import torch
+from PIL import Image
+from ultralytics.engine.results import Results
+from ultralytics.models.fastsam import FastSAMPrompt
+from ultralytics.models.fastsam.utils import bbox_iou
+from ultralytics.utils import ops
+
+from qai_hub_models.utils.image_processing import preprocess_PIL_image
+
+
+class FastSAMApp:
+    """
+    This class consists of light-weight "app code" that is required to perform end-to-end inference with FastSAM.
+
+    The app uses 1 model:
+        * FastSAM
+
+    For a given image input, the app will:
+        * pre-process the image (convert to range[0, 1])
+        * Run FastSAM inference
+        * post-process the image
+        * display the input and output side-by-side
+    """
+
+    def __init__(
+        self,
+        fastsam_model: Callable[[torch.Tensor], torch.Tensor],
+        confidence: float = 0.4,
+        iou_threshold: float = 0.9,
+        retina_masks: bool = True,
+        model_image_input_shape: Tuple[int, int] = (640, 640),
+    ):
+        self.model = fastsam_model
+        self.confidence = confidence
+        self.iou_threshold = iou_threshold
+        self.retina_masks = retina_masks
+        self.model_image_input_shape = model_image_input_shape
+
+    def predict(self, *args, **kwargs):
+        # See segment_image.
+        return self.segment_image(*args, **kwargs)
+
+    def segment_image(self, image_path: str) -> Results:
+        """
+        Segment the image at the provided path.
+
+        Parameters:
+            image_path: str
+                Path to the input image. The image is resized to the model's input resolution before inference.
+
+        Returns:
+            segmented_result:
+                Segmentation results returned by FastSAMPrompt.everything_prompt().
+            prompt_process: FastSAMPrompt
+                Prompt-processing helper that can be used to plot or further filter the results.
+ """ + original_image = Image.open(image_path) + resized_image = original_image.resize( + (self.model_image_input_shape[0], self.model_image_input_shape[1]) + ) + img = preprocess_PIL_image(resized_image) + original_image = np.array(original_image) + image_path = [image_path] + preds = self.model(img) + preds = tuple( + (preds[0], tuple(([preds[1], preds[2], preds[3]], preds[4], preds[5]))) + ) + p = ops.non_max_suppression( + preds[0], + self.confidence, + self.iou_threshold, + agnostic=False, + max_det=100, + nc=1, # set to 1 class since SAM has no class predictions + classes=None, + ) + + full_box = torch.zeros(p[0].shape[1], device=p[0].device) + full_box[2], full_box[3], full_box[4], full_box[6:] = ( + img.shape[3], + img.shape[2], + 1.0, + 1.0, + ) + full_box = full_box.view(1, -1) + critical_iou_index = bbox_iou( + full_box[0][:4], p[0][:, :4], iou_thres=0.9, image_shape=img.shape[2:] + ) + if critical_iou_index.numel() != 0: + full_box[0][4] = p[0][critical_iou_index][:, 4] + full_box[0][6:] = p[0][critical_iou_index][:, 6:] + p[0][critical_iou_index] = full_box + + results = [] + proto = ( + preds[1][-1] if len(preds[1]) == 3 else preds[1] + ) # second output is len 3 if pt, but only 1 if exported + for i, pred in enumerate(p): + orig_img = original_image + img_path = image_path[0][i] + # No predictions, no masks + if not len(pred): + masks = None + elif self.retina_masks: + pred[:, :4] = ops.scale_boxes( + img.shape[2:], pred[:, :4], orig_img.shape + ) + + masks = ops.process_mask_native( + proto[i], pred[:, 6:], pred[:, :4], orig_img.shape[:2] + ) # HWC + else: + masks = ops.process_mask( + proto[i], pred[:, 6:], pred[:, :4], img.shape[2:], upsample=True + ) # HWC + pred[:, :4] = ops.scale_boxes( + img.shape[2:], pred[:, :4], orig_img.shape + ) + results.append( + Results( + orig_img, + path=img_path, + names="fastsam", + boxes=pred[:, :6], + masks=masks, + ) + ) + prompt_process = FastSAMPrompt(image_path[0], results, device="cpu") + segmented_result = prompt_process.everything_prompt() + return segmented_result, prompt_process diff --git a/qai_hub_models/models/_shared/fastsam/demo.py b/qai_hub_models/models/_shared/fastsam/demo.py new file mode 100644 index 00000000..cc1241ff --- /dev/null +++ b/qai_hub_models/models/_shared/fastsam/demo.py @@ -0,0 +1,56 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os +import tempfile +from typing import Type + +from PIL import Image + +from qai_hub_models.models._shared.fastsam.app import FastSAMApp +from qai_hub_models.utils.args import ( + demo_model_from_cli_args, + get_model_cli_parser, + get_on_device_demo_parser, + validate_on_device_demo_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebAsset, load_path +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.display import display_or_save_image + + +def fastsam_demo( + model_type: Type[BaseModel], image_path: str | CachedWebAsset, is_test: bool +): + # Demo parameters + parser = get_model_cli_parser(model_type) + parser = get_on_device_demo_parser(parser, add_output_dir=True) + parser.add_argument( + "--image", + type=str, + default=image_path, + help="image file path or URL.", + ) + + args = parser.parse_args([] if is_test else None) + validate_on_device_demo_args(args, model_type.get_model_id()) + + model = demo_model_from_cli_args(model_type, args) + app = FastSAMApp(model) + + with tempfile.TemporaryDirectory() as tmpdir: + image_path = load_path(args.image, tmpdir) + pred, prompt_process = app.segment_image(image_path) + + # Store the output image + output_dirname, _ = os.path.split(image_path) + output_path = os.path.join(output_dirname, "output.jpg") + prompt_process.plot(annotations=pred, output=output_path) + + # Display the output + output_image = Image.open(output_path) + if not is_test: + display_or_save_image(output_image, args.output_dir) diff --git a/qai_hub_models/models/_shared/fastsam/model.py b/qai_hub_models/models/_shared/fastsam/model.py new file mode 100644 index 00000000..092d44e6 --- /dev/null +++ b/qai_hub_models/models/_shared/fastsam/model.py @@ -0,0 +1,62 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch +import torch.nn as nn +from ultralytics import FastSAM + +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + + +class Fast_SAM(BaseModel): + """Exportable FastSAM model, end-to-end.""" + + def __init__(self, model: nn.Module) -> None: + super().__init__() + self.model = model + + @classmethod + def from_pretrained(cls, ckpt_name: str): + model = FastSAM(ckpt_name).model + model.eval() + return cls(model) + + def forward(self, image: torch.Tensor): + """ + Run FastSAM on `image`, and produce high quality segmentation masks. + Faster than SAM as it is based on YOLOv8. + + Parameters: + image: Pixel values pre-processed for encoder consumption. + Range: float[0, 1] + 3-channel Color Space: BGR + Returns: + + """ + predictions = self.model(image) + # Return predictions as a tuple instead of nested tuple. + return ( + predictions[0], + predictions[1][0][0], + predictions[1][0][1], + predictions[1][0][2], + predictions[1][1], + predictions[1][2], + ) + + def get_input_spec( + self, + batch_size: int = 1, + num_channels: int = 3, + height: int = 640, + width: int = 640, + ) -> InputSpec: + """ + Returns the input specification (name -> (shape, type). This can be + used to submit profiling job on Qualcomm® AI Hub. 
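+        By default this is a single "image" input of shape (1, 3, 640, 640) with dtype float32.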
+ """ + return {"image": ((batch_size, num_channels, height, width), "float32")} diff --git a/qai_hub_models/models/_shared/ffnet/__init__.py b/qai_hub_models/models/_shared/ffnet/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/_shared/ffnet/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/ffnet/model.py b/qai_hub_models/models/_shared/ffnet/model.py new file mode 100644 index 00000000..16834289 --- /dev/null +++ b/qai_hub_models/models/_shared/ffnet/model.py @@ -0,0 +1,122 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os + +import torch + +from qai_hub_models.models._shared.cityscapes_segmentation.model import ( + FFNET_SOURCE_PATCHES, + FFNET_SOURCE_REPO_COMMIT, + FFNET_SOURCE_REPOSITORY, + FFNET_SOURCE_VERSION, +) +from qai_hub_models.models._shared.cityscapes_segmentation.model import ( + MODEL_ID as CS_MODEL_ID, +) +from qai_hub_models.models._shared.cityscapes_segmentation.model import ( + CityscapesSegmentor, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, SourceAsRoot +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +FFNET_WEIGHTS_URL_ROOT = ( + "https://github.com/quic/aimet-model-zoo/releases/download/torch_segmentation_ffnet" +) +FFNET_SUBPATH_NAME_LOOKUP = { + # Variant name (in FFNet repo) to (subpath, src_name, dst_name) + "segmentation_ffnet40S_dBBB_mobile": ( + "ffnet40S", + "ffnet40S_dBBB_cityscapes_state_dict_quarts.pth", + "ffnet40S_dBBB_cityscapes_state_dict_quarts.pth", + ), + "segmentation_ffnet54S_dBBB_mobile": ( + "ffnet54S", + "ffnet54S_dBBB_cityscapes_state_dict_quarts.pth", + "ffnet54S_dBBB_cityscapes_state_dict_quarts.pth", + ), + "segmentation_ffnet78S_dBBB_mobile": ( + "ffnet78S", + "ffnet78S_dBBB_cityscapes_state_dict_quarts.pth", + "ffnet78S_dBBB_cityscapes_state_dict_quarts.pth", + ), + "segmentation_ffnet78S_BCC_mobile_pre_down": ( + "ffnet78S", + "ffnet78S_BCC_cityscapes_state_dict_quarts_pre_down.pth", + "ffnet78S_BCC_cityscapes_state_dict_quarts.pth", + ), + "segmentation_ffnet122NS_CCC_mobile_pre_down": ( + "ffnet122NS", + "ffnet122NS_CCC_cityscapes_state_dict_quarts_pre_down.pth", + "ffnet122NS_CCC_cityscapes_state_dict_quarts.pth", + ), +} + + +class FFNet(CityscapesSegmentor): + """Exportable FFNet fuss-free Cityscapes segmentation model.""" + + @classmethod + def from_pretrained(cls, variant_name: str) -> FFNet: + model = _load_ffnet_source_model(variant_name) + model.eval() + + return cls(model) + + +def _load_ffnet_source_model(variant_name) -> torch.nn.Module: + subpath, src_name, dst_name = FFNET_SUBPATH_NAME_LOOKUP[variant_name] + + weights_url = os.path.join(FFNET_WEIGHTS_URL_ROOT, src_name) + weights_path = CachedWebModelAsset( + weights_url, + MODEL_ID, + MODEL_ASSET_VERSION, + os.path.join(subpath, dst_name), + ).fetch() + root_weights_path = os.path.dirname(os.path.dirname(weights_path)) + + """ + orig_weights_path = download_data(weights_url, MODEL_ID) + 
+ root_weights_path = os.path.dirname(orig_weights_path) + + # FFNet requires the weights to be located in a sub-directory + weights_path = os.path.join(root_weights_path, subpath, dst_name) + os.makedirs(os.path.dirname(weights_path), exist_ok=True) + shutil.move(src=orig_weights_path, dst=weights_path) + """ + + # Re-use the repo from _shared/cityscapes_segmentation + with SourceAsRoot( + FFNET_SOURCE_REPOSITORY, + FFNET_SOURCE_REPO_COMMIT, + CS_MODEL_ID, + FFNET_SOURCE_VERSION, + source_repo_patches=FFNET_SOURCE_PATCHES, + ): + + # config, models are top-level packages in the FFNet repo + import config + + config.model_weights_base_path = root_weights_path + from models.model_registry import model_entrypoint + + model = model_entrypoint(variant_name)().eval() + return model + + +class FFNetLowRes(FFNet): + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 512, + width: int = 1024, + ) -> InputSpec: + return FFNet.get_input_spec(batch_size, num_channels, height, width) diff --git a/qai_hub_models/models/_shared/ffnet/test_utils.py b/qai_hub_models/models/_shared/ffnet/test_utils.py new file mode 100644 index 00000000..3261464e --- /dev/null +++ b/qai_hub_models/models/_shared/ffnet/test_utils.py @@ -0,0 +1,36 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import numpy as np +import torch + +from qai_hub_models.models._shared.cityscapes_segmentation.demo import ( + TEST_CITYSCAPES_LIKE_IMAGE_ASSET, +) +from qai_hub_models.models._shared.ffnet.model import FFNet, _load_ffnet_source_model +from qai_hub_models.utils.asset_loaders import load_image +from qai_hub_models.utils.image_processing import preprocess_PIL_image + + +def run_test_off_target_numerical( + ffnet_cls: FFNet, variant_name: str, relax_numerics: bool = False +): + """Verify that raw (numeric) outputs of both (qaism and non-qaism) networks are the same.""" + processed_sample_image = preprocess_PIL_image( + load_image(TEST_CITYSCAPES_LIKE_IMAGE_ASSET) + ) + source_model = _load_ffnet_source_model(variant_name) + qaism_model = ffnet_cls.from_pretrained() + + with torch.no_grad(): + source_out = source_model(processed_sample_image) + qaism_out = qaism_model(processed_sample_image) + + if relax_numerics: + # At least 90% of pixels should have original prediction + assert (source_out.argmax(1) == qaism_out.argmax(1)).float().mean() > 0.9 + else: + np.testing.assert_array_almost_equal(source_out, qaism_out) diff --git a/qai_hub_models/models/_shared/ffnet_quantized/__init__.py b/qai_hub_models/models/_shared/ffnet_quantized/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/_shared/ffnet_quantized/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/ffnet_quantized/aimet_config.json b/qai_hub_models/models/_shared/ffnet_quantized/aimet_config.json new file mode 100644 index 00000000..c81ef65f --- /dev/null +++ b/qai_hub_models/models/_shared/ffnet_quantized/aimet_config.json @@ -0,0 +1,68 @@ +{ + "defaults": + { + "ops": + { + "is_output_quantized": "True" + }, + "params": + { + "is_quantized": "True", + "is_symmetric": "True" + }, + "strict_symmetric": "False", + "unsigned_symmetric": "True", + "per_channel_quantization": "True" + }, + + "params": + { + "bias": + { + "is_quantized": "False" + } + }, + + "op_type": + { + "Squeeze": + { + "is_output_quantized": "False" + }, + "Pad": + { + "is_output_quantized": "False" + }, + "Mean": + { + "is_output_quantized": "False" + } + }, + + "supergroups": + [ + { + "op_list": ["Conv", "Relu"] + }, + { + "op_list": ["Conv", "Clip"] + }, + { + "op_list": ["Conv", "BatchNormalization", "Relu"] + }, + { + "op_list": ["Add", "Relu"] + }, + { + "op_list": ["Gemm", "Relu"] + } + ], + + "model_input": + { + "is_input_quantized": "True" + }, + + "model_output": + {} +} diff --git a/qai_hub_models/models/_shared/ffnet_quantized/model.py b/qai_hub_models/models/_shared/ffnet_quantized/model.py new file mode 100644 index 00000000..54cb1ee2 --- /dev/null +++ b/qai_hub_models/models/_shared/ffnet_quantized/model.py @@ -0,0 +1,91 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os + +import torch +from aimet_torch.batch_norm_fold import fold_all_batch_norms +from aimet_torch.model_preparer import prepare_model +from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim +from qai_hub.client import DatasetEntries + +from qai_hub_models.models._shared.ffnet.model import FFNet +from qai_hub_models.utils.base_model import SourceModelFormat, TargetRuntime +from qai_hub_models.utils.input_spec import InputSpec +from qai_hub_models.utils.quantization_aimet import AIMETQuantizableMixin + +MODEL_ID = __name__.split(".")[-2] +FFNET_AIMET_CONFIG = os.path.abspath( + os.path.join(os.path.dirname(__file__), "aimet_config.json") +) + + +class FFNetQuantizable(AIMETQuantizableMixin, FFNet): + """ + FFNet with post train quantization support. + + Supports only 8-bit weights and activations. 
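+
+    `from_pretrained` produces the quantized model by folding batch norms into the
+    floating-point FFNet, preparing it with AIMET's model preparer, wrapping it in a
+    QuantizationSimModel ("tf_enhanced" scheme, 8-bit parameters and activations), and
+    loading pre-computed AIMET encodings into the simulation.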
+ """ + + def __init__( + self, + ffnet_model: FFNet, + ) -> None: + FFNet.__init__(self, ffnet_model.model) + AIMETQuantizableMixin.__init__(self, ffnet_model) + + def get_hub_compile_options( + self, target_runtime: TargetRuntime, other_compile_options: str = "" + ) -> str: + compile_options = super().get_hub_compile_options( + target_runtime, other_compile_options + ) + return compile_options + " --quantize_full_type int8 --quantize_io" + + @classmethod + def default_aimet_encodings(cls) -> str: + raise NotImplementedError() + + @classmethod + def from_pretrained( + cls, + variant_name: str, + aimet_encodings: str | None = "DEFAULT", + ) -> "FFNetQuantizable": + ffnet = FFNet.from_pretrained(variant_name).model + + input_shape = FFNetQuantizable.get_input_spec()["image"][0] + + fold_all_batch_norms(ffnet, [input_shape]) + + ffnet = prepare_model(ffnet) + + sim = QuantizationSimModel( + ffnet, + quant_scheme="tf_enhanced", + default_param_bw=8, + default_output_bw=8, + config_file=FFNET_AIMET_CONFIG, + dummy_input=torch.rand(input_shape), + ) + if aimet_encodings: + if aimet_encodings == "DEFAULT": + aimet_encodings = cls.default_aimet_encodings() + load_encodings_to_sim(sim, aimet_encodings) + + sim.model.eval() + return cls(sim) + + def preferred_hub_source_model_format( + self, target_runtime: TargetRuntime + ) -> SourceModelFormat: + return SourceModelFormat.ONNX + + def get_calibration_data( + self, target_runtime: TargetRuntime, input_spec: InputSpec | None = None + ) -> DatasetEntries | None: + # Do not provide calibration data + return None diff --git a/qai_hub_models/models/_shared/imagenet_classifier/__init__.py b/qai_hub_models/models/_shared/imagenet_classifier/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/_shared/imagenet_classifier/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/imagenet_classifier/app.py b/qai_hub_models/models/_shared/imagenet_classifier/app.py new file mode 100644 index 00000000..c81bdb10 --- /dev/null +++ b/qai_hub_models/models/_shared/imagenet_classifier/app.py @@ -0,0 +1,71 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch +from PIL.Image import Image +from torchvision import transforms + +from qai_hub_models.models._shared.imagenet_classifier.model import ( + IMAGENET_DIM, + ImagenetClassifier, +) +from qai_hub_models.utils.image_processing import normalize_image_transform + +IMAGENET_TRANSFORM = transforms.Compose( + [ + transforms.Resize(256), + transforms.CenterCrop(IMAGENET_DIM), + transforms.ToTensor(), + normalize_image_transform(), + ] +) + + +def preprocess_image(image: Image) -> torch.Tensor: + """ + Preprocesses images to be run through torch imagenet classifiers + as prescribed here: + https://pytorch.org/hub/pytorch_vision_resnet/ + Parameters: + image: Input image to be run through the classifier model. + Returns: + torch tensor to be directly passed to the model. 
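+        The returned tensor has shape [1, 3, IMAGENET_DIM, IMAGENET_DIM], i.e. [1, 3, 224, 224].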
+ """ + out_tensor: torch.Tensor = IMAGENET_TRANSFORM(image) # type: ignore + return out_tensor.unsqueeze(0) + + +class ImagenetClassifierApp: + """ + This class consists of light-weight "app code" that is required to + perform end to end inference with an ImagenetClassifier. + + For a given image input, the app will: + * Pre-process the image (resize and normalize) + * Run Imagnet Classification + * Convert the raw output into probabilities using softmax + """ + + def __init__(self, model: ImagenetClassifier): + self.model = model + + def predict(self, image: Image) -> torch.Tensor: + """ + From the provided image or tensor, predict probability distribution + over the 1k Imagenet classes. + + Parameters: + image: A PIL Image in RGB format. + + Returns: + A (1000,) size torch tensor of probabilities, each one corresponding + to a different Imagenet1K class. + """ + + input_tensor = preprocess_image(image) + with torch.no_grad(): + output = self.model(input_tensor) + return torch.softmax(output[0], dim=0) diff --git a/qai_hub_models/models/_shared/imagenet_classifier/demo.py b/qai_hub_models/models/_shared/imagenet_classifier/demo.py new file mode 100644 index 00000000..41a81e70 --- /dev/null +++ b/qai_hub_models/models/_shared/imagenet_classifier/demo.py @@ -0,0 +1,65 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from typing import Type + +import torch + +from qai_hub_models.models._shared.imagenet_classifier.app import ImagenetClassifierApp +from qai_hub_models.models._shared.imagenet_classifier.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + ImagenetClassifier, +) +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + TEST_IMAGENET_IMAGE, +) +from qai_hub_models.utils.args import ( + demo_model_from_cli_args, + get_model_cli_parser, + get_on_device_demo_parser, + validate_on_device_demo_args, +) +from qai_hub_models.utils.asset_loaders import ( + CachedWebModelAsset, + load_image, + load_json, +) + +IMAGENET_LABELS_ASSET = CachedWebModelAsset( + "https://raw.githubusercontent.com/anishathalye/imagenet-simple-labels/master/imagenet-simple-labels.json", + MODEL_ID, + MODEL_ASSET_VERSION, + "imagenet_labels.json", +) + + +# Run Imagenet Classifier end-to-end on a sample image. +# The demo will print the predicted class to terminal. 
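+# Model-specific demo entry points are expected to call this helper; a typical
+# invocation (illustrative only, substitute a real model package for <model_name>) is:
+#   python -m qai_hub_models.models.<model_name>.demo --image /path/to/image.jpg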
+def imagenet_demo(model_cls: Type[ImagenetClassifier], is_test: bool = False): + # Demo parameters + parser = get_model_cli_parser(model_cls) + parser = get_on_device_demo_parser(parser) + parser.add_argument( + "--image", + type=str, + default=TEST_IMAGENET_IMAGE, + help="test image file path or URL", + ) + args = parser.parse_args([] if is_test else None) + validate_on_device_demo_args(args, model_cls.get_model_id()) + + model = demo_model_from_cli_args(model_cls, args) + app = ImagenetClassifierApp(model) + print("Model Loaded") + + image = load_image(args.image) + # Run app + probabilities = app.predict(image) + top5 = torch.topk(probabilities, 5) + if not is_test: + labels = load_json(IMAGENET_LABELS_ASSET) + print("Top 5 predictions for image:\n") + for i in range(5): + print(f"{labels[top5.indices[i]]}: {100 * top5.values[i]:.3g}%\n") diff --git a/qai_hub_models/models/_shared/imagenet_classifier/model.py b/qai_hub_models/models/_shared/imagenet_classifier/model.py new file mode 100644 index 00000000..516b7dca --- /dev/null +++ b/qai_hub_models/models/_shared/imagenet_classifier/model.py @@ -0,0 +1,75 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Optional + +import torch + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator +from qai_hub_models.evaluators.classification_evaluator import ClassificationEvaluator +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ASSET_VERSION = 1 +MODEL_ID = __name__.split(".")[-2] +IMAGENET_DIM = 224 + + +class ImagenetClassifier(BaseModel): + """ + Base class for all Imagenet Classifier models within QAI Hub Models. + """ + + def __init__( + self, + net: torch.nn.Module, + ): + """ + Basic initializer which takes in a pretrained classifier network. + Subclasses can choose to implement their own __init__ and forward methods. + """ + super().__init__() + self.net = net + self.eval() + + def forward(self, image_tensor: torch.Tensor): + """ + Predict class probabilities for an input `image`. + + Parameters: + image: A [1, 3, 224, 224] image. + Assumes image has been resized and normalized using the + standard preprocessing method for PyTorch Imagenet models. + + Pixel values pre-processed for encoder consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + A [1, 1000] where each value is the log-likelihood of + the image belonging to the corresponding Imagenet class. + """ + return self.net(image_tensor) + + def get_evaluator(self) -> BaseEvaluator: + return ClassificationEvaluator() + + def get_input_spec( + self, + ) -> InputSpec: + """ + Returns the input specification (name -> (shape, type). This can be + used to submit profiling job on Qualcomm® AI Hub. 
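+        The specification is a single "image_tensor" input of shape
+        (1, 3, IMAGENET_DIM, IMAGENET_DIM), i.e. (1, 3, 224, 224), with dtype float32.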
+ """ + return {"image_tensor": ((1, 3, IMAGENET_DIM, IMAGENET_DIM), "float32")} + + @classmethod + def from_pretrained( + cls, + weights: Optional[str] = None, + ) -> "ImagenetClassifier": + net = cls.model_builder(weights=weights or cls.DEFAULT_WEIGHTS) + return cls(net) diff --git a/qai_hub_models/models/_shared/imagenet_classifier/test_utils.py b/qai_hub_models/models/_shared/imagenet_classifier/test_utils.py new file mode 100644 index 00000000..d8865ca5 --- /dev/null +++ b/qai_hub_models/models/_shared/imagenet_classifier/test_utils.py @@ -0,0 +1,106 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import pytest +import torch + +from qai_hub_models.models._shared.imagenet_classifier.app import ( + ImagenetClassifierApp, + preprocess_image, +) +from qai_hub_models.models._shared.imagenet_classifier.model import ( + MODEL_ASSET_VERSION, + ImagenetClassifier, +) +from qai_hub_models.utils.asset_loaders import ( + CachedWebModelAsset, + load_image, + load_numpy, +) +from qai_hub_models.utils.testing import assert_most_close + +GROUP_NAME = "imagenet_classifier" +TEST_IMAGENET_IMAGE = CachedWebModelAsset.from_asset_store( + GROUP_NAME, MODEL_ASSET_VERSION, "dog.jpg" +) + +# Class "Samoyed" from https://gist.github.com/ageitgey/4e1342c10a71981d0b491e1b8227328b +TEST_IMAGENET_CLASS = 258 + + +@pytest.fixture(scope="module") +def imagenet_sample_torch() -> torch.Tensor: + """ + Returns: + + - Preprocessed (normalized etc) image as torch.Tensor with shape [1, 3, 224, 224] + """ + img = load_image(TEST_IMAGENET_IMAGE, "imagenet_classifier") + return preprocess_image(img) + + +def run_imagenet_classifier_test( + model: ImagenetClassifier, + model_name: str, + asset_version: int = 2, + probability_threshold: float = 0.7, + diff_tol: float = 0.0, + rtol: float = 0.0, + atol: float = 1e-4, +) -> None: + """ + Evaluates the classifier on a test image and validates the output. + + Parameters: + model: The model to evaluate. + model_name: Identifier used to lookup the expected output file. + asset_version: Version of the expected output file to lookup. + probability_threshold: If the predicited probability for the correct class + is below this threshold, the method throws an error. + diff_tol: Float in range [0,1] representing the maximum percentage of + the probabilities that can differ from the ground truth while + still having the test pass. + atol: Absolute tolerance allowed for two numbers to be "close". + rtol: Relative tolerance allowed for two numbers to be "close". + """ + + img = load_image(TEST_IMAGENET_IMAGE) + app = ImagenetClassifierApp(model) + probabilities = app.predict(img) + + expected_output = CachedWebModelAsset.from_asset_store( + model_name, asset_version, "expected_out.npy" + ) + expected_out = load_numpy(expected_output) + assert_most_close(probabilities.numpy(), expected_out, diff_tol, rtol, atol) + + predicted_class = torch.argmax(probabilities, dim=0) + predicted_probability = probabilities[TEST_IMAGENET_CLASS].item() + assert ( + predicted_probability > probability_threshold + ), f"Predicted probability {predicted_probability:.3f} is below the threshold {probability_threshold}." + assert ( + predicted_class == TEST_IMAGENET_CLASS + ), f"Model predicted class {predicted_class} when correct class was {TEST_IMAGENET_CLASS}." 
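+
+# A typical per-model test module calls these helpers roughly as follows. This is a
+# sketch only; the MobileNetV2 import is an illustrative example and the test names
+# are placeholders, not part of this shared module:
+#
+#   from qai_hub_models.models.mobilenet_v2.model import MODEL_ID, MobileNetV2
+#
+#   def test_task():
+#       run_imagenet_classifier_test(MobileNetV2.from_pretrained(), MODEL_ID)
+#
+#   def test_trace():
+#       run_imagenet_classifier_trace_test(MobileNetV2.from_pretrained())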
+ + +def run_imagenet_classifier_trace_test( + model: ImagenetClassifier, + diff_tol: float = 0.005, + rtol: float = 0.0, + atol: float = 1e-4, + is_quantized: bool = False, + check_trace: bool = True, +) -> None: + img = load_image(TEST_IMAGENET_IMAGE) + app = ImagenetClassifierApp(model) + if not is_quantized: + trace_app = ImagenetClassifierApp( + model.convert_to_torchscript(check_trace=check_trace) + ) + else: + trace_app = ImagenetClassifierApp(model.convert_to_quantized_torchscript()) + probabilities = app.predict(img) + trace_probs = trace_app.predict(img) + assert_most_close(probabilities.numpy(), trace_probs.numpy(), diff_tol, rtol, atol) diff --git a/qai_hub_models/models/_shared/mediapipe/__init__.py b/qai_hub_models/models/_shared/mediapipe/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/_shared/mediapipe/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/mediapipe/app.py b/qai_hub_models/models/_shared/mediapipe/app.py new file mode 100644 index 00000000..05ffcb6d --- /dev/null +++ b/qai_hub_models/models/_shared/mediapipe/app.py @@ -0,0 +1,680 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable, List, Tuple + +import cv2 +import numpy as np +import torch +from PIL.Image import Image + +from qai_hub_models.models._shared.mediapipe.utils import decode_preds_from_anchors +from qai_hub_models.utils.bounding_box_processing import ( + apply_directional_box_offset, + batched_nms, + box_xywh_to_xyxy, + box_xyxy_to_xywh, + compute_box_affine_crop_resize_matrix, + compute_box_corners_with_rotation, +) +from qai_hub_models.utils.draw import ( + draw_box_from_corners, + draw_box_from_xyxy, + draw_connections, + draw_points, +) +from qai_hub_models.utils.image_processing import ( + app_to_net_image_inputs, + apply_affine_to_coordinates, + apply_batched_affines_to_frame, + compute_vector_rotation, + denormalize_coordinates, + numpy_image_to_torch, + resize_pad, +) + + +class MediaPipeApp: + """ + This class consists of "app code" that is required to perform end to end inference with MediaPipe. + + The app uses 2 models: + * MediaPipeDetector + * MediaPipeLandmark + + For a given image input, the app will: + * pre-process the image (convert to range[0, 1]) + * Detect the object and some associated keypoints + * Compute a an approximate region of interest (roi) that encapsulates the entire object. + * Extract that ROI to its own image; rotate it so the object points upwards in the frame. + * Run the landmark detector on the ROI. + * Map the landmark detector output coordinates back to the original input frame. + * if requested, draw the detected object box, ROI, keypoints, and landmarks on the frame. 
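+
+    Predictions can be returned either as annotated frames or as raw tensors; see the
+    `raw_output` flag of `predict_landmarks_from_image`.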
+ """ + + def __init__( + self, + detector: Callable[[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]], + detector_anchors: torch.Tensor, + landmark_detector: Callable[[torch.Tensor], Tuple[torch.Tensor, ...]], + detector_input_dims: Tuple[int, int], + landmark_input_dims: Tuple[int, int], + keypoint_rotation_vec_start_idx: int, + keypoint_rotation_vec_end_idx: int, + rotation_offset_rads: float, + detect_box_offset_xy: float, + detect_box_scale: float, + min_detector_box_score: float = 0.95, + detector_score_clipping_threshold: int = 100, + nms_iou_threshold: float = 0.3, + min_landmark_score: float = 0.5, + landmark_connections: List[Tuple[int, int]] | None = None, + ): + """ + Create a MediaPipe application. + + Parameters: + detector: Callable[[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]] + The bounding box and keypoint detector model. + Input is an image [N C H W], channel layout is BGR, output is [coordinates, scores]. + + detector_anchors: torch.Tensor + Detector anchors, for decoding predictions from anchor points to boxes. + + landmark_detector: Callable[[torch.Tensor], Tuple[torch.Tensor, ...]] + The landmark detector model. Input is an image [N C H W], + channel layout is BGR, output is [scores, landmarks]. + + detector_input_dims: Tuple[int, int] + Input dimensionality (W, H) of the bounding box detector. + + landmark_input_dims: Tuple[int, int] + Input dimensionality (W, H) of the landmark detector. + + keypoint_rotation_vec_start_idx: int + The index of a keypoint (predicted by the bounding box detector). This KP is the start + of the vector used to compute the angle at which the object should be rotated (before + being passed to the landmark detector). + + keypoint_rotation_vec_end_idx: int + The index of a keypoint (predicted by the bounding box detector). This KP is the start + of the vector used to compute the angle at which the object should be rotated (before + being passed to the landmark detector). + + detect_box_offset_xy: float + Move the detected bounding box in the direction of the rotation vector described above by this amount + before passing the box to the landmark detector. + + detect_box_scale: float + Scale the detected bounding box's size by this amount + before passing the box to the landmark detector. + + min_detector_box_score: float + Minimum detector box score for a box to be used for landmark detection. + + detector_score_clipping_threshold: float + Clip detector box scores to [-threshold, threshold] + + nms_iou_threshold: float + IOU threshold for when NMS is run on the detector output boxes. + + min_landmark_score: float + Any landmark set with a score below this number will be discarded. + + landmark_connections: List[Tuple[int, int]] | None + Connections between landmark output points. + Format is List[Tuple[Landmark Point Index 0, Landmark Point Index 1]] + These connections will be drawn on the output image when applicable. 
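+
+            rotation_offset_rads: float
+                Constant offset (in radians) applied to the rotation angle computed from the
+                two keypoints above before the region of interest is extracted.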
+ """ + self.detector = detector + self.detector_anchors = detector_anchors + self.landmark_detector = landmark_detector + self.detector_input_dims = detector_input_dims + self.landmark_input_dims = landmark_input_dims + self.keypoint_rotation_vec_start_idx = keypoint_rotation_vec_start_idx + self.keypoint_rotation_vec_end_idx = keypoint_rotation_vec_end_idx + self.rotation_offset_rads = rotation_offset_rads + self.detect_box_offset_xy = detect_box_offset_xy + self.detect_box_scale = detect_box_scale + self.detector_score_clipping_threshold = detector_score_clipping_threshold + self.min_detector_box_score = min_detector_box_score + self.nms_iou_threshold = nms_iou_threshold + self.min_landmark_score = min_landmark_score + self.landmark_connections = landmark_connections + + def predict(self, *args, **kwargs): + # See predict_landmarks_from_image. + return self.predict_landmarks_from_image(*args, **kwargs) + + def predict_landmarks_from_image( + self, + pixel_values_or_image: torch.Tensor | np.ndarray | Image | List[Image], + raw_output: bool = False, + ) -> Tuple[ + List[torch.Tensor | None], + List[torch.Tensor | None], + List[torch.Tensor | None], + List[torch.Tensor | None], + ] | List[np.ndarray]: + """ + From the provided image or tensor, predict the bounding boxes & classes of objects detected within. + + Parameters: + pixel_values_or_image: torch.Tensor + PIL image + or + numpy array (N H W C x uint8) or (H W C x uint8) -- both BGR channel layout + or + pyTorch tensor (N C H W x fp32, value range is [0, 1]), BGR channel layout + + raw_output: bool + See "returns" doc section for details. + + Returns: + If raw_output is false, returns: + images: List[np.ndarray] + A list of predicted images (one for each batch), with NHWC shape and BGR channel layout. + Each image will have landmarks, roi, and bounding boxes drawn, if they are detected. + + Otherwise, returns several "batched" (one element per input image) lists: + batched_selected_boxes: List[torch.Tensor | None] + Selected object bounding box coordinates. None if batch had no bounding boxes with a score above the threshold. + Shape of each list element is [num_selected_boxes, 2, 2]. + Layout is + [[box_x1, box_y1], + [box_x2, box_y2]] + + batched_selected_keypoints: List[torch.Tensor | None] + Selected object bounding box keypoints. None if batch had no bounding boxes with a score above the threshold. + Shape of each list element is [num_selected_boxes, # of keypoints, 2]. + Layout is + [[keypoint_0_x, keypoint_0_y], + ..., + [keypoint_max_x, keypoint_max_y]] + + batched_roi_4corners: List[torch.Tensor | None] + Selected object "region of interest" (region used as input to the landmark detector) corner coordinates. + None if batch had no bounding boxes with a score above the threshold. + Shape of each list element is [num_selected_boxes, 4, 2], where 2 == (x, y) + The order of points is (top left point, bottom left point, top right point, bottom right point) + + batched_selected_landmarks: List[torch.tensor | None] + Selected landmarks. Organized like the following: + [ + # Batch 0 (for Input Image 0) + torch.Tensor([ + Selected Landmark 1 w/ shape (# of landmark points, 3) + Selected Landmark 2 w/ shape (# of landmark points, 3) + ... + ]), + # Batch 1 (for Input Image 1) + None # (this image has no detected object) + ... + ] + The shape of each inner list element is [# of landmark points, 3], + where 3 == (X, Y, Conf) + + ... 
(additional outputs if necessary) + """ + # Input Prep + NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs( + pixel_values_or_image + ) + + # Run Bounding Box & Keypoint Detector + batched_selected_boxes, batched_selected_keypoints = self._run_box_detector( + NCHW_fp32_torch_frames + ) + + # The region of interest ( bounding box of 4 (x, y) corners). + # List[torch.Tensor(shape=[Num Boxes, 4, 2])], + # where 2 == (x, y) + # + # A list element will be None if there is no selected ROI. + batched_roi_4corners = self._compute_object_roi( + batched_selected_boxes, batched_selected_keypoints + ) + + # selected landmarks for the ROI (if any) + # List[torch.Tensor(shape=[Num Selected Landmarks, K, 3])], + # where K == number of landmark keypoints, 3 == (x, y, p) + # + # A list element will be None if there is no ROI. + landmarks_out = self._run_landmark_detector( + NHWC_int_numpy_frames, batched_roi_4corners + ) + + if raw_output: + return ( + batched_selected_boxes, + batched_selected_keypoints, + batched_roi_4corners, + *landmarks_out, + ) + + self._draw_predictions( + NHWC_int_numpy_frames, + batched_selected_boxes, + batched_selected_keypoints, + batched_roi_4corners, + *landmarks_out, + ) + + return NHWC_int_numpy_frames + + def _run_box_detector( + self, NCHW_fp32_torch_frames: torch.Tensor + ) -> Tuple[List[torch.Tensor | None], List[torch.Tensor | None]]: + """ + From the provided image or tensor, predict the bounding boxes and keypoints of objects detected within. + + Parameters: + NCHW_fp32_torch_frames: torch.Tensor + pyTorch tensor (N C H W x fp32, value range is [0, 1]), BGR channel layout + + Returns: + batched_selected_boxes: List[torch.Tensor | None] + Selected object bounding box coordinates. None if batch had no bounding boxes with a score above the threshold. + Shape of each list element is [num_selected_boxes, 2, 2]. + Layout is + [[box_x1, box_y1], + [box_x2, box_y2]] + + batched_selected_keypoints: List[torch.Tensor | None] + Selected object bounding box keypoints. None if batch had no bounding boxes with a score above the threshold. + Shape of each list element is [num_selected_boxes, # of keypoints, 2]. + Layout is + [[keypoint_0_x, keypoint_0_y], + ..., + [keypoint_max_x, keypoint_max_y]] + """ + + # Resize input frames such that they're the appropriate size for detector inference. + box_detector_net_inputs, pd_net_input_scale, pd_net_input_pad = resize_pad( + NCHW_fp32_torch_frames, self.detector_input_dims + ) + + # Run object detector. + # Outputs: + # - box_coords: , where N == # of anchors & C == # of of coordinates + # Layout of C is (box_cx, boc_cw, box_w, box_h, keypoint_0_x, keypoint_0_y, ..., keypoint_maxKey_x, keypoint_maxKey_y) + # - box_scores: , where N == # of anchors. + box_coords, box_scores = self.detector(box_detector_net_inputs) + box_scores = box_scores.clamp( + -self.detector_score_clipping_threshold, + self.detector_score_clipping_threshold, + ) + box_scores = box_scores.sigmoid().squeeze(dim=-1) + + # Reshape outputs so that they have shape [..., # of coordinates, 2], where 2 == (x, y) + box_coords = box_coords.view(list(box_coords.shape)[:-1] + [-1, 2]) + anchors = self.detector_anchors.view( + list(self.detector_anchors.shape)[:-1] + [-1, 2] + ) + + # Decode to output coordinates using the model's trained anchors. + decode_preds_from_anchors(box_coords, self.detector_input_dims, anchors) + + # Convert box coordinates from CWH -> XYXY format for NMS. 
+ box_coords[:2] = box_xywh_to_xyxy(box_coords[:2]) + + # flatten coords (remove final [2] dim) for NMS + flattened_box_coords = box_coords.view(list(box_coords.shape)[:-2] + [-1]) + + # Run non maximum suppression on the output + # batched_selected_coords = List[torch.Tensor(shape=[Num Boxes, 4])], + # where 4 = (x0, y0, x1, y1) + batched_selected_coords, _ = batched_nms( + self.nms_iou_threshold, + self.min_detector_box_score, + flattened_box_coords, + box_scores, + ) + + selected_boxes = [] + selected_keypoints = [] + for i in range(0, len(batched_selected_coords)): + selected_coords = batched_selected_coords[i] + if len(selected_coords) != 0: + # Reshape outputs again so that they have shape [..., # of boxes, 2], where 2 == (x, y) + selected_coords = batched_selected_coords[i].view( + list(batched_selected_coords[i].shape)[:-1] + [-1, 2] + ) + + denormalize_coordinates( + selected_coords, + self.detector_input_dims, + pd_net_input_scale, + pd_net_input_pad, + ) + + selected_boxes.append(selected_coords[:, :2]) + selected_keypoints.append(selected_coords[:, 2:]) + else: + selected_boxes.append(None) + selected_keypoints.append(None) + + return selected_boxes, selected_keypoints + + def _compute_object_roi( + self, + batched_selected_boxes: List[torch.Tensor | None], + batched_selected_keypoints: List[torch.Tensor | None], + ) -> List[torch.Tensor | None]: + """ + From the provided bounding boxes and keypoints, compute the region of interest (ROI) that should be used + as input to the landmark detection model. + + Parameters: + batched_selected_boxes: List[torch.Tensor | None] + Selected object bounding box coordinates. None if batch had no bounding boxes with a score above the threshold. + Shape of each list element is [num_selected_boxes, 2, 2]. + Layout is + [[box_x1, box_y1], + [box_x2, box_y2]] + + batched_selected_keypoints: List[torch.Tensor | None] + Selected object bounding box keypoints. None if batch had no bounding boxes with a score above the threshold. + Shape of each list element is [num_selected_boxes, # of keypoints, 2]. + Layout is + [[keypoint_0_x, keypoint_0_y], + ..., + [keypoint_max_x, keypoint_max_y]] + + Returns + batched_roi_4corners: List[torch.Tensor | None] + Selected object "region of interest" (region used as input to the landmark detector) corner coordinates. + None if batch had no bounding boxes with a score above the threshold. + Shape of each list element is [num_selected_boxes, 4, 2], where 2 == (x, y) + The order of points is (top left point, bottom left point, top right point, bottom right point) + """ + batched_selected_roi = [] + for boxes, keypoints in zip(batched_selected_boxes, batched_selected_keypoints): + if boxes is None or keypoints is None: + batched_selected_roi.append(None) + continue + + # Compute bounding box center and rotation + theta = compute_vector_rotation( + keypoints[:, self.keypoint_rotation_vec_start_idx, ...], + keypoints[:, self.keypoint_rotation_vec_end_idx, ...], + self.rotation_offset_rads, + ) + selected_boxes_cwh = box_xyxy_to_xywh(boxes) + xc = selected_boxes_cwh[..., 0, 0] + yc = selected_boxes_cwh[..., 0, 1] + w = selected_boxes_cwh[..., 1, 0] + h = selected_boxes_cwh[..., 1, 1] + + # The bounding box often misses the entire object. + # Move the bounding box slightly (if necessary) to center it with the object. 
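+            # The offset magnitude is detect_box_offset_xy * box width, applied along the
+            # direction defined by the two rotation keypoints.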
+ apply_directional_box_offset( + self.detect_box_offset_xy * w, + keypoints[..., self.keypoint_rotation_vec_start_idx, :], + keypoints[..., self.keypoint_rotation_vec_end_idx, :], + xc, + yc, + ) + + # Apply scaling to enlargen the bounding box + w *= self.detect_box_scale + h *= self.detect_box_scale + + # Compute box corners from box center, width, height + batched_selected_roi.append( + compute_box_corners_with_rotation(xc, yc, w, h, theta) + ) + + return batched_selected_roi + + def _run_landmark_detector( + self, + NHWC_int_numpy_frames: List[np.ndarray], + batched_roi_4corners: List[torch.Tensor | None], + ) -> Tuple[List[torch.Tensor | None]]: + """ + From the provided image or tensor, predict the bounding boxes & classes of objects detected within. + + Parameters: + NHWC_int_numpy_frames: + List of numpy arrays of shape (H W C x uint8) -- BGR channel layout + Length of list is # of batches (the number of input images) + + batched_roi_4corners: List[torch.Tensor | None] + Selected object "region of interest" (region used as input to the landmark detector) corner coordinates. + None if batch had no bounding boxes with a score above the threshold. + Shape of each list element is [num_selected_boxes, 4, 2], where 2 == (x, y) + The order of points is (top left point, bottom left point, top right point, bottom right point) + + Returns: + batched_selected_landmarks: List[torch.tensor | None] + Selected landmarks. Organized like the following: + [ + # Batch 0 (for Input Image 0) + torch.Tensor([ + Selected Landmark 1 w/ shape (# of landmark points, 3) + Selected Landmark 2 w/ shape (# of landmark points, 3) + ... + ]), + # Batch 1 (for Input Image 1) + None # (this image has no detected object) + ... + ] + The shape of each inner list element is [# of landmark points, 3], + where 3 == (X, Y, Conf) + + ... (additional outputs when needed by implementation) + """ + + # selected landmarks for the ROI (if any) + # List[torch.Tensor(shape=[Num Selected Landmarks, K, 3])], + # where K == number of landmark keypoints, 3 == (x, y, p) + # + # A list element will be None if there is no ROI. + batched_selected_landmarks: List[torch.Tensor | None] = [] + + # For each input image... + for batch_idx, roi_4corners in enumerate(batched_roi_4corners): + if roi_4corners is None: + continue + affines = compute_box_affine_crop_resize_matrix( + roi_4corners[:, :3], self.landmark_input_dims + ) + + # Create input images by applying the affine transforms. + keypoint_net_inputs = numpy_image_to_torch( + apply_batched_affines_to_frame( + NHWC_int_numpy_frames[batch_idx], affines, self.landmark_input_dims + ) + ) + + # Compute landmarks. + ld_scores, landmarks = self.landmark_detector( # type: ignore + keypoint_net_inputs + ) + + # Convert [0-1] ranged values of landmarks to integer pixel space. + landmarks[:, :, 0] *= self.landmark_input_dims[0] + landmarks[:, :, 1] *= self.landmark_input_dims[1] + + # 1 landmark is predicted for each ROI of each input image. + # For each region of interest & associated predicted landmarks... + all_landmarks = [] + for ld_batch_idx in range(landmarks.shape[0]): + # Exclude landmarks that don't meet the appropriate score threshold. + if ld_scores[ld_batch_idx] >= self.min_detector_box_score: + # Apply the inverse of affine transform used above to the landmark coordinates. + # This will convert the coordinates to their locations in the original input image. 
+ inverted_affine = torch.from_numpy( + cv2.invertAffineTransform(affines[ld_batch_idx]) + ).float() + landmarks[ld_batch_idx][:, :2] = apply_affine_to_coordinates( + landmarks[ld_batch_idx][:, :2], inverted_affine + ) + + # Add the predicted landmarks to our list. + all_landmarks.append(landmarks[ld_batch_idx]) + + # Add this batch of landmarks to the output list. + batched_selected_landmarks.append( + torch.stack(all_landmarks, dim=0) if all_landmarks else None + ) + else: + # Add None for these lists, since this batch has no predicted bounding boxes. + batched_selected_landmarks.append(None) + + return (batched_selected_landmarks,) + + def _draw_box_and_roi( + self, + NHWC_int_numpy_frame: np.ndarray, + selected_boxes: torch.Tensor, + selected_keypoints: torch.Tensor, + roi_4corners: torch.Tensor, + ): + """ + Draw bounding box, keypoints, and corresponding region of interest (ROI) on the provided frame + + Parameters: + NHWC_int_numpy_frame: + Numpy array of shape (H W C x uint8) -- BGR channel layout + + selected_boxes: torch.Tensor + Selected object bounding box coordinates. Shape is [num_selected_boxes, 2, 2]. + Layout is + [[box_x1, box_y1], + [box_x2, box_y2]] + + selected_keypoints: List[torch.Tensor | None] + Selected object bounding box keypoints. Shape is [num_selected_boxes, # of keypoints, 2]. + Layout is + [[keypoint_0_x, keypoint_0_y], + ..., + [keypoint_max_x, keypoint_max_y]] + + roi_4corners: List[torch.Tensor | None] + Selected object "region of interest" (region used as input to the landmark detector) corner coordinates. + Shape is [num_selected_boxes, 4, 2], where 2 == (x, y) + + Returns + Nothing; drawing is done on input frame. + """ + for roi, box, kp in zip(roi_4corners, selected_boxes, selected_keypoints): + # Draw detector bounding box + draw_box_from_xyxy(NHWC_int_numpy_frame, box[0], box[1], (255, 0, 0), 1) + # Draw detector keypoints + draw_points(NHWC_int_numpy_frame, kp) + # Draw region of interest box computed from the detector box & keypoints + # (this is the input to the landmark detector) + draw_box_from_corners(NHWC_int_numpy_frame, roi, (0, 255, 0)) + + def _draw_landmarks( + self, + NHWC_int_numpy_frame: np.ndarray, + selected_landmarks: torch.Tensor, + **kwargs, + ): + """ + Draw landmarks on the provided frame + + Parameters: + NHWC_int_numpy_frame: + Numpy array of shape (H W C x uint8) -- BGR channel layout + + selected_landmarks + Selected landmarks. Organized like the following: + torch.Tensor([ + Selected Landmark 1 w/ shape (# of landmark points, 3) + Selected Landmark 2 w/ shape (# of landmark points, 3) + ... + ]), + The shape of each inner list element is [# of landmark points, 3], + where 3 == (X, Y, Conf) + + Returns + Nothing; drawing is done on input frame. 
+ """ + for ldm in selected_landmarks: + # Draw landmark points + draw_points(NHWC_int_numpy_frame, ldm[:, :2], (0, 255, 0)) + # Draw connections between landmark points + if self.landmark_connections: + draw_connections( + NHWC_int_numpy_frame, + ldm[:, :2], + self.landmark_connections, + (255, 0, 0), + 2, + ) + + def _draw_predictions( + self, + NHWC_int_numpy_frames: List[np.ndarray], + batched_selected_boxes: List[torch.Tensor | None], + batched_selected_keypoints: List[torch.Tensor | None], + batched_roi_4corners: List[torch.Tensor | None], + batched_selected_landmarks: List[torch.Tensor | None], + **kwargs, + ): + """ + Draw predictions on the provided frame + + Parameters: + NHWC_int_numpy_frames: + List of numpy arrays of shape (H W C x uint8) -- BGR channel layout + Length of list is # of batches (the number of input images) + + batched_selected_boxes: List[torch.Tensor | None] + Selected object bounding box coordinates. None if batch had no bounding boxes with a score above the threshold. + Shape of each list element is [num_selected_boxes, 2, 2]. + Layout is + [[box_x1, box_y1], + [box_x2, box_y2]] + + batched_selected_keypoints: List[torch.Tensor | None] + Selected object bounding box keypoints. None if batch had no bounding boxes with a score above the threshold. + Shape of each list element is [num_selected_boxes, # of keypoints, 2]. + Layout is + [[keypoint_0_x, keypoint_0_y], + ..., + [keypoint_max_x, keypoint_max_y]] + + batched_roi_4corners: List[torch.Tensor | None] + Selected object "region of interest" (region used as input to the landmark detector) corner coordinates. + None if batch had no bounding boxes with a score above the threshold. + Shape of each list element is [num_selected_boxes, 4, 2], where 2 == (x, y) + The order of points is (top left point, bottom left point, top right point, bottom right point) + + batched_selected_landmarks: List[torch.tensor | None] + Selected landmarks. Organized like the following: + [ + # Batch 0 (for Input Image 0) + torch.Tensor([ + Selected Landmark 1 w/ shape (# of landmark points, 3) + Selected Landmark 2 w/ shape (# of landmark points, 3) + ... + ]), + # Batch 1 (for Input Image 1) + None # (this image has no detected object) + ... + ] + The shape of each inner list element is [# of landmark points, 3], + where 3 == (X, Y, Conf) + + Returns + Nothing; drawing is done on input frame + """ + for batch_idx in range(len(NHWC_int_numpy_frames)): + image = NHWC_int_numpy_frames[batch_idx] + ld = batched_selected_landmarks[batch_idx] + box = batched_selected_boxes[batch_idx] + kp = batched_selected_keypoints[batch_idx] + roi_4corners = batched_roi_4corners[batch_idx] + + if box is not None and kp is not None and roi_4corners is not None: + self._draw_box_and_roi(image, box, kp, roi_4corners) + if ld is not None: + self._draw_landmarks(image, ld) diff --git a/qai_hub_models/models/_shared/mediapipe/utils.py b/qai_hub_models/models/_shared/mediapipe/utils.py new file mode 100644 index 00000000..a2d141fd --- /dev/null +++ b/qai_hub_models/models/_shared/mediapipe/utils.py @@ -0,0 +1,112 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from functools import partial +from typing import Any, Tuple + +import torch + +from qai_hub_models.utils.asset_loaders import SourceAsRoot +from qai_hub_models.utils.input_spec import InputSpec + +# ContextManager for running code with MediaPipePyTorch in python path and the +# root directory of MediaPipePyTorch set as cwd +MediaPipePyTorchAsRoot = partial( + SourceAsRoot, + "https://github.com/zmurez/MediaPipePyTorch", + "65f2549ba35cd61dfd29f402f6c21882a32fabb1", + "mediapipe_pytorch", + 1, +) + + +def trace_mediapipe( + detector_input_spec: InputSpec, + box_detector: torch.nn.Module, + landmark_input_spec: InputSpec, + landmark_detector: torch.nn.Module, +) -> Tuple[Any, Any]: + # Convert the models to pytorch traces. Traces can be saved & loaded from disk. + # With Qualcomm® AI Hub, a pytorch trace can be exported to run efficiently on mobile devices! + # + # Returns: Tuple[Box Detector Trace Object, Landmark Detector Trace Object] + # + box_detector_input_shape = detector_input_spec["image"][0] + box_detector_trace = torch.jit.trace( + box_detector, [torch.rand(box_detector_input_shape)] + ) + + landmark_detector_input_shape = landmark_input_spec["image"][0] + landmark_detector_trace = torch.jit.trace( + landmark_detector, [torch.rand(landmark_detector_input_shape)] + ) + + return box_detector_trace, landmark_detector_trace + + +def decode_preds_from_anchors( + box_coords: torch.Tensor, img_size: Tuple[int, int], anchors: torch.Tensor +): + """ + Decode predictions using the provided anchors. + + This function can be exported and run inside inference frameworks if desired. + + Note: If included in the model, this code is likely to be unfriendly to quantization. + This is because of the high range and variability of the output tensor. + + For best quantization accuracy, this code should be run separately from the model, + or the model should de-quantize activations before running these layers. + + Inputs: + box_coords: torch.Tensor + coordinates. Range must be [0, 1]. Shape is [Batch, Num Anchors, 2, 2] + where [2, 2] == [[xcenter, ycenter], [w, h]] + + img_size: Tuple(int, int) + The size of the tensor that was fed to the NETWORK (NOT the original image size). + H / W is the same order as coordinates. + + anchors: float + box anchors. Range must be [0, 1]. Shape is [Batch, Num Anchors, 2, 2], + where [2, 2] == [[xcenter, ycenter], [w, h]] + + pad: Tuple(int, int) + Padding used during resizing of input image to network input tensor. (w, h) + This is the absolute # of padding pixels in the network input tensor, NOT in the original image. + + Outputs: + coordinates: [..., m] tensor, where m is always (x0, y0) + The absolute coordinates of the box in the original image. + The "coordinates" input is modified in place. 
+ """ + assert box_coords.shape[-1] == anchors.shape[-1] == 2 + assert box_coords.shape[-3] == anchors.shape[-3] + + w_size, h_size = img_size + anchors_x, anchors_y, anchors_w, anchors_h = ( + anchors[..., 0, 0], + anchors[..., 0, 1], + anchors[..., 1, 0], + anchors[..., 1, 1], + ) + expanded_anchors_shape = list(anchors_w.shape) + [1] + + # Determine real center X and Y, as well as real pixel W and H + box_coords[..., 0, 0] = ( + box_coords[..., 0, 0] / w_size * anchors_w + anchors_x + ) # x_center + box_coords[..., 0, 1] = ( + box_coords[..., 0, 1] / h_size * anchors_h + anchors_y + ) # y_center + box_coords[..., 1, 0] = box_coords[..., 1, 0] / w_size * anchors_w # w + box_coords[..., 1, 1] = box_coords[..., 1, 1] / h_size * anchors_h # h + + # Get X and Y values of keypoints + box_coords[..., 2:, 0] = box_coords[..., 2:, 0] / w_size * anchors_w.view( + expanded_anchors_shape + ) + anchors_x.view(expanded_anchors_shape) + box_coords[..., 2:, 1] = box_coords[..., 2:, 1] / h_size * anchors_h.view( + expanded_anchors_shape + ) + anchors_y.view(expanded_anchors_shape) diff --git a/qai_hub_models/models/_shared/quicksrnet/__init__.py b/qai_hub_models/models/_shared/quicksrnet/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/_shared/quicksrnet/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/quicksrnet/common.py b/qai_hub_models/models/_shared/quicksrnet/common.py new file mode 100644 index 00000000..248c125e --- /dev/null +++ b/qai_hub_models/models/_shared/quicksrnet/common.py @@ -0,0 +1,46 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import torch + +from qai_hub_models.utils.asset_loaders import SourceAsRoot + +QUICKSRNET_SOURCE_REPOSITORY = "https://github.com/quic/aimet-model-zoo" +QUICKSRNET_SOURCE_REPO_COMMIT = "d09d2b0404d10f71a7640a87e9d5e5257b028802" + + +def _load_quicksrnet_source_model( + model_id, + model_asset_version, + scaling_factor, + num_channels, + num_intermediate_layers, + use_ito_connection, +) -> torch.nn.Module: + # Load QuickSRNet model from the source repository using the given weights. 
+ # Returns .utils.super_resolution.models.QuickSRNetBase + with SourceAsRoot( + QUICKSRNET_SOURCE_REPOSITORY, + QUICKSRNET_SOURCE_REPO_COMMIT, + model_id, + model_asset_version, + ): + # Remove import of model_definition.py as it has an import error itself, + # but we don't need anything from that file here + with open("aimet_zoo_torch/quicksrnet/__init__.py", "r") as file: + file_content = file.read() + new_content = file_content.replace( + "from .model.model_definition import QuickSRNet", " " + ) + with open("aimet_zoo_torch/quicksrnet/__init__.py", "w") as file: + file.write(new_content) + + from aimet_zoo_torch.quicksrnet.model.models import QuickSRNetBase + + return QuickSRNetBase( + scaling_factor=scaling_factor, + num_channels=num_channels, + num_intermediate_layers=num_intermediate_layers, + use_ito_connection=use_ito_connection, + ) diff --git a/qai_hub_models/models/_shared/repaint/__init__.py b/qai_hub_models/models/_shared/repaint/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/_shared/repaint/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/repaint/app.py b/qai_hub_models/models/_shared/repaint/app.py new file mode 100644 index 00000000..bb82f8b8 --- /dev/null +++ b/qai_hub_models/models/_shared/repaint/app.py @@ -0,0 +1,83 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable, List + +import numpy as np +import torch +from PIL.Image import Image + +from qai_hub_models.utils.image_processing import ( + app_to_net_image_inputs, + torch_tensor_to_PIL_image, +) + + +class RepaintMaskApp: + """ + This class consists of light-weight "app code" that is required to perform end to end inference with AOTGAN. + + The app uses 1 model: + * AOTGAN + + For a given image input, the app will: + * pre-process the image + * Run AOTGAN inference + * Convert the output tensor into a PIL Image + """ + + def __init__(self, model: Callable[[torch.Tensor, torch.Tensor], torch.Tensor]): + self.model = model + + def predict(self, *args, **kwargs): + # See paint_mask_on_image. + return self.paint_mask_on_image(*args, **kwargs) + + def paint_mask_on_image( + self, + pixel_values_or_image: torch.Tensor | np.ndarray | Image | List[Image], + mask_pixel_values_or_image: torch.Tensor | np.ndarray | Image, + ) -> List[Image]: + """ + Erases and repaints the source image[s] in the pixel values given by the mask. + + Parameters: + pixel_values_or_image + PIL image(s) + or + numpy array (N H W C x uint8) or (H W C x uint8) -- both RGB channel layout + or + pyTorch tensor (N C H W x fp32, value range is [0, 1]), RGB channel layout + + mask_pixel_values_or_image + PIL image(s) + or + numpy array (N H W C x uint8) or (H W C x uint8) -- both RGB channel layout + or + pyTorch tensor (N C H W x fp32, value range is [0, 1]), RGB channel layout + + If one mask is provided, it will be used for every input image. + + Returns: + images: List[PIL.Image] + A list of predicted images (one list element per batch). 
+ """ + NCHW_fp32_torch_frames = app_to_net_image_inputs(pixel_values_or_image)[1] + NCHW_fp32_torch_masks = app_to_net_image_inputs(mask_pixel_values_or_image)[1] + + # The number of input images should equal the number of input masks. + if NCHW_fp32_torch_masks.shape[0] != 1: + NCHW_fp32_torch_masks = NCHW_fp32_torch_masks.tile( + (NCHW_fp32_torch_frames.shape[0], 1, 1, 1) + ) + + # Mask input image + image_masked = ( + NCHW_fp32_torch_frames * (1 - NCHW_fp32_torch_masks) + NCHW_fp32_torch_masks + ) + out = self.model(image_masked, NCHW_fp32_torch_masks) + + return [torch_tensor_to_PIL_image(img) for img in out] diff --git a/qai_hub_models/models/_shared/repaint/demo.py b/qai_hub_models/models/_shared/repaint/demo.py new file mode 100644 index 00000000..f67f8270 --- /dev/null +++ b/qai_hub_models/models/_shared/repaint/demo.py @@ -0,0 +1,61 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Type + +from qai_hub_models.models._shared.repaint.app import RepaintMaskApp +from qai_hub_models.utils.args import ( + demo_model_from_cli_args, + get_model_cli_parser, + get_on_device_demo_parser, + validate_on_device_demo_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebAsset, load_image +from qai_hub_models.utils.base_model import BaseModel, TargetRuntime +from qai_hub_models.utils.display import display_or_save_image + + +# Run repaint app end-to-end on a sample image. +# The demo will display the predicted image in a window. +def repaint_demo( + model_type: Type[BaseModel], + default_image: str | CachedWebAsset, + default_mask: str | CachedWebAsset, + is_test: bool = False, +): + # Demo parameters + parser = get_model_cli_parser(model_type) + parser = get_on_device_demo_parser( + parser, available_target_runtimes=[TargetRuntime.TFLITE], add_output_dir=True + ) + parser.add_argument( + "--image", + type=str, + default=default_image, + help="test image file path or URL", + ) + parser.add_argument( + "--mask", + type=str, + default=default_mask, + help="test mask file path or URL", + ) + args = parser.parse_args([] if is_test else None) + validate_on_device_demo_args(args, model_type.get_model_id()) + + # Load image & model + model = demo_model_from_cli_args(model_type, args) + image = load_image(args.image) + mask = load_image(args.mask) + print("Model Loaded") + + # Run app + app = RepaintMaskApp(model) + out = app.paint_mask_on_image(image, mask)[0] + + if not is_test: + display_or_save_image(image, args.output_dir, "input_image.png", "input image") + display_or_save_image(out, args.output_dir, "output_image.png", "output image") diff --git a/qai_hub_models/models/_shared/sesr/__init__.py b/qai_hub_models/models/_shared/sesr/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/_shared/sesr/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
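A minimal usage sketch for `RepaintMaskApp` as defined above, assuming the package in this diff is installed. The file paths are placeholders, and the pass-through callable only stands in for a real inpainting network (an AOT-GAN-style module in practice); per the masking code above, mask value 1.0 marks the region to erase and repaint.

```python
import torch
from PIL import Image
from qai_hub_models.models._shared.repaint.app import RepaintMaskApp

# Stand-in "model" for illustration only: returns the masked image unchanged.
def passthrough(image_masked: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    return image_masked

app = RepaintMaskApp(passthrough)
image = Image.open("scene.png")       # placeholder path, same resolution as mask
mask = Image.open("scene_mask.png")   # placeholder path, white = area to repaint
repainted = app.paint_mask_on_image(image, mask)[0]
repainted.save("scene_repainted.png")
```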
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/sesr/common.py b/qai_hub_models/models/_shared/sesr/common.py new file mode 100644 index 00000000..eebef83c --- /dev/null +++ b/qai_hub_models/models/_shared/sesr/common.py @@ -0,0 +1,32 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch + +from qai_hub_models.utils.asset_loaders import SourceAsRoot + +# SESR original repo is here: https://github.com/ARM-software/sesr +# But this is all written in TF and Keras. Torch version is in AIMET +SESR_SOURCE_REPOSITORY = "https://github.com/quic/aimet-model-zoo" +SESR_SOURCE_REPO_COMMIT = "d09d2b0404d10f71a7640a87e9d5e5257b028802" + + +def _load_sesr_source_model( + model_id, model_asset_version: int | str, scaling_factor, num_channels, num_lblocks +) -> torch.nn.Module: + # Load SESR model from the source repository using the given weights. + # Returns .utils.super_resolution.models.SESRRelease + with SourceAsRoot( + SESR_SOURCE_REPOSITORY, SESR_SOURCE_REPO_COMMIT, model_id, model_asset_version + ): + + from aimet_zoo_torch.common.super_resolution.models import SESRRelease + + return SESRRelease( + scaling_factor=scaling_factor, + num_channels=num_channels, + num_lblocks=num_lblocks, + ) diff --git a/qai_hub_models/models/_shared/super_resolution/__init__.py b/qai_hub_models/models/_shared/super_resolution/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/_shared/super_resolution/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/super_resolution/app.py b/qai_hub_models/models/_shared/super_resolution/app.py new file mode 100644 index 00000000..e4c52f6e --- /dev/null +++ b/qai_hub_models/models/_shared/super_resolution/app.py @@ -0,0 +1,67 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable, List + +import torch +from PIL.Image import Image + +from qai_hub_models.utils.image_processing import ( + app_to_net_image_inputs, + torch_tensor_to_PIL_image, +) + +SCALE = 4 + + +class SuperResolutionApp: + """ + This class consists of light-weight "app code" that is required to perform end to end inference with Super Resolution models. + + The app uses 1 model: + * SuperResolution models + + For a given image input, the app will: + * pre-process the image (convert to range[0, 1]) + * Run inference + * post-process the image + * display the input and output side-by-side + """ + + def __init__(self, model: Callable[[torch.Tensor], torch.Tensor]): + self.model = model + + def predict(self, *args, **kwargs): + # See upscale_image. 
+ return self.upscale_image(*args, **kwargs) + + def upscale_image( + self, + pixel_values_or_image: torch.Tensor | Image | List[Image], + ) -> List[Image]: + """ + Upscale provided images + + Parameters: + pixel_values_or_image + PIL image(s) + or + numpy array (N H W C x uint8) or (H W C x uint8) -- both RGB channel layout + or + pyTorch tensor (N C H W x fp32, value range is [0, 1]), RGB channel layout + + Returns: + images: List[PIL.Image.Image] + A list of upscaled images (one for each input image). + """ + _, NCHW_fp32_torch_frames = app_to_net_image_inputs(pixel_values_or_image) + + # Run prediction + upscaled_images = self.model(NCHW_fp32_torch_frames) + if len(upscaled_images.shape) == 3: + upscaled_images = torch.unsqueeze(upscaled_images, 0) + + return [torch_tensor_to_PIL_image(img) for img in upscaled_images] diff --git a/qai_hub_models/models/_shared/super_resolution/demo.py b/qai_hub_models/models/_shared/super_resolution/demo.py new file mode 100644 index 00000000..e8d545ec --- /dev/null +++ b/qai_hub_models/models/_shared/super_resolution/demo.py @@ -0,0 +1,78 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import sys +from typing import List, Type + +from qai_hub_models.models._shared.super_resolution.app import SuperResolutionApp +from qai_hub_models.utils.args import ( + demo_model_from_cli_args, + get_model_cli_parser, + get_on_device_demo_parser, + validate_on_device_demo_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebAsset, load_image +from qai_hub_models.utils.base_model import BaseModel, TargetRuntime +from qai_hub_models.utils.display import display_or_save_image + + +# Run Super Resolution end-to-end on a sample image. +# The demo will display both the input image and the higher resolution output. +def super_resolution_demo( + model_cls: Type[BaseModel], + default_image: str | CachedWebAsset, + is_test: bool = False, + available_target_runtimes: List[TargetRuntime] = list( + TargetRuntime.__members__.values() + ), +): + # Demo parameters + parser = get_model_cli_parser(model_cls) + parser = get_on_device_demo_parser( + parser, + add_output_dir=True, + available_target_runtimes=available_target_runtimes, + ) + parser.add_argument( + "--image", + type=str, + default=default_image, + help="image file path or URL.", + ) + + args = parser.parse_args([] if is_test else None) + validate_on_device_demo_args(args, model_cls.get_model_id()) + + # Load image & model + image = load_image(args.image) + input_spec = model_cls.get_input_spec() + + # Make sure the input image is consistent with the model. + # Since we are demonstrating super-resolution, we do not want to do any + # implicit resampling. 
+ img_width, img_height = image.size + input_img_shape = (img_height, img_width) + model_img_shape = input_spec["image"][0][2:4] + if input_img_shape != model_img_shape: + print( + f"Error: The input image is required to be {model_img_shape[1]}x{model_img_shape[0]} for the on-device demo ({img_width}x{img_height} provided)" + ) + sys.exit(1) + + inference_model = demo_model_from_cli_args( + model_cls, + args, + ) + app = SuperResolutionApp(inference_model) + print("Model Loaded") + pred_images = app.upscale_image(image) + if not is_test: + display_or_save_image( + image, args.output_dir, "original_image.png", "original image" + ) + display_or_save_image( + pred_images[0], args.output_dir, "upscaled_image.png", "upscaled image" + ) diff --git a/qai_hub_models/models/_shared/swin/__init__.py b/qai_hub_models/models/_shared/swin/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/_shared/swin/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/swin/swin_transformer.py b/qai_hub_models/models/_shared/swin/swin_transformer.py new file mode 100644 index 00000000..58e4366f --- /dev/null +++ b/qai_hub_models/models/_shared/swin/swin_transformer.py @@ -0,0 +1,262 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import math +from typing import List, Optional + +import torch +from torch import Tensor +from torch.nn import functional as F +from torchvision.models.swin_transformer import ShiftedWindowAttention + + +def split_linear_input(x, weight, bias, max_channel): + num_chunks = int(-(-x.size(-1) // max_channel)) # Ceiling division + if num_chunks == 1: + return F.linear(x, weight, bias) + x_chunks = x.chunk(num_chunks, dim=-1) + weight_chunks = weight.chunk(num_chunks, dim=1) + output = sum( + [ + F.linear(x_chunk, weight_chunk) + for x_chunk, weight_chunk in zip(x_chunks, weight_chunks) + ] + ) + if bias is not None: + output += bias + return output + + +def split_linear(x, weight, bias, max_channel=512): + """ + Split linear input and output channels to have no more than `max_channel` + """ + num_chunks = int(-(-weight.size(0) // max_channel)) # Ceiling division + if num_chunks == 1: + return split_linear_input(x, weight, bias, max_channel) + weight_chunks = weight.chunk(num_chunks, dim=0) + bias_chunks = bias.chunk(num_chunks) if bias is not None else [None] * num_chunks + # Apply F.linear separately and concatenate the outputs + output = torch.cat( + [ + split_linear_input(x, weight_chunk, bias_chunk, max_channel) + for weight_chunk, bias_chunk in zip(weight_chunks, bias_chunks) + ], + dim=-1, + ) + return output + + +class ShiftedWindowAttentionInf(torch.nn.Module): + def __init__(self, model: ShiftedWindowAttention): + """ + Optimize for inference. See `shifted_window_attention_inf` for details. 
+ + Note: We do not monkey patch + `torchvision.models.swin_transformer.shifted_window_attention` so that we can + test numerical parity between ShiftedWindowAttentionInf and + ShiftedWindowAttention + """ + super().__init__() + self.model = model + + def forward(self, x: Tensor) -> Tensor: + """ + Args: + x (Tensor): Tensor with layout of [B, H, W, C] + Returns: + Tensor with same layout as input, i.e. [B, H, W, C] + """ + relative_position_bias = self.model.get_relative_position_bias() + return shifted_window_attention_inf( + x, + self.model.qkv.weight, + self.model.proj.weight, + relative_position_bias, + self.model.window_size, + self.model.num_heads, + shift_size=self.model.shift_size, + attention_dropout=self.model.attention_dropout, + dropout=self.model.dropout, + qkv_bias=self.model.qkv.bias, + proj_bias=self.model.proj.bias, + training=self.model.training, + ) + + +# Overrides for SwinTranformer model +# Alternative to https://github.com/pytorch/vision/blob/0d75d9e5516f446c9c0ef93bd4ed9fea13992d06/torchvision/models/swin_transformer.py#L116 +# fixes view from rank-6 to rank-5 for SwinTransformer +def shifted_window_attention_inf( + input: Tensor, + qkv_weight: Tensor, + proj_weight: Tensor, + relative_position_bias: Tensor, + window_size: List[int], + num_heads: int, + shift_size: List[int], + attention_dropout: float = 0.0, + dropout: float = 0.0, + qkv_bias: Optional[Tensor] = None, + proj_bias: Optional[Tensor] = None, + logit_scale: Optional[Tensor] = None, + training: bool = True, +) -> Tensor: + """ + Updated from + https://github.com/pytorch/vision/blob/0d75d9e5516f446c9c0ef93bd4ed9fea13992d06/torchvision/models/swin_transformer.py#L116 + """ + B, H, W, C = input.shape + # pad feature maps to multiples of window size + pad_r = (window_size[1] - W % window_size[1]) % window_size[1] + pad_b = (window_size[0] - H % window_size[0]) % window_size[0] + x = input + if pad_r != 0 or pad_b != 0: + x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b)) + _, pad_H, pad_W, _ = x.shape + + shift_size = shift_size.copy() + # If window size is larger than feature size, there is no need to shift window + if window_size[0] >= pad_H: + shift_size[0] = 0 + if window_size[1] >= pad_W: + shift_size[1] = 0 + + # cyclic shift + if sum(shift_size) > 0: + x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1]), dims=(1, 2)) + + # partition windows + num_windows = (pad_H // window_size[0]) * (pad_W // window_size[1]) + + # Local change begin + x = x.view( + B * pad_H // window_size[0], + window_size[0], + pad_W // window_size[1], + window_size[1] * C, + ) + + x = x.permute(0, 2, 1, 3).reshape( + B * num_windows, window_size[0] * window_size[1], C + ) # B*nW, Ws*Ws, C + # Local change end + + # multi-head attention + if logit_scale is not None and qkv_bias is not None: + qkv_bias = qkv_bias.clone() + length = qkv_bias.numel() // 3 + qkv_bias[length : 2 * length].zero_() + # === Local change begin === + # Split qkv projection + q_weight, k_weight, v_weight = torch.split( + qkv_weight, qkv_weight.shape[0] // 3, dim=0 + ) + q_bias, k_bias, v_bias = torch.split(qkv_bias, qkv_bias.shape[0] // 3, dim=0) + if q_weight.shape[0] > 512: + # Improve GPU residency with smaller fully connected layers + q = split_linear(x, q_weight, q_bias) + k = split_linear(x, k_weight, k_bias) + v = split_linear(x, v_weight, v_bias) + else: + q = F.linear(x, q_weight, q_bias) + k = F.linear(x, k_weight, k_bias) + v = F.linear(x, v_weight, v_bias) + + q = q.reshape(x.size(0), x.size(1), num_heads, C // num_heads).permute(0, 2, 1, 3) + k = 
k.reshape(x.size(0), x.size(1), num_heads, C // num_heads).permute(0, 2, 1, 3) + v = v.reshape(x.size(0), x.size(1), num_heads, C // num_heads).permute(0, 2, 1, 3) + # === Local change end === + if logit_scale is not None: + # cosine attention + attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1) + logit_scale = torch.clamp(logit_scale, max=math.log(100.0)).exp() + attn = attn * logit_scale + else: + q = q * (C // num_heads) ** -0.5 + attn = q.matmul(k.transpose(-2, -1)) + # add relative position bias + attn = attn + relative_position_bias + + if sum(shift_size) > 0: + # generate attention mask + attn_mask = x.new_zeros((pad_H, pad_W)) + h_slices = ( + (0, -window_size[0]), + (-window_size[0], -shift_size[0]), + (-shift_size[0], None), + ) + w_slices = ( + (0, -window_size[1]), + (-window_size[1], -shift_size[1]), + (-shift_size[1], None), + ) + count = 0 + for h in h_slices: + for w in w_slices: + attn_mask[h[0] : h[1], w[0] : w[1]] = count + count += 1 + attn_mask = attn_mask.view( + pad_H // window_size[0], + window_size[0], + pad_W // window_size[1], + window_size[1], + ) + attn_mask = attn_mask.permute(0, 2, 1, 3).reshape( + num_windows, window_size[0] * window_size[1] + ) + attn_mask = attn_mask.unsqueeze(1) - attn_mask.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( + attn_mask == 0, float(0.0) + ) + # ==== Local change begin === + attn = attn.view( + x.size(0) // num_windows, num_windows, num_heads, x.size(1) * x.size(1) + ) + attn = attn + attn_mask.reshape(num_windows, -1).unsqueeze(0).unsqueeze(2) + # ==== Local change end === + attn = attn.view(-1, num_heads, x.size(1), x.size(1)) + + attn = F.softmax(attn, dim=-1) + attn = F.dropout(attn, p=attention_dropout, training=training) + + x = attn.matmul(v).transpose(1, 2).reshape(x.size(0), x.size(1), C) + x = F.linear(x, proj_weight, proj_bias) + x = F.dropout(x, p=dropout, training=training) + + # reverse windows + # Local change begin + x = x.view( + B * pad_H // window_size[0], + pad_W // window_size[1], + window_size[0], + window_size[1] * C, + ) + x = x.permute(0, 2, 1, 3).reshape(B, pad_H, pad_W, C) + # Local change end + + # reverse cyclic shift + if sum(shift_size) > 0: + x = torch.roll(x, shifts=(shift_size[0], shift_size[1]), dims=(1, 2)) + + # unpad features + x = x[:, :H, :W, :].contiguous() + return x + + +class AutoSplitLinear(torch.nn.Module): + def __init__(self, model: torch.nn.Linear): + super().__init__() + self.linear = model + self.weight = model.weight + self.bias = model.bias + + def forward(self, x: Tensor): + if self.linear.in_features > 512 or self.linear.out_features > 512: + x = split_linear(x, self.linear.weight, self.linear.bias, max_channel=512) + else: + x = self.linear(x) + return x diff --git a/qai_hub_models/models/_shared/video_classifier/__init__.py b/qai_hub_models/models/_shared/video_classifier/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/_shared/video_classifier/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
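The `split_linear` helper above trades one large fully connected layer for several smaller ones without changing the math. A quick parity check under that assumption (random weights, arbitrary sizes; assumes the package in this diff is importable):

```python
import torch
from torch.nn import functional as F
from qai_hub_models.models._shared.swin.swin_transformer import split_linear

# The chunked projection should match a single dense F.linear numerically.
x = torch.randn(2, 49, 1024)
weight = torch.randn(2048, 1024)
bias = torch.randn(2048)

reference = F.linear(x, weight, bias)
chunked = split_linear(x, weight, bias, max_channel=512)
print(torch.allclose(reference, chunked, atol=1e-4))  # expected: True
```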
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/video_classifier/app.py b/qai_hub_models/models/_shared/video_classifier/app.py new file mode 100644 index 00000000..8c92fe2e --- /dev/null +++ b/qai_hub_models/models/_shared/video_classifier/app.py @@ -0,0 +1,179 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from pathlib import Path +from typing import List, Tuple + +import torch +import torchvision.io + +from qai_hub_models.models._shared.video_classifier.model import KineticsClassifier + + +def normalize(video: torch.Tensor): + """Normalize the video frames. + Parameters: + video: Video tensor (Number of frames x HWC) with values between 0-255 + Channel Layout: RGB + + Returns: + video: Video is normalized to have values between 0-1 + and transposed so the shape is Channel x Number of frames x HW. + """ + return video.permute(3, 0, 1, 2).to(torch.float32) / 255 + + +def resize(video: torch.Tensor, size: Tuple[int, int]): + """ + Interpolate the frames of the image to match model's input resolution. + + Parameters: + video: torch.Tensor + + Returns: + video: Resized video is returned. + Selected settings for resize were recommended. + + """ + return torch.nn.functional.interpolate( + video, size=size, scale_factor=None, mode="bilinear", align_corners=False + ) + + +def crop(video: torch.Tensor, output_size: Tuple[int, int]): + """ + Parameters: + video: torch.Tensor + Input video torch.Tensor. + output_size: desired output shape for each frame. + + Returns: + video: torch.Tensor + Center cropped based on the output size + + """ + h, w = video.shape[-2:] + th, tw = output_size + i = int(round((h - th) / 2.0)) + j = int(round((w - tw) / 2.0)) + return video[..., i : (i + th), j : (j + tw)] + + +def normalize_base( + video: torch.Tensor, mean: List[float], std: List[float] +) -> torch.Tensor: + """ + + Parameters: + video: Input video torch.Tensor + mean: Mean to be subtracted per channel of the input. + std: Standard deviation to be divided per each channel. + + Returns: + video: Normalized based on provided mean and scale. + The operaion is done per channle. + + """ + shape = (-1,) + (1,) * (video.dim() - 1) + mean_tensor = torch.as_tensor(mean).reshape(shape) + std_tensor = torch.as_tensor(std).reshape(shape) + return (video - mean_tensor) / std_tensor + + +def read_video_per_second(path: str) -> torch.Tensor: + """ + + Parameters: + path: Path of the input video. + + Returns: + input_video: Reads video from path and converts to torch tensor. + + """ + input_video, _, _ = torchvision.io.read_video(path, pts_unit="sec") + return input_video + + +def preprocess_video_kinetics_400(input_video: torch.Tensor): + """ + Preprocess the input video correctly for video classification inference. + + Parameters: + input_video: Raw input tensor + + Returns: + video: Normalized, resized, cropped and normalized by channel for input model. 
+ This preprocessing is dd + + """ + mean = [0.43216, 0.394666, 0.37645] + std = [0.22803, 0.22145, 0.216989] + input_video = normalize(input_video) + input_video = resize(input_video, (128, 171)) + input_video = crop(input_video, (112, 112)) + input_video = normalize_base(input_video, mean=mean, std=std) + return input_video + + +def get_class_name_kinetics_400() -> List[str]: + """Return the class name.""" + actions = "abseiling,air drumming,answering questions,applauding,applying cream,archery,arm wrestling,arranging flowers,assembling computer,auctioning,baby waking up,baking cookies,balloon blowing,bandaging,barbequing,bartending,beatboxing,bee keeping,belly dancing,bench pressing,bending back,bending metal,biking through snow,blasting sand,blowing glass,blowing leaves,blowing nose,blowing out candles,bobsledding,bookbinding,bouncing on trampoline,bowling,braiding hair,breading or breadcrumbing,breakdancing,brush painting,brushing hair,brushing teeth,building cabinet,building shed,bungee jumping,busking,canoeing or kayaking,capoeira,carrying baby,cartwheeling,carving pumpkin,catching fish,catching or throwing baseball,catching or throwing frisbee,catching or throwing softball,celebrating,changing oil,changing wheel,checking tires,cheerleading,chopping wood,clapping,clay pottery making,clean and jerk,cleaning floor,cleaning gutters,cleaning pool,cleaning shoes,cleaning toilet,cleaning windows,climbing a rope,climbing ladder,climbing tree,contact juggling,cooking chicken,cooking egg,cooking on campfire,cooking sausages,counting money,country line dancing,cracking neck,crawling baby,crossing river,crying,curling hair,cutting nails,cutting pineapple,cutting watermelon,dancing ballet,dancing charleston,dancing gangnam style,dancing macarena,deadlifting,decorating the christmas tree,digging,dining,disc golfing,diving cliff,dodgeball,doing aerobics,doing laundry,doing nails,drawing,dribbling basketball,drinking,drinking beer,drinking shots,driving car,driving tractor,drop kicking,drumming fingers,dunking basketball,dying hair,eating burger,eating cake,eating carrots,eating chips,eating doughnuts,eating hotdog,eating ice cream,eating spaghetti,eating watermelon,egg hunting,exercising arm,exercising with an exercise ball,extinguishing fire,faceplanting,feeding birds,feeding fish,feeding goats,filling eyebrows,finger snapping,fixing hair,flipping pancake,flying kite,folding clothes,folding napkins,folding paper,front raises,frying vegetables,garbage collecting,gargling,getting a haircut,getting a tattoo,giving or receiving award,golf chipping,golf driving,golf putting,grinding meat,grooming dog,grooming horse,gymnastics tumbling,hammer throw,headbanging,headbutting,high jump,high kick,hitting baseball,hockey stop,holding snake,hopscotch,hoverboarding,hugging,hula hooping,hurdling,hurling (sport),ice climbing,ice fishing,ice skating,ironing,javelin throw,jetskiing,jogging,juggling balls,juggling fire,juggling soccer ball,jumping into pool,jumpstyle dancing,kicking field goal,kicking soccer ball,kissing,kitesurfing,knitting,krumping,laughing,laying bricks,long jump,lunge,making a cake,making a sandwich,making bed,making jewelry,making pizza,making snowman,making sushi,making tea,marching,massaging back,massaging feet,massaging legs,massaging person's head,milking cow,mopping floor,motorcycling,moving furniture,mowing lawn,news anchoring,opening bottle,opening present,paragliding,parasailing,parkour,passing American football (in game),passing American football (not in game),peeling apples,peeling 
potatoes,petting animal (not cat),petting cat,picking fruit,planting trees,plastering,playing accordion,playing badminton,playing bagpipes,playing basketball,playing bass guitar,playing cards,playing cello,playing chess,playing clarinet,playing controller,playing cricket,playing cymbals,playing didgeridoo,playing drums,playing flute,playing guitar,playing harmonica,playing harp,playing ice hockey,playing keyboard,playing kickball,playing monopoly,playing organ,playing paintball,playing piano,playing poker,playing recorder,playing saxophone,playing squash or racquetball,playing tennis,playing trombone,playing trumpet,playing ukulele,playing violin,playing volleyball,playing xylophone,pole vault,presenting weather forecast,pull ups,pumping fist,pumping gas,punching bag,punching person (boxing),push up,pushing car,pushing cart,pushing wheelchair,reading book,reading newspaper,recording music,riding a bike,riding camel,riding elephant,riding mechanical bull,riding mountain bike,riding mule,riding or walking with horse,riding scooter,riding unicycle,ripping paper,robot dancing,rock climbing,rock scissors paper,roller skating,running on treadmill,sailing,salsa dancing,sanding floor,scrambling eggs,scuba diving,setting table,shaking hands,shaking head,sharpening knives,sharpening pencil,shaving head,shaving legs,shearing sheep,shining shoes,shooting basketball,shooting goal (soccer),shot put,shoveling snow,shredding paper,shuffling cards,side kick,sign language interpreting,singing,situp,skateboarding,ski jumping,skiing (not slalom or crosscountry),skiing crosscountry,skiing slalom,skipping rope,skydiving,slacklining,slapping,sled dog racing,smoking,smoking hookah,snatch weight lifting,sneezing,sniffing,snorkeling,snowboarding,snowkiting,snowmobiling,somersaulting,spinning poi,spray painting,spraying,springboard diving,squat,sticking tongue out,stomping grapes,stretching arm,stretching leg,strumming guitar,surfing crowd,surfing water,sweeping floor,swimming backstroke,swimming breast stroke,swimming butterfly stroke,swing dancing,swinging legs,swinging on something,sword fighting,tai chi,taking a shower,tango dancing,tap dancing,tapping guitar,tapping pen,tasting beer,tasting food,testifying,texting,throwing axe,throwing ball,throwing discus,tickling,tobogganing,tossing coin,tossing salad,training dog,trapezing,trimming or shaving beard,trimming trees,triple jump,tying bow tie,tying knot (not on a tie),tying tie,unboxing,unloading truck,using computer,using remote controller (not gaming),using segway,vault,waiting in line,walking the dog,washing dishes,washing feet,washing hair,washing hands,water skiing,water sliding,watering plants,waxing back,waxing chest,waxing eyebrows,waxing legs,weaving basket,welding,whistling,windsurfing,wrapping present,wrestling,writing,yawning,yoga,zumba" + return actions.split(",") + + +def recognize_action_kinetics_400(prediction: torch.Tensor) -> List[str]: + """ + Return the top 5 class names. + Parameters: + prediction: Get the probability for all classes. + + Returns: + classnames: List of class ids from Kinetics-400 dataset is returned. + + """ + # Get top 5 class probabilities + prediction = torch.topk(prediction.flatten(), 5).indices + + actions = get_class_name_kinetics_400() + return [actions[pred] for pred in prediction] + + +class KineticsClassifierApp: + """ + This class consists of light-weight "app code" that is required to + perform end to end inference with an KineticsClassifier. 
+ + For a given image input, the app will: + * Pre-process the video (resize and normalize) + * Run Video Classification + * Return the probability of each class. + """ + + def __init__(self, model: KineticsClassifier): + self.model = model.eval() + + def predict(self, path: str | Path) -> List[str]: + """ + From the provided path of the video, predict probability distribution + over the 400 Kinetics classes and return the class name. + + Parameters: + path: Path to the raw video + + Returns: + prediction: List[str] with top 5 most probable classes for a given video. + """ + + # Reads the video via provided path + input_video = read_video_per_second(str(path)) + + # Preprocess the video + input_video = preprocess_video_kinetics_400(input_video) + + # Inference using mdoel + raw_prediction = self.model(input_video.unsqueeze(0)) + + return recognize_action_kinetics_400(raw_prediction) diff --git a/qai_hub_models/models/_shared/video_classifier/demo.py b/qai_hub_models/models/_shared/video_classifier/demo.py new file mode 100644 index 00000000..99ce64a8 --- /dev/null +++ b/qai_hub_models/models/_shared/video_classifier/demo.py @@ -0,0 +1,43 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import tempfile +from typing import Type + +from qai_hub_models.models._shared.video_classifier.app import KineticsClassifierApp +from qai_hub_models.models._shared.video_classifier.model import KineticsClassifier +from qai_hub_models.utils.args import get_model_cli_parser, model_from_cli_args +from qai_hub_models.utils.asset_loaders import CachedWebAsset, load_path + + +# +# Run KineticsClassifierApp end-to-end on a sample video. +# The demo will display top classification predictions for the video. +# +def kinetics_classifier_demo( + model_type: Type[KineticsClassifier], + default_video: str | CachedWebAsset, + is_test: bool = False, +): + # Demo parameters + parser = get_model_cli_parser(model_type) + + parser.add_argument( + "--video", type=str, default=default_video, help="video file path or URL." + ) + + args = parser.parse_args([] if is_test else None) + + # Load image & model + model = model_from_cli_args(model_type, args) + app = KineticsClassifierApp(model) + print("Model Loaded") + with tempfile.TemporaryDirectory() as tmpdir: + dst_path = load_path(args.video, tmpdir) + predictions = app.predict(path=str(dst_path)) + top5_classes = ", ".join(predictions) + if not is_test: + print(f"Top 5 predictions: {top5_classes}") diff --git a/qai_hub_models/models/_shared/video_classifier/model.py b/qai_hub_models/models/_shared/video_classifier/model.py new file mode 100644 index 00000000..6b75ea3a --- /dev/null +++ b/qai_hub_models/models/_shared/video_classifier/model.py @@ -0,0 +1,59 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch + +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +DEFAULT_VIDEO_DIM = 112 + + +class KineticsClassifier(BaseModel): + """ + Base class for all Kinetics Classifier models within QAI Hub Models. 
+ """ + + def __init__(self, net: torch.nn.Module): + """ + Basic initializer which takes in a pretrained classifier network. + Subclasses can choose to implement their own __init__ and forward methods. + """ + super().__init__() + self.net = net + + def forward(self, video: torch.Tensor): + """ + Predict class probabilities for an input `video`. + + Parameters: + video: A [C, Number of frames, H, W] video. + Assumes video has been resized and normalized as implemented + in the preprocess_image function in video_preprocessing.py file. + Pixel values pre-processed for encoder consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + A [1, 400] where each value is the log-likelihood of + the video belonging to the corresponding Kinetics class. + """ + return self.net(video) + + def get_input_spec( + self, + num_frames: int = 16, + ) -> InputSpec: + """ + Returns the input specification (name -> (shape, type). This can be + used to submit profiling job on Qualcomm AI Hub. + """ + return { + "video": ( + (1, 3, num_frames, DEFAULT_VIDEO_DIM, DEFAULT_VIDEO_DIM), + "float32", + ) + } diff --git a/qai_hub_models/models/_shared/yolo/__init__.py b/qai_hub_models/models/_shared/yolo/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/models/_shared/yolo/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/models/_shared/yolo/app.py b/qai_hub_models/models/_shared/yolo/app.py new file mode 100644 index 00000000..668ebd0d --- /dev/null +++ b/qai_hub_models/models/_shared/yolo/app.py @@ -0,0 +1,150 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable, List, Tuple + +import numpy as np +import torch +from PIL.Image import Image + +from qai_hub_models.utils.bounding_box_processing import batched_nms +from qai_hub_models.utils.draw import draw_box_from_xyxy +from qai_hub_models.utils.image_processing import app_to_net_image_inputs + + +class YoloObjectDetectionApp: + """ + This class consists of light-weight "app code" that is required to perform end to end inference + with Yolo object detection models. + + The app works with following models: + * YoloV7 + * YoloV8Detection + + For a given image input, the app will: + * pre-process the image (convert to range[0, 1]) + * Run Yolo inference + * if requested, post-process YoloV7 output using non maximum suppression + * if requested, draw the predicted bounding boxes on the input image + """ + + def __init__( + self, + model: Callable[ + [torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor] + ], + nms_score_threshold: float = 0.45, + nms_iou_threshold: float = 0.7, + ): + """ + Initialize a YoloObjectDetectionApp application. + + Parameters: + model: torch.Tensor + Yolo object detection model. + + Inputs: + Tensor of shape (N H W C x float32) with range [0, 1] and BGR channel layout. + + Outputs: + boxes: Tensor of shape [batch, num preds, 4] where 4 == (x1, y1, x2, y2). 
+ The output are in the range of the input image's dimensions (NOT [0-1]) + + scores: Tensor of shape [batch, num_preds, # of classes (typically 80)] + + class_idx: Tensor of shape [num_preds] where the values are the indices + of the most probable class of the prediction. + + nms_score_threshold + Score threshold for non maximum suppression. + + nms_iou_threshold + Intersection over Union threshold for non maximum suppression. + """ + self.model = model + self.nms_score_threshold = nms_score_threshold + self.nms_iou_threshold = nms_iou_threshold + + def check_image_size(self, pixel_values: torch.Tensor) -> None: + """ + Verify image size is valid model input. + """ + raise NotImplementedError + + def predict(self, *args, **kwargs): + # See predict_boxes_from_image. + return self.predict_boxes_from_image(*args, **kwargs) + + def predict_boxes_from_image( + self, + pixel_values_or_image: torch.Tensor | np.ndarray | Image | List[Image], + raw_output: bool = False, + ) -> Tuple[List[torch.Tensor], List[torch.Tensor], List[torch.Tensor]] | List[ + np.ndarray + ]: + """ + From the provided image or tensor, predict the bounding boxes & classes of objects detected within. + + Parameters: + pixel_values_or_image: torch.Tensor + PIL image + or + numpy array (N H W C x uint8) or (H W C x uint8) -- both BGR channel layout + or + pyTorch tensor (N C H W x fp32, value range is [0, 1]), BGR channel layout + + raw_output: bool + See "returns" doc section for details. + + Returns: + If raw_output is false or pixel_values_or_image is not a PIL image, returns: + images: List[np.ndarray] + A list of predicted BGR, [H, W, C] images (one list element per batch). Each image will have bounding boxes drawn. + + Otherwise, returns: + boxes: List[torch.Tensor] + Bounding box locations per batch. List element shape is [num preds, 4] where 4 == (x1, y1, x2, y2) + scores: List[torch.Tensor] + class scores per batch multiplied by confidence: List element shape is [num_preds, # of classes (typically 80)] + class_idx: List[torch.tensor] + Shape is [num_preds] where the values are the indices of the most probable class of the prediction. + """ + + # Input Prep + NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs( + pixel_values_or_image + ) + self.check_image_size(NCHW_fp32_torch_frames) + + # Run prediction + pred_boxes, pred_scores, pred_class_idx = self.model(NCHW_fp32_torch_frames) + + # Non Maximum Suppression on each batch + pred_boxes, pred_scores, pred_class_idx = batched_nms( + self.nms_iou_threshold, + self.nms_score_threshold, + pred_boxes, + pred_scores, + pred_class_idx, + ) + + # Return raw output if requested + if raw_output or isinstance(pixel_values_or_image, torch.Tensor): + return (pred_boxes, pred_scores, pred_class_idx) + + # Add boxes to each batch + for batch_idx in range(len(pred_boxes)): + pred_boxes_batch = pred_boxes[batch_idx] + for box in pred_boxes_batch: + draw_box_from_xyxy( + NHWC_int_numpy_frames[batch_idx], + box[0:2].int(), + box[2:4].int(), + color=(0, 255, 0), + size=2, + ) + + return NHWC_int_numpy_frames diff --git a/qai_hub_models/models/_shared/yolo/demo.py b/qai_hub_models/models/_shared/yolo/demo.py new file mode 100644 index 00000000..f9662848 --- /dev/null +++ b/qai_hub_models/models/_shared/yolo/demo.py @@ -0,0 +1,62 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable, Type + +from PIL import Image + +from qai_hub_models.models._shared.yolo.app import YoloObjectDetectionApp +from qai_hub_models.utils.args import ( + demo_model_from_cli_args, + get_model_cli_parser, + get_on_device_demo_parser, + validate_on_device_demo_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebAsset, load_image +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.display import display_or_save_image + + +# Run Yolo end-to-end on a sample image. +# The demo will display a image with the predicted bounding boxes. +def yolo_detection_demo( + model_type: Type[BaseModel], + app_type: Callable[..., YoloObjectDetectionApp], + default_image: str | CachedWebAsset, + stride_multiple: int | None = None, + is_test: bool = False, +): + # Demo parameters + parser = get_model_cli_parser(model_type) + parser = get_on_device_demo_parser(parser, add_output_dir=True) + image_help = "image file path or URL." + if stride_multiple: + image_help = f"{image_help} Image spatial dimensions (x and y) must be multiples of {stride_multiple}." + parser.add_argument("--image", type=str, default=default_image, help=image_help) + parser.add_argument( + "--score-threshold", + type=float, + default=0.45, + help="Score threshold for NonMaximumSuppression", + ) + parser.add_argument( + "--iou-threshold", + type=float, + default=0.7, + help="Intersection over Union (IoU) threshold for NonMaximumSuppression", + ) + args = parser.parse_args([] if is_test else None) + validate_on_device_demo_args(args, model_type.get_model_id()) + + model = demo_model_from_cli_args(model_type, args) + + app = app_type(model, args.score_threshold, args.iou_threshold) + print("Model Loaded") + image = load_image(args.image) + pred_images = app.predict_boxes_from_image(image) + out = Image.fromarray(pred_images[0]) + if not is_test: + display_or_save_image(out, args.output_dir, "yolo_demo_output.png") diff --git a/qai_hub_models/models/_shared/yolo/utils.py b/qai_hub_models/models/_shared/yolo/utils.py new file mode 100644 index 00000000..217d1d7a --- /dev/null +++ b/qai_hub_models/models/_shared/yolo/utils.py @@ -0,0 +1,111 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch + +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.base_model import InputsType +from qai_hub_models.utils.image_processing import app_to_net_image_inputs + + +def transform_box_layout_xywh2xyxy(boxes: torch.Tensor) -> torch.Tensor: + """ + Convert boxes with (xywh) layout to (xyxy) + + Parameters: + boxes (torch.Tensor): Input boxes with layout (xywh) + + Returns: + torch.Tensor: Output box with layout (xyxy) + i.e. 
[top_left_x | top_left_y | bot_right_x | bot_right_y]
+    """
+    # Convert to (x1, y1, x2, y2)
+    # TODO(#8595): Splitting ops into smaller chunks makes them NPU resident
+    cx = torch.split(boxes[..., 0], 5000, dim=-1)
+    cy = torch.split(boxes[..., 1], 5000, dim=-1)
+    w_2 = torch.split(boxes[..., 2] / 2, 5000, dim=-1)
+    h_2 = torch.split(boxes[..., 3] / 2, 5000, dim=-1)
+    boxes_splits = []
+    for i in range(len(cx)):
+        top_left_x = cx[i] - w_2[i]
+        top_left_y = cy[i] - h_2[i]
+        bot_right_x = cx[i] + w_2[i]
+        bot_right_y = cy[i] + h_2[i]
+        boxes = torch.stack((top_left_x, top_left_y, bot_right_x, bot_right_y), -1)
+        boxes_splits.append(boxes)
+    return torch.cat(boxes_splits, dim=-2)
+
+
+def detect_postprocess(detector_output: torch.Tensor):
+    """
+    Post-processing to break Yolo (v6, v7) detector output into multiple consumable
+    tensors, such as bounding boxes, classes, and confidence (e.g. for NMS).
+
+    Parameters:
+        detector_output: torch.Tensor
+            The output of Yolo Detection model
+            Shape is [batch, num_preds, k]
+                where k = # of classes + 5
+                k is structured as follows [boxes (4) : conf (1) : # of classes]
+                and boxes are co-ordinates [x_center, y_center, w, h]
+
+    Returns:
+        boxes: torch.Tensor
+            Bounding box locations. Shape is [batch, num preds, 4] where 4 == (x1, y1, x2, y2)
+        scores: torch.Tensor
+            class scores multiplied by confidence: Shape is [batch, num_preds]
+        class_idx: torch.tensor
+            Shape is [batch, num_preds] where each value is the index of the most probable class of the prediction.
+    """
+    # Break output into parts
+    boxes = detector_output[:, :, :4]
+    conf = detector_output[:, :, 4:5]
+    scores = detector_output[:, :, 5:]
+
+    # Convert boxes to (x1, y1, x2, y2)
+    boxes = transform_box_layout_xywh2xyxy(boxes)
+
+    # Combine confidence and scores.
+    scores *= conf
+
+    # Get class ID of most likely score.
+    scores, class_idx = get_most_likely_score(scores)
+
+    return boxes, scores, class_idx
+
+
+def get_most_likely_score(scores: torch.Tensor):
+    """
+    Returns most likely score and class id
+
+    Args:
+        scores (torch.tensor): final score after post-processing predictions
+
+    Returns:
+        scores: torch.Tensor
+            class scores reduced to keep max score per prediction
+            Shape is [batch, num_preds]
+        class_idx: torch.tensor
+            Shape is [batch, num_preds] where each value is the index of the most probable class of the prediction.
+    """
+    # TODO(#8595): QNN crashes when running max on a large tensor
+    # Split into chunks of size 5k to keep the model NPU resident
+    score_splits = torch.split(scores, 5000, dim=-2)
+    max_scores = []
+    max_indices = []
+    for split in score_splits:
+        scores, class_idx = torch.max(split, -1, keepdim=False)
+        max_scores.append(scores)
+        max_indices.append(class_idx.float())
+    return torch.cat(max_scores, dim=-1), torch.cat(max_indices, dim=-1)
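A quick shape sanity check for the post-processing helpers above, assuming the package in this diff is importable. The 25200 x 85 tensor mimics a typical YoloV7 head output (4 box + 1 confidence + 80 class columns), but the values are random.

```python
import torch
from qai_hub_models.models._shared.yolo.utils import detect_postprocess

# Fake detector head output: batch 1, 25200 candidates, 85 = 4 box + 1 conf + 80 classes.
raw = torch.rand(1, 25200, 85)
boxes, scores, class_idx = detect_postprocess(raw)
print(boxes.shape)      # torch.Size([1, 25200, 4]) -- (x1, y1, x2, y2)
print(scores.shape)     # torch.Size([1, 25200])
print(class_idx.shape)  # torch.Size([1, 25200])
```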
+
+
+def yolo_sample_inputs() -> InputsType:
+    image_address = CachedWebModelAsset.from_asset_store(
+        "yolov7", 1, "yolov7_demo_640.jpg"
+    )
+    image = load_image(image_address)
+    return {"image": [app_to_net_image_inputs(image)[1].numpy()]}
diff --git a/qai_hub_models/models/baichuan_7b_quantized/README.md b/qai_hub_models/models/baichuan_7b_quantized/README.md
new file mode 100644
index 00000000..ba681122
--- /dev/null
+++ b/qai_hub_models/models/baichuan_7b_quantized/README.md
@@ -0,0 +1,27 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [Baichuan-7B: Large language model achieving state-of-the-art performance on Chinese and English language benchmarks](https://aihub.qualcomm.com/models/baichuan_7b_quantized)
+
+Baichuan-7B is a family of LLMs. It achieves the state-of-the-art performance of its size on standard Chinese and English authoritative benchmarks (C-EVAL/MMLU). The model is quantized to 4-bit weights and 16-bit activations, making it suitable for on-device deployment. For the prompt and output lengths specified below, the time to first token is Llama-PromptProcessor-Quantized's latency and the average time per additional token is Llama-TokenGenerator-KVCache-Quantized's latency.
+
+This is based on the implementation of Baichuan-7B found
+[here](https://github.com/baichuan-inc/Baichuan-7B/). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/baichuan_7b_quantized).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of Baichuan-7B can be found
+  [here](https://github.com/baichuan-inc/Baichuan-7B/blob/main/LICENSE).
+
+
+## References
+* [Baichuan 2: Open Large-scale Language Models](https://arxiv.org/abs/2309.10305)
+* [Source Model Implementation](https://github.com/baichuan-inc/Baichuan-7B/)
diff --git a/qai_hub_models/models/baichuan_7b_quantized/info.yaml b/qai_hub_models/models/baichuan_7b_quantized/info.yaml
new file mode 100644
index 00000000..4fb26cc4
--- /dev/null
+++ b/qai_hub_models/models/baichuan_7b_quantized/info.yaml
@@ -0,0 +1,45 @@
+name: Baichuan-7B
+id: baichuan_7b_quantized
+status: public
+headline: Large language model achieving state-of-the-art performance on Chinese and English language benchmarks.
+domain: Generative AI
+description: Baichuan-7B is a family of LLMs. It achieves the state-of-the-art performance of
+  its size on standard Chinese and English authoritative benchmarks (C-EVAL/MMLU).
+  The model is quantized to 4-bit weights and 16-bit activations, making it suitable
+  for on-device deployment.
For the prompt and output lengths specified below, + the time to first token is Baichuan-PromptProcessor-Quantized's latency and the average + time per additional token is Baichuan-TokenGenerator-KVCache-Quantized's latency. +use_case: Text Generation +tags: + - llm + - generative-ai + - quantized +research_paper: https://arxiv.org/abs/2309.10305 +research_paper_title: "Baichuan 2: Open Large-scale Language Models" +license: https://github.com/baichuan-inc/Baichuan-7B/blob/main/LICENSE +source_repo: https://github.com/baichuan-inc/Baichuan-7B/ +technical_details: + Number of parameters: 7B + Model size: 3.9GB + Model-1 (Prompt Processor): Baichuan-PromptProcessor-Quantized + Max context length: 1024 + Prompt processor input: 1024 tokens + Prompt processor output: 1 output token + KVCache for token generator + Model-2 (Token Generator): Baichuan-TokenGenerator-KVCache-Quantized + Token generator input: 1 input token + past KVCache + Token generator output: 1 output token + KVCache for next iteration + Decoding length: 1024 (1 output token + 1023 from KVCache) + Use: Initiate conversation with prompt-processor and then token generator for subsequent iterations. + QNN-SDK: "2.19" +applicable_scenarios: + - Dialogue + - Content Generation + - Customer Support +related_models: [] +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: yes +license_type: apache-2.0 +dataset: [] diff --git a/qai_hub_models/models/baichuan_7b_quantized/perf.yaml b/qai_hub_models/models/baichuan_7b_quantized/perf.yaml new file mode 100644 index 00000000..e0e4d38e --- /dev/null +++ b/qai_hub_models/models/baichuan_7b_quantized/perf.yaml @@ -0,0 +1,77 @@ +models: +- name: Baichuan-TokenGenerator-KVCache-Quantized + performance_metrics: + - reference_device_info: + name: Samsung Galaxy S24 Ultra + os: '14' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 3 + timestamp: '2024-02-16T22:23:17.643089Z' + torchscript_onnx_qnn: + inference_time: 108059 + throughput: 9.25 + estimated_peak_memory_range: + min: 561152 + max: 112366992 + layer_info: + layers_on_npu: 33820 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 33820 + precision: uint16 + primary_compute_unit: NPU + job_id: "null" + job_status: Passed +- name: Baichuan-PromptProcessor-Quantized + performance_metrics: + - reference_device_info: + name: Samsung Galaxy S24 Ultra + os: '14' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 3 + timestamp: '2024-02-16T22:23:17.643089Z' + torchscript_onnx_qnn: + inference_time: 2599326 + throughput: 0.38 + estimated_peak_memory_range: + min: 53248 + max: 40255040 + layer_info: + layers_on_npu: 31772 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 31772 + precision: uint16 + primary_compute_unit: NPU + job_id: "null" + job_status: Passed +aggregated: + supported_devices: + - Samsung Galaxy S24 Ultra + supported_oses: + - Android + supported_chipsets: + - Snapdragon® 8 Gen 3 + performance_metrics: + - reference_device_info: + name: Samsung Galaxy S24 Ultra + os: '14' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 3 + timestamp: '2024-02-16T22:23:17.643089Z' + torchscript_onnx_qnn: + inference_time: 108059 + throughput: 9.25 + estimated_peak_memory_range: + min: 561152 + max: 112366992 + precision: uint16 + primary_compute_unit: NPU + job_id: "" + job_status: Passed diff --git a/qai_hub_models/models/controlnet_quantized/README.md
b/qai_hub_models/models/controlnet_quantized/README.md new file mode 100644 index 00000000..bde72516 --- /dev/null +++ b/qai_hub_models/models/controlnet_quantized/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [ControlNet: Generating visual arts from text prompt and input guiding image](https://aihub.qualcomm.com/models/controlnet_quantized) + +On-device, high-resolution image synthesis from text and image prompts. ControlNet guides Stable Diffusion with a provided input image to generate accurate images from a given input prompt. + +This is based on the implementation of ControlNet found +[here](https://github.com/lllyasviel/ControlNet). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +across various devices can be found [here](https://aihub.qualcomm.com/models/controlnet_quantized). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[controlnet_quantized]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.controlnet_quantized.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.controlnet_quantized.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of ControlNet can be found + [here](https://github.com/lllyasviel/ControlNet/blob/main/LICENSE). + + +## References +* [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) +* [Source Model Implementation](https://github.com/lllyasviel/ControlNet) diff --git a/qai_hub_models/models/controlnet_quantized/__init__.py b/qai_hub_models/models/controlnet_quantized/__init__.py new file mode 100644 index 00000000..8c993cd8 --- /dev/null +++ b/qai_hub_models/models/controlnet_quantized/__init__.py @@ -0,0 +1,11 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
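The export flow described in the README above can also be driven programmatically through the auto-generated `export.py` shown later in this diff. A hedged sketch (editor's addition; it assumes Qualcomm® AI Hub access is already configured, and the component and device names are taken from that script's defaults):

```python
from qai_hub_models.models.controlnet_quantized.export import export_model

# Export only the text encoder and skip the slower steps to keep the run short.
jobs = export_model(
    device="Samsung Galaxy S23",
    components=["text_encoder"],
    skip_profiling=True,
    skip_inferencing=True,
)
```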
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models.controlnet_quantized.app import ( # noqa: F401 + ControlNetApp as app, +) +from qai_hub_models.models.controlnet_quantized.model import MODEL_ID # noqa: F401 +from qai_hub_models.models.controlnet_quantized.model import ( # noqa: F401 + ControlNetQuantized as Model, +) diff --git a/qai_hub_models/models/controlnet_quantized/app.py b/qai_hub_models/models/controlnet_quantized/app.py new file mode 100644 index 00000000..8483b343 --- /dev/null +++ b/qai_hub_models/models/controlnet_quantized/app.py @@ -0,0 +1,253 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from typing import Any, Callable, Tuple + +import cv2 +import numpy as np +import torch +from diffusers.models.embeddings import get_timestep_embedding +from PIL import Image +from torchvision import transforms + +OUT_H, OUT_W = 512, 512 + + +class ControlNetApp: + """ + ControlNetApp represents the application code needed to string + together the various neural networks that make up the ControlNet + algorithm. This code is written in Python, and the pipeline uses PyTorch + while running the neural networks on-device. This is meant to serve as a + reference implementation for this application in other languages and + for other platforms. + + Please run the app via `demo.py`. + + References + ---------- + * https://arxiv.org/abs/2302.05543 + * https://github.com/lllyasviel/ControlNet + """ + + def __init__( + self, + text_encoder: Callable[..., Tuple[torch.Tensor, ...]], + vae_decoder: Callable[..., Tuple[torch.Tensor, ...]], + unet: Callable[..., Tuple[torch.Tensor, ...]], + controlnet: Callable[..., Tuple[torch.Tensor, ...]], + tokenizer: Any, + scheduler: Any, + time_embedding: Any, + ): + """ + Initializes ControlNetApp with required neural networks for end-to-end pipeline. + + Parameters + ---------- + text_encoder: + Encodes the input text + vae_decoder: + Decodes the latent space into the output image + unet: + Denoises the image in latent space + controlnet: + Conditions the denoising w.r.t. the input image + tokenizer: + Tokenizer for input text. + Output of Tokenizer is fed to text_encoder. + One can experiment with different tokenizers available based on CLIP-ViT. + scheduler: + Solver for diffusion steps. + Updates latent space during each iteration. + time_embedding: + Projects the time step into an embedding used during denoising in latent space.
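For reference, the demo later in this diff constructs these pieces roughly as follows (editor's condensed sketch; the tokenizer, scheduler, and time-embedding calls are taken from `demo.py`, while the four network handles are placeholders for the precompiled components it wraps via `_get_hub_model`):

```python
from diffusers import DPMSolverMultistepScheduler, UNet2DConditionModel
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base", subfolder="tokenizer", revision="main"
)
scheduler = DPMSolverMultistepScheduler(
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    num_train_timesteps=1000,
)
time_embedding = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet"
).time_embedding

app = ControlNetApp(
    text_encoder=text_encoder,  # placeholder: AI Hub-backed text encoder
    vae_decoder=vae_decoder,    # placeholder: AI Hub-backed VAE decoder
    unet=unet,                  # placeholder: AI Hub-backed UNet
    controlnet=controlnet,      # placeholder: AI Hub-backed ControlNet
    tokenizer=tokenizer,
    scheduler=scheduler,
    time_embedding=time_embedding,
)
```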
+ """ + + self.text_encoder = text_encoder + self.vae_decoder = vae_decoder + self.unet = unet + self.controlnet = controlnet + self.tokenizer = tokenizer + self.scheduler = scheduler + self.time_embedding = time_embedding + + def get_time_embedding(self, timestep): + timestep = torch.tensor([timestep]) + t_emb = get_timestep_embedding(timestep, 320, True, 0) + emb = self.time_embedding(t_emb) + + return emb + + def _make_canny_image(self, input_image: Image): + image = np.asarray(input_image) + + # Get edges for input with Canny Edge Detection + low_threshold = 100 + high_threshold = 200 + + image = cv2.Canny(image, low_threshold, high_threshold) + image = image[:, :, None] + image = np.concatenate([image, image, image], axis=2) + + # Make image channel-first and scale + image = np.transpose(image, (2, 0, 1)) + image = image.astype(np.float32) / 255.0 + torch_image = torch.Tensor(image).unsqueeze(0) + + # Resize input image to supported size + return transforms.Resize(size=(OUT_H, OUT_W))(torch_image) + + def _encode_text_prompt(self, prompt: str) -> torch.Tensor: + """ + Takes a text prompt and returns a tensor with its text embedding. + + Parameters + ---------- + prompt: + The text prompt to encode. + """ + # Tokenize input prompt + text_input = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + return_tensors="pt", + ) + + max_length = text_input.input_ids.shape[-1] + uncond_input = self.tokenizer( + [""], + padding="max_length", + max_length=max_length, + return_tensors="pt", + ) + + # Embed using the text encoder neural network + # Encode input and empty prompt in one go + print(f"\nExtracting embeddings (inference on TextEncoder)\n{'-' * 50}") + embeddings = self.text_encoder( + [ + text_input.input_ids.type(torch.int32), + uncond_input.input_ids.type(torch.int32), + ] + ) + cond_embeddings, uncond_embeddings = torch.split(embeddings, 1, 0) + return cond_embeddings, uncond_embeddings + + def predict(self, *args, **kwargs): + # See generate_image. + return self.generate_image(*args, **kwargs) + + def generate_image( + self, + prompt: str, + input_image: Image, + num_steps: int = 5, + seed: int = 0, + guidance_scale: float = 7.5, + ) -> torch.Tensor: + """ + Generate an image using the PyTorch reference neural networks. This + code can be used as a reference for how to glue together the neural + networks in an application. Note that this code relies on a tokenizer + and scheduler from the HuggingFace's diffusers library, so those would + have to be ported to the application as well. + + Parameters + ---------- + prompt: + The text prompt to generate an image from. + num_steps: + The number of steps to run the diffusion process for. Higher value + may lead to better image quality. + input_image: + Path to input image for conditioning image generation. + seed: + The seed to use for the random number generator. + guidance_scale: + Classifier-free guidance is a method that allows us to control how + strongly the image generation is guided by the prompt. This is done + by always processing two samples at once: an unconditional (using a + text embedding of an empty prompt) and a conditional (using a text + embedding of the provided prompt). Given the noise prediction of + both of these, we linearly interpolate between them based on the + guidance_scale. A guidance scale of 0 is the same as using an empty + prompt. 
A guidance scale of 1 turns off classifier-free guidance + and is computationally less expensive since it only processes one + sample at a time. Intuitively you may think the rest of guidance + scales are between 0 and 1, but it is common to use a scale greater + than 1 as a method of amplifying the prompt's influence on the + image, pushing it further away from the unconditional sample. + + Returns + ------- + torch.Tensor + The generated image in RGB scaled in [0, 1] with tensor shape (H, + W, 3). The height and the width may depend on the underlying Stable + Diffusion version, but is typically 512x512. + """ + + # Encode text prompt + cond_embeddings, uncond_embeddings = self._encode_text_prompt(prompt) + self.scheduler.set_timesteps(num_steps) + self.scheduler.config.prediction_type = "epsilon" + + # Channel last input + latents_shape = (1, 4, OUT_H // 8, OUT_W // 8) + + generator = torch.manual_seed(seed) + latents = torch.randn(latents_shape, generator=generator) + latents = latents * self.scheduler.init_noise_sigma + + # Helper method to go back and forth from channel-first to channel-last + def _make_channel_last_torch(input_tensor): + return torch.permute(input_tensor, [0, 2, 3, 1]) + + def _make_channel_first_torch(input_tensor): + return torch.permute(torch.Tensor(input_tensor), [0, 3, 1, 2]) + + # Get image with edges for conditioning + canny_image = self._make_canny_image(input_image) + canny_image = _make_channel_last_torch(canny_image) + + for i, t in enumerate(self.scheduler.timesteps): + print(f"\nStep: {i + 1}\n{'-' * 10}") + time_emb = self.get_time_embedding(t) + latent_model_input = self.scheduler.scale_model_input(latents, t) + + latent_model_input = _make_channel_last_torch(latent_model_input) + + # Denoise input + print( + f"\nDenoising image in latent space (inference on ControlNet)\n{'-' * 60}" + ) + controlnet_out = self.controlnet( + [latent_model_input] * 2, + [time_emb] * 2, + [cond_embeddings, uncond_embeddings], + [canny_image] * 2, + ) + controlnet_out_split = [] + for each in controlnet_out: + controlnet_out_split.append(torch.split(each, 1, 0)) + + print(f"\nDenoising image in latent space (inference on UNet)\n{'-' * 50}") + noise_pred = self.unet( + [latent_model_input] * 2, + [time_emb] * 2, + [cond_embeddings, uncond_embeddings], + *controlnet_out_split, + ) + noise_cond, noise_uncond = torch.split(noise_pred, 1, 0) + + noise_pred = noise_uncond + guidance_scale * (noise_cond - noise_uncond) + noise_pred = _make_channel_first_torch(noise_pred) + latents = self.scheduler.step(noise_pred, t, latents).prev_sample + + print(f"\nDecoding generated image (inference on VAEDecoder)\n{'-' * 50}") + # Decode generated image from latent space + latents_vae = _make_channel_last_torch(latents) + image = self.vae_decoder(latents_vae) + return image diff --git a/qai_hub_models/models/controlnet_quantized/demo.py b/qai_hub_models/models/controlnet_quantized/demo.py new file mode 100644 index 00000000..0f22452b --- /dev/null +++ b/qai_hub_models/models/controlnet_quantized/demo.py @@ -0,0 +1,170 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
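The classifier-free guidance discussion in `generate_image` above reduces to a single linear interpolation/extrapolation step between the two noise predictions. A tiny self-contained numeric illustration (editor's addition; the tensor shape is arbitrary):

```python
import torch

guidance_scale = 7.5
noise_uncond = torch.zeros(1, 64, 64, 4)      # prediction for the empty prompt
noise_cond = torch.full((1, 64, 64, 4), 0.1)  # prediction for the real prompt

# Same formula as used in ControlNetApp.generate_image:
noise_pred = noise_uncond + guidance_scale * (noise_cond - noise_uncond)
assert torch.allclose(noise_pred, torch.full((1, 64, 64, 4), 0.75))
```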
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import argparse + +import numpy as np +import qai_hub as hub +from diffusers import DPMSolverMultistepScheduler, UNet2DConditionModel +from PIL import Image +from transformers import CLIPTokenizer + +from qai_hub_models.models.controlnet_quantized.app import ControlNetApp +from qai_hub_models.models.controlnet_quantized.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + ClipVITTextEncoder, + ControlNet, + Unet, + VAEDecoder, +) +from qai_hub_models.utils.args import DEFAULT_EXPORT_DEVICE, add_output_dir_arg +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.base_model import BasePrecompiledModel +from qai_hub_models.utils.display import display_or_save_image +from qai_hub_models.utils.inference import HubModel +from qai_hub_models.utils.qai_hub_helpers import can_access_qualcomm_ai_hub + +INPUT_IMAGE = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "test_images/test_bird_image.png" +).fetch() + +DEFAULT_DEMO_PROMPT = "a white bird on a colorful window" + + +def _get_hub_model(input_model: BasePrecompiledModel, device=DEFAULT_EXPORT_DEVICE): + if not can_access_qualcomm_ai_hub(): + raise RuntimeError( + "ControlNet on-device demo requires access to QAI-Hub.\n" + "Please visit https://aihub.qualcomm.com/ and sign-up." + ) + + # Upload model + uploaded_model = hub.upload_model(input_model.get_target_model_path()) + inputs = list(input_model.get_input_spec().keys()) + return HubModel(uploaded_model, inputs, hub.Device(name=device)) + + +# Run ControlNet end-to-end on a given prompt and input image. +# The demo will output an AI-generated image based on the given inputs. +def main(is_test: bool = False): + parser = argparse.ArgumentParser() + parser.add_argument( + "--prompt", + type=str, + default=DEFAULT_DEMO_PROMPT, + help="Prompt to generate image from.", + ) + parser.add_argument( + "--image", + type=str, + default=INPUT_IMAGE, + help="Input image to extract edges from.", + ) + parser.add_argument( + "--num-steps", + type=int, + default=2, + help="The number of diffusion iteration steps (higher means better quality).", + ) + parser.add_argument( + "--seed", + type=int, + default=0, + help="Random seed.", + ) + add_output_dir_arg(parser) + parser.add_argument( + "--guidance-scale", + type=float, + default=7.5, + help="Strength of guidance (higher means more influence from prompt).", + ) + parser.add_argument( + "--device", + type=str, + default=DEFAULT_EXPORT_DEVICE, + help="Device to run stable-diffusion demo on.", + ) + args = parser.parse_args([] if is_test else None) + + if not is_test: + print(f"\n{'-' * 100}") + print( + f"** Performing image generation on-device({args.device}) with ControlNet - Stable Diffusion **" + ) + print() + print("Prompt:", args.prompt) + print("Image:", args.image) + print("Number of steps:", args.num_steps) + print("Guidance scale:", args.guidance_scale) + print("Seed:", args.seed) + print() + print( + "Note: This reference demo uses significant amounts of memory and may take 5-10 minutes to run per step." + ) + print(f"{'-' * 100}\n") + + print(f"Downloading model assets\n{'-' * 35}") + # Load components + text_encoder = ClipVITTextEncoder.from_precompiled() + unet = Unet.from_precompiled() + vae_decoder = VAEDecoder.from_precompiled() + controlnet = ControlNet.from_precompiled() + + # Create four HubModel instances to prepare for on-device inference. 
+ # This is similar to initializing PyTorch model to call forward method later. + # Instead of forward, we later submit inference_jobs on QAI-Hub for + # on-device evaluation. + print(f"Uploading model assets on QAI-Hub\n{'-' * 35}") + text_encoder = _get_hub_model(text_encoder, args.device) + unet = _get_hub_model(unet, args.device) + vae_decoder = _get_hub_model(vae_decoder, args.device) + controlnet = _get_hub_model(controlnet, args.device) + + # Create tokenizer, scheduler and time_embedding required + # for control-net pipeline. + tokenizer = CLIPTokenizer.from_pretrained( + "stabilityai/stable-diffusion-2-1-base", subfolder="tokenizer", revision="main" + ) + + scheduler = DPMSolverMultistepScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + num_train_timesteps=1000, + ) + + embedding = UNet2DConditionModel.from_pretrained( + "runwayml/stable-diffusion-v1-5", subfolder="unet" + ).time_embedding + + # Load Application + app = ControlNetApp( + text_encoder=text_encoder, + vae_decoder=vae_decoder, + unet=unet, + controlnet=controlnet, + tokenizer=tokenizer, + scheduler=scheduler, + time_embedding=embedding, + ) + + # Generate image + image = app.generate_image( + args.prompt, + load_image(args.image), + num_steps=args.num_steps, + seed=args.seed, + guidance_scale=args.guidance_scale, + ) + + pil_img = Image.fromarray(np.round(image.numpy() * 255).astype(np.uint8)[0]) + + if not is_test: + display_or_save_image(pil_img, args.output_dir) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/controlnet_quantized/export.py b/qai_hub_models/models/controlnet_quantized/export.py new file mode 100644 index 00000000..01cb4b9b --- /dev/null +++ b/qai_hub_models/models/controlnet_quantized/export.py @@ -0,0 +1,172 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import warnings +from pathlib import Path +from typing import List, Mapping, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.controlnet_quantized import Model +from qai_hub_models.utils.args import TargetRuntime, export_parser +from qai_hub_models.utils.printing import print_profile_metrics_from_job +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, +) + +ALL_COMPONENTS = ["text_encoder", "unet", "vae_decoder", "controlnet"] +DEFAULT_COMPONENTS = ["text_encoder", "vae_decoder", "unet", "controlnet"] + + +def export_model( + device: str = "Samsung Galaxy S23", + components: Optional[List[str]] = None, + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + profile_options: str = "", + **additional_model_kwargs, +) -> Mapping[str, Tuple[Optional[hub.ProfileJob], Optional[hub.InferenceJob]]] | List[ + str +]: + """ + This function accomplishes 5 main tasks: + + 1. Initialize model. + 2. Upload model assets to hub. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Summarizes the results from profiling. + + Each of the last three steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. 
+ Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + components: List of sub-components of the model that will be exported. + Each component is compiled and profiled separately. + Defaults to ALL_COMPONENTS if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_summary: If set, skips waiting for and summarizing results + from profiling. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_precompiled` + + Returns: + A Mapping from component_name to a 2-tuple of: + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "controlnet_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + component_arg = components + components = components or DEFAULT_COMPONENTS + for component in components: + if component not in ALL_COMPONENTS: + raise ValueError(f"Invalid component {component}.") + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "controlnet_quantized", + "ControlNet", + device, + skip_profiling, + skip_inferencing, + False, + skip_summary, + output_path, + TargetRuntime.QNN, + "", + profile_options, + component_arg, + ) + + # 1. Initialize model + print("Initializing model class") + model = Model.from_precompiled() + components_dict = {} + if "text_encoder" in components: + components_dict["text_encoder"] = model.text_encoder + if "unet" in components: + components_dict["unet"] = model.unet + if "vae_decoder" in components: + components_dict["vae_decoder"] = model.vae_decoder + if "controlnet" in components: + components_dict["controlnet"] = model.controlnet + + # 2. Upload model assets to hub + print("Uploading model assets on hub") + uploaded_models = {} + for component_name in components: + uploaded_models[component_name] = hub.upload_model( + components_dict[component_name].get_target_model_path() + ) + + # 3. Profile the model assets on real devices + profile_jobs = {} + if not skip_profiling: + for component_name in components: + print(f"Profiling model {component_name} on a hosted device.") + profile_jobs[component_name] = hub.submit_profile_job( + model=uploaded_models[component_name], + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_jobs = {} + if not skip_inferencing: + for component_name in components: + print( + f"Running inference for {component_name} on a hosted device with example inputs." + ) + sample_inputs = components_dict[component_name].sample_inputs() + inference_jobs[component_name] = hub.submit_inference_job( + model=uploaded_models[component_name], + inputs=sample_inputs, + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 5. 
Summarize the results from profiling + if not skip_summary and not skip_profiling: + for component_name in components: + profile_job = profile_jobs[component_name] + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + return { + component_name: ( + profile_jobs.get(component_name, None), + inference_jobs.get(component_name, None), + ) + for component_name in components + } + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser( + model_cls=Model, components=ALL_COMPONENTS, exporting_compiled_model=True + ) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/controlnet_quantized/info.yaml b/qai_hub_models/models/controlnet_quantized/info.yaml new file mode 100644 index 00000000..d2a7df33 --- /dev/null +++ b/qai_hub_models/models/controlnet_quantized/info.yaml @@ -0,0 +1,37 @@ +name: ControlNet +id: controlnet_quantized +status: public +headline: Generating visual arts from text prompt and input guiding image. +domain: Generative AI +description: On-device, high-resolution image synthesis from text and image prompts. + ControlNet guides Stable-diffusion with provided input image to generate accurate + images from given input prompt. +use_case: Image Generation +tags: + - generative-ai + - quantized +research_paper: https://arxiv.org/abs/2302.05543 +research_paper_title: Adding Conditional Control to Text-to-Image Diffusion Models +license: https://github.com/lllyasviel/ControlNet/blob/main/LICENSE +source_repo: https://github.com/lllyasviel/ControlNet +technical_details: + Input: Text prompt and input image as a reference + QNN-SDK: '2.19' + Text Encoder Number of parameters: 340M + UNet Number of parameters: 865M + VAE Decoder Number of parameters: 83M + ControlNet Number of parameters: 361M + Model size: 1.4GB +applicable_scenarios: + - Image Generation + - Image Editing + - Content Creation +related_models: + - stable_diffusion_quantized +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: no +dataset: [] +license_type: apache-2.0 diff --git a/qai_hub_models/models/controlnet_quantized/model.py b/qai_hub_models/models/controlnet_quantized/model.py new file mode 100644 index 00000000..57a882a0 --- /dev/null +++ b/qai_hub_models/models/controlnet_quantized/model.py @@ -0,0 +1,169 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os + +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset +from qai_hub_models.utils.base_model import BasePrecompiledModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +QNN_SDK_PREFIX = "QNN219" +TEXT_ENCODER = os.path.join(QNN_SDK_PREFIX, "text_encoder.serialized.bin") +UNET_DIFFUSER = os.path.join(QNN_SDK_PREFIX, "unet.serialized.bin") +VAE_DECODER = os.path.join(QNN_SDK_PREFIX, "vae_decoder.serialized.bin") +CONTROL_NET = os.path.join(QNN_SDK_PREFIX, "controlnet.serialized.bin") + + +class ControlNetQuantized: + """ + ControlNet class consists of + - Text Encoder + - UNet based diffuser + - VAE decoder, and + - ControlNet + + All models are pre-trained, quantized (int8 weight, uint16 activations) + and compiled into serialized binary for Qualcomm Snapdragon Gen2+. + """ + + def __init__(self, text_encoder, unet, vae_decoder, controlnet) -> None: + self.text_encoder = text_encoder + self.unet = unet + self.vae_decoder = vae_decoder + self.controlnet = controlnet + + @classmethod + def from_precompiled(cls) -> "ControlNetQuantized": + return ControlNetQuantized( + text_encoder=ClipVITTextEncoder.from_precompiled(), + unet=Unet.from_precompiled(), + vae_decoder=VAEDecoder.from_precompiled(), + controlnet=ControlNet.from_precompiled(), + ) + + +class ClipVITTextEncoder(BasePrecompiledModel): + """ + CLIP-ViT based Text Encoder. + + Pre-trained, quantized (int8 weight, uint16 activations) + and compiled into serialized binary for Qualcomm Snapdragon Gen2+. + """ + + def __init__(self, target_model_path) -> None: + self.target_model_path = target_model_path + + @classmethod + def from_precompiled(cls) -> "ClipVITTextEncoder": + text_encoder_path = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, TEXT_ENCODER + ).fetch() + return ClipVITTextEncoder(text_encoder_path) + + def get_target_model_path(self) -> str: + return self.target_model_path + + def get_input_spec(self) -> InputSpec: + return {"input_1": ((1, 77), "int32")} + + +class Unet(BasePrecompiledModel): + """ + UNet model to denoise image in latent space. + + Pre-trained, quantized (int8 weight, uint16 activations) + and compiled into serialized binary for Qualcomm Snapdragon Gen2+. 
+ """ + + def __init__(self, target_model_path) -> None: + self.target_model_path = target_model_path + + @classmethod + def from_precompiled(cls) -> "Unet": + model_path = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, UNET_DIFFUSER + ).fetch() + return Unet(model_path) + + def get_target_model_path(self) -> str: + return self.target_model_path + + def get_input_spec(self) -> InputSpec: + return { + "input_1": ((1, 64, 64, 4), "float32"), + "input_2": ((1, 1280), "float32"), + "input_3": ((1, 77, 768), "float32"), + "controlnet_downblock1": ((1, 64, 64, 320), "float32"), + "controlnet_downblock2": ((1, 64, 64, 320), "float32"), + "controlnet_downblock3": ((1, 64, 64, 320), "float32"), + "controlnet_downblock4": ((1, 32, 32, 320), "float32"), + "controlnet_downblock5": ((1, 32, 32, 640), "float32"), + "controlnet_downblock6": ((1, 32, 32, 640), "float32"), + "controlnet_downblock7": ((1, 16, 16, 640), "float32"), + "controlnet_downblock8": ((1, 16, 16, 1280), "float32"), + "controlnet_downblock9": ((1, 16, 16, 1280), "float32"), + "controlnet_downblock10": ((1, 8, 8, 1280), "float32"), + "controlnet_downblock11": ((1, 8, 8, 1280), "float32"), + "controlnet_downblock12": ((1, 8, 8, 1280), "float32"), + "controlnet_midblock": ((1, 8, 8, 1280), "float32"), + } + + +class VAEDecoder(BasePrecompiledModel): + """ + Decodes image from latent into output generated image. + + Pre-trained, quantized (int8 weight, uint16 activations) + and compiled into serialized binary for Qualcomm Snapdragon Gen2+. + """ + + def __init__(self, target_model_path) -> None: + self.target_model_path = target_model_path + + @classmethod + def from_precompiled(cls) -> "VAEDecoder": + model_path = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, VAE_DECODER + ).fetch() + return VAEDecoder(model_path) + + def get_target_model_path(self) -> str: + return self.target_model_path + + def get_input_spec(self) -> InputSpec: + return {"input_1": ((1, 64, 64, 4), "float32")} + + +class ControlNet(BasePrecompiledModel): + """ + Conditions the UNet's denoising in latent space w.r.t. the input guiding image (e.g. Canny edges). + + Pre-trained, quantized (int8 weight, uint16 activations) + and compiled into serialized binary for Qualcomm Snapdragon Gen2+.
+ """ + + def __init__(self, target_model_path) -> None: + self.target_model_path = target_model_path + + @classmethod + def from_precompiled(cls) -> "ControlNet": + model_path = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, CONTROL_NET + ).fetch() + return ControlNet(model_path) + + def get_target_model_path(self) -> str: + return self.target_model_path + + def get_input_spec(self) -> InputSpec: + return { + "input_1": ((1, 64, 64, 4), "float32"), + "input_2": ((1, 1280), "float32"), + "input_3": ((1, 77, 768), "float32"), + "input_4": ((1, 512, 512, 3), "float32"), + } diff --git a/qai_hub_models/models/controlnet_quantized/perf.yaml b/qai_hub_models/models/controlnet_quantized/perf.yaml new file mode 100644 index 00000000..1b8fdd4d --- /dev/null +++ b/qai_hub_models/models/controlnet_quantized/perf.yaml @@ -0,0 +1,127 @@ +models: +- name: Text-Encoder-Quantized + performance_metrics: + - reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-14T05:29:28.928297Z' + torchscript_onnx_qnn: + inference_time: 11369 + throughput: 87.95 + estimated_peak_memory_range: + min: 57344 + max: 34869152 + layer_info: + layers_on_npu: 570 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 570 + precision: uint16 + primary_compute_unit: NPU + job_id: jz5w40nzg + job_status: Passed +- name: VAE-Decoder-Quantized + performance_metrics: + - reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-14T05:29:28.928297Z' + torchscript_onnx_qnn: + inference_time: 386746 + throughput: 2.58 + estimated_peak_memory_range: + min: 122880 + max: 4489392 + layer_info: + layers_on_npu: 409 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 409 + precision: uint16 + primary_compute_unit: NPU + job_id: jnp16kxk5 + job_status: Passed +- name: UNet-Quantized + performance_metrics: + - reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-14T05:29:28.928297Z' + torchscript_onnx_qnn: + inference_time: 259981 + throughput: 3.84 + estimated_peak_memory_range: + min: 13058048 + max: 15044232 + layer_info: + layers_on_npu: 5434 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 5434 + precision: uint16 + primary_compute_unit: NPU + job_id: jmg9d7eq5 + job_status: Passed +- name: ControlNet-Quantized + performance_metrics: + - reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-14T05:29:28.928297Z' + torchscript_onnx_qnn: + inference_time: 103748 + throughput: 9.63 + estimated_peak_memory_range: + min: 200704 + max: 23278088 + layer_info: + layers_on_npu: 2406 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 2406 + precision: uint16 + primary_compute_unit: NPU + job_id: jw56w9jng + job_status: Passed +aggregated: + supported_devices: + - Samsung Galaxy S23 Ultra + supported_oses: + - Android + supported_chipsets: + - Snapdragon® 8 Gen 2 + performance_metrics: + - reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-14T05:29:28.928297Z' + 
torchscript_onnx_qnn: + inference_time: 761844 + throughput: 1.31 + estimated_peak_memory_range: + min: 13058048 + max: 34869152 + precision: int16 + primary_compute_unit: NPU + job_id: "" + job_status: Passed diff --git a/qai_hub_models/models/controlnet_quantized/requirements.txt b/qai_hub_models/models/controlnet_quantized/requirements.txt new file mode 100644 index 00000000..8d0cd0c6 --- /dev/null +++ b/qai_hub_models/models/controlnet_quantized/requirements.txt @@ -0,0 +1,3 @@ +transformers==4.31.0 +diffusers[torch]==0.21.4 +opencv-python==4.8.1.78 diff --git a/qai_hub_models/models/controlnet_quantized/test.py b/qai_hub_models/models/controlnet_quantized/test.py new file mode 100644 index 00000000..f192cd28 --- /dev/null +++ b/qai_hub_models/models/controlnet_quantized/test.py @@ -0,0 +1,39 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import tempfile + +import pytest + +from qai_hub_models.models.controlnet_quantized.demo import main as demo_main +from qai_hub_models.models.controlnet_quantized.export import export_model + + +@pytest.mark.skip("#105 move slow_cloud and slow tests to nightly.") +@pytest.mark.slow_cloud +def test_export(): + with tempfile.TemporaryDirectory() as tmpdir: + exported_jobs = export_model( + # Testing text_encoder as it's smallest model in + # ControlNet pipeline + components=["text_encoder"], + skip_inferencing=True, + skip_downloading=True, + skip_summary=True, + output_dir=tmpdir, + ) + + # NOTE: Not waiting for job to finish + # as it will slow CI down. + # Rather, we should create waiting test and move to nightly. + for jobs in exported_jobs.values(): + profile_job, inference_job = jobs[0], jobs[1] + assert profile_job is not None + assert inference_job is None + + +@pytest.mark.skip("#105 move slow_cloud and slow tests to nightly.") +@pytest.mark.slow_cloud +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/convnext_tiny/README.md b/qai_hub_models/models/convnext_tiny/README.md new file mode 100644 index 00000000..7f489f79 --- /dev/null +++ b/qai_hub_models/models/convnext_tiny/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [ConvNext-Tiny: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/convnext_tiny) + +ConvNextTiny is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of ConvNext-Tiny found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/convnext.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/convnext_tiny). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.convnext_tiny.demo +``` +More details on the CLI tool can be found with the `--help` option. 
See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.convnext_tiny.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of ConvNext-Tiny can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). + + +## References +* [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/convnext.py) diff --git a/qai_hub_models/models/convnext_tiny/__init__.py b/qai_hub_models/models/convnext_tiny/__init__.py new file mode 100644 index 00000000..5f7510aa --- /dev/null +++ b/qai_hub_models/models/convnext_tiny/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import ConvNextTiny as Model # noqa: F401 diff --git a/qai_hub_models/models/convnext_tiny/demo.py b/qai_hub_models/models/convnext_tiny/demo.py new file mode 100644 index 00000000..892edb19 --- /dev/null +++ b/qai_hub_models/models/convnext_tiny/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.convnext_tiny.model import ConvNextTiny + + +def main(is_test: bool = False): + imagenet_demo(ConvNextTiny, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/convnext_tiny/export.py b/qai_hub_models/models/convnext_tiny/export.py new file mode 100644 index 00000000..d06aadff --- /dev/null +++ b/qai_hub_models/models/convnext_tiny/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
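Before exporting, the ConvNext-Tiny wrapper described in the README above can be exercised locally as a plain PyTorch module. A hedged sketch (editor's addition; it assumes the torchvision weights download succeeds and that the ImagenetClassifier wrapper accepts a standard NCHW float tensor at the 224x224 resolution listed in `info.yaml`):

```python
import torch

from qai_hub_models.models.convnext_tiny.model import ConvNextTiny

model = ConvNextTiny.from_pretrained()
with torch.no_grad():
    scores = model(torch.rand(1, 3, 224, 224))  # Imagenet class scores
print(scores.shape)  # expected: torch.Size([1, 1000])
```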
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.convnext_tiny import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "convnext_tiny" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "convnext_tiny", + "ConvNext-Tiny", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/convnext_tiny/info.yaml b/qai_hub_models/models/convnext_tiny/info.yaml new file mode 100644 index 00000000..70292699 --- /dev/null +++ b/qai_hub_models/models/convnext_tiny/info.yaml @@ -0,0 +1,38 @@ +name: ConvNext-Tiny +# id must match with the model dir name in qai_hub_models +id: convnext_tiny +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: ConvNextTiny is a machine learning model that can classify images from + the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. 
+use_case: Image Classification +tags: [] +research_paper: https://arxiv.org/abs/2201.03545 +research_paper_title: A ConvNet for the 2020s +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/convnext.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 28.6M + Model size: 109 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +form_factors: + - Phone + - Tablet + - IoT +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/convnext_tiny/model.py b/qai_hub_models/models/convnext_tiny/model.py new file mode 100644 index 00000000..8b838392 --- /dev/null +++ b/qai_hub_models/models/convnext_tiny/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class ConvNextTiny(ImagenetClassifier): + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ImagenetClassifier: + net = tv_models.convnext_tiny(weights=weights) + return cls(net) diff --git a/qai_hub_models/models/convnext_tiny/perf.yaml b/qai_hub_models/models/convnext_tiny/perf.yaml new file mode 100644 index 00000000..e76fd904 --- /dev/null +++ b/qai_hub_models/models/convnext_tiny/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: ConvNext-Tiny + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 11532.0 + throughput: 86.71522719389525 + estimated_peak_memory_range: + min: 339968 + max: 2817216 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 380 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 380 + job_id: jmg9zykqp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:23:45.273161Z' diff --git a/qai_hub_models/models/convnext_tiny/test.py b/qai_hub_models/models/convnext_tiny/test.py new file mode 100644 index 00000000..b45cb350 --- /dev/null +++ 
b/qai_hub_models/models/convnext_tiny/test.py @@ -0,0 +1,23 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.convnext_tiny.demo import main as demo_main +from qai_hub_models.models.convnext_tiny.model import MODEL_ID, ConvNextTiny + + +def test_task(): + run_imagenet_classifier_test(ConvNextTiny.from_pretrained(), MODEL_ID) + + +def test_trace(): + run_imagenet_classifier_trace_test(ConvNextTiny.from_pretrained()) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/ddrnet23_slim/README.md b/qai_hub_models/models/ddrnet23_slim/README.md new file mode 100644 index 00000000..96da4502 --- /dev/null +++ b/qai_hub_models/models/ddrnet23_slim/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [DDRNet23-Slim: Segment images or video by class in real-time on device](https://aihub.qualcomm.com/models/ddrnet23_slim) + +DDRNet23Slim is a machine learning model that segments an image into semantic classes, specifically designed for road-based scenes. It is designed for the application of self-driving cars. + +This is based on the implementation of DDRNet23-Slim found +[here](https://github.com/chenjun2hao/DDRNet.pytorch). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/ddrnet23_slim). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.ddrnet23_slim.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.ddrnet23_slim.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of DDRNet23-Slim can be found + [here](https://github.com/chenjun2hao/DDRNet.pytorch/blob/main/LICENSE). 
+ + +## References +* [Deep Dual-resolution Networks for Real-time and Accurate Semantic Segmentation of Road Scenes](https://arxiv.org/abs/2101.06085) +* [Source Model Implementation](https://github.com/chenjun2hao/DDRNet.pytorch) diff --git a/qai_hub_models/models/ddrnet23_slim/__init__.py b/qai_hub_models/models/ddrnet23_slim/__init__.py new file mode 100644 index 00000000..5760d739 --- /dev/null +++ b/qai_hub_models/models/ddrnet23_slim/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .app import DDRNetApp as App # noqa: F401 +from .model import MODEL_ID # noqa: F401 +from .model import DDRNet as Model # noqa: F401 diff --git a/qai_hub_models/models/ddrnet23_slim/app.py b/qai_hub_models/models/ddrnet23_slim/app.py new file mode 100644 index 00000000..50ecac58 --- /dev/null +++ b/qai_hub_models/models/ddrnet23_slim/app.py @@ -0,0 +1,111 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable, List + +import numpy as np +import torch +import torch.nn.functional as F +from PIL import Image + +from qai_hub_models.models.ddrnet23_slim.model import NUM_CLASSES +from qai_hub_models.utils.draw import create_color_map +from qai_hub_models.utils.image_processing import ( + app_to_net_image_inputs, + normalize_image_transform, +) + + +class DDRNetApp: + """ + This class consists of lightweight "app code" that is required to perform end-to-end inference with DDRNet. + + The app uses 1 model: + * DDRNet + + For a given image input, the app will: + * pre-process the image (convert to range [0, 1]) + * Run DDRNet inference + * Convert the output segmentation mask into a visual representation + * Overlay the segmentation mask onto the image and return it + """ + + def __init__(self, model: Callable[[torch.Tensor], torch.Tensor]): + self.model = model + + def predict(self, *args, **kwargs): + # See segment_image. + return self.segment_image(*args, **kwargs) + + def segment_image( + self, + pixel_values_or_image: torch.Tensor + | np.ndarray + | Image.Image + | List[Image.Image], + raw_output: bool = False, + ) -> List[Image.Image] | np.ndarray: + """ + Return the input image with the segmentation mask overlaid on it. + + Parameters: + pixel_values_or_image + PIL image(s) + or + numpy array (N H W C x uint8) or (H W C x uint8) -- both RGB channel layout + or + PyTorch tensor (N C H W x fp32, value range is [0, 1]), RGB channel layout + + raw_output: bool + See "returns" doc section for details. + + Returns: + If raw_output is true, returns: + masks: np.ndarray + A list of predicted masks. + + Otherwise, returns: + segmented_images: List[PIL.Image] + Images with segmentation map overlaid with an alpha of 0.5. 
+ """ + NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs( + pixel_values_or_image + ) + input_transform = normalize_image_transform() + NCHW_fp32_torch_frames = input_transform(NCHW_fp32_torch_frames) + + with torch.no_grad(): + # pred_mask is 8x downsampled + pred_masks = self.model(NCHW_fp32_torch_frames) + + # Upsample pred mask to original image size + # Need to upsample in the probability space, not in class labels + pred_masks = F.interpolate( + input=pred_masks, + size=NCHW_fp32_torch_frames.shape[-2:], + mode="bilinear", + align_corners=False, + ) + + if raw_output: + return pred_masks.detach().numpy() + + # Create color map and convert segmentation mask to RGB image + pred_mask_img = torch.argmax(pred_masks, 1) + + # Overlay the segmentation mask on the image. alpha=1 is mask only, + # alpha=0 is image only. + color_map = create_color_map(NUM_CLASSES) + out = [] + for i, img_tensor in enumerate(NHWC_int_numpy_frames): + out.append( + Image.blend( + Image.fromarray(img_tensor), + Image.fromarray(color_map[pred_mask_img[i]]), + alpha=0.5, + ) + ) + return out diff --git a/qai_hub_models/models/ddrnet23_slim/demo.py b/qai_hub_models/models/ddrnet23_slim/demo.py new file mode 100644 index 00000000..7c0a166e --- /dev/null +++ b/qai_hub_models/models/ddrnet23_slim/demo.py @@ -0,0 +1,60 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models.ddrnet23_slim.app import DDRNetApp +from qai_hub_models.models.ddrnet23_slim.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + DDRNet, +) +from qai_hub_models.utils.args import ( + demo_model_from_cli_args, + get_model_cli_parser, + get_on_device_demo_parser, + validate_on_device_demo_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.display import display_or_save_image +from qai_hub_models.utils.image_processing import pil_resize_pad, pil_undo_resize_pad + +INPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "test_input_image.png" +) + + +# Run DDRNet end-to-end on a sample image. +# The demo will display a image with the predicted segmentation map overlaid. 
+def main(is_test: bool = False): + # Demo parameters + parser = get_model_cli_parser(DDRNet) + parser = get_on_device_demo_parser(parser, add_output_dir=True) + parser.add_argument( + "--image", + type=str, + default=INPUT_IMAGE_ADDRESS, + help="image file path or URL", + ) + args = parser.parse_args([] if is_test else None) + model = demo_model_from_cli_args(DDRNet, args) + validate_on_device_demo_args(args, DDRNet.get_model_id()) + + # Load image + (_, _, height, width) = DDRNet.get_input_spec()["image"][0] + orig_image = load_image(args.image) + image, _, padding = pil_resize_pad(orig_image, (height, width)) + print("Model Loaded") + + app = DDRNetApp(model) + output = app.segment_image(image)[0] + + if not is_test: + # Resize / unpad annotated image + image_annotated = pil_undo_resize_pad(output, orig_image.size, padding) + display_or_save_image( + image_annotated, args.output_dir, "ddrnet_demo_output.png" + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ddrnet23_slim/export.py b/qai_hub_models/models/ddrnet23_slim/export.py new file mode 100644 index 00000000..4141c8d6 --- /dev/null +++ b/qai_hub_models/models/ddrnet23_slim/export.py @@ -0,0 +1,197 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.ddrnet23_slim import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. 
+ skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "ddrnet23_slim" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "ddrnet23_slim", + "DDRNet23-Slim", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ddrnet23_slim/info.yaml b/qai_hub_models/models/ddrnet23_slim/info.yaml new file mode 100644 index 00000000..95e0fdab --- /dev/null +++ b/qai_hub_models/models/ddrnet23_slim/info.yaml @@ -0,0 +1,37 @@ +name: DDRNet23-Slim +# id must match with the model dir name in qai_hub_models +id: ddrnet23_slim +status: public +headline: Segment images or video by class in real-time on device. +domain: Computer Vision +description: DDRNet23Slim is a machine learning model that segments an image into + semantic classes, specifically designed for road-based scenes. It is designed for + the application of self-driving cars. +use_case: Semantic Segmentation +tags: + - real-time +research_paper: https://arxiv.org/abs/2101.06085 +research_paper_title: Deep Dual-resolution Networks for Real-time and Accurate Semantic + Segmentation of Road Scenes +license: https://github.com/chenjun2hao/DDRNet.pytorch/blob/main/LICENSE +source_repo: https://github.com/chenjun2hao/DDRNet.pytorch +technical_details: + Model checkpoint: DDRNet23s_imagenet.pth + Inference latency: RealTime + Input resolution: 2048x1024 + Number of parameters: 5.69M + Model size: 21.7 MB +applicable_scenarios: + - Self-driving cars +related_models: + - unet_segmentation + - fcn_resnet50 +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: mit +dataset: + - cityscapes diff --git a/qai_hub_models/models/ddrnet23_slim/model.py b/qai_hub_models/models/ddrnet23_slim/model.py new file mode 100644 index 00000000..b9e0d9c8 --- /dev/null +++ b/qai_hub_models/models/ddrnet23_slim/model.py @@ -0,0 +1,107 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from pathlib import Path + +import torch +import torch.nn as nn + +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, SourceAsRoot +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +DDRNET_SOURCE_REPOSITORY = "https://github.com/chenjun2hao/DDRNet.pytorch" +DDRNET_SOURCE_REPO_COMMIT = "bc0e193e87ead839dbc715c48e6bfb059cf21b27" +MODEL_ID = __name__.split(".")[-2] +# Originally from https://drive.google.com/file/d/1d_K3Af5fKHYwxSo8HkxpnhiekhwovmiP/view +DEFAULT_WEIGHTS = "DDRNet23s_imagenet.pth" +MODEL_ASSET_VERSION = 1 +NUM_CLASSES = 19 + + +class DDRNet(BaseModel): + """Exportable DDRNet image segmenter, end-to-end.""" + + def __init__(self, model: nn.Module) -> None: + super().__init__() + self.model = model + + @classmethod + def from_pretrained(cls, checkpoint_path: str | None = None): + """Load DDRNetSlim from a weight file created by the source DDRNetSlim repository.""" + with SourceAsRoot( + DDRNET_SOURCE_REPOSITORY, + DDRNET_SOURCE_REPO_COMMIT, + MODEL_ID, + MODEL_ASSET_VERSION, + ): + bad_init_file = Path("lib/models/__init__.py") + if bad_init_file.exists(): + bad_init_file.unlink() + + from lib.models.ddrnet_23_slim import BasicBlock, DualResNet # type: ignore + + ddrnetslim_model = DualResNet( + BasicBlock, + [2, 2, 2, 2], + num_classes=NUM_CLASSES, + planes=32, + spp_planes=128, + head_planes=64, + # No need to use aux loss for inference + augment=False, + ) + + if not checkpoint_path: + checkpoint_path = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_WEIGHTS + ).fetch() + + pretrained_dict = torch.load( + checkpoint_path, map_location=torch.device("cpu") + ) + if "state_dict" in pretrained_dict: + pretrained_dict = pretrained_dict["state_dict"] + model_dict = ddrnetslim_model.state_dict() + pretrained_dict = { + k[6:]: v + for k, v in pretrained_dict.items() + if k[6:] in model_dict.keys() + } + model_dict.update(pretrained_dict) + ddrnetslim_model.load_state_dict(model_dict) + + ddrnetslim_model.to(torch.device("cpu")).eval() + + return cls(ddrnetslim_model) + + def forward(self, image: torch.Tensor): + """ + Run DDRNet23_Slim on `image`, and produce a predicted segmented image mask. + + Parameters: + image: Pixel values pre-processed for encoder consumption. + Range: float[0, 1] + 3-channel Color Space: BGR + + Returns: + segmented mask per class: Shape [batch, classes, 128, 256] + """ + return self.model(image) + + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 1280, + width: int = 640, + ) -> InputSpec: + """ + Returns the input specification (name -> (shape, type)). This can be + used to submit a profiling job on Qualcomm AI Hub. Default resolution is 2048x1024 + so this expects an image where width is twice the height. 
+ """ + return {"image": ((batch_size, num_channels, height, width), "float32")} diff --git a/qai_hub_models/models/ddrnet23_slim/perf.yaml b/qai_hub_models/models/ddrnet23_slim/perf.yaml new file mode 100644 index 00000000..1f405dd9 --- /dev/null +++ b/qai_hub_models/models/ddrnet23_slim/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: DDRNet23-Slim + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 6736.0 + throughput: 148.45605700712588 + estimated_peak_memory_range: + min: 991232 + max: 3246040 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 131 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 131 + job_id: jvgddqv6g + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:12:22.404643Z' diff --git a/qai_hub_models/models/ddrnet23_slim/test.py b/qai_hub_models/models/ddrnet23_slim/test.py new file mode 100644 index 00000000..5833b4d1 --- /dev/null +++ b/qai_hub_models/models/ddrnet23_slim/test.py @@ -0,0 +1,50 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models.ddrnet23_slim.app import DDRNetApp +from qai_hub_models.models.ddrnet23_slim.demo import INPUT_IMAGE_ADDRESS +from qai_hub_models.models.ddrnet23_slim.demo import main as demo_main +from qai_hub_models.models.ddrnet23_slim.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + DDRNet, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import assert_most_same, skip_clone_repo_check + +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "test_output_image.png" +) + + +# Verify that the output from Torch is as expected. 
+@skip_clone_repo_check +def test_task(): + app = DDRNetApp(DDRNet.from_pretrained()) + original_image = load_image(INPUT_IMAGE_ADDRESS) + output_image = app.segment_image(original_image)[0] + output_image_oracle = load_image(OUTPUT_IMAGE_ADDRESS) + + assert_most_same( + np.asarray(output_image), np.asarray(output_image_oracle), diff_tol=0.01 + ) + + +@skip_clone_repo_check +def test_trace(): + app = DDRNetApp(DDRNet.from_pretrained().convert_to_torchscript()) + original_image = load_image(INPUT_IMAGE_ADDRESS) + output_image = app.segment_image(original_image)[0] + output_image_oracle = load_image(OUTPUT_IMAGE_ADDRESS) + + assert_most_same( + np.asarray(output_image), np.asarray(output_image_oracle), diff_tol=0.01 + ) + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/deeplabv3_resnet50/README.md b/qai_hub_models/models/deeplabv3_resnet50/README.md new file mode 100644 index 00000000..8d4db7eb --- /dev/null +++ b/qai_hub_models/models/deeplabv3_resnet50/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [DeepLabV3-ResNet50: Deep Convolutional Neural Network model for semantic segmentation](https://aihub.qualcomm.com/models/deeplabv3_resnet50) + +DeepLabV3 is designed for semantic segmentation at multiple scales, trained on the COCO dataset. It uses ResNet50 as a backbone. + +This is based on the implementation of DeepLabV3-ResNet50 found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/segmentation/deeplabv3.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +across various devices can be found [here](https://aihub.qualcomm.com/models/deeplabv3_resnet50). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.deeplabv3_resnet50.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts (a short Python usage sketch is also included after the License section below). Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.deeplabv3_resnet50.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Qualcomm® AI Hub; see the Qualcomm® AI Hub deployment instructions for details. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of DeepLabV3-ResNet50 can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). 
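## Python usage (illustrative sketch)

As a rough sketch of programmatic use, mirroring how this package's bundled tests drive the model, the shared DeepLab app wrapper can be called directly from Python. The image path below is a placeholder, and `App` refers to the shared `DeepLabV3App` that this package re-exports.

```python
from qai_hub_models.models.deeplabv3_resnet50 import App, Model
from qai_hub_models.models.deeplabv3_resnet50.model import NUM_CLASSES
from qai_hub_models.utils.asset_loaders import load_image

# Wrap the pretrained torchvision DeepLabV3-ResNet50 in the shared DeepLab app.
app = App(Model.from_pretrained(), num_classes=NUM_CLASSES)

# The second argument matches how the bundled tests call predict();
# see the shared DeepLabV3App for the exact output semantics.
image = load_image("example_input.png")  # placeholder path
annotated = app.predict(image, False)
```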
+ + +## References +* [Rethinking Atrous Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1706.05587) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/segmentation/deeplabv3.py) diff --git a/qai_hub_models/models/deeplabv3_resnet50/__init__.py b/qai_hub_models/models/deeplabv3_resnet50/__init__.py new file mode 100644 index 00000000..a3298d43 --- /dev/null +++ b/qai_hub_models/models/deeplabv3_resnet50/__init__.py @@ -0,0 +1,8 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.deeplab.app import DeepLabV3App as App # noqa: F401 + +from .model import MODEL_ID # noqa: F401 +from .model import DeepLabV3_ResNet50 as Model # noqa: F401 diff --git a/qai_hub_models/models/deeplabv3_resnet50/demo.py b/qai_hub_models/models/deeplabv3_resnet50/demo.py new file mode 100644 index 00000000..e182f4ae --- /dev/null +++ b/qai_hub_models/models/deeplabv3_resnet50/demo.py @@ -0,0 +1,27 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.deeplab.demo import deeplabv3_demo +from qai_hub_models.models.deeplabv3_resnet50.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + NUM_CLASSES, + DeepLabV3_ResNet50, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +# Demo image comes from https://github.com/pytorch/hub/raw/master/images/deeplab1.png +# and has had alpha channel removed for use as input +INPUT_IMAGE_LOCAL_PATH = "deeplabv3_demo.png" +INPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, INPUT_IMAGE_LOCAL_PATH +) + + +def main(is_test: bool = False): + deeplabv3_demo(DeepLabV3_ResNet50, INPUT_IMAGE_ADDRESS, NUM_CLASSES, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/deeplabv3_resnet50/export.py b/qai_hub_models/models/deeplabv3_resnet50/export.py new file mode 100644 index 00000000..cb49af85 --- /dev/null +++ b/qai_hub_models/models/deeplabv3_resnet50/export.py @@ -0,0 +1,194 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.deeplabv3_resnet50 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "deeplabv3_resnet50" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "deeplabv3_resnet50", + "DeepLabV3-ResNet50", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0,output_1", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options + " --compute_unit gpu", + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options + " --compute_unit gpu", + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0,output_1", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/deeplabv3_resnet50/info.yaml b/qai_hub_models/models/deeplabv3_resnet50/info.yaml new file mode 100644 index 00000000..2e150816 --- /dev/null +++ b/qai_hub_models/models/deeplabv3_resnet50/info.yaml @@ -0,0 +1,35 @@ +name: DeepLabV3-ResNet50 +# id must match with the model dir name in qai_hub_models +id: deeplabv3_resnet50 +status: public +headline: Deep Convolutional Neural Network model for semantic segmentation. +domain: Computer Vision +use_case: Semantic Segmentation +description: DeepLabV3 is designed for semantic segmentation at multiple scales, trained + on the COCO dataset. It uses ResNet50 as a backbone. 
+tags: [] +research_paper: https://arxiv.org/abs/1706.05587 +research_paper_title: Rethinking Atrous Convolution for Semantic Image Segmentation +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: + https://github.com/pytorch/vision/blob/main/torchvision/models/segmentation/deeplabv3.py +technical_details: + Model checkpoint: COCO_WITH_VOC_LABELS_V1 + Input resolution: 224x224 + Number of parameters: 39.6M + Model size: 151 MB +applicable_scenarios: + - Anomaly Detection + - Inventory Management +related_models: + - sam + - unet_segmentation + - fcn_resnet50 +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: [] diff --git a/qai_hub_models/models/deeplabv3_resnet50/model.py b/qai_hub_models/models/deeplabv3_resnet50/model.py new file mode 100644 index 00000000..50bd69bd --- /dev/null +++ b/qai_hub_models/models/deeplabv3_resnet50/model.py @@ -0,0 +1,64 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch +import torchvision.models as tv_models + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator +from qai_hub_models.models._shared.deeplab.evaluator import DeepLabV3Evaluator +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +DEFAULT_WEIGHTS = "COCO_WITH_VOC_LABELS_V1" +NUM_CLASSES = 21 + + +class DeepLabV3_ResNet50(BaseModel): + """Exportable DeepLabV3_ResNet50 image segmentation applications, end-to-end.""" + + def __init__( + self, + deeplabv3_model: torch.nn.Module, + ) -> None: + super().__init__() + self.model = deeplabv3_model + + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> DeepLabV3_ResNet50: + model = tv_models.segmentation.deeplabv3_resnet50(weights=weights).eval() + return cls(model) + + def get_evaluator(self) -> BaseEvaluator: + return DeepLabV3Evaluator(NUM_CLASSES) + + def forward(self, image: torch.Tensor) -> torch.Tensor: + """ + Run DeepLabV3_ResNet50 on `image`, and produce a tensor of classes for segmentation + + Parameters: + image: Pixel values pre-processed for model consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + tensor: Bx21xHxW tensor of class logits per pixel + """ + return self.model(image)["out"] + + def get_input_spec( + self, + batch_size: int = 1, + num_channels: int = 3, + height: int = 224, + width: int = 224, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. 
+ return {"image": ((batch_size, num_channels, height, width), "float32")} diff --git a/qai_hub_models/models/deeplabv3_resnet50/perf.yaml b/qai_hub_models/models/deeplabv3_resnet50/perf.yaml new file mode 100644 index 00000000..eeccef3a --- /dev/null +++ b/qai_hub_models/models/deeplabv3_resnet50/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: DeepLabV3-ResNet50 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 57759.0 + throughput: 17.313319136411643 + estimated_peak_memory_range: + min: 12288 + max: 171360368 + primary_compute_unit: GPU + precision: fp16 + layer_info: + layers_on_npu: 0 + layers_on_gpu: 96 + layers_on_cpu: 0 + total_layers: 96 + job_id: jqp4ydxqp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 146022.0 + throughput: 6.848283135417951 + estimated_peak_memory_range: + min: 806912 + max: 9532744 + primary_compute_unit: GPU + precision: fp16 + layer_info: + layers_on_npu: 0 + layers_on_gpu: 82 + layers_on_cpu: 0 + total_layers: 82 + job_id: j0pxl67jp + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:31:27.279356Z' diff --git a/qai_hub_models/models/deeplabv3_resnet50/test.py b/qai_hub_models/models/deeplabv3_resnet50/test.py new file mode 100644 index 00000000..cfff53bf --- /dev/null +++ b/qai_hub_models/models/deeplabv3_resnet50/test.py @@ -0,0 +1,61 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models._shared.deeplab.app import DeepLabV3App +from qai_hub_models.models.deeplabv3_resnet50.demo import INPUT_IMAGE_ADDRESS +from qai_hub_models.models.deeplabv3_resnet50.demo import main as demo_main +from qai_hub_models.models.deeplabv3_resnet50.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + NUM_CLASSES, + DeepLabV3_ResNet50, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import assert_most_close, skip_clone_repo_check + +OUTPUT_IMAGE_LOCAL_PATH = "deeplabv3_demo_output.png" +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, OUTPUT_IMAGE_LOCAL_PATH +) + + +@skip_clone_repo_check +def test_task(): + image = load_image(INPUT_IMAGE_ADDRESS) + output_image = load_image(OUTPUT_IMAGE_ADDRESS) + app = DeepLabV3App(DeepLabV3_ResNet50.from_pretrained(), num_classes=NUM_CLASSES) + app_output_image = app.predict(image, False) + + np.testing.assert_allclose( + np.asarray(app_output_image, dtype=np.float32) / 255, + np.asarray(output_image, dtype=np.float32) / 255, + rtol=0.02, + atol=0.2, + ) + + +@skip_clone_repo_check +def test_trace(): + image = load_image(INPUT_IMAGE_ADDRESS) + output_image = load_image(OUTPUT_IMAGE_ADDRESS) + app = DeepLabV3App( + DeepLabV3_ResNet50.from_pretrained().convert_to_torchscript(), + num_classes=NUM_CLASSES, + ) + app_output_image = app.predict(image, False) + + assert_most_close( + np.asarray(app_output_image, dtype=np.float32) / 255, + np.asarray(output_image, dtype=np.float32) / 255, + diff_tol=0.005, + rtol=0.02, + atol=0.2, + ) + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/densenet121/README.md b/qai_hub_models/models/densenet121/README.md new file mode 100644 index 00000000..5636d301 --- /dev/null +++ b/qai_hub_models/models/densenet121/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [DenseNet-121: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/densenet121) + +Densenet is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of DenseNet-121 found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +across various devices can be found [here](https://aihub.qualcomm.com/models/densenet121). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.densenet121.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. 
This can be run as follows: + +```bash +python -m qai_hub_models.models.densenet121.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Qualcomm® AI Hub; see the Qualcomm® AI Hub deployment instructions for details. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of DenseNet-121 can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). + + +## References
* [Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py) diff --git a/qai_hub_models/models/densenet121/__init__.py b/qai_hub_models/models/densenet121/__init__.py new file mode 100644 index 00000000..9c3b2efe --- /dev/null +++ b/qai_hub_models/models/densenet121/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import DenseNet as Model # noqa: F401 diff --git a/qai_hub_models/models/densenet121/demo.py b/qai_hub_models/models/densenet121/demo.py new file mode 100644 index 00000000..72ba762b --- /dev/null +++ b/qai_hub_models/models/densenet121/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.densenet121.model import DenseNet + + +def main(is_test: bool = False): + imagenet_demo(DenseNet, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/densenet121/export.py b/qai_hub_models/models/densenet121/export.py new file mode 100644 index 00000000..a7b4e0ba --- /dev/null +++ b/qai_hub_models/models/densenet121/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.densenet121 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "densenet121" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "densenet121", + "DenseNet-121", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/densenet121/info.yaml b/qai_hub_models/models/densenet121/info.yaml new file mode 100644 index 00000000..7eb5a937 --- /dev/null +++ b/qai_hub_models/models/densenet121/info.yaml @@ -0,0 +1,39 @@ +name: DenseNet-121 +# id must match with the model dir name in qai_hub_models +id: densenet121 +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: Densenet is a machine learning model that can classify images from the + Imagenet dataset. It can also be used as a backbone in building more complex models + for specific use cases. 
+use_case: Image Classification +tags: + - backbone +research_paper: https://arxiv.org/abs/1608.06993 +research_paper_title: Densely Connected Convolutional Networks +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 7.97M + Model size: 30.5 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - squeezenet1_1 + - googlenet +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/densenet121/model.py b/qai_hub_models/models/densenet121/model.py new file mode 100644 index 00000000..79faf024 --- /dev/null +++ b/qai_hub_models/models/densenet121/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class DenseNet(ImagenetClassifier): + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ImagenetClassifier: + net = tv_models.densenet121(weights=weights) + return cls(net) diff --git a/qai_hub_models/models/densenet121/perf.yaml b/qai_hub_models/models/densenet121/perf.yaml new file mode 100644 index 00000000..ed91b04c --- /dev/null +++ b/qai_hub_models/models/densenet121/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: DenseNet-121 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 1605.0 + throughput: 623.0529595015577 + estimated_peak_memory_range: + min: 28672 + max: 20688688 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 310 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 310 + job_id: jlpe7w275 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 1449.0 + throughput: 690.1311249137336 + estimated_peak_memory_range: + min: 73728 + max: 209142552 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 371 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 371 + job_id: jygzljwz5 + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:21:38.990133Z' diff --git a/qai_hub_models/models/densenet121/test.py b/qai_hub_models/models/densenet121/test.py new file mode 100644 index 
00000000..82b584c4 --- /dev/null +++ b/qai_hub_models/models/densenet121/test.py @@ -0,0 +1,23 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.densenet121.demo import main as demo_main +from qai_hub_models.models.densenet121.model import MODEL_ID, DenseNet + + +def test_task(): + run_imagenet_classifier_test(DenseNet.from_pretrained(), MODEL_ID) + + +def test_trace(): + run_imagenet_classifier_trace_test(DenseNet.from_pretrained()) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/detr_resnet101/README.md b/qai_hub_models/models/detr_resnet101/README.md new file mode 100644 index 00000000..7151e680 --- /dev/null +++ b/qai_hub_models/models/detr_resnet101/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [DETR-ResNet101: Transformer-based object detector with ResNet101 backbone](https://aihub.qualcomm.com/models/detr_resnet101) + +DETR is a machine learning model that can detect objects (trained on the COCO dataset). + +This is based on the implementation of DETR-ResNet101 found +[here](https://github.com/facebookresearch/detr). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +across various devices can be found [here](https://aihub.qualcomm.com/models/detr_resnet101). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[detr_resnet101]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.detr_resnet101.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.detr_resnet101.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Qualcomm® AI Hub; see the Qualcomm® AI Hub deployment instructions for details. A short Python sketch of the export flow is included after the License section below. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of DETR-ResNet101 can be found + [here](https://github.com/facebookresearch/detr/blob/main/LICENSE). 
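## Python usage (illustrative sketch)

The export flow shown above can also be driven from Python. The snippet below is only a sketch built from the arguments of `export_model` in [export.py](export.py); it assumes access to Qualcomm® AI Hub, keeps the script's default target device, and skips the optional profiling and inference steps.

```python
from qai_hub_models.models.detr_resnet101.export import export_model

# Compile DETR-ResNet101 for the default hosted device and download the
# compiled asset; profiling and on-device inference are skipped here.
jobs = export_model(
    device="Samsung Galaxy S23",
    skip_profiling=True,
    skip_inferencing=True,
    dst_runtime="TFLITE",
)
print(jobs)  # (compile job, profile job or None, inference job or None)
```

Without Qualcomm® AI Hub access, `export_model` takes the `export_without_hub_access` fallback path instead of submitting jobs.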
+ + +## References +* [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) +* [Source Model Implementation](https://github.com/facebookresearch/detr) diff --git a/qai_hub_models/models/detr_resnet101/__init__.py b/qai_hub_models/models/detr_resnet101/__init__.py new file mode 100644 index 00000000..3c9a82e6 --- /dev/null +++ b/qai_hub_models/models/detr_resnet101/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.detr.app import DETRApp as App # noqa: F401 +from qai_hub_models.models.detr_resnet101.model import ( # noqa: F401 + DETRResNet101 as Model, +) + +from .model import MODEL_ID # noqa: F401 diff --git a/qai_hub_models/models/detr_resnet101/demo.py b/qai_hub_models/models/detr_resnet101/demo.py new file mode 100644 index 00000000..d9ed9cc0 --- /dev/null +++ b/qai_hub_models/models/detr_resnet101/demo.py @@ -0,0 +1,26 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.detr.demo import detr_demo +from qai_hub_models.models.detr_resnet101.model import ( + DEFAULT_WEIGHTS, + MODEL_ASSET_VERSION, + MODEL_ID, + DETRResNet101, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "detr_demo_image.jpg" +) + + +# Run DETR app end-to-end on a sample image. +# The demo will display the predicted mask in a window. +def main(is_test: bool = False): + detr_demo(DETRResNet101, DEFAULT_WEIGHTS, IMAGE_ADDRESS, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/detr_resnet101/export.py b/qai_hub_models/models/detr_resnet101/export.py new file mode 100644 index 00000000..821fc3db --- /dev/null +++ b/qai_hub_models/models/detr_resnet101/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
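+#
+# Illustrative usage: besides the CLI entry point
+# (`python -m qai_hub_models.models.detr_resnet101.export`), the export flow
+# below can be invoked programmatically, e.g.
+#
+#   from qai_hub_models.models.detr_resnet101.export import export_model
+#   compile_job, profile_job, inference_job = export_model(
+#       device="Samsung Galaxy S23",  # see hub.get_devices() for other options
+#   )
+#
+# Submitting jobs this way requires access to Qualcomm AI Hub; without it the
+# function falls back to export_without_hub_access.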
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.detr_resnet101 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "detr_resnet101" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "detr_resnet101", + "DETR-ResNet101", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/detr_resnet101/info.yaml b/qai_hub_models/models/detr_resnet101/info.yaml new file mode 100644 index 00000000..45c4e48e --- /dev/null +++ b/qai_hub_models/models/detr_resnet101/info.yaml @@ -0,0 +1,36 @@ +name: DETR-ResNet101 +# id must match with the model dir name in qai_hub_models +id: detr_resnet101 +status: public +tags: [] +headline: Transformer based object detector with ResNet101 backbone. +domain: Computer Vision +description: DETR is a machine learning model that can detect objects (trained on + COCO dataset). 
+use_case: Object Detection +research_paper: https://arxiv.org/abs/2005.12872 +research_paper_title: End-to-End Object Detection with Transformers +license: https://github.com/facebookresearch/detr/blob/main/LICENSE +source_repo: https://github.com/facebookresearch/detr +technical_details: + Model checkpoint: ResNet101 + Input resolution: 480x480 + Number of parameters: 60.3M + Model size: 230 MB +applicable_scenarios: + - Factory Automation + - Robotic Navigation + - Camera +related_models: + - detr_resnet50 + - detr_resnet50_dc5 + - detr_resnet101_dc5 +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: apache-2.0 +dataset: + - detection-datasets/coco diff --git a/qai_hub_models/models/detr_resnet101/model.py b/qai_hub_models/models/detr_resnet101/model.py new file mode 100644 index 00000000..ad1b7fd2 --- /dev/null +++ b/qai_hub_models/models/detr_resnet101/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from qai_hub_models.models._shared.detr.model import DETR + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "facebook/detr-resnet-101" +MODEL_ASSET_VERSION = 1 + + +class DETRResNet101(DETR): + """Exportable DETR model, end-to-end.""" + + @classmethod + def from_pretrained(cls, ckpt_name: str = DEFAULT_WEIGHTS): + return DETR.from_pretrained(ckpt_name) diff --git a/qai_hub_models/models/detr_resnet101/perf.yaml b/qai_hub_models/models/detr_resnet101/perf.yaml new file mode 100644 index 00000000..86375675 --- /dev/null +++ b/qai_hub_models/models/detr_resnet101/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: DETR-ResNet101 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 640294.0 + throughput: 1.5617825561382739 + estimated_peak_memory_range: + min: 107266048 + max: 111542968 + primary_compute_unit: CPU + precision: fp32 + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 957 + total_layers: 957 + job_id: jz5wl39zp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:15:32.226652Z' diff --git a/qai_hub_models/models/detr_resnet101/requirements.txt b/qai_hub_models/models/detr_resnet101/requirements.txt new file mode 100644 index 00000000..3582ec2c --- /dev/null +++ 
b/qai_hub_models/models/detr_resnet101/requirements.txt @@ -0,0 +1,2 @@ +transformers==4.31.0 +timm==0.9.7 diff --git a/qai_hub_models/models/detr_resnet101/test.py b/qai_hub_models/models/detr_resnet101/test.py new file mode 100644 index 00000000..7a9b8da1 --- /dev/null +++ b/qai_hub_models/models/detr_resnet101/test.py @@ -0,0 +1,31 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.detr.app import DETRApp +from qai_hub_models.models.detr_resnet101.demo import MODEL_ASSET_VERSION, MODEL_ID +from qai_hub_models.models.detr_resnet101.demo import main as demo_main +from qai_hub_models.models.detr_resnet101.model import DEFAULT_WEIGHTS, DETRResNet101 +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "detr_test_image.jpg" +) + + +def test_task(): + net = DETRResNet101.from_pretrained(DEFAULT_WEIGHTS) + img = load_image(IMAGE_ADDRESS) + _, _, label, _ = DETRApp(net).predict(img, DEFAULT_WEIGHTS) + assert set(list(label.numpy())) == {75, 63, 17} + + +def test_trace(): + net = DETRResNet101.from_pretrained(DEFAULT_WEIGHTS).convert_to_torchscript() + img = load_image(IMAGE_ADDRESS) + _, _, label, _ = DETRApp(net).predict(img, DEFAULT_WEIGHTS) + assert set(list(label.numpy())) == {75, 63, 17} + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/detr_resnet101_dc5/README.md b/qai_hub_models/models/detr_resnet101_dc5/README.md new file mode 100644 index 00000000..bc12519e --- /dev/null +++ b/qai_hub_models/models/detr_resnet101_dc5/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [DETR-ResNet101-DC5: Transformer based object detector with ResNet101 backbone (dilated C5 stage)](https://aihub.qualcomm.com/models/detr_resnet101_dc5) + +DETR is a machine learning model that can detect objects (trained on COCO dataset). + +This is based on the implementation of DETR-ResNet101-DC5 found +[here](https://github.com/facebookresearch/detr). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/detr_resnet101_dc5). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[detr_resnet101_dc5]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.detr_resnet101_dc5.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.detr_resnet101_dc5.export +``` +Additional options are documented with the `--help` option. 
Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of DETR-ResNet101-DC5 can be found + [here](https://github.com/facebookresearch/detr/blob/main/LICENSE). + + +## References +* [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) +* [Source Model Implementation](https://github.com/facebookresearch/detr) diff --git a/qai_hub_models/models/detr_resnet101_dc5/__init__.py b/qai_hub_models/models/detr_resnet101_dc5/__init__.py new file mode 100644 index 00000000..4f36f774 --- /dev/null +++ b/qai_hub_models/models/detr_resnet101_dc5/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.detr.app import DETRApp as App # noqa: F401 +from qai_hub_models.models.detr_resnet101_dc5.model import ( # noqa: F401 + DETRResNet101DC5 as Model, +) + +from .model import MODEL_ID # noqa: F401 diff --git a/qai_hub_models/models/detr_resnet101_dc5/demo.py b/qai_hub_models/models/detr_resnet101_dc5/demo.py new file mode 100644 index 00000000..fd286725 --- /dev/null +++ b/qai_hub_models/models/detr_resnet101_dc5/demo.py @@ -0,0 +1,26 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.detr.demo import detr_demo +from qai_hub_models.models.detr_resnet101_dc5.model import ( + DEFAULT_WEIGHTS, + MODEL_ASSET_VERSION, + MODEL_ID, + DETRResNet101DC5, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "detr_demo_image.jpg" +) + + +# Run DETR app end-to-end on a sample image. +# The demo will display the predicted mask in a window. +def main(is_test: bool = False): + detr_demo(DETRResNet101DC5, DEFAULT_WEIGHTS, IMAGE_ADDRESS, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/detr_resnet101_dc5/export.py b/qai_hub_models/models/detr_resnet101_dc5/export.py new file mode 100644 index 00000000..d2351cef --- /dev/null +++ b/qai_hub_models/models/detr_resnet101_dc5/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.detr_resnet101_dc5 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "detr_resnet101_dc5" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "detr_resnet101_dc5", + "DETR-ResNet101-DC5", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/detr_resnet101_dc5/info.yaml b/qai_hub_models/models/detr_resnet101_dc5/info.yaml new file mode 100644 index 00000000..323c4743 --- /dev/null +++ b/qai_hub_models/models/detr_resnet101_dc5/info.yaml @@ -0,0 +1,36 @@ +name: DETR-ResNet101-DC5 +# id must match with the model dir name in qai_hub_models +id: detr_resnet101_dc5 +status: public +tags: [] +headline: Transformer based object detector with ResNet101 backbone (dilated C5 stage). +domain: Computer Vision +description: DETR is a machine learning model that can detect objects (trained on + COCO dataset). 
+use_case: Object Detection +research_paper: https://arxiv.org/abs/2005.12872 +research_paper_title: End-to-End Object Detection with Transformers +license: https://github.com/facebookresearch/detr/blob/main/LICENSE +source_repo: https://github.com/facebookresearch/detr +technical_details: + Model checkpoint: ResNet101-DC5 + Input resolution: 480x480 + Number of parameters: 61.1M + Model size: 231 MB +applicable_scenarios: + - Factory Automation + - Robotic Navigation + - Camera +related_models: + - detr_resnet50 + - detr_resnet50_dc5 + - detr_resnet101 +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: apache-2.0 +dataset: + - detection-datasets/coco diff --git a/qai_hub_models/models/detr_resnet101_dc5/model.py b/qai_hub_models/models/detr_resnet101_dc5/model.py new file mode 100644 index 00000000..b23e643c --- /dev/null +++ b/qai_hub_models/models/detr_resnet101_dc5/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from qai_hub_models.models._shared.detr.model import DETR + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "facebook/detr-resnet-101-dc5" +MODEL_ASSET_VERSION = 1 + + +class DETRResNet101DC5(DETR): + """Exportable DETR model, end-to-end.""" + + @classmethod + def from_pretrained(cls, ckpt_name: str = DEFAULT_WEIGHTS): + return DETR.from_pretrained(ckpt_name) diff --git a/qai_hub_models/models/detr_resnet101_dc5/perf.yaml b/qai_hub_models/models/detr_resnet101_dc5/perf.yaml new file mode 100644 index 00000000..3412b2d5 --- /dev/null +++ b/qai_hub_models/models/detr_resnet101_dc5/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: DETR-ResNet101-DC5 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 971988.0 + throughput: 1.0288192858348046 + estimated_peak_memory_range: + min: 12288 + max: 291526464 + primary_compute_unit: CPU + precision: fp32 + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 958 + total_layers: 958 + job_id: jlpe7w875 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:40:02.166898Z' diff --git a/qai_hub_models/models/detr_resnet101_dc5/requirements.txt b/qai_hub_models/models/detr_resnet101_dc5/requirements.txt new file mode 100644 index 00000000..3582ec2c --- /dev/null +++ 
b/qai_hub_models/models/detr_resnet101_dc5/requirements.txt @@ -0,0 +1,2 @@ +transformers==4.31.0 +timm==0.9.7 diff --git a/qai_hub_models/models/detr_resnet101_dc5/test.py b/qai_hub_models/models/detr_resnet101_dc5/test.py new file mode 100644 index 00000000..64d63836 --- /dev/null +++ b/qai_hub_models/models/detr_resnet101_dc5/test.py @@ -0,0 +1,36 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.detr.app import DETRApp +from qai_hub_models.models.detr_resnet101_dc5.demo import IMAGE_ADDRESS +from qai_hub_models.models.detr_resnet101_dc5.demo import main as demo_main +from qai_hub_models.models.detr_resnet101_dc5.model import ( + DEFAULT_WEIGHTS, + MODEL_ASSET_VERSION, + MODEL_ID, + DETRResNet101DC5, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "detr_test_image.jpg" +) + + +def test_task(): + net = DETRResNet101DC5.from_pretrained(DEFAULT_WEIGHTS) + img = load_image(IMAGE_ADDRESS) + _, _, label, _ = DETRApp(net).predict(img, DEFAULT_WEIGHTS) + assert set(list(label.numpy())) == {75, 63, 17} + + +def test_trace(): + net = DETRResNet101DC5.from_pretrained(DEFAULT_WEIGHTS).convert_to_torchscript() + img = load_image(IMAGE_ADDRESS) + _, _, label, _ = DETRApp(net).predict(img, DEFAULT_WEIGHTS) + assert set(list(label.numpy())) == {75, 63, 17} + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/detr_resnet50/README.md b/qai_hub_models/models/detr_resnet50/README.md new file mode 100644 index 00000000..7a9910a6 --- /dev/null +++ b/qai_hub_models/models/detr_resnet50/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [DETR-ResNet50: Transformer based object detector with ResNet50 backbone](https://aihub.qualcomm.com/models/detr_resnet50) + +DETR is a machine learning model that can detect objects (trained on COCO dataset). + +This is based on the implementation of DETR-ResNet50 found +[here](https://github.com/facebookresearch/detr). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/detr_resnet50). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[detr_resnet50]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.detr_resnet50.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.detr_resnet50.export +``` +Additional options are documented with the `--help` option. 
Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of DETR-ResNet50 can be found + [here](https://github.com/facebookresearch/detr/blob/main/LICENSE). + + +## References +* [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) +* [Source Model Implementation](https://github.com/facebookresearch/detr) diff --git a/qai_hub_models/models/detr_resnet50/__init__.py b/qai_hub_models/models/detr_resnet50/__init__.py new file mode 100644 index 00000000..19d012fa --- /dev/null +++ b/qai_hub_models/models/detr_resnet50/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.detr.app import DETRApp as App # noqa: F401 +from qai_hub_models.models.detr_resnet50.model import ( # noqa: F401 + DETRResNet50 as Model, +) + +from .model import MODEL_ID # noqa: F401 diff --git a/qai_hub_models/models/detr_resnet50/demo.py b/qai_hub_models/models/detr_resnet50/demo.py new file mode 100644 index 00000000..6fcf2a7f --- /dev/null +++ b/qai_hub_models/models/detr_resnet50/demo.py @@ -0,0 +1,26 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.detr.demo import detr_demo +from qai_hub_models.models.detr_resnet50.model import ( + DEFAULT_WEIGHTS, + MODEL_ASSET_VERSION, + MODEL_ID, + DETRResNet50, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "detr_demo_image.jpg" +) + + +# Run DETR app end-to-end on a sample image. +# The demo will display the predicted mask in a window. +def main(is_test: bool = False): + detr_demo(DETRResNet50, DEFAULT_WEIGHTS, IMAGE_ADDRESS, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/detr_resnet50/export.py b/qai_hub_models/models/detr_resnet50/export.py new file mode 100644 index 00000000..c2f4003e --- /dev/null +++ b/qai_hub_models/models/detr_resnet50/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
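+#
+# Illustrative use of the returned job handles (assumes Qualcomm AI Hub access):
+#
+#   compile_job, _, _ = export_model(
+#       skip_profiling=True, skip_inferencing=True, skip_downloading=True
+#   )
+#   target_model = compile_job.get_target_model()
+#   target_model.download("detr_resnet50.tflite")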
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.detr_resnet50 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "detr_resnet50" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "detr_resnet50", + "DETR-ResNet50", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/detr_resnet50/info.yaml b/qai_hub_models/models/detr_resnet50/info.yaml new file mode 100644 index 00000000..1daf3e8b --- /dev/null +++ b/qai_hub_models/models/detr_resnet50/info.yaml @@ -0,0 +1,36 @@ +name: DETR-ResNet50 +# id must match with the model dir name in qai_hub_models +id: detr_resnet50 +status: public +tags: [] +headline: Transformer based object detector with ResNet50 backbone. +domain: Computer Vision +description: DETR is a machine learning model that can detect objects (trained on + COCO dataset). 
+use_case: Object Detection +research_paper: https://arxiv.org/abs/2005.12872 +research_paper_title: End-to-End Object Detection with Transformers +license: https://github.com/facebookresearch/detr/blob/main/LICENSE +source_repo: https://github.com/facebookresearch/detr +technical_details: + Model checkpoint: ResNet50 + Input resolution: 480x480 + Number of parameters: 41.3M + Model size: 158 MB +applicable_scenarios: + - Factory Automation + - Robotic Navigation + - Camera +related_models: + - detr_resnet50_dc5 + - detr_resnet101_dc5 + - detr_resnet101 +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: apache-2.0 +dataset: + - detection-datasets/coco diff --git a/qai_hub_models/models/detr_resnet50/model.py b/qai_hub_models/models/detr_resnet50/model.py new file mode 100644 index 00000000..9ad17ea2 --- /dev/null +++ b/qai_hub_models/models/detr_resnet50/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from qai_hub_models.models._shared.detr.model import DETR + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "facebook/detr-resnet-50" +MODEL_ASSET_VERSION = 1 + + +class DETRResNet50(DETR): + """Exportable DETR model, end-to-end.""" + + @classmethod + def from_pretrained(cls, ckpt_name: str = DEFAULT_WEIGHTS): + return DETR.from_pretrained(ckpt_name) diff --git a/qai_hub_models/models/detr_resnet50/perf.yaml b/qai_hub_models/models/detr_resnet50/perf.yaml new file mode 100644 index 00000000..a0f2be6b --- /dev/null +++ b/qai_hub_models/models/detr_resnet50/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: DETR-ResNet50 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 346284.0 + throughput: 2.887803074932714 + estimated_peak_memory_range: + min: 109121536 + max: 112011896 + primary_compute_unit: CPU + precision: fp32 + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 889 + total_layers: 889 + job_id: jvgddqrkg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:35:06.340774Z' diff --git a/qai_hub_models/models/detr_resnet50/requirements.txt b/qai_hub_models/models/detr_resnet50/requirements.txt new file mode 100644 index 00000000..3582ec2c --- /dev/null +++ b/qai_hub_models/models/detr_resnet50/requirements.txt @@ 
-0,0 +1,2 @@ +transformers==4.31.0 +timm==0.9.7 diff --git a/qai_hub_models/models/detr_resnet50/test.py b/qai_hub_models/models/detr_resnet50/test.py new file mode 100644 index 00000000..9174f95b --- /dev/null +++ b/qai_hub_models/models/detr_resnet50/test.py @@ -0,0 +1,47 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.detr.app import DETRApp +from qai_hub_models.models.detr_resnet50.demo import main as demo_main +from qai_hub_models.models.detr_resnet50.model import ( + DEFAULT_WEIGHTS, + MODEL_ASSET_VERSION, + MODEL_ID, + DETRResNet50, +) +from qai_hub_models.utils.args import get_model_cli_parser, model_from_cli_args +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image + +EXPECTED_OUTPUT = {75, 63, 17} + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "detr_test_image.jpg" +) + + +def test_task(): + net = DETRResNet50.from_pretrained() + img = load_image(IMAGE_ADDRESS) + _, _, label, _ = DETRApp(net).predict(img, DEFAULT_WEIGHTS) + assert set(list(label.numpy())) == EXPECTED_OUTPUT + + +def test_cli_from_pretrained(): + args = get_model_cli_parser(DETRResNet50).parse_args([]) + assert model_from_cli_args(DETRResNet50, args) is not None + + +def test_trace(): + net = DETRResNet50.from_pretrained() + input_spec = net.get_input_spec() + trace = net.convert_to_torchscript(input_spec) + + img = load_image(IMAGE_ADDRESS) + _, _, label, _ = DETRApp(trace).predict(img, DEFAULT_WEIGHTS) + assert set(list(label.numpy())) == EXPECTED_OUTPUT + + +def test_demo(): + # Run demo and verify it does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/detr_resnet50_dc5/README.md b/qai_hub_models/models/detr_resnet50_dc5/README.md new file mode 100644 index 00000000..93283e40 --- /dev/null +++ b/qai_hub_models/models/detr_resnet50_dc5/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [DETR-ResNet50-DC5: Transformer based object detector with ResNet50 backbone (dilated C5 stage)](https://aihub.qualcomm.com/models/detr_resnet50_dc5) + +DETR is a machine learning model that can detect objects (trained on COCO dataset). + +This is based on the implementation of DETR-ResNet50-DC5 found +[here](https://github.com/facebookresearch/detr). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/detr_resnet50_dc5). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[detr_resnet50_dc5]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.detr_resnet50_dc5.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. 
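+
+The model can also be loaded and traced to TorchScript directly in Python. The
+sketch below is illustrative only and follows the pattern used in the bundled
+[test.py](test.py); the traced module can be passed to the packaged `App` in the
+same way as the eager model.
+
+```python
+from qai_hub_models.models.detr_resnet50_dc5.model import (
+    DEFAULT_WEIGHTS,
+    DETRResNet50DC5,
+)
+
+# Load pretrained weights and trace the model end-to-end.
+net = DETRResNet50DC5.from_pretrained(DEFAULT_WEIGHTS)
+traced = net.convert_to_torchscript()
+print(type(traced))
+```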
+ +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.detr_resnet50_dc5.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of DETR-ResNet50-DC5 can be found + [here](https://github.com/facebookresearch/detr/blob/main/LICENSE). + + +## References +* [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) +* [Source Model Implementation](https://github.com/facebookresearch/detr) diff --git a/qai_hub_models/models/detr_resnet50_dc5/__init__.py b/qai_hub_models/models/detr_resnet50_dc5/__init__.py new file mode 100644 index 00000000..bbae7846 --- /dev/null +++ b/qai_hub_models/models/detr_resnet50_dc5/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.detr.app import DETRApp as App # noqa: F401 +from qai_hub_models.models.detr_resnet50_dc5.model import ( # noqa: F401 + DETRResNet50DC5 as Model, +) + +from .model import MODEL_ID # noqa: F401 diff --git a/qai_hub_models/models/detr_resnet50_dc5/demo.py b/qai_hub_models/models/detr_resnet50_dc5/demo.py new file mode 100644 index 00000000..0eeaee66 --- /dev/null +++ b/qai_hub_models/models/detr_resnet50_dc5/demo.py @@ -0,0 +1,26 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.detr.demo import detr_demo +from qai_hub_models.models.detr_resnet50_dc5.model import ( + DEFAULT_WEIGHTS, + MODEL_ASSET_VERSION, + MODEL_ID, + DETRResNet50DC5, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "detr_demo_image.jpg" +) + + +# Run DETR app end-to-end on a sample image. +# The demo will display the predicted mask in a window. +def main(is_test: bool = False): + detr_demo(DETRResNet50DC5, DEFAULT_WEIGHTS, IMAGE_ADDRESS, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/detr_resnet50_dc5/export.py b/qai_hub_models/models/detr_resnet50_dc5/export.py new file mode 100644 index 00000000..d7ee3b0f --- /dev/null +++ b/qai_hub_models/models/detr_resnet50_dc5/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.detr_resnet50_dc5 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "detr_resnet50_dc5" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "detr_resnet50_dc5", + "DETR-ResNet50-DC5", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/detr_resnet50_dc5/info.yaml b/qai_hub_models/models/detr_resnet50_dc5/info.yaml new file mode 100644 index 00000000..4777b6ea --- /dev/null +++ b/qai_hub_models/models/detr_resnet50_dc5/info.yaml @@ -0,0 +1,36 @@ +name: DETR-ResNet50-DC5 +# id must match with the model dir name in qai_hub_models +id: detr_resnet50_dc5 +status: public +tags: [] +headline: Transformer based object detector with ResNet50 backbone (dilated C5 stage). +domain: Computer Vision +description: DETR is a machine learning model that can detect objects (trained on + COCO dataset). 
+use_case: Object Detection +research_paper: https://arxiv.org/abs/2005.12872 +research_paper_title: End-to-End Object Detection with Transformers +license: https://github.com/facebookresearch/detr/blob/main/LICENSE +source_repo: https://github.com/facebookresearch/detr +technical_details: + Model checkpoint: ResNet50-DC5 + Input resolution: 480x480 + Number of parameters: 42.2M + Model size: 159 MB +applicable_scenarios: + - Factory Automation + - Robotic Navigation + - Camera +related_models: + - detr_resnet50 + - detr_resnet101_dc5 + - detr_resnet101 +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: apache-2.0 +dataset: + - detection-datasets/coco diff --git a/qai_hub_models/models/detr_resnet50_dc5/model.py b/qai_hub_models/models/detr_resnet50_dc5/model.py new file mode 100644 index 00000000..5fa213f7 --- /dev/null +++ b/qai_hub_models/models/detr_resnet50_dc5/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from qai_hub_models.models._shared.detr.model import DETR + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "facebook/detr-resnet-50-dc5" +MODEL_ASSET_VERSION = 1 + + +class DETRResNet50DC5(DETR): + """Exportable DETR model, end-to-end.""" + + @classmethod + def from_pretrained(cls, ckpt_name: str = DEFAULT_WEIGHTS): + return DETR.from_pretrained(ckpt_name) diff --git a/qai_hub_models/models/detr_resnet50_dc5/perf.yaml b/qai_hub_models/models/detr_resnet50_dc5/perf.yaml new file mode 100644 index 00000000..4a6143b0 --- /dev/null +++ b/qai_hub_models/models/detr_resnet50_dc5/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: DETR-ResNet50-DC5 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 760148.0 + throughput: 1.3155332908854591 + estimated_peak_memory_range: + min: 251318272 + max: 254954864 + primary_compute_unit: CPU + precision: fp32 + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 890 + total_layers: 890 + job_id: j1pvlr7m5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:15:27.657498Z' diff --git a/qai_hub_models/models/detr_resnet50_dc5/requirements.txt b/qai_hub_models/models/detr_resnet50_dc5/requirements.txt new file mode 100644 index 00000000..3582ec2c --- /dev/null +++ 
b/qai_hub_models/models/detr_resnet50_dc5/requirements.txt @@ -0,0 +1,2 @@
+transformers==4.31.0
+timm==0.9.7
diff --git a/qai_hub_models/models/detr_resnet50_dc5/test.py b/qai_hub_models/models/detr_resnet50_dc5/test.py new file mode 100644 index 00000000..28533460 --- /dev/null +++ b/qai_hub_models/models/detr_resnet50_dc5/test.py @@ -0,0 +1,34 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.detr.app import DETRApp
+from qai_hub_models.models.detr_resnet50_dc5.demo import MODEL_ASSET_VERSION, MODEL_ID
+from qai_hub_models.models.detr_resnet50_dc5.demo import main as demo_main
+from qai_hub_models.models.detr_resnet50_dc5.model import (
+    DEFAULT_WEIGHTS,
+    DETRResNet50DC5,
+)
+from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image
+
+IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store(
+    MODEL_ID, MODEL_ASSET_VERSION, "detr_test_image.jpg"
+)
+
+
+def test_task():
+    net = DETRResNet50DC5.from_pretrained(DEFAULT_WEIGHTS)
+    img = load_image(IMAGE_ADDRESS)
+    _, _, label, _ = DETRApp(net).predict(img, DEFAULT_WEIGHTS)
+    assert set(list(label.numpy())) == {75, 63, 17}
+
+
+def test_trace():
+    net = DETRResNet50DC5.from_pretrained(DEFAULT_WEIGHTS).convert_to_torchscript()
+    img = load_image(IMAGE_ADDRESS)
+    _, _, label, _ = DETRApp(net).predict(img, DEFAULT_WEIGHTS)
+    assert set(list(label.numpy())) == {75, 63, 17}
+
+
+def test_demo():
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/efficientnet_b0/README.md b/qai_hub_models/models/efficientnet_b0/README.md new file mode 100644 index 00000000..855950e7 --- /dev/null +++ b/qai_hub_models/models/efficientnet_b0/README.md @@ -0,0 +1,50 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [EfficientNet-B0: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/efficientnet_b0)
+
+EfficientNetB0 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases.
+
+This is based on the implementation of EfficientNet-B0 found
+[here](https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/efficientnet_b0).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.efficientnet_b0.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment.
This can be run as follows: + +```bash +python -m qai_hub_models.models.efficientnet_b0.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of EfficientNet-B0 can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). + + +## References +* [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py) diff --git a/qai_hub_models/models/efficientnet_b0/__init__.py b/qai_hub_models/models/efficientnet_b0/__init__.py new file mode 100644 index 00000000..1099224a --- /dev/null +++ b/qai_hub_models/models/efficientnet_b0/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import EfficientNetB0 as Model # noqa: F401 diff --git a/qai_hub_models/models/efficientnet_b0/demo.py b/qai_hub_models/models/efficientnet_b0/demo.py new file mode 100644 index 00000000..40bd18fb --- /dev/null +++ b/qai_hub_models/models/efficientnet_b0/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.efficientnet_b0.model import EfficientNetB0 + + +def main(is_test: bool = False): + imagenet_demo(EfficientNetB0, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/efficientnet_b0/export.py b/qai_hub_models/models/efficientnet_b0/export.py new file mode 100644 index 00000000..2aaa4601 --- /dev/null +++ b/qai_hub_models/models/efficientnet_b0/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.efficientnet_b0 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "efficientnet_b0" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "efficientnet_b0", + "EfficientNet-B0", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/efficientnet_b0/info.yaml b/qai_hub_models/models/efficientnet_b0/info.yaml new file mode 100644 index 00000000..d66d06a5 --- /dev/null +++ b/qai_hub_models/models/efficientnet_b0/info.yaml @@ -0,0 +1,40 @@ +name: EfficientNet-B0 +# id must match with the model dir name in qai_hub_models +id: efficientnet_b0 +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: EfficientNetB0 is a machine learning model that can classify images from + the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. 
+use_case: Image Classification +tags: + - backbone +research_paper: https://arxiv.org/abs/1905.11946 +research_paper_title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural + Networks' +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 5.27M + Model size: 20.2 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/efficientnet_b0/model.py b/qai_hub_models/models/efficientnet_b0/model.py new file mode 100644 index 00000000..956dc29b --- /dev/null +++ b/qai_hub_models/models/efficientnet_b0/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class EfficientNetB0(ImagenetClassifier): + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ImagenetClassifier: + net = tv_models.efficientnet_b0(weights=weights) + return cls(net) diff --git a/qai_hub_models/models/efficientnet_b0/perf.yaml b/qai_hub_models/models/efficientnet_b0/perf.yaml new file mode 100644 index 00000000..ea6a7116 --- /dev/null +++ b/qai_hub_models/models/efficientnet_b0/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: EfficientNet-B0 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 2184.0 + throughput: 457.87545787545787 + estimated_peak_memory_range: + min: 12288 + max: 2340896 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 243 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 243 + job_id: j0pxl61jp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 2166.0 + throughput: 461.6805170821791 + estimated_peak_memory_range: + min: 12288 + max: 86865200 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 242 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 242 + job_id: jo5m06zyg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:27:58.826690Z' diff --git a/qai_hub_models/models/efficientnet_b0/test.py 
b/qai_hub_models/models/efficientnet_b0/test.py new file mode 100644 index 00000000..b7808e69 --- /dev/null +++ b/qai_hub_models/models/efficientnet_b0/test.py @@ -0,0 +1,23 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.imagenet_classifier.test_utils import (
+    run_imagenet_classifier_test,
+    run_imagenet_classifier_trace_test,
+)
+from qai_hub_models.models.efficientnet_b0.demo import main as demo_main
+from qai_hub_models.models.efficientnet_b0.model import MODEL_ID, EfficientNetB0
+
+
+def test_task():
+    run_imagenet_classifier_test(EfficientNetB0.from_pretrained(), MODEL_ID)
+
+
+def test_trace():
+    run_imagenet_classifier_trace_test(EfficientNetB0.from_pretrained())
+
+
+def test_demo():
+    # Verify demo does not crash
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/esrgan/README.md b/qai_hub_models/models/esrgan/README.md new file mode 100644 index 00000000..99d8588d --- /dev/null +++ b/qai_hub_models/models/esrgan/README.md @@ -0,0 +1,50 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [ESRGAN: Upscale images and remove image noise](https://aihub.qualcomm.com/models/esrgan)
+
+ESRGAN is a machine learning model that upscales an image with minimal loss in quality.
+
+This is based on the implementation of ESRGAN found
+[here](https://github.com/xinntao/ESRGAN/). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/esrgan).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.esrgan.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.esrgan.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Deployment instructions for Qualcomm® AI Hub.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of ESRGAN can be found
+  [here](https://github.com/xinntao/ESRGAN/blob/master/LICENSE).
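+In addition to the CLI demo above, the model can be used directly from Python.
+The following is a minimal illustrative sketch (not part of the upstream README;
+the input path is a placeholder, and the imports mirror those used by this
+model's demo and tests):
+
+```python
+from qai_hub_models.models._shared.super_resolution.app import SuperResolutionApp
+from qai_hub_models.models.esrgan.model import ESRGAN
+from qai_hub_models.utils.asset_loaders import load_image
+
+# Wrap the pretrained ESRGAN weights in the pre/post-processing app.
+app = SuperResolutionApp(ESRGAN.from_pretrained())
+
+# Upscale a local image by the model's 4x scaling factor
+# ("my_photo.jpg" is a placeholder path).
+upscaled_image = app.upscale_image(load_image("my_photo.jpg"))[0]
+```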
+
+
+## References
+* [ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks](https://arxiv.org/abs/1809.00219)
+* [Source Model Implementation](https://github.com/xinntao/ESRGAN/)
diff --git a/qai_hub_models/models/esrgan/__init__.py b/qai_hub_models/models/esrgan/__init__.py new file mode 100644 index 00000000..3d7845a8 --- /dev/null +++ b/qai_hub_models/models/esrgan/__init__.py @@ -0,0 +1,10 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.super_resolution.app import (  # noqa: F401
+    SuperResolutionApp as App,
+)
+
+from .model import ESRGAN as Model  # noqa: F401
+from .model import MODEL_ID  # noqa: F401
diff --git a/qai_hub_models/models/esrgan/demo.py b/qai_hub_models/models/esrgan/demo.py new file mode 100644 index 00000000..3a15c695 --- /dev/null +++ b/qai_hub_models/models/esrgan/demo.py @@ -0,0 +1,25 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.super_resolution.demo import super_resolution_demo
+from qai_hub_models.models.esrgan.model import ESRGAN, MODEL_ASSET_VERSION, MODEL_ID
+from qai_hub_models.utils.asset_loaders import CachedWebModelAsset
+
+IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store(
+    MODEL_ID, MODEL_ASSET_VERSION, "esrgan_demo.jpg"
+)
+
+
+# Run ESRGAN end-to-end on a sample image.
+# The demo will display an image upscaled with minimal loss in quality.
+def main(is_test: bool = False):
+    super_resolution_demo(
+        model_cls=ESRGAN,
+        default_image=IMAGE_ADDRESS,
+        is_test=is_test,
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/qai_hub_models/models/esrgan/export.py b/qai_hub_models/models/esrgan/export.py new file mode 100644 index 00000000..ff2a383d --- /dev/null +++ b/qai_hub_models/models/esrgan/export.py @@ -0,0 +1,194 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY.
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.esrgan import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "esrgan" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "esrgan", + "ESRGAN", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/esrgan/info.yaml b/qai_hub_models/models/esrgan/info.yaml new file mode 100644 index 00000000..f0a23cd5 --- /dev/null +++ b/qai_hub_models/models/esrgan/info.yaml @@ -0,0 +1,33 @@ +name: ESRGAN +# id must match with the model dir name in qai_hub_models +id: esrgan +status: public +headline: Upscale images and remove image noise. +domain: Computer Vision +description: ESRGAN is a machine learning model that upscales an image with minimal + loss in quality. 
+use_case: Super Resolution +tags: [] +research_paper: https://arxiv.org/abs/1809.00219 +research_paper_title: 'ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks' +license: https://github.com/xinntao/ESRGAN/blob/master/LICENSE +source_repo: https://github.com/xinntao/ESRGAN/ +technical_details: + Model checkpoint: ESRGAN_x4 + Input resolution: 128x128 + Number of parameters: 16.7M + Model size: 64.0 MB +applicable_scenarios: + - Virtual Real Estate Tours + - Gaming + - ARVR +form_factors: + - Phone + - Tablet +related_models: + - real_esrgan_general_x4v3 + - real_esrgan_x4plus +has_static_banner: yes +has_animated_banner: yes +license_type: apache-2.0 +dataset: [] diff --git a/qai_hub_models/models/esrgan/model.py b/qai_hub_models/models/esrgan/model.py new file mode 100644 index 00000000..41b57acc --- /dev/null +++ b/qai_hub_models/models/esrgan/model.py @@ -0,0 +1,99 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator +from qai_hub_models.evaluators.superres_evaluator import SuperResolutionOutputEvaluator +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, SourceAsRoot +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +ESRGAN_SOURCE_REPOSITORY = "https://github.com/xinntao/ESRGAN" +ESRGAN_SOURCE_REPO_COMMIT = "73e9b634cf987f5996ac2dd33f4050922398a921" +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 2 +DEFAULT_WEIGHTS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "RRDB_ESRGAN_x4.pth" +) +SCALING_FACTOR = 4 + + +class ESRGAN(BaseModel): + """Exportable ESRGAN super resolution applications, end-to-end.""" + + def __init__( + self, + esrgan_model: torch.nn.Module, + ) -> None: + super().__init__() + self.model = esrgan_model + + @classmethod + def from_pretrained(cls, weights_path: str | None = None) -> ESRGAN: + """Load ESRGAN from a weightfile created by the source ESRGAN repository.""" + + # Load PyTorch model from disk + esrgan_model = _load_esrgan_source_model_from_weights(weights_path) + + return cls(esrgan_model) + + def get_evaluator(self) -> BaseEvaluator: + return SuperResolutionOutputEvaluator() + + def forward(self, image: torch.Tensor) -> torch.Tensor: + """ + Run ESRGAN on `image`, and produce an upscaled image + + Parameters: + image: Pixel values pre-processed for encoder consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + image: Pixel values + Range: float[0, 1] + 3-channel Color Space: RGB + """ + return self.model(image) + + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 128, + width: int = 128, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. + return {"image": ((batch_size, num_channels, height, width), "float32")} + + +def _load_esrgan_source_model_from_weights( + weights_path: str | None = None, +) -> torch.nn.Module: + # Load ESRGAN model from the source repository using the given weights. 
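+    # Note: SourceAsRoot is presumably what fetches the pinned ESRGAN commit above
+    # and makes that repository importable for the duration of this block, which is
+    # what lets the `RRDBNet_arch` import below resolve.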
+ with SourceAsRoot( + ESRGAN_SOURCE_REPOSITORY, + ESRGAN_SOURCE_REPO_COMMIT, + MODEL_ID, + MODEL_ASSET_VERSION, + ): + # download the weights file + if not weights_path: + weights_path = DEFAULT_WEIGHTS.fetch() + print(f"Weights file downloaded as {weights_path}") + + # necessary import. `esrgan.RRDBNet_arch` comes from the esrgan repo. + import RRDBNet_arch as arch + + esrgan_model = arch.RRDBNet(3, 3, 64, 23, gc=32) + esrgan_model.load_state_dict( + torch.load(weights_path, map_location=torch.device("cpu")), strict=True + ) + return esrgan_model diff --git a/qai_hub_models/models/esrgan/perf.yaml b/qai_hub_models/models/esrgan/perf.yaml new file mode 100644 index 00000000..50e0e2bd --- /dev/null +++ b/qai_hub_models/models/esrgan/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: ESRGAN + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 76337.0 + throughput: 13.099807432830737 + estimated_peak_memory_range: + min: 3301376 + max: 6221192 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 1024 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 1024 + job_id: jnp1nw7kg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 66070.0 + throughput: 15.135462388375965 + estimated_peak_memory_range: + min: 102400 + max: 101973424 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 1027 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 1027 + job_id: jvgddq8kg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:29:43.305116Z' diff --git a/qai_hub_models/models/esrgan/test.py b/qai_hub_models/models/esrgan/test.py new file mode 100644 index 00000000..25a9e20a --- /dev/null +++ b/qai_hub_models/models/esrgan/test.py @@ -0,0 +1,49 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+import numpy as np
+
+from qai_hub_models.models._shared.super_resolution.app import SuperResolutionApp
+from qai_hub_models.models.esrgan.demo import IMAGE_ADDRESS
+from qai_hub_models.models.esrgan.demo import main as demo_main
+from qai_hub_models.models.esrgan.model import ESRGAN, MODEL_ASSET_VERSION, MODEL_ID
+from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image
+from qai_hub_models.utils.testing import skip_clone_repo_check
+
+OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store(
+    MODEL_ID, MODEL_ASSET_VERSION, "esrgan_demo_output.png"
+)
+
+
+@skip_clone_repo_check
+def test_task():
+    image = load_image(IMAGE_ADDRESS)
+    output_image = load_image(OUTPUT_IMAGE_ADDRESS)
+    app = SuperResolutionApp(ESRGAN.from_pretrained())
+    app_output_image = app.upscale_image(image)[0]
+    np.testing.assert_allclose(
+        np.asarray(app_output_image, dtype=np.float32) / 255,
+        np.asarray(output_image, dtype=np.float32) / 255,
+        rtol=0.02,
+        atol=0.2,
+    )
+
+
+@skip_clone_repo_check
+def test_trace():
+    image = load_image(IMAGE_ADDRESS)
+    output_image = load_image(OUTPUT_IMAGE_ADDRESS)
+    app = SuperResolutionApp(ESRGAN.from_pretrained().convert_to_torchscript())
+    app_output_image = app.upscale_image(image)[0]
+    np.testing.assert_allclose(
+        np.asarray(app_output_image, dtype=np.float32) / 255,
+        np.asarray(output_image, dtype=np.float32) / 255,
+        rtol=0.02,
+        atol=0.2,
+    )
+
+
+@skip_clone_repo_check
+def test_demo():
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/facebook_denoiser/README.md b/qai_hub_models/models/facebook_denoiser/README.md new file mode 100644 index 00000000..b92ce796 --- /dev/null +++ b/qai_hub_models/models/facebook_denoiser/README.md @@ -0,0 +1,55 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [Facebook-Denoiser: Real-time speech denoising optimized for mobile and edge](https://aihub.qualcomm.com/models/facebook_denoiser)
+
+Facebook Denoiser is a machine learning model that can denoise & isolate voices in sound clips.
+
+This is based on the implementation of Facebook-Denoiser found
+[here](https://github.com/facebookresearch/denoiser). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/facebook_denoiser).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+Install the package via pip:
+```bash
+pip install "qai_hub_models[facebook_denoiser]"
+```
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.facebook_denoiser.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.facebook_denoiser.export
+```
+Additional options are documented with the `--help` option.
Note that the above
+script requires access to Deployment instructions for Qualcomm® AI Hub.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of Facebook-Denoiser can be found
+  [here](https://github.com/facebookresearch/denoiser/blob/main/LICENSE).
+
+
+## References
+* [Real Time Speech Enhancement in the Waveform Domain](https://arxiv.org/abs/2006.12847)
+* [Source Model Implementation](https://github.com/facebookresearch/denoiser)
diff --git a/qai_hub_models/models/facebook_denoiser/__init__.py b/qai_hub_models/models/facebook_denoiser/__init__.py new file mode 100644 index 00000000..c89f8ec4 --- /dev/null +++ b/qai_hub_models/models/facebook_denoiser/__init__.py @@ -0,0 +1,7 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from .app import FacebookDenoiserApp as App  # noqa: F401
+from .model import MODEL_ID  # noqa: F401
+from .model import FacebookDenoiser as Model  # noqa: F401
diff --git a/qai_hub_models/models/facebook_denoiser/app.py b/qai_hub_models/models/facebook_denoiser/app.py new file mode 100644 index 00000000..5af5473b --- /dev/null +++ b/qai_hub_models/models/facebook_denoiser/app.py @@ -0,0 +1,98 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from __future__ import annotations
+
+import os
+from pathlib import Path
+from typing import Callable, List, Sequence
+
+import numpy as np
+import torch
+import torchaudio
+
+from qai_hub_models.models.facebook_denoiser.model import SAMPLE_RATE
+
+
+class FacebookDenoiserApp:
+    """
+    This class consists of light-weight "app code" that is required to perform end to end inference with Facebook Denoiser.
+
+    For a given audio input, the app will:
+        * load the audio from the source wav file
+        * call the denoiser
+        * save the denoised audio back to a different wav file
+    """
+
+    def __init__(
+        self,
+        denoiser: Callable[[torch.Tensor], torch.Tensor],
+        sample_rate: int = SAMPLE_RATE,
+    ):
+        self.denoiser = denoiser
+        self.sample_rate = sample_rate
+
+    def predict(self, *args, **kwargs):
+        """See FacebookDenoiserApp::denoise_audio for interface documentation."""
+        return self.denoise_audio(*args, **kwargs)
+
+    def denoise_audio(
+        self,
+        input_audio: Sequence[Path | str | torch.Tensor | np.ndarray],
+        out_dir: Path | str | None = None,
+    ) -> List[Path | torch.Tensor]:
+        """
+        Denoise and isolate the speech in the provided audio clip(s).
+
+        Parameters:
+            input_audio: List[Path | str | torch.Tensor | np.ndarray]
+                A list of paths (to .wav files), or loaded audio in torch Tensor / numpy format.
+                Tensors must be shape [2, sample_rate * length of recording in seconds].
+                All audio must have the same sample rate the model was trained on.
+
+            out_dir: Path | str | None
+                If:
+                    * this is set to a folder, AND
+                    * all of input_audio are file paths
+                Then a list of saved .wav file paths will be returned.
+
+                Otherwise, the method will return a list of predicted WAV audio tensors.
+
+        Returns:
+            Predicted audio. See the `out_dir` parameter above for the type of the return value.
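+
+            Example (an illustrative sketch; the input file and output directory
+            below are hypothetical):
+                app = FacebookDenoiserApp(FacebookDenoiser.from_pretrained())
+                enhanced_paths = app.denoise_audio(["noisy_clip.wav"], out_dir="denoised")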
+ """ + with torch.no_grad(): + all_inputs_are_paths = True + + noisy_audios = [] + for audio in input_audio: + if isinstance(audio, str) or isinstance(audio, Path): + audio, sample_rate = torchaudio.load(audio) + assert sample_rate == self.sample_rate + else: + all_inputs_are_paths = False + if isinstance(audio, np.ndarray): + audio = torch.from_numpy(audio) + noisy_audios.append(audio) + + estimates = [] + for noisy in noisy_audios: + out = self.denoiser(noisy) + out = out / max(out.abs().max().item(), 1) # Normalize + if all_inputs_are_paths and out_dir: + # We don't run files in batches, take the first batch output + out = out[:, 0] + estimates.append(out) + + if out_dir and all_inputs_are_paths: + output_files = [] + for path, estimate in zip(input_audio, estimates): + filename = os.path.join( + out_dir, os.path.basename(path).rsplit(".", 1)[0] + ) + filename = Path(f"{filename}_enhanced.wav") + torchaudio.save(filename, estimate, self.sample_rate) + output_files.append(filename) + return output_files + return estimates diff --git a/qai_hub_models/models/facebook_denoiser/demo.py b/qai_hub_models/models/facebook_denoiser/demo.py new file mode 100644 index 00000000..6d6a86fe --- /dev/null +++ b/qai_hub_models/models/facebook_denoiser/demo.py @@ -0,0 +1,69 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import os +import tempfile +from typing import List + +from qai_hub_models.models.facebook_denoiser.app import FacebookDenoiserApp +from qai_hub_models.models.facebook_denoiser.model import ( + ASSET_VERSION, + MODEL_ID, + SAMPLE_RATE, + FacebookDenoiser, +) +from qai_hub_models.utils.args import get_model_cli_parser, model_from_cli_args +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_path + +EXAMPLE_RECORDING = CachedWebModelAsset.from_asset_store( + MODEL_ID, ASSET_VERSION, "icsi_meeting_recording.wav" +) + + +def main(is_test: bool = False): + """ + Run facebook denoiser on a sample audio (`.wav`) file. 
+ """ + parser = get_model_cli_parser(FacebookDenoiser) + parser.add_argument( + "--audio", + nargs="+", + default=[EXAMPLE_RECORDING], + help="WAV file paths or URLs", + ) + parser.add_argument( + "--sample-rate", + type=int, + default=SAMPLE_RATE, + help="Audio sample rate the model was trained on", + ) + parser.add_argument( + "--output-dir", + type=str, + default=os.getcwd(), + help="output directory (where output WAV should be written)", + ) + args = parser.parse_args([] if is_test else None) + + # Load Model + source_model = model_from_cli_args(FacebookDenoiser, args) + app = FacebookDenoiserApp(source_model, args.sample_rate) + + # Download data + audio: List[str] = args.audio + with tempfile.TemporaryDirectory() as tmpdir: + for idx, file in enumerate(audio): + audio[idx] = load_path(file, tmpdir) + + # Dump output from app + output = app.denoise_audio(audio, args.output_dir) + + if not is_test: + print("Wrote files:") + for path in output: + print(str(path)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/facebook_denoiser/export.py b/qai_hub_models/models/facebook_denoiser/export.py new file mode 100644 index 00000000..c66f846d --- /dev/null +++ b/qai_hub_models/models/facebook_denoiser/export.py @@ -0,0 +1,181 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.facebook_denoiser import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. 
+ skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "facebook_denoiser" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "facebook_denoiser", + "Facebook-Denoiser", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=sample_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/facebook_denoiser/info.yaml b/qai_hub_models/models/facebook_denoiser/info.yaml new file mode 100644 index 00000000..0b4e8453 --- /dev/null +++ b/qai_hub_models/models/facebook_denoiser/info.yaml @@ -0,0 +1,30 @@ +name: Facebook-Denoiser +# id must match with the model dir name in qai_hub_models +id: facebook_denoiser +status: public +headline: Real-time speech denoising optimized for mobile and edge. +domain: Audio +description: Facebook Denoiser is a machine learning model that can denoise & isolate + voices in sound clips. +use_case: Audio Enhancement +tags: [] +research_paper: https://arxiv.org/abs/2006.12847 +research_paper_title: Real Time Speech Enhancement in the Waveform Domain +license: https://github.com/facebookresearch/denoiser/blob/main/LICENSE +source_repo: https://github.com/facebookresearch/denoiser +technical_details: + Input resolution: 1x1x917 + Number of parameters: 18.9M + Model size: 72.0 MB +applicable_scenarios: + - Gaming + - Voice Calling +form_factors: + - Phone + - Tablet + - IoT +related_models: [] +has_static_banner: yes +has_animated_banner: yes +license_type: cc-by-nc-4.0 +dataset: [] diff --git a/qai_hub_models/models/facebook_denoiser/model.py b/qai_hub_models/models/facebook_denoiser/model.py new file mode 100644 index 00000000..72a23b59 --- /dev/null +++ b/qai_hub_models/models/facebook_denoiser/model.py @@ -0,0 +1,60 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch +from denoiser import pretrained +from denoiser.pretrained import DNS_48_URL + +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +SAMPLE_RATE = 16000 +HIDDEN_LAYER_COUNT = 48 +DEFAULT_SEQUENCE_LENGTH = 917 +MODEL_ID = "facebook_denoiser" +ASSET_VERSION = 1 + + +class FacebookDenoiser(BaseModel): + def __init__(self, net: torch.nn.Module): + """ + Basic initializer which takes in a pretrained Facebook DNS network. + """ + super().__init__() + self.net = net + + def forward(self, audio: torch.Tensor) -> torch.Tensor: + """ + Predict denoised audio from noisy input audio. + + Parameters: + audio: A [NUM_SOUND_CHANNELS, BATCH, SEQ_LEN] or [NUM_SOUND_CHANNELS, SEQ_LEN] audio snippet. + SEQ_LEN == AUDIO_SAMPLE_RATE * AUDIO_LENGTH_IN_SECONDS + + Returns: + audio: A [NUM_SOUND_CHANNELS, BATCH, SEQ_LEN] denoised audio snippet. 
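+
+        For example, at the default 16 kHz sample rate a one second clip has
+        SEQ_LEN == 16000; the default input-spec sequence length used for tracing
+        in this repository is 917 samples.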
+ """ + return self.net(audio) + + def get_input_spec( + self, + batch_size: int = 1, + sequence_length: int = DEFAULT_SEQUENCE_LENGTH, + ) -> InputSpec: + """ + Returns the input specification (name -> (shape, type). This can be + used to submit profiling job on Qualcomm AI Hub. + """ + return {"audio": ((batch_size, 1, sequence_length), "float32")} + + @classmethod + def from_pretrained( + cls, state_dict_url: str = DNS_48_URL, hidden_layer_count=HIDDEN_LAYER_COUNT + ) -> FacebookDenoiser: + net = pretrained._demucs( + state_dict_url is not None, state_dict_url, hidden=hidden_layer_count + ) + return cls(net) diff --git a/qai_hub_models/models/facebook_denoiser/perf.yaml b/qai_hub_models/models/facebook_denoiser/perf.yaml new file mode 100644 index 00000000..4f7e4ae7 --- /dev/null +++ b/qai_hub_models/models/facebook_denoiser/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: Facebook-Denoiser + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 6985.0 + throughput: 143.16392269148176 + estimated_peak_memory_range: + min: 28246016 + max: 51679504 + primary_compute_unit: CPU + precision: fp32 + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 209 + total_layers: 209 + job_id: jn5qlrw7p + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:10:37.856306Z' diff --git a/qai_hub_models/models/facebook_denoiser/requirements.txt b/qai_hub_models/models/facebook_denoiser/requirements.txt new file mode 100644 index 00000000..0307e34c --- /dev/null +++ b/qai_hub_models/models/facebook_denoiser/requirements.txt @@ -0,0 +1,3 @@ +denoiser +torchaudio +PySoundFile; sys_platform == 'win32' diff --git a/qai_hub_models/models/facebook_denoiser/test.py b/qai_hub_models/models/facebook_denoiser/test.py new file mode 100644 index 00000000..56a46b6c --- /dev/null +++ b/qai_hub_models/models/facebook_denoiser/test.py @@ -0,0 +1,65 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
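The facebook_denoiser package above (model wrapper, perf data, and requirements) is exercised end to end by the test file that follows. For orientation, here is a minimal sketch of the same flow in isolation; it assumes the optional `denoiser` and `torchaudio` requirements listed above are installed, plus a working audio backend (ffmpeg on Linux or PySoundFile on Windows), and mirrors the calls made in the test:

```python
# Sketch of the end-to-end denoising flow the facebook_denoiser test that
# follows exercises; EXAMPLE_RECORDING is the cached sample clip used by the demo.
from qai_hub_models.models.facebook_denoiser.app import FacebookDenoiserApp
from qai_hub_models.models.facebook_denoiser.demo import EXAMPLE_RECORDING
from qai_hub_models.models.facebook_denoiser.model import FacebookDenoiser

app = FacebookDenoiserApp(FacebookDenoiser.from_pretrained())
# predict() takes a list of audio files; the indexing mirrors the test code.
enhanced = app.predict([EXAMPLE_RECORDING.fetch()])[0][:, 0]
print(enhanced.shape)  # denoised 16 kHz waveform for the sample recording
```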
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import pytest +import torch +import torchaudio + +from qai_hub_models.models.facebook_denoiser.app import FacebookDenoiserApp +from qai_hub_models.models.facebook_denoiser.demo import EXAMPLE_RECORDING +from qai_hub_models.models.facebook_denoiser.demo import main as demo_main +from qai_hub_models.models.facebook_denoiser.model import ( + ASSET_VERSION, + MODEL_ID, + SAMPLE_RATE, + FacebookDenoiser, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +ENHANCED_EXAMPLE_RECORDING = CachedWebModelAsset.from_asset_store( + MODEL_ID, ASSET_VERSION, "icsi_meeting_recording_enhanced.wav" +) + + +def _handle_runtime_error(e: RuntimeError): + if "Couldn't find appropriate backend to handle uri" not in str(e): + raise e + print( + "You're missing either FFMPEG on Linux (apt-get install ffmpeg) or PySoundFile on Windows (pip install PySoundFile)" + ) + + +def test_task(): + app = FacebookDenoiserApp(FacebookDenoiser.from_pretrained()) + try: + out = app.predict([EXAMPLE_RECORDING.fetch()])[0][:, 0] + except RuntimeError as e: + _handle_runtime_error(e) + return + expected, _ = torchaudio.load(ENHANCED_EXAMPLE_RECORDING.fetch()) + torch.testing.assert_allclose(out, expected) + + +@pytest.mark.skip(reason="Fails with a mysterious error in DefaultCPUAllocator.") +def test_trace(): + try: + input_data, sample_rate = torchaudio.load(EXAMPLE_RECORDING.fetch()) + assert sample_rate == SAMPLE_RATE + batch_size, sequence_length = input_data.shape + input_data = input_data.unsqueeze(1) + + model = FacebookDenoiser.from_pretrained() + input_spec = model.get_input_spec(sequence_length, batch_size) + app = FacebookDenoiserApp(model.convert_to_torchscript(input_spec)) + out = app.predict([input_data])[0][:, 0] + except RuntimeError as e: + _handle_runtime_error(e) + return + + expected, _ = torchaudio.load(ENHANCED_EXAMPLE_RECORDING.fetch()) + torch.testing.assert_allclose(out, expected) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/fastsam_s/README.md b/qai_hub_models/models/fastsam_s/README.md new file mode 100644 index 00000000..ef56d369 --- /dev/null +++ b/qai_hub_models/models/fastsam_s/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [FastSam-S: Generate high quality segmentation mask on device](https://aihub.qualcomm.com/models/fastsam_s) + +The Fast Segment Anything Model (FastSAM) is a novel, real-time CNN-based solution for the Segment Anything task. This task is designed to segment any object within an image based on various possible user interaction prompts. The model performs competitively despite significantly reduced computation, making it a practical choice for a variety of vision tasks. + +This is based on the implementation of FastSam-S found +[here](https://github.com/CASIA-IVA-Lab/FastSAM). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/fastsam_s). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. 
+ + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[fastsam_s]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.fastsam_s.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.fastsam_s.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of FastSam-S can be found + [here](https://github.com/CASIA-IVA-Lab/FastSAM/blob/main/LICENSE). + + +## References +* [Fast Segment Anything](https://arxiv.org/abs/2306.12156) +* [Source Model Implementation](https://github.com/CASIA-IVA-Lab/FastSAM) diff --git a/qai_hub_models/models/fastsam_s/__init__.py b/qai_hub_models/models/fastsam_s/__init__.py new file mode 100644 index 00000000..0c0f3489 --- /dev/null +++ b/qai_hub_models/models/fastsam_s/__init__.py @@ -0,0 +1,8 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.fastsam.app import FastSAMApp as App # noqa: F401 + +from .model import MODEL_ID # noqa: F401 +from .model import FastSAM_S as Model # noqa: F401 diff --git a/qai_hub_models/models/fastsam_s/demo.py b/qai_hub_models/models/fastsam_s/demo.py new file mode 100644 index 00000000..d5396330 --- /dev/null +++ b/qai_hub_models/models/fastsam_s/demo.py @@ -0,0 +1,23 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.fastsam.demo import fastsam_demo +from qai_hub_models.models.fastsam_s.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + FastSAM_S, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +INPUT_IMAGE = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "image_640.jpg" +) + + +def main(is_test: bool = False): + fastsam_demo(FastSAM_S, image_path=INPUT_IMAGE, is_test=is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/fastsam_s/export.py b/qai_hub_models/models/fastsam_s/export.py new file mode 100644 index 00000000..4aa0ff25 --- /dev/null +++ b/qai_hub_models/models/fastsam_s/export.py @@ -0,0 +1,199 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
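The auto-generated export script that follows has the same shape as the facebook_denoiser one above: trace the PyTorch model, compile it for the target runtime, then optionally profile, run on-device inference, download the asset, and summarize. For anyone who prefers to drive it from Python rather than the CLI, a hedged sketch is below; it assumes Qualcomm® AI Hub access is configured (without it, `export_model` falls back to `export_without_hub_access` and returns a list of messages instead of job handles):

```python
# Illustrative only: calls the auto-generated export entry point directly.
# Keyword names come from the export_model signature shown in this diff.
from qai_hub_models.models.fastsam_s.export import export_model

compile_job, profile_job, inference_job = export_model(
    device="Samsung Galaxy S23",   # default hosted device
    skip_inferencing=True,         # compile + profile only
    output_dir="build/fastsam_s",
    dst_runtime="TFLITE",
)
print(compile_job.get_target_model())  # compiled .tflite asset
# inference_job is None here because on-device inference was skipped.
```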
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.fastsam_s import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "fastsam_s" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "fastsam_s", + "FastSam-S", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace( + model, make_torch_inputs(input_spec), check_trace=False + ) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_1,output_2,output_3,output_5", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_1,output_2,output_3,output_5", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/fastsam_s/info.yaml b/qai_hub_models/models/fastsam_s/info.yaml new file mode 100644 index 00000000..f2a59ae2 --- /dev/null +++ b/qai_hub_models/models/fastsam_s/info.yaml @@ -0,0 +1,36 @@ +name: FastSam-S +# id must match with the model dir name in qai_hub_models +id: fastsam_s +status: public +headline: Generate high quality segmentation mask on device. +domain: Computer Vision +description: The Fast Segment Anything Model (FastSAM) is a novel, real-time CNN-based + solution for the Segment Anything task. 
This task is designed to segment any object + within an image based on various possible user interaction prompts. The model performs + competitively despite significantly reduced computation, making it a practical choice + for a variety of vision tasks. +use_case: Semantic Segmentation +tags: [] +research_paper: https://arxiv.org/abs/2306.12156 +research_paper_title: Fast Segment Anything +license: https://github.com/CASIA-IVA-Lab/FastSAM/blob/main/LICENSE +source_repo: https://github.com/CASIA-IVA-Lab/FastSAM +technical_details: + Model checkpoint: fastsam-s.pt + Inference latency: RealTime + Input resolution: 640x640 + Number of parameters: 11.8M + Model size: 45.1 MB +applicable_scenarios: + - Camera + - Photo Editing +related_models: + - sam + - fastsam_x +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: yes +license_type: agpl-3.0 +dataset: [] diff --git a/qai_hub_models/models/fastsam_s/model.py b/qai_hub_models/models/fastsam_s/model.py new file mode 100644 index 00000000..5f091b50 --- /dev/null +++ b/qai_hub_models/models/fastsam_s/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from qai_hub_models.models._shared.fastsam.model import Fast_SAM + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "FastSAM-s.pt" +MODEL_ASSET_VERSION = 1 + + +class FastSAM_S(Fast_SAM): + """Exportable FastSAM model, end-to-end.""" + + @classmethod + def from_pretrained(cls, ckpt_name: str = DEFAULT_WEIGHTS): + return Fast_SAM.from_pretrained.__func__(FastSAM_S, ckpt_name) diff --git a/qai_hub_models/models/fastsam_s/perf.yaml b/qai_hub_models/models/fastsam_s/perf.yaml new file mode 100644 index 00000000..ca26141a --- /dev/null +++ b/qai_hub_models/models/fastsam_s/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: FastSam-S + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 13071.0 + throughput: 76.50524060898171 + estimated_peak_memory_range: + min: 7827456 + max: 10814968 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 288 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 288 + job_id: jn5qlr97p + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:18:24.085348Z' diff --git a/qai_hub_models/models/fastsam_s/requirements.txt 
b/qai_hub_models/models/fastsam_s/requirements.txt new file mode 100644 index 00000000..8d55bfa4 --- /dev/null +++ b/qai_hub_models/models/fastsam_s/requirements.txt @@ -0,0 +1,2 @@ +ultralytics==8.0.193 +torchvision diff --git a/qai_hub_models/models/fastsam_s/test.py b/qai_hub_models/models/fastsam_s/test.py new file mode 100644 index 00000000..d1465d4e --- /dev/null +++ b/qai_hub_models/models/fastsam_s/test.py @@ -0,0 +1,34 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np +from PIL import Image +from ultralytics.models.fastsam import FastSAM, FastSAMPrompt + +from qai_hub_models.models._shared.fastsam.app import FastSAMApp +from qai_hub_models.models.fastsam_s.demo import INPUT_IMAGE +from qai_hub_models.models.fastsam_s.demo import main as demo_main +from qai_hub_models.models.fastsam_s.model import DEFAULT_WEIGHTS, FastSAM_S +from qai_hub_models.utils.image_processing import preprocess_PIL_image + + +def test_task(): + image_path = INPUT_IMAGE.fetch() + image = Image.open(image_path) + image = preprocess_PIL_image(image) + app = FastSAMApp(FastSAM_S.from_pretrained()) + result, _ = app.segment_image(str(image_path)) + + model = FastSAM(DEFAULT_WEIGHTS) + everything_results = model( + image_path, device="cpu", retina_masks=True, imgsz=640, conf=0.4, iou=0.9 + ) + prompt = FastSAMPrompt(image_path, everything_results, device="cpu") + predictions = prompt.everything_prompt() + + assert np.allclose(result[0].masks.data, predictions[0].masks.data) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/fastsam_x/README.md b/qai_hub_models/models/fastsam_x/README.md new file mode 100644 index 00000000..7b42289b --- /dev/null +++ b/qai_hub_models/models/fastsam_x/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [FastSam-X: Generate high quality segmentation mask on device](https://aihub.qualcomm.com/models/fastsam_x) + +The Fast Segment Anything Model (FastSAM) is a novel, real-time CNN-based solution for the Segment Anything task. This task is designed to segment any object within an image based on various possible user interaction prompts. The model performs competitively despite significantly reduced computation, making it a practical choice for a variety of vision tasks. + +This is based on the implementation of FastSam-X found +[here](https://github.com/CASIA-IVA-Lab/FastSAM). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/fastsam_x). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[fastsam_x]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.fastsam_x.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. 
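As with fastsam_s, the FastSam-X demo wraps pre/post processing in `FastSAMApp`. A minimal sketch of that flow from Python, assuming the `qai_hub_models[fastsam_x]` extra is installed; it mirrors the demo and test code in this diff:

```python
# Sketch of the FastSAM demo flow described above; INPUT_IMAGE is the cached
# 640x640 sample image fetched by the demo on first use.
from qai_hub_models.models.fastsam_x import App, Model
from qai_hub_models.models.fastsam_x.demo import INPUT_IMAGE

image_path = INPUT_IMAGE.fetch()
app = App(Model.from_pretrained())          # FastSAMApp wrapping FastSAM_X
results, _ = app.segment_image(str(image_path))
print(results[0].masks.data.shape)          # predicted segmentation masks
```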
+ +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.fastsam_x.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of FastSam-X can be found + [here](https://github.com/CASIA-IVA-Lab/FastSAM/blob/main/LICENSE). + + +## References +* [Fast Segment Anything](https://arxiv.org/abs/2306.12156) +* [Source Model Implementation](https://github.com/CASIA-IVA-Lab/FastSAM) diff --git a/qai_hub_models/models/fastsam_x/__init__.py b/qai_hub_models/models/fastsam_x/__init__.py new file mode 100644 index 00000000..aaa46d23 --- /dev/null +++ b/qai_hub_models/models/fastsam_x/__init__.py @@ -0,0 +1,8 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.fastsam.app import FastSAMApp as App # noqa: F401 + +from .model import MODEL_ID # noqa: F401 +from .model import FastSAM_X as Model # noqa: F401 diff --git a/qai_hub_models/models/fastsam_x/demo.py b/qai_hub_models/models/fastsam_x/demo.py new file mode 100644 index 00000000..0bce5d27 --- /dev/null +++ b/qai_hub_models/models/fastsam_x/demo.py @@ -0,0 +1,23 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.fastsam.demo import fastsam_demo +from qai_hub_models.models.fastsam_x.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + FastSAM_X, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +INPUT_IMAGE = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "image_640.jpg" +) + + +def main(is_test: bool = False): + fastsam_demo(FastSAM_X, image_path=INPUT_IMAGE, is_test=is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/fastsam_x/export.py b/qai_hub_models/models/fastsam_x/export.py new file mode 100644 index 00000000..036d7cbc --- /dev/null +++ b/qai_hub_models/models/fastsam_x/export.py @@ -0,0 +1,199 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
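Both FastSAM export scripts (fastsam_s above, fastsam_x below) compile with `--force_channel_last_input image` and `--force_channel_last_output output_1,output_2,output_3,output_5`, then call `transpose_channel_first_to_last` / `transpose_channel_last_to_first` around on-device inference. The repository helpers handle the real conversion; the sketch below only illustrates what that layout change amounts to:

```python
import numpy as np

# PyTorch sample inputs are channel-first: (N, C, H, W).
image_nchw = np.random.rand(1, 3, 640, 640).astype(np.float32)

# The compiled TFLite asset expects channel-last: (N, H, W, C).
image_nhwc = image_nchw.transpose(0, 2, 3, 1)
assert image_nhwc.shape == (1, 640, 640, 3)

# On-device outputs arrive channel-last and are transposed back to
# channel-first before being compared against the PyTorch reference outputs.
back_to_nchw = image_nhwc.transpose(0, 3, 1, 2)
assert back_to_nchw.shape == image_nchw.shape
```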
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.fastsam_x import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "fastsam_x" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "fastsam_x", + "FastSam-X", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace( + model, make_torch_inputs(input_spec), check_trace=False + ) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_1,output_2,output_3,output_5", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_1,output_2,output_3,output_5", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/fastsam_x/info.yaml b/qai_hub_models/models/fastsam_x/info.yaml new file mode 100644 index 00000000..a39f4a47 --- /dev/null +++ b/qai_hub_models/models/fastsam_x/info.yaml @@ -0,0 +1,36 @@ +name: FastSam-X +# id must match with the model dir name in qai_hub_models +id: fastsam_x +status: public +headline: Generate high quality segmentation mask on device. +domain: Computer Vision +description: The Fast Segment Anything Model (FastSAM) is a novel, real-time CNN-based + solution for the Segment Anything task. 
This task is designed to segment any object + within an image based on various possible user interaction prompts. The model performs + competitively despite significantly reduced computation, making it a practical choice + for a variety of vision tasks. +use_case: Semantic Segmentation +tags: [] +research_paper: https://arxiv.org/abs/2306.12156 +research_paper_title: Fast Segment Anything +license: https://github.com/CASIA-IVA-Lab/FastSAM/blob/main/LICENSE +source_repo: https://github.com/CASIA-IVA-Lab/FastSAM +technical_details: + Model checkpoint: fastsam-x.pt + Inference latency: RealTime + Input resolution: 640x640 + Number of parameters: 72.2M + Model size: 276 MB +applicable_scenarios: + - Camera + - Photo Editing +related_models: + - sam + - fastsam_s +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: yes +license_type: agpl-3.0 +dataset: [] diff --git a/qai_hub_models/models/fastsam_x/model.py b/qai_hub_models/models/fastsam_x/model.py new file mode 100644 index 00000000..1116888d --- /dev/null +++ b/qai_hub_models/models/fastsam_x/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from qai_hub_models.models._shared.fastsam.model import Fast_SAM + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "FastSAM-x.pt" +MODEL_ASSET_VERSION = 1 + + +class FastSAM_X(Fast_SAM): + """Exportable FastSAM model, end-to-end.""" + + @classmethod + def from_pretrained(cls, ckpt_name: str = DEFAULT_WEIGHTS): + return Fast_SAM.from_pretrained.__func__(FastSAM_X, ckpt_name) diff --git a/qai_hub_models/models/fastsam_x/perf.yaml b/qai_hub_models/models/fastsam_x/perf.yaml new file mode 100644 index 00000000..3fa52d1d --- /dev/null +++ b/qai_hub_models/models/fastsam_x/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: FastSam-X + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 64468.0 + throughput: 15.511571632437798 + estimated_peak_memory_range: + min: 9224192 + max: 14449200 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 420 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 420 + job_id: jz5wl3xzp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:38:35.191434Z' diff --git a/qai_hub_models/models/fastsam_x/requirements.txt 
b/qai_hub_models/models/fastsam_x/requirements.txt new file mode 100644 index 00000000..8d55bfa4 --- /dev/null +++ b/qai_hub_models/models/fastsam_x/requirements.txt @@ -0,0 +1,2 @@ +ultralytics==8.0.193 +torchvision diff --git a/qai_hub_models/models/fastsam_x/test.py b/qai_hub_models/models/fastsam_x/test.py new file mode 100644 index 00000000..90a58009 --- /dev/null +++ b/qai_hub_models/models/fastsam_x/test.py @@ -0,0 +1,34 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np +from PIL import Image +from ultralytics.models.fastsam import FastSAM, FastSAMPrompt + +from qai_hub_models.models._shared.fastsam.app import FastSAMApp +from qai_hub_models.models.fastsam_x.demo import INPUT_IMAGE +from qai_hub_models.models.fastsam_x.demo import main as demo_main +from qai_hub_models.models.fastsam_x.model import DEFAULT_WEIGHTS, FastSAM_X +from qai_hub_models.utils.image_processing import preprocess_PIL_image + + +def test_task(): + image_path = INPUT_IMAGE.fetch() + image = Image.open(image_path) + image = preprocess_PIL_image(image) + app = FastSAMApp(FastSAM_X.from_pretrained()) + result, _ = app.segment_image(str(image_path)) + + model = FastSAM(DEFAULT_WEIGHTS) + everything_results = model( + image_path, device="cpu", retina_masks=True, imgsz=640, conf=0.4, iou=0.9 + ) + prompt = FastSAMPrompt(image_path, everything_results, device="cpu") + predictions = prompt.everything_prompt() + + assert np.allclose(result[0].masks.data, predictions[0].masks.data) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/fcn_resnet50/README.md b/qai_hub_models/models/fcn_resnet50/README.md new file mode 100644 index 00000000..3763ff51 --- /dev/null +++ b/qai_hub_models/models/fcn_resnet50/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [FCN_ResNet50: Fully-convolutional network model for image segmentation](https://aihub.qualcomm.com/models/fcn_resnet50) + +FCN_ResNet50 is a machine learning model that can segment images from the COCO dataset. It uses ResNet50 as a backbone. + +This is based on the implementation of FCN_ResNet50 found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/segmentation/fcn.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/fcn_resnet50). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.fcn_resnet50.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. 
This can be run as follows: + +```bash +python -m qai_hub_models.models.fcn_resnet50.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of FCN_ResNet50 can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). + + +## References +* [Fully Convolutional Networks for Semantic Segmentation](https://arxiv.org/abs/1411.4038) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/segmentation/fcn.py) diff --git a/qai_hub_models/models/fcn_resnet50/__init__.py b/qai_hub_models/models/fcn_resnet50/__init__.py new file mode 100644 index 00000000..c1ad741f --- /dev/null +++ b/qai_hub_models/models/fcn_resnet50/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .app import FCN_ResNet50App as App # noqa: F401 +from .model import MODEL_ID # noqa: F401 +from .model import FCN_ResNet50 as Model # noqa: F401 diff --git a/qai_hub_models/models/fcn_resnet50/app.py b/qai_hub_models/models/fcn_resnet50/app.py new file mode 100644 index 00000000..f00519d8 --- /dev/null +++ b/qai_hub_models/models/fcn_resnet50/app.py @@ -0,0 +1,86 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from collections import OrderedDict +from typing import Callable + +import numpy as np +import PIL +import torch +from PIL.Image import Image +from torchvision import transforms + +from qai_hub_models.models.fcn_resnet50.model import NUM_CLASSES +from qai_hub_models.utils.draw import create_color_map +from qai_hub_models.utils.image_processing import normalize_image_transform + + +def preprocess_image(image: Image) -> torch.Tensor: + """ + Preprocesses images to be run through torch FCN segmenter + as prescribed here: + https://pytorch.org/hub/pytorch_vision_resnet/ + + Parameters: + image: Input image to be run through the classifier model. + + Returns: + torch tensor to be directly passed to the model. + """ + transform = transforms.Compose( + [ + transforms.ToTensor(), + normalize_image_transform(), + ] + ) + out_tensor: torch.Tensor = transform(image) # type: ignore + return out_tensor.unsqueeze(0) + + +class FCN_ResNet50App: + """ + This class consists of light-weight "app code" that is required to + perform end to end inference with FCN_ResNet50. + + For a given image input, the app will: + * Pre-process the image (normalize) + * Run image segmentation + * Convert the raw output into probabilities using softmax + """ + + def __init__(self, model: Callable[[torch.Tensor], OrderedDict]): + self.model = model + + def predict(self, image: Image, raw_output: bool = False) -> Image | np.ndarray: + """ + From the provided image or tensor, segment the image + + Parameters: + image: A PIL Image in RGB format. + + Returns: + If raw_output is true, returns: + masks: np.ndarray + A list of predicted masks. 
+ + Otherwise, returns: + segmented_images: List[PIL.Image] + Images with segmentation map overlaid with an alpha of 0.5. + """ + + input_tensor = preprocess_image(image) + with torch.no_grad(): + output = self.model(input_tensor) + output = output[0] + predictions = output.argmax(0).byte().cpu().numpy() + + if raw_output: + return predictions + + color_map = create_color_map(NUM_CLASSES) + out = PIL.Image.blend(image, PIL.Image.fromarray(color_map[predictions]), 0.5) + + return out diff --git a/qai_hub_models/models/fcn_resnet50/demo.py b/qai_hub_models/models/fcn_resnet50/demo.py new file mode 100644 index 00000000..bf5ac16c --- /dev/null +++ b/qai_hub_models/models/fcn_resnet50/demo.py @@ -0,0 +1,62 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models.fcn_resnet50.app import FCN_ResNet50App +from qai_hub_models.models.fcn_resnet50.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + FCN_ResNet50, +) +from qai_hub_models.utils.args import ( + demo_model_from_cli_args, + get_model_cli_parser, + get_on_device_demo_parser, + validate_on_device_demo_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.display import display_or_save_image +from qai_hub_models.utils.image_processing import pil_resize_pad, pil_undo_resize_pad + +# Demo image comes from https://github.com/pytorch/hub/raw/master/images/deeplab1.png +# and has had alpha channel removed for use as input +INPUT_IMAGE_LOCAL_PATH = "fcn_demo.png" +INPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, INPUT_IMAGE_LOCAL_PATH +) + + +def main(is_test: bool = False): + # Demo parameters + parser = get_model_cli_parser(FCN_ResNet50) + parser = get_on_device_demo_parser(parser, add_output_dir=True) + parser.add_argument( + "--image", + type=str, + default=INPUT_IMAGE_ADDRESS, + help="image file path or URL.", + ) + + args = parser.parse_args([] if is_test else None) + validate_on_device_demo_args(args, FCN_ResNet50.get_model_id()) + model = demo_model_from_cli_args(FCN_ResNet50, args) + + # This FCN ResNet 50 demo comes from + # https://pytorch.org/hub/pytorch_vision_fcn_resnet101/ + # load image + (_, _, height, width) = FCN_ResNet50.get_input_spec()["image"][0] + orig_image = load_image(args.image) + image, _, padding = pil_resize_pad(orig_image, (height, width)) + input_image = image.convert("RGB") + + app = FCN_ResNet50App(model) + output = app.predict(input_image, False) + + if not is_test: + # Resize / unpad annotated image + image_annotated = pil_undo_resize_pad(output, orig_image.size, padding) + display_or_save_image(image_annotated, args.output_dir, "fcn_demo_output.png") + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/fcn_resnet50/export.py b/qai_hub_models/models/fcn_resnet50/export.py new file mode 100644 index 00000000..d8d19f76 --- /dev/null +++ b/qai_hub_models/models/fcn_resnet50/export.py @@ -0,0 +1,197 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
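The FCN_ResNet50 app and demo above reduce segmentation to a few calls: fetch the image, resize and pad it to the 224x224 input spec, run `predict`, and take either the color-blended overlay or the raw per-pixel class map. A minimal sketch of that flow, assuming the cached sample image asset is reachable:

```python
# Mirrors demo.py above: fetch the sample image, resize/pad to the input spec,
# then run the FCN_ResNet50App.
from qai_hub_models.models.fcn_resnet50 import App, Model
from qai_hub_models.models.fcn_resnet50.demo import INPUT_IMAGE_ADDRESS
from qai_hub_models.utils.asset_loaders import load_image
from qai_hub_models.utils.image_processing import pil_resize_pad

orig_image = load_image(INPUT_IMAGE_ADDRESS)
image, _, padding = pil_resize_pad(orig_image, (224, 224))  # match the input spec

app = App(Model.from_pretrained())
overlay = app.predict(image.convert("RGB"))                     # PIL image, masks blended at alpha 0.5
class_map = app.predict(image.convert("RGB"), raw_output=True)  # per-pixel class indices
print(class_map.shape)                                          # (224, 224); values in [0, 20]
```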
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.fcn_resnet50 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "fcn_resnet50" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "fcn_resnet50", + "FCN_ResNet50", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/fcn_resnet50/info.yaml b/qai_hub_models/models/fcn_resnet50/info.yaml new file mode 100644 index 00000000..074affa6 --- /dev/null +++ b/qai_hub_models/models/fcn_resnet50/info.yaml @@ -0,0 +1,36 @@ +name: FCN_ResNet50 +# id must match with the model dir name in qai_hub_models +id: fcn_resnet50 +status: public +headline: Fully-convolutional network model for image segmentation. +domain: Computer Vision +use_case: Semantic Segmentation +description: FCN_ResNet50 is a machine learning model that can segment images from + the COCO dataset. It uses ResNet50 as a backbone. 
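For reference, the core of the export flow above, stripped of the skip flags, channel-layout options, and summary steps, is simply `get_input_spec` feeding a traced model into `hub.submit_compile_job`. A hedged sketch, assuming Qualcomm® AI Hub credentials are configured locally:

```python
# Minimal compile path corresponding to steps 1-2 of the export script above.
import qai_hub as hub
import torch

from qai_hub_models.models.fcn_resnet50 import Model
from qai_hub_models.utils.input_spec import make_torch_inputs

model = Model.from_pretrained()
input_spec = model.get_input_spec()    # {"image": ((1, 3, 224, 224), "float32")}
traced = torch.jit.trace(model, make_torch_inputs(input_spec))

compile_job = hub.submit_compile_job(
    model=traced,
    input_specs=input_spec,
    device=hub.Device("Samsung Galaxy S23"),
    name="fcn_resnet50",
)
print(compile_job.get_target_model())  # compiled asset, ready to download
```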
+tags: [] +research_paper: https://arxiv.org/abs/1411.4038 +research_paper_title: Fully Convolutional Networks for Semantic Segmentation +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: + https://github.com/pytorch/vision/blob/main/torchvision/models/segmentation/fcn.py +technical_details: + Model checkpoint: COCO_WITH_VOC_LABELS_V1 + Input resolution: 224x224 + Number of parameters: 32.9M + Model size: 126 MB +applicable_scenarios: + - Anomaly Detection + - Inventory Management +related_models: + - sam + - unet_segmentation + - ddrnet23_slim +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: no +license_type: bsd-3-clause +dataset: [] diff --git a/qai_hub_models/models/fcn_resnet50/model.py b/qai_hub_models/models/fcn_resnet50/model.py new file mode 100644 index 00000000..156e63ec --- /dev/null +++ b/qai_hub_models/models/fcn_resnet50/model.py @@ -0,0 +1,59 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch +import torchvision.models as tv_models + +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +DEFAULT_WEIGHTS = "COCO_WITH_VOC_LABELS_V1" +NUM_CLASSES = 21 + + +class FCN_ResNet50(BaseModel): + """Exportable FCNresNet50 image segmentation applications, end-to-end.""" + + def __init__( + self, + fcn_model: torch.nn.Module, + ) -> None: + super().__init__() + self.model = fcn_model + + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> FCN_ResNet50: + model = tv_models.segmentation.fcn_resnet50(weights=weights).eval() + return cls(model) + + def forward(self, image: torch.Tensor) -> torch.Tensor: + """ + Run FCN_ResNet50 on `image`, and produce a tensor of classes for segmentation + + Parameters: + image: Pixel values pre-processed for model consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + tensor: 1x21xHxW tensor of class logits per pixel + """ + return self.model(image)["out"] + + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 224, + width: int = 224, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. 
+ return {"image": ((batch_size, num_channels, height, width), "float32")} diff --git a/qai_hub_models/models/fcn_resnet50/perf.yaml b/qai_hub_models/models/fcn_resnet50/perf.yaml new file mode 100644 index 00000000..7c674dea --- /dev/null +++ b/qai_hub_models/models/fcn_resnet50/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: FCN_ResNet50 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 8563.0 + throughput: 116.78150181011328 + estimated_peak_memory_range: + min: 4263936 + max: 11057224 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 84 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 84 + job_id: joprl21vp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 7864.0 + throughput: 127.1617497456765 + estimated_peak_memory_range: + min: 20480 + max: 13081680 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 126 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 126 + job_id: jep2r93xg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:21:53.248417Z' diff --git a/qai_hub_models/models/fcn_resnet50/test.py b/qai_hub_models/models/fcn_resnet50/test.py new file mode 100644 index 00000000..7c30198c --- /dev/null +++ b/qai_hub_models/models/fcn_resnet50/test.py @@ -0,0 +1,48 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+import numpy as np
+
+from qai_hub_models.models.fcn_resnet50.app import FCN_ResNet50App
+from qai_hub_models.models.fcn_resnet50.demo import INPUT_IMAGE_ADDRESS
+from qai_hub_models.models.fcn_resnet50.demo import main as demo_main
+from qai_hub_models.models.fcn_resnet50.model import (
+    MODEL_ASSET_VERSION,
+    MODEL_ID,
+    FCN_ResNet50,
+)
+from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image
+from qai_hub_models.utils.testing import skip_clone_repo_check
+
+OUTPUT_IMAGE_LOCAL_PATH = "fcn_demo_output.png"
+OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store(
+    MODEL_ID, MODEL_ASSET_VERSION, OUTPUT_IMAGE_LOCAL_PATH
+)
+
+
+def _test_impl(app: FCN_ResNet50App):
+    image = load_image(INPUT_IMAGE_ADDRESS)
+    output_image = load_image(OUTPUT_IMAGE_ADDRESS)
+    app_output_image = app.predict(image, False)
+
+    np.testing.assert_allclose(
+        np.asarray(app_output_image, dtype=np.float32) / 255,
+        np.asarray(output_image, dtype=np.float32) / 255,
+        rtol=0.02,
+        atol=0.2,
+    )
+
+
+@skip_clone_repo_check
+def test_task():
+    _test_impl(FCN_ResNet50App(FCN_ResNet50.from_pretrained()))
+
+
+@skip_clone_repo_check
+def test_trace():
+    _test_impl(FCN_ResNet50App(FCN_ResNet50.from_pretrained().convert_to_torchscript()))
+
+
+def test_demo():
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/ffnet_122ns_lowres/README.md b/qai_hub_models/models/ffnet_122ns_lowres/README.md
new file mode 100644
index 00000000..348254c4
--- /dev/null
+++ b/qai_hub_models/models/ffnet_122ns_lowres/README.md
@@ -0,0 +1,55 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [FFNet-122NS-LowRes: Semantic segmentation for automotive street scenes](https://aihub.qualcomm.com/models/ffnet_122ns_lowres)
+
+FFNet-122NS-LowRes is a "fuss-free network" that segments street scene images with per-pixel classes like road, sidewalk, and pedestrian. Trained on the Cityscapes dataset.
+
+This is based on the implementation of FFNet-122NS-LowRes found
+[here](https://github.com/Qualcomm-AI-research/FFNet). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/ffnet_122ns_lowres).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+Install the package via pip:
+```bash
+pip install "qai_hub_models[ffnet_122ns_lowres]"
+```
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.ffnet_122ns_lowres.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.ffnet_122ns_lowres.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub; see the deployment instructions for
+Qualcomm® AI Hub for details.
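+
+The export flow can also be driven from Python. The call below is an
+illustrative sketch only; it assumes the `ffnet_122ns_lowres` extra shown
+above is installed and that Qualcomm® AI Hub access is configured. Run the
+export script with `--help` for the full set of options.
+
+```python
+from qai_hub_models.models.ffnet_122ns_lowres.export import export_model
+
+# Compile for a hosted device, profile it, run sample inference, and download
+# the optimized .tflite asset into ./build/ffnet_122ns_lowres/.
+compile_job, profile_job, inference_job = export_model(
+    device="Samsung Galaxy S23",  # any device listed by qai_hub.get_devices()
+    dst_runtime="TFLITE",         # default on-device target runtime
+)
+```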
+ +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of FFNet-122NS-LowRes can be found + [here](https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE). + + +## References +* [Simple and Efficient Architectures for Semantic Segmentation](https://arxiv.org/abs/2206.08236) +* [Source Model Implementation](https://github.com/Qualcomm-AI-research/FFNet) diff --git a/qai_hub_models/models/ffnet_122ns_lowres/__init__.py b/qai_hub_models/models/ffnet_122ns_lowres/__init__.py new file mode 100644 index 00000000..78d0c2c2 --- /dev/null +++ b/qai_hub_models/models/ffnet_122ns_lowres/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.app import ( # noqa: F401 + CityscapesSegmentationApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import FFNet122NSLowRes as Model # noqa: F401 diff --git a/qai_hub_models/models/ffnet_122ns_lowres/demo.py b/qai_hub_models/models/ffnet_122ns_lowres/demo.py new file mode 100644 index 00000000..e7ece0e6 --- /dev/null +++ b/qai_hub_models/models/ffnet_122ns_lowres/demo.py @@ -0,0 +1,16 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.demo import ( + cityscapes_segmentation_demo, +) +from qai_hub_models.models.ffnet_122ns_lowres.model import MODEL_ID, FFNet122NSLowRes + + +def main(is_test: bool = False): + cityscapes_segmentation_demo(FFNet122NSLowRes, MODEL_ID, is_test=is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_122ns_lowres/export.py b/qai_hub_models/models/ffnet_122ns_lowres/export.py new file mode 100644 index 00000000..f6e6ae0d --- /dev/null +++ b/qai_hub_models/models/ffnet_122ns_lowres/export.py @@ -0,0 +1,194 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.ffnet_122ns_lowres import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "ffnet_122ns_lowres" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "ffnet_122ns_lowres", + "FFNet-122NS-LowRes", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_122ns_lowres/info.yaml b/qai_hub_models/models/ffnet_122ns_lowres/info.yaml new file mode 100644 index 00000000..10198c19 --- /dev/null +++ b/qai_hub_models/models/ffnet_122ns_lowres/info.yaml @@ -0,0 +1,36 @@ +name: FFNet-122NS-LowRes +# id must match with the model dir name in qai_hub_models +id: ffnet_122ns_lowres +status: public +headline: Semantic segmentation for automotive street scenes. +domain: Computer Vision +description: FFNet-122NS-LowRes is a "fuss-free network" that segments street scene + images with per-pixel classes like road, sidewalk, and pedestrian. Trained on the + Cityscapes dataset. 
+use_case: Semantic Segmentation +tags: [] +research_paper: https://arxiv.org/abs/2206.08236 +research_paper_title: Simple and Efficient Architectures for Semantic Segmentation +license: https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE +source_repo: https://github.com/Qualcomm-AI-research/FFNet +technical_details: + Model checkpoint: ffnet122NS_CCC_cityscapes_state_dict_quarts_pre_down + Input resolution: 1024x512 + Number of parameters: 32.1M + Model size: 123 MB +applicable_scenarios: + - Automotive + - Autonomous Driving + - Camera +related_models: + - ffnet_78s_lowres + - ffnet_54s + - unet_segmentation +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - cityscapes diff --git a/qai_hub_models/models/ffnet_122ns_lowres/model.py b/qai_hub_models/models/ffnet_122ns_lowres/model.py new file mode 100644 index 00000000..28908df0 --- /dev/null +++ b/qai_hub_models/models/ffnet_122ns_lowres/model.py @@ -0,0 +1,17 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from qai_hub_models.models._shared.ffnet.model import FFNetLowRes + +MODEL_ID = __name__.split(".")[-2] + + +class FFNet122NSLowRes(FFNetLowRes): + @classmethod + def from_pretrained(cls) -> FFNet122NSLowRes: + return FFNetLowRes.from_pretrained.__func__( + cls, "segmentation_ffnet122NS_CCC_mobile_pre_down" + ) diff --git a/qai_hub_models/models/ffnet_122ns_lowres/perf.yaml b/qai_hub_models/models/ffnet_122ns_lowres/perf.yaml new file mode 100644 index 00000000..f41f23a1 --- /dev/null +++ b/qai_hub_models/models/ffnet_122ns_lowres/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: FFNet-122NS-LowRes + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 10460.0 + throughput: 95.60229445506693 + estimated_peak_memory_range: + min: 643072 + max: 2912400 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 216 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 216 + job_id: jqpyojnr5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 10778.0 + throughput: 92.78159213212099 + estimated_peak_memory_range: + min: 6332416 + max: 39442976 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 349 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 349 + job_id: j2p0m2k2g + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:22:01.714758Z' diff --git a/qai_hub_models/models/ffnet_122ns_lowres/requirements.txt b/qai_hub_models/models/ffnet_122ns_lowres/requirements.txt new file mode 100644 index 00000000..73ad8aa8 
--- /dev/null
+++ b/qai_hub_models/models/ffnet_122ns_lowres/requirements.txt
@@ -0,0 +1 @@
+scikit-image>=0.21.0
diff --git a/qai_hub_models/models/ffnet_122ns_lowres/test.py b/qai_hub_models/models/ffnet_122ns_lowres/test.py
new file mode 100644
index 00000000..c04b4f07
--- /dev/null
+++ b/qai_hub_models/models/ffnet_122ns_lowres/test.py
@@ -0,0 +1,20 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.ffnet.test_utils import run_test_off_target_numerical
+from qai_hub_models.models.ffnet_122ns_lowres.demo import main as demo_main
+from qai_hub_models.models.ffnet_122ns_lowres.model import FFNet122NSLowRes
+from qai_hub_models.utils.testing import skip_clone_repo_check
+
+
+@skip_clone_repo_check
+def test_off_target_numerical():
+    run_test_off_target_numerical(
+        FFNet122NSLowRes, "segmentation_ffnet122NS_CCC_mobile_pre_down"
+    )
+
+
+@skip_clone_repo_check
+def test_demo():
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/ffnet_40s/README.md b/qai_hub_models/models/ffnet_40s/README.md
new file mode 100644
index 00000000..33ac664e
--- /dev/null
+++ b/qai_hub_models/models/ffnet_40s/README.md
@@ -0,0 +1,55 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [FFNet-40S: Semantic segmentation for automotive street scenes](https://aihub.qualcomm.com/models/ffnet_40s)
+
+FFNet-40S is a "fuss-free network" that segments street scene images with per-pixel classes like road, sidewalk, and pedestrian. Trained on the Cityscapes dataset.
+
+This is based on the implementation of FFNet-40S found
+[here](https://github.com/Qualcomm-AI-research/FFNet). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/ffnet_40s).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+Install the package via pip:
+```bash
+pip install "qai_hub_models[ffnet_40s]"
+```
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.ffnet_40s.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.ffnet_40s.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub; see the deployment instructions for
+Qualcomm® AI Hub for details.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of FFNet-40S can be found
+  [here](https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE).
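+
+
+## Python usage (illustrative sketch)
+
+The snippet below sketches only the first step automated by `export.py`
+(loading the pretrained checkpoint and tracing it to TorchScript); the
+compile, profile, and inference jobs are then submitted through `qai_hub` as
+shown in that script. It assumes the `ffnet_40s` extra from the pip command
+above is installed; loading the checkpoint may fetch the FFNet sources and
+weights on first use.
+
+```python
+import torch
+
+from qai_hub_models.models.ffnet_40s import Model  # FFNet40S
+from qai_hub_models.utils.input_spec import make_torch_inputs
+
+# Load the pretrained Cityscapes checkpoint and query its input specification.
+model = Model.from_pretrained()
+input_spec = model.get_input_spec()
+
+# Trace to TorchScript; this traced module is what the compile job consumes.
+traced_model = torch.jit.trace(model, make_torch_inputs(input_spec))
+```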
+ + +## References +* [Simple and Efficient Architectures for Semantic Segmentation](https://arxiv.org/abs/2206.08236) +* [Source Model Implementation](https://github.com/Qualcomm-AI-research/FFNet) diff --git a/qai_hub_models/models/ffnet_40s/__init__.py b/qai_hub_models/models/ffnet_40s/__init__.py new file mode 100644 index 00000000..7e83392b --- /dev/null +++ b/qai_hub_models/models/ffnet_40s/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.app import ( # noqa: F401 + CityscapesSegmentationApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import FFNet40S as Model # noqa: F401 diff --git a/qai_hub_models/models/ffnet_40s/demo.py b/qai_hub_models/models/ffnet_40s/demo.py new file mode 100644 index 00000000..9578f539 --- /dev/null +++ b/qai_hub_models/models/ffnet_40s/demo.py @@ -0,0 +1,16 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.demo import ( + cityscapes_segmentation_demo, +) +from qai_hub_models.models.ffnet_40s.model import MODEL_ID, FFNet40S + + +def main(is_test: bool = False): + cityscapes_segmentation_demo(FFNet40S, MODEL_ID, is_test=is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_40s/export.py b/qai_hub_models/models/ffnet_40s/export.py new file mode 100644 index 00000000..22821d0e --- /dev/null +++ b/qai_hub_models/models/ffnet_40s/export.py @@ -0,0 +1,194 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.ffnet_40s import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. 
Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "ffnet_40s" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "ffnet_40s", + "FFNet-40S", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." 
+ ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_40s/info.yaml b/qai_hub_models/models/ffnet_40s/info.yaml new file mode 100644 index 00000000..94830c01 --- /dev/null +++ b/qai_hub_models/models/ffnet_40s/info.yaml @@ -0,0 +1,37 @@ +name: FFNet-40S +# id must match with the model dir name in qai_hub_models +id: ffnet_40s +status: public +headline: Semantic segmentation for automotive street scenes. +domain: Computer Vision +description: FFNet-40S is a "fuss-free network" that segments street scene images + with per-pixel classes like road, sidewalk, and pedestrian. Trained on the Cityscapes + dataset. +use_case: Semantic Segmentation +tags: + - real-time +research_paper: https://arxiv.org/abs/2206.08236 +research_paper_title: Simple and Efficient Architectures for Semantic Segmentation +license: https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE +source_repo: https://github.com/Qualcomm-AI-research/FFNet +technical_details: + Model checkpoint: ffnet40S_dBBB_cityscapes_state_dict_quarts + Input resolution: 2048x1024 + Number of parameters: 13.9M + Model size: 53.1 MB +applicable_scenarios: + - Automotive + - Autonomous Driving + - Camera +related_models: + - ffnet_54s + - ffnet_78s + - deeplabv3_plus_mobilenet +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - cityscapes diff --git a/qai_hub_models/models/ffnet_40s/model.py b/qai_hub_models/models/ffnet_40s/model.py new file mode 100644 index 00000000..29efab4d --- /dev/null +++ b/qai_hub_models/models/ffnet_40s/model.py @@ -0,0 +1,15 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from qai_hub_models.models._shared.ffnet.model import FFNet + +MODEL_ID = __name__.split(".")[-2] + + +class FFNet40S(FFNet): + @classmethod + def from_pretrained(cls) -> FFNet40S: + return FFNet.from_pretrained.__func__(cls, "segmentation_ffnet40S_dBBB_mobile") diff --git a/qai_hub_models/models/ffnet_40s/perf.yaml b/qai_hub_models/models/ffnet_40s/perf.yaml new file mode 100644 index 00000000..cd79f677 --- /dev/null +++ b/qai_hub_models/models/ffnet_40s/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: FFNet-40S + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 22739.0 + throughput: 43.97730770922204 + estimated_peak_memory_range: + min: 2564096 + max: 5001048 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 92 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 92 + job_id: jegnzm9vg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 17313.0 + throughput: 57.760064691272454 + estimated_peak_memory_range: + min: 25202688 + max: 51306904 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 141 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 141 + job_id: jep2r97xg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:14:26.648274Z' diff --git a/qai_hub_models/models/ffnet_40s/requirements.txt b/qai_hub_models/models/ffnet_40s/requirements.txt new file mode 100644 index 00000000..73ad8aa8 --- /dev/null +++ b/qai_hub_models/models/ffnet_40s/requirements.txt @@ -0,0 +1 @@ +scikit-image>=0.21.0 diff --git a/qai_hub_models/models/ffnet_40s/test.py b/qai_hub_models/models/ffnet_40s/test.py new file mode 100644 index 00000000..9a9cf4e8 --- /dev/null +++ b/qai_hub_models/models/ffnet_40s/test.py @@ -0,0 +1,18 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.ffnet.test_utils import run_test_off_target_numerical +from qai_hub_models.models.ffnet_40s.demo import main as demo_main +from qai_hub_models.models.ffnet_40s.model import FFNet40S +from qai_hub_models.utils.testing import skip_clone_repo_check + + +@skip_clone_repo_check +def test_off_target_numerical(): + run_test_off_target_numerical(FFNet40S, "segmentation_ffnet40S_dBBB_mobile") + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/ffnet_40s_quantized/README.md b/qai_hub_models/models/ffnet_40s_quantized/README.md new file mode 100644 index 00000000..abb8c6d4 --- /dev/null +++ b/qai_hub_models/models/ffnet_40s_quantized/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [FFNet-40S-Quantized: Semantic segmentation for automotive street scenes](https://aihub.qualcomm.com/models/ffnet_40s_quantized) + +FFNet-40S-Quantized is a "fuss-free network" that segments street scene images with per-pixel classes like road, sidewalk, and pedestrian. Trained on the Cityscapes dataset. + +This is based on the implementation of FFNet-40S-Quantized found +[here](https://github.com/Qualcomm-AI-research/FFNet). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/ffnet_40s_quantized). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.ffnet_40s_quantized.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.ffnet_40s_quantized.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of FFNet-40S-Quantized can be found + [here](https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE). + + +## References +* [Simple and Efficient Architectures for Semantic Segmentation](https://arxiv.org/abs/2206.08236) +* [Source Model Implementation](https://github.com/Qualcomm-AI-research/FFNet) diff --git a/qai_hub_models/models/ffnet_40s_quantized/__init__.py b/qai_hub_models/models/ffnet_40s_quantized/__init__.py new file mode 100644 index 00000000..bfc01cb5 --- /dev/null +++ b/qai_hub_models/models/ffnet_40s_quantized/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.app import ( # noqa: F401 + CityscapesSegmentationApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import FFNet40SQuantizable as Model # noqa: F401 diff --git a/qai_hub_models/models/ffnet_40s_quantized/demo.py b/qai_hub_models/models/ffnet_40s_quantized/demo.py new file mode 100644 index 00000000..615c9643 --- /dev/null +++ b/qai_hub_models/models/ffnet_40s_quantized/demo.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.demo import ( + cityscapes_segmentation_demo, +) +from qai_hub_models.models.ffnet_40s_quantized.model import ( + MODEL_ID, + FFNet40SQuantizable, +) + + +def main(is_test: bool = False): + cityscapes_segmentation_demo(FFNet40SQuantizable, MODEL_ID, is_test=is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_40s_quantized/export.py b/qai_hub_models/models/ffnet_40s_quantized/export.py new file mode 100644 index 00000000..ff0c2abc --- /dev/null +++ b/qai_hub_models/models/ffnet_40s_quantized/export.py @@ -0,0 +1,204 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.ffnet_40s_quantized import Model +from qai_hub_models.utils.args import ( + TargetRuntime, + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) +from qai_hub_models.utils.qnn_helpers import get_qnn_inputs + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. 
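+
+    Example (illustrative; assumes Qualcomm® AI Hub access is configured and
+    the model's requirements are installed). This call compiles and profiles
+    the model but skips on-device inference and the local download:
+
+        export_model(
+            device="Samsung Galaxy S23",
+            skip_inferencing=True,
+            skip_downloading=True,
+        )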
+ + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "ffnet_40s_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "ffnet_40s_quantized", + "FFNet-40S-Quantized", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = model.convert_to_hub_source_model( + target_runtime, output_path, input_spec + ) + if target_runtime == TargetRuntime.TFLITE: + quant_calibration_data = None + else: + quant_calibration_data = model.get_calibration_data(target_runtime, input_spec) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + calibration_data=quant_calibration_data, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." 
+ ) + sample_inputs = model.sample_inputs(input_spec) + hub_inputs = sample_inputs + if target_runtime == TargetRuntime.QNN: + hub_inputs = get_qnn_inputs(compile_job, sample_inputs) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_40s_quantized/info.yaml b/qai_hub_models/models/ffnet_40s_quantized/info.yaml new file mode 100644 index 00000000..bf5bfbe3 --- /dev/null +++ b/qai_hub_models/models/ffnet_40s_quantized/info.yaml @@ -0,0 +1,38 @@ +name: FFNet-40S-Quantized +# id must match with the model dir name in qai_hub_models +id: ffnet_40s_quantized +status: public +headline: Semantic segmentation for automotive street scenes. +domain: Computer Vision +description: FFNet-40S-Quantized is a "fuss-free network" that segments street scene + images with per-pixel classes like road, sidewalk, and pedestrian. Trained on the + Cityscapes dataset. +use_case: Semantic Segmentation +tags: + - quantized + - real-time +research_paper: https://arxiv.org/abs/2206.08236 +research_paper_title: Simple and Efficient Architectures for Semantic Segmentation +license: https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE +source_repo: https://github.com/Qualcomm-AI-research/FFNet +technical_details: + Model checkpoint: ffnet40S_dBBB_cityscapes_state_dict_quarts + Input resolution: 2048x1024 + Number of parameters: 13.9M + Model size: 13.5 MB +applicable_scenarios: + - Automotive + - Autonomous Driving + - Camera +related_models: + - ffnet_40s + - ffnet_54s_quantized + - ffnet_78s_quantized +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - cityscapes diff --git a/qai_hub_models/models/ffnet_40s_quantized/model.py b/qai_hub_models/models/ffnet_40s_quantized/model.py new file mode 100644 index 00000000..06e14e7d --- /dev/null +++ b/qai_hub_models/models/ffnet_40s_quantized/model.py @@ -0,0 +1,32 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from qai_hub_models.models._shared.ffnet_quantized.model import FFNetQuantizable +from qai_hub_models.models.ffnet_40s.model import FFNet40S +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +DEFAULT_ENCODINGS = "encodings.json" + + +class FFNet40SQuantizable(FFNetQuantizable, FFNet40S): + @classmethod + def from_pretrained( + cls, + aimet_encodings: str | None = "DEFAULT", + ) -> FFNet40SQuantizable: + return FFNetQuantizable.from_pretrained.__func__( + cls, + "segmentation_ffnet40S_dBBB_mobile", + aimet_encodings=aimet_encodings, + ) + + @classmethod + def default_aimet_encodings(cls) -> str: + return CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_ENCODINGS + ).fetch() diff --git a/qai_hub_models/models/ffnet_40s_quantized/perf.yaml b/qai_hub_models/models/ffnet_40s_quantized/perf.yaml new file mode 100644 index 00000000..d0408e2b --- /dev/null +++ b/qai_hub_models/models/ffnet_40s_quantized/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: FFNet-40S-Quantized + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 6451.0 + throughput: 155.0147263990079 + estimated_peak_memory_range: + min: 851968 + max: 2582296 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 97 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 97 + job_id: j0pxl6x9p + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:15:22.015621Z' diff --git a/qai_hub_models/models/ffnet_40s_quantized/test.py b/qai_hub_models/models/ffnet_40s_quantized/test.py new file mode 100644 index 00000000..12e10323 --- /dev/null +++ b/qai_hub_models/models/ffnet_40s_quantized/test.py @@ -0,0 +1,21 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.ffnet.test_utils import run_test_off_target_numerical +from qai_hub_models.models.ffnet_40s_quantized.demo import main as demo_main +from qai_hub_models.models.ffnet_40s_quantized.model import FFNet40SQuantizable +from qai_hub_models.utils.testing import skip_clone_repo_check + + +@skip_clone_repo_check +def test_off_target_numerical(): + run_test_off_target_numerical( + FFNet40SQuantizable, + "segmentation_ffnet40S_dBBB_mobile", + relax_numerics=True, + ) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/ffnet_54s/README.md b/qai_hub_models/models/ffnet_54s/README.md new file mode 100644 index 00000000..81639336 --- /dev/null +++ b/qai_hub_models/models/ffnet_54s/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [FFNet-54S: Semantic segmentation for automotive street scenes](https://aihub.qualcomm.com/models/ffnet_54s) + +FFNet-54S is a "fuss-free network" that segments street scene images with per-pixel classes like road, sidewalk, and pedestrian. Trained on the Cityscapes dataset. + +This is based on the implementation of FFNet-54S found +[here](https://github.com/Qualcomm-AI-research/FFNet). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/ffnet_54s). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[ffnet_54s]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.ffnet_54s.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.ffnet_54s.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of FFNet-54S can be found + [here](https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE). + + +## References +* [Simple and Efficient Architectures for Semantic Segmentation](https://arxiv.org/abs/2206.08236) +* [Source Model Implementation](https://github.com/Qualcomm-AI-research/FFNet) diff --git a/qai_hub_models/models/ffnet_54s/__init__.py b/qai_hub_models/models/ffnet_54s/__init__.py new file mode 100644 index 00000000..b7f34d3e --- /dev/null +++ b/qai_hub_models/models/ffnet_54s/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.app import ( # noqa: F401 + CityscapesSegmentationApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import FFNet54S as Model # noqa: F401 diff --git a/qai_hub_models/models/ffnet_54s/demo.py b/qai_hub_models/models/ffnet_54s/demo.py new file mode 100644 index 00000000..cd9a8038 --- /dev/null +++ b/qai_hub_models/models/ffnet_54s/demo.py @@ -0,0 +1,16 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.demo import ( + cityscapes_segmentation_demo, +) +from qai_hub_models.models.ffnet_54s.model import MODEL_ID, FFNet54S + + +def main(is_test: bool = False): + cityscapes_segmentation_demo(FFNet54S, MODEL_ID, is_test=is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_54s/export.py b/qai_hub_models/models/ffnet_54s/export.py new file mode 100644 index 00000000..7b9c3524 --- /dev/null +++ b/qai_hub_models/models/ffnet_54s/export.py @@ -0,0 +1,194 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.ffnet_54s import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. 
+ Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "ffnet_54s" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "ffnet_54s", + "FFNet-54S", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_54s/info.yaml b/qai_hub_models/models/ffnet_54s/info.yaml new file mode 100644 index 00000000..be93ffc8 --- /dev/null +++ b/qai_hub_models/models/ffnet_54s/info.yaml @@ -0,0 +1,36 @@ +name: FFNet-54S +# id must match with the model dir name in qai_hub_models +id: ffnet_54s +status: public +headline: Semantic segmentation for automotive street scenes. +domain: Computer Vision +description: FFNet-54S is a "fuss-free network" that segments street scene images + with per-pixel classes like road, sidewalk, and pedestrian. Trained on the Cityscapes + dataset. +use_case: Semantic Segmentation +tags: [] +research_paper: https://arxiv.org/abs/2206.08236 +research_paper_title: Simple and Efficient Architectures for Semantic Segmentation +license: https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE +source_repo: https://github.com/Qualcomm-AI-research/FFNet +technical_details: + Model checkpoint: ffnet54S_dBBB_cityscapes_state_dict_quarts + Input resolution: 2048x1024 + Number of parameters: 18.0M + Model size: 68.8 MB +applicable_scenarios: + - Automotive + - Autonomous Driving + - Camera +related_models: + - ffnet_40s + - ffnet_78s + - fcn_resnet50 +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - cityscapes diff --git a/qai_hub_models/models/ffnet_54s/model.py b/qai_hub_models/models/ffnet_54s/model.py new file mode 100644 index 00000000..8024d6ed --- /dev/null +++ b/qai_hub_models/models/ffnet_54s/model.py @@ -0,0 +1,15 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
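The `export.py` above is normally driven through argparse via `main()`, but `export_model` is a plain function, so the same flow can be scripted. A minimal sketch using only argument names from the signature shown above; the device string is the default from that signature, and the call assumes Qualcomm® AI Hub access is configured (without it, the function returns a list of messages instead of job objects):

```python
# Scripting the auto-generated export flow for FFNet-54S.
from qai_hub_models.models.ffnet_54s.export import export_model

jobs = export_model(
    device="Samsung Galaxy S23",    # default device in the generated signature
    skip_profiling=True,            # compile and run on-device inference, skip profiling
    output_dir="build/ffnet_54s",   # defaults to <cwd>/build/ffnet_54s when omitted
    dst_runtime="TFLITE",           # default runtime in the generated signature
)
# With AI Hub access, `jobs` is (compile_job, profile_job, inference_job);
# profile_job is None here because profiling was skipped.
```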
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from qai_hub_models.models._shared.ffnet.model import FFNet + +MODEL_ID = __name__.split(".")[-2] + + +class FFNet54S(FFNet): + @classmethod + def from_pretrained(cls) -> FFNet54S: + return FFNet.from_pretrained.__func__(cls, "segmentation_ffnet54S_dBBB_mobile") diff --git a/qai_hub_models/models/ffnet_54s/perf.yaml b/qai_hub_models/models/ffnet_54s/perf.yaml new file mode 100644 index 00000000..13243d9e --- /dev/null +++ b/qai_hub_models/models/ffnet_54s/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: FFNet-54S + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 25261.0 + throughput: 39.58671469854717 + estimated_peak_memory_range: + min: 2551808 + max: 4912232 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 113 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 113 + job_id: jygzlj8z5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 20585.0 + throughput: 48.57906242409521 + estimated_peak_memory_range: + min: 25206784 + max: 41071808 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 176 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 176 + job_id: jz5wl38zp + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:31:19.360420Z' diff --git a/qai_hub_models/models/ffnet_54s/requirements.txt b/qai_hub_models/models/ffnet_54s/requirements.txt new file mode 100644 index 00000000..73ad8aa8 --- /dev/null +++ b/qai_hub_models/models/ffnet_54s/requirements.txt @@ -0,0 +1 @@ +scikit-image>=0.21.0 diff --git a/qai_hub_models/models/ffnet_54s/test.py b/qai_hub_models/models/ffnet_54s/test.py new file mode 100644 index 00000000..fbb17be4 --- /dev/null +++ b/qai_hub_models/models/ffnet_54s/test.py @@ -0,0 +1,18 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
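The `perf.yaml` above pairs each `inference_time` with a `throughput` value. The numbers are consistent with `inference_time` being reported in microseconds and `throughput` in inferences per second, i.e. `throughput = 1e6 / inference_time`; the unit interpretation is inferred from the values themselves, not stated in this diff:

```python
# Reproducing the FFNet-54S throughput figures from the inference times above.
for inference_time_us in (25261.0, 20585.0):   # TFLite and QNN entries
    throughput = 1e6 / inference_time_us       # inferences per second
    print(f"{inference_time_us:.0f} us -> {throughput:.4f} inf/s")
# 25261 us -> 39.5867 inf/s  (perf.yaml: 39.58671469854717)
# 20585 us -> 48.5791 inf/s  (perf.yaml: 48.57906242409521)
```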
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.ffnet.test_utils import run_test_off_target_numerical +from qai_hub_models.models.ffnet_54s.demo import main as demo_main +from qai_hub_models.models.ffnet_54s.model import FFNet54S +from qai_hub_models.utils.testing import skip_clone_repo_check + + +@skip_clone_repo_check +def test_off_target_numerical(): + run_test_off_target_numerical(FFNet54S, "segmentation_ffnet54S_dBBB_mobile") + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/ffnet_54s_quantized/README.md b/qai_hub_models/models/ffnet_54s_quantized/README.md new file mode 100644 index 00000000..d978a2e7 --- /dev/null +++ b/qai_hub_models/models/ffnet_54s_quantized/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [FFNet-54S-Quantized: Semantic segmentation for automotive street scenes](https://aihub.qualcomm.com/models/ffnet_54s_quantized) + +FFNet-54S-Quantized is a "fuss-free network" that segments street scene images with per-pixel classes like road, sidewalk, and pedestrian. Trained on the Cityscapes dataset. + +This is based on the implementation of FFNet-54S-Quantized found +[here](https://github.com/Qualcomm-AI-research/FFNet). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/ffnet_54s_quantized). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.ffnet_54s_quantized.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.ffnet_54s_quantized.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of FFNet-54S-Quantized can be found + [here](https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE). + + +## References +* [Simple and Efficient Architectures for Semantic Segmentation](https://arxiv.org/abs/2206.08236) +* [Source Model Implementation](https://github.com/Qualcomm-AI-research/FFNet) diff --git a/qai_hub_models/models/ffnet_54s_quantized/__init__.py b/qai_hub_models/models/ffnet_54s_quantized/__init__.py new file mode 100644 index 00000000..3e90e433 --- /dev/null +++ b/qai_hub_models/models/ffnet_54s_quantized/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
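The FFNet-54S-Quantized README above says "Once installed, run the following simple CLI demo" but does not list an install command. By analogy with the non-quantized variants in this diff, the missing step is presumably the pip extra named after the model directory; this is inferred from the surrounding READMEs, not stated for the quantized model (the FFNet-78S-Quantized README later in this diff has the same gap):

```bash
pip install "qai_hub_models[ffnet_54s_quantized]"
```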
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.app import ( # noqa: F401 + CityscapesSegmentationApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import FFNet54SQuantizable as Model # noqa: F401 diff --git a/qai_hub_models/models/ffnet_54s_quantized/demo.py b/qai_hub_models/models/ffnet_54s_quantized/demo.py new file mode 100644 index 00000000..f6fd96a2 --- /dev/null +++ b/qai_hub_models/models/ffnet_54s_quantized/demo.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.demo import ( + cityscapes_segmentation_demo, +) +from qai_hub_models.models.ffnet_54s_quantized.model import ( + MODEL_ID, + FFNet54SQuantizable, +) + + +def main(is_test: bool = False): + cityscapes_segmentation_demo(FFNet54SQuantizable, MODEL_ID, is_test=is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_54s_quantized/export.py b/qai_hub_models/models/ffnet_54s_quantized/export.py new file mode 100644 index 00000000..e759e283 --- /dev/null +++ b/qai_hub_models/models/ffnet_54s_quantized/export.py @@ -0,0 +1,204 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.ffnet_54s_quantized import Model +from qai_hub_models.utils.args import ( + TargetRuntime, + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) +from qai_hub_models.utils.qnn_helpers import get_qnn_inputs + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. 
+ + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "ffnet_54s_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "ffnet_54s_quantized", + "FFNet-54S-Quantized", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = model.convert_to_hub_source_model( + target_runtime, output_path, input_spec + ) + if target_runtime == TargetRuntime.TFLITE: + quant_calibration_data = None + else: + quant_calibration_data = model.get_calibration_data(target_runtime, input_spec) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + calibration_data=quant_calibration_data, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." 
+ ) + sample_inputs = model.sample_inputs(input_spec) + hub_inputs = sample_inputs + if target_runtime == TargetRuntime.QNN: + hub_inputs = get_qnn_inputs(compile_job, sample_inputs) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_54s_quantized/info.yaml b/qai_hub_models/models/ffnet_54s_quantized/info.yaml new file mode 100644 index 00000000..a97c6bbf --- /dev/null +++ b/qai_hub_models/models/ffnet_54s_quantized/info.yaml @@ -0,0 +1,38 @@ +name: FFNet-54S-Quantized +# id must match with the model dir name in qai_hub_models +id: ffnet_54s_quantized +status: public +headline: Semantic segmentation for automotive street scenes. +domain: Computer Vision +description: FFNet-54S-Quantized is a "fuss-free network" that segments street scene + images with per-pixel classes like road, sidewalk, and pedestrian. Trained on the + Cityscapes dataset. +use_case: Semantic Segmentation +tags: + - quantized + - real-time +research_paper: https://arxiv.org/abs/2206.08236 +research_paper_title: Simple and Efficient Architectures for Semantic Segmentation +license: https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE +source_repo: https://github.com/Qualcomm-AI-research/FFNet +technical_details: + Model checkpoint: ffnet54S_dBBB_cityscapes_state_dict_quarts + Input resolution: 2048x1024 + Number of parameters: 18.0M + Model size: 17.5 MB +applicable_scenarios: + - Automotive + - Autonomous Driving + - Camera +related_models: + - ffnet_54s + - ffnet_40s_quantized + - ffnet_78s_quantized +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - cityscapes diff --git a/qai_hub_models/models/ffnet_54s_quantized/model.py b/qai_hub_models/models/ffnet_54s_quantized/model.py new file mode 100644 index 00000000..f68a37fa --- /dev/null +++ b/qai_hub_models/models/ffnet_54s_quantized/model.py @@ -0,0 +1,30 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
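The compile options above force channel-last I/O (`--force_channel_last_input image`, `--force_channel_last_output output_0`), and the helpers `transpose_channel_first_to_last` / `transpose_channel_last_to_first` bridge that with PyTorch's channel-first tensors. Their internals are not part of this diff; the sketch below only illustrates the layout change they are presumably responsible for:

```python
import numpy as np

# PyTorch-style batch for the "image" input: (N, C, H, W) at the Cityscapes resolution.
nchw = np.zeros((1, 3, 1024, 2048), dtype=np.float32)

# Channel-last layout expected on-device after --force_channel_last_input: (N, H, W, C).
nhwc = nchw.transpose(0, 2, 3, 1)
assert nhwc.shape == (1, 1024, 2048, 3)

# On-device outputs come back channel-last and are transposed back
# before being compared against torch_inference results.
```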
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from qai_hub_models.models._shared.ffnet_quantized.model import FFNetQuantizable +from qai_hub_models.models.ffnet_54s.model import FFNet54S +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +DEFAULT_ENCODINGS = "encodings.json" + + +class FFNet54SQuantizable(FFNetQuantizable, FFNet54S): + @classmethod + def from_pretrained( + cls, + aimet_encodings: str | None = "DEFAULT", + ) -> FFNet54SQuantizable: + return FFNetQuantizable.from_pretrained.__func__( + cls, "segmentation_ffnet54S_dBBB_mobile", aimet_encodings=aimet_encodings + ) + + @classmethod + def default_aimet_encodings(cls) -> str: + return CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_ENCODINGS + ).fetch() diff --git a/qai_hub_models/models/ffnet_54s_quantized/perf.yaml b/qai_hub_models/models/ffnet_54s_quantized/perf.yaml new file mode 100644 index 00000000..4e723a91 --- /dev/null +++ b/qai_hub_models/models/ffnet_54s_quantized/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: FFNet-54S-Quantized + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 7130.0 + throughput: 140.25245441795232 + estimated_peak_memory_range: + min: 643072 + max: 23970880 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 118 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 118 + job_id: jep2r9wmg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:19:49.268425Z' diff --git a/qai_hub_models/models/ffnet_54s_quantized/test.py b/qai_hub_models/models/ffnet_54s_quantized/test.py new file mode 100644 index 00000000..0b5b9132 --- /dev/null +++ b/qai_hub_models/models/ffnet_54s_quantized/test.py @@ -0,0 +1,21 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
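The `model.py` files in this diff construct subclasses via `Parent.from_pretrained.__func__(cls, ...)`. A minimal, self-contained illustration of that idiom (not repository code): accessing a classmethod through the class yields a bound method, and `.__func__` recovers the underlying function so it can be called with the subclass explicitly bound as `cls`:

```python
class Parent:
    @classmethod
    def from_pretrained(cls, variant: str):
        obj = cls.__new__(cls)     # builds whichever class was passed as cls
        obj.variant = variant
        return obj


class Child(Parent):
    @classmethod
    def from_pretrained(cls):      # narrower signature than the parent's
        # Re-invoke the parent's underlying function with this subclass as cls.
        return Parent.from_pretrained.__func__(cls, "child_variant")


assert isinstance(Child.from_pretrained(), Child)
```

In the repository files this apparently lets, for example, `FFNet54SQuantizable` reuse `FFNetQuantizable`'s loading logic while still returning an instance of the subclass.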
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.ffnet.test_utils import run_test_off_target_numerical +from qai_hub_models.models.ffnet_54s_quantized.demo import main as demo_main +from qai_hub_models.models.ffnet_54s_quantized.model import FFNet54SQuantizable +from qai_hub_models.utils.testing import skip_clone_repo_check + + +@skip_clone_repo_check +def test_off_target_numerical(): + run_test_off_target_numerical( + FFNet54SQuantizable, + "segmentation_ffnet54S_dBBB_mobile", + relax_numerics=True, + ) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/ffnet_78s/README.md b/qai_hub_models/models/ffnet_78s/README.md new file mode 100644 index 00000000..016fde5d --- /dev/null +++ b/qai_hub_models/models/ffnet_78s/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [FFNet-78S: Semantic segmentation for automotive street scenes](https://aihub.qualcomm.com/models/ffnet_78s) + +FFNet-78S is a "fuss-free network" that segments street scene images with per-pixel classes like road, sidewalk, and pedestrian. Trained on the Cityscapes dataset. + +This is based on the implementation of FFNet-78S found +[here](https://github.com/Qualcomm-AI-research/FFNet). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/ffnet_78s). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[ffnet_78s]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.ffnet_78s.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.ffnet_78s.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of FFNet-78S can be found + [here](https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE). + + +## References +* [Simple and Efficient Architectures for Semantic Segmentation](https://arxiv.org/abs/2206.08236) +* [Source Model Implementation](https://github.com/Qualcomm-AI-research/FFNet) diff --git a/qai_hub_models/models/ffnet_78s/__init__.py b/qai_hub_models/models/ffnet_78s/__init__.py new file mode 100644 index 00000000..69488c63 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.app import ( # noqa: F401 + CityscapesSegmentationApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import FFNet78S as Model # noqa: F401 diff --git a/qai_hub_models/models/ffnet_78s/demo.py b/qai_hub_models/models/ffnet_78s/demo.py new file mode 100644 index 00000000..33feec14 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s/demo.py @@ -0,0 +1,16 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.demo import ( + cityscapes_segmentation_demo, +) +from qai_hub_models.models.ffnet_78s.model import MODEL_ID, FFNet78S + + +def main(is_test: bool = False): + cityscapes_segmentation_demo(FFNet78S, MODEL_ID, is_test=is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_78s/export.py b/qai_hub_models/models/ffnet_78s/export.py new file mode 100644 index 00000000..01c22165 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s/export.py @@ -0,0 +1,194 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.ffnet_78s import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. 
+ Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "ffnet_78s" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "ffnet_78s", + "FFNet-78S", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_78s/info.yaml b/qai_hub_models/models/ffnet_78s/info.yaml new file mode 100644 index 00000000..56fc5f75 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s/info.yaml @@ -0,0 +1,36 @@ +name: FFNet-78S +# id must match with the model dir name in qai_hub_models +id: ffnet_78s +status: public +headline: Semantic segmentation for automotive street scenes. +domain: Computer Vision +description: FFNet-78S is a "fuss-free network" that segments street scene images + with per-pixel classes like road, sidewalk, and pedestrian. Trained on the Cityscapes + dataset. +use_case: Semantic Segmentation +tags: [] +research_paper: https://arxiv.org/abs/2206.08236 +research_paper_title: Simple and Efficient Architectures for Semantic Segmentation +license: https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE +source_repo: https://github.com/Qualcomm-AI-research/FFNet +technical_details: + Model checkpoint: ffnet78S_dBBB_cityscapes_state_dict_quarts + Input resolution: 2048x1024 + Number of parameters: 27.5M + Model size: 105 MB +applicable_scenarios: + - Automotive + - Autonomous Driving + - Camera +related_models: + - ffnet_40s + - ffnet_54s + - unet_segmentation +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - cityscapes diff --git a/qai_hub_models/models/ffnet_78s/model.py b/qai_hub_models/models/ffnet_78s/model.py new file mode 100644 index 00000000..3d799c04 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s/model.py @@ -0,0 +1,15 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
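The `info.yaml` files in this diff list both a parameter count and a "Model size". As a rough, hedged sanity check (assuming about 4 bytes per weight for the floating-point checkpoints, about 1 byte per weight for the 8-bit quantized one, and ignoring non-weight overhead), the two fields are consistent and the listed sizes line up with mebibytes:

```python
MB = 1024 * 1024

print(f"FFNet-78S:           {27.5e6 * 4 / MB:6.1f} MB (listed: 105 MB)")
print(f"FFNet-54S:           {18.0e6 * 4 / MB:6.1f} MB (listed: 68.8 MB)")
print(f"FFNet-54S-Quantized: {18.0e6 * 1 / MB:6.1f} MB (listed: 17.5 MB)")
```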
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from qai_hub_models.models._shared.ffnet.model import FFNet + +MODEL_ID = __name__.split(".")[-2] + + +class FFNet78S(FFNet): + @classmethod + def from_pretrained(cls) -> FFNet78S: + return FFNet.from_pretrained.__func__(cls, "segmentation_ffnet78S_dBBB_mobile") diff --git a/qai_hub_models/models/ffnet_78s/perf.yaml b/qai_hub_models/models/ffnet_78s/perf.yaml new file mode 100644 index 00000000..2db45bd8 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: FFNet-78S + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 29611.0 + throughput: 33.77123366316572 + estimated_peak_memory_range: + min: 2596864 + max: 5429112 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 149 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 149 + job_id: jep2r9emg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 24120.0 + throughput: 41.459369817578775 + estimated_peak_memory_range: + min: 2215936 + max: 32957000 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 236 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 236 + job_id: jqpyojm45 + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:36:14.251855Z' diff --git a/qai_hub_models/models/ffnet_78s/requirements.txt b/qai_hub_models/models/ffnet_78s/requirements.txt new file mode 100644 index 00000000..73ad8aa8 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s/requirements.txt @@ -0,0 +1 @@ +scikit-image>=0.21.0 diff --git a/qai_hub_models/models/ffnet_78s/test.py b/qai_hub_models/models/ffnet_78s/test.py new file mode 100644 index 00000000..4646a449 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s/test.py @@ -0,0 +1,18 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.ffnet.test_utils import run_test_off_target_numerical +from qai_hub_models.models.ffnet_78s.demo import main as demo_main +from qai_hub_models.models.ffnet_78s.model import FFNet78S +from qai_hub_models.utils.testing import skip_clone_repo_check + + +@skip_clone_repo_check +def test_off_target_numerical(): + run_test_off_target_numerical(FFNet78S, "segmentation_ffnet78S_dBBB_mobile") + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/ffnet_78s_lowres/README.md b/qai_hub_models/models/ffnet_78s_lowres/README.md new file mode 100644 index 00000000..5b8d5e2d --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_lowres/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [FFNet-78S-LowRes: Semantic segmentation for automotive street scenes](https://aihub.qualcomm.com/models/ffnet_78s_lowres) + +FFNet-78S-LowRes is a "fuss-free network" that segments street scene images with per-pixel classes like road, sidewalk, and pedestrian. Trained on the Cityscapes dataset. + +This is based on the implementation of FFNet-78S-LowRes found +[here](https://github.com/Qualcomm-AI-research/FFNet). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/ffnet_78s_lowres). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[ffnet_78s_lowres]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.ffnet_78s_lowres.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.ffnet_78s_lowres.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of FFNet-78S-LowRes can be found + [here](https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE). + + +## References +* [Simple and Efficient Architectures for Semantic Segmentation](https://arxiv.org/abs/2206.08236) +* [Source Model Implementation](https://github.com/Qualcomm-AI-research/FFNet) diff --git a/qai_hub_models/models/ffnet_78s_lowres/__init__.py b/qai_hub_models/models/ffnet_78s_lowres/__init__.py new file mode 100644 index 00000000..bc064a46 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_lowres/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
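Each model directory in this diff ships a small `test.py` with the same shape: an off-target numerical check (marked `@skip_clone_repo_check`) plus a demo smoke test. A hedged way to run one of them locally, assuming `pytest` is the intended runner (the repository's test configuration is not part of this diff):

```bash
# Run only the FFNet-78S checks; -v prints each test name as it runs.
pytest qai_hub_models/models/ffnet_78s/test.py -v
```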
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.app import ( # noqa: F401 + CityscapesSegmentationApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import FFNet78SLowRes as Model # noqa: F401 diff --git a/qai_hub_models/models/ffnet_78s_lowres/demo.py b/qai_hub_models/models/ffnet_78s_lowres/demo.py new file mode 100644 index 00000000..7c1869f2 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_lowres/demo.py @@ -0,0 +1,16 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.demo import ( + cityscapes_segmentation_demo, +) +from qai_hub_models.models.ffnet_78s_lowres.model import MODEL_ID, FFNet78SLowRes + + +def main(is_test: bool = False): + cityscapes_segmentation_demo(FFNet78SLowRes, MODEL_ID, is_test=is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_78s_lowres/export.py b/qai_hub_models/models/ffnet_78s_lowres/export.py new file mode 100644 index 00000000..78c0431d --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_lowres/export.py @@ -0,0 +1,194 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.ffnet_78s_lowres import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. 
+ Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "ffnet_78s_lowres" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "ffnet_78s_lowres", + "FFNet-78S-LowRes", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_78s_lowres/info.yaml b/qai_hub_models/models/ffnet_78s_lowres/info.yaml new file mode 100644 index 00000000..9a4a4992 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_lowres/info.yaml @@ -0,0 +1,37 @@ +name: FFNet-78S-LowRes +# id must match with the model dir name in qai_hub_models +id: ffnet_78s_lowres +status: public +headline: Semantic segmentation for automotive street scenes. +domain: Computer Vision +description: FFNet-78S-LowRes is a "fuss-free network" that segments street scene + images with per-pixel classes like road, sidewalk, and pedestrian. Trained on the + Cityscapes dataset. +use_case: Semantic Segmentation +tags: + - real-time +research_paper: https://arxiv.org/abs/2206.08236 +research_paper_title: Simple and Efficient Architectures for Semantic Segmentation +license: https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE +source_repo: https://github.com/Qualcomm-AI-research/FFNet +technical_details: + Model checkpoint: ffnet78S_BCC_cityscapes_state_dict_quarts_pre_down + Input resolution: 1024x512 + Number of parameters: 26.8M + Model size: 102 MB +applicable_scenarios: + - Automotive + - Autonomous Driving + - Camera +related_models: + - ffnet_122ns_lowres + - ffnet_54s + - unet_segmentation +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - cityscapes diff --git a/qai_hub_models/models/ffnet_78s_lowres/model.py b/qai_hub_models/models/ffnet_78s_lowres/model.py new file mode 100644 index 00000000..bf819182 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_lowres/model.py @@ -0,0 +1,17 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from qai_hub_models.models._shared.ffnet.model import FFNetLowRes + +MODEL_ID = __name__.split(".")[-2] + + +class FFNet78SLowRes(FFNetLowRes): + @classmethod + def from_pretrained(cls) -> FFNet78SLowRes: + return FFNetLowRes.from_pretrained.__func__( + cls, "segmentation_ffnet78S_BCC_mobile_pre_down" + ) diff --git a/qai_hub_models/models/ffnet_78s_lowres/perf.yaml b/qai_hub_models/models/ffnet_78s_lowres/perf.yaml new file mode 100644 index 00000000..4fceeb4f --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_lowres/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: FFNet-78S-LowRes + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 10833.0 + throughput: 92.31053263177328 + estimated_peak_memory_range: + min: 671744 + max: 3588808 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 149 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 149 + job_id: j0pxl6d9p + job_status: Passed + torchscript_onnx_qnn: + inference_time: 11410.0 + throughput: 87.64241893076249 + estimated_peak_memory_range: + min: 565248 + max: 42397168 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 237 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 237 + job_id: jegnzm7mg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:29:56.988054Z' diff --git a/qai_hub_models/models/ffnet_78s_lowres/requirements.txt b/qai_hub_models/models/ffnet_78s_lowres/requirements.txt new file mode 100644 index 00000000..73ad8aa8 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_lowres/requirements.txt @@ -0,0 +1 @@ +scikit-image>=0.21.0 diff --git a/qai_hub_models/models/ffnet_78s_lowres/test.py b/qai_hub_models/models/ffnet_78s_lowres/test.py new file mode 100644 index 00000000..9933e75d --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_lowres/test.py @@ -0,0 +1,20 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
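FFNet-78S-LowRes runs at 1024x512 instead of the 2048x1024 used by FFNet-78S, i.e. a quarter of the pixels per frame. Comparing the TFLite `inference_time` entries in the two `perf.yaml` files above gives roughly a 2.7x speedup rather than a full 4x, which is plausible since not every stage of the network scales linearly with resolution; this is an interpretation of the published numbers, not a claim made in this diff:

```python
full_px, low_px = 2048 * 1024, 1024 * 512
t_full_us, t_low_us = 29611.0, 10833.0   # TFLite entries for FFNet-78S and FFNet-78S-LowRes

print(full_px / low_px)      # 4.0   (pixel-count ratio)
print(t_full_us / t_low_us)  # ~2.73 (measured latency ratio)
```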
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.ffnet.test_utils import run_test_off_target_numerical +from qai_hub_models.models.ffnet_78s_lowres.demo import main as demo_main +from qai_hub_models.models.ffnet_78s_lowres.model import FFNet78SLowRes +from qai_hub_models.utils.testing import skip_clone_repo_check + + +@skip_clone_repo_check +def test_off_target_numerical(): + run_test_off_target_numerical( + FFNet78SLowRes, "segmentation_ffnet78S_BCC_mobile_pre_down" + ) + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/ffnet_78s_quantized/README.md b/qai_hub_models/models/ffnet_78s_quantized/README.md new file mode 100644 index 00000000..74185134 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_quantized/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [FFNet-78S-Quantized: Semantic segmentation for automotive street scenes](https://aihub.qualcomm.com/models/ffnet_78s_quantized) + +FFNet-78S-Quantized is a "fuss-free network" that segments street scene images with per-pixel classes like road, sidewalk, and pedestrian. Trained on the Cityscapes dataset. + +This is based on the implementation of FFNet-78S-Quantized found +[here](https://github.com/Qualcomm-AI-research/FFNet). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/ffnet_78s_quantized). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.ffnet_78s_quantized.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.ffnet_78s_quantized.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of FFNet-78S-Quantized can be found + [here](https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE). + + +## References +* [Simple and Efficient Architectures for Semantic Segmentation](https://arxiv.org/abs/2206.08236) +* [Source Model Implementation](https://github.com/Qualcomm-AI-research/FFNet) diff --git a/qai_hub_models/models/ffnet_78s_quantized/__init__.py b/qai_hub_models/models/ffnet_78s_quantized/__init__.py new file mode 100644 index 00000000..ffb78be5 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_quantized/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.app import ( # noqa: F401 + CityscapesSegmentationApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import FFNet78SQuantizable as Model # noqa: F401 diff --git a/qai_hub_models/models/ffnet_78s_quantized/demo.py b/qai_hub_models/models/ffnet_78s_quantized/demo.py new file mode 100644 index 00000000..7641d65c --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_quantized/demo.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.cityscapes_segmentation.demo import ( + cityscapes_segmentation_demo, +) +from qai_hub_models.models.ffnet_78s_quantized.model import ( + MODEL_ID, + FFNet78SQuantizable, +) + + +def main(is_test: bool = False): + cityscapes_segmentation_demo(FFNet78SQuantizable, MODEL_ID, is_test=is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_78s_quantized/export.py b/qai_hub_models/models/ffnet_78s_quantized/export.py new file mode 100644 index 00000000..6bd05626 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_quantized/export.py @@ -0,0 +1,204 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.ffnet_78s_quantized import Model +from qai_hub_models.utils.args import ( + TargetRuntime, + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) +from qai_hub_models.utils.qnn_helpers import get_qnn_inputs + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. 
+ + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "ffnet_78s_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "ffnet_78s_quantized", + "FFNet-78S-Quantized", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = model.convert_to_hub_source_model( + target_runtime, output_path, input_spec + ) + if target_runtime == TargetRuntime.TFLITE: + quant_calibration_data = None + else: + quant_calibration_data = model.get_calibration_data(target_runtime, input_spec) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + calibration_data=quant_calibration_data, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." 
+ ) + sample_inputs = model.sample_inputs(input_spec) + hub_inputs = sample_inputs + if target_runtime == TargetRuntime.QNN: + hub_inputs = get_qnn_inputs(compile_job, sample_inputs) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/ffnet_78s_quantized/info.yaml b/qai_hub_models/models/ffnet_78s_quantized/info.yaml new file mode 100644 index 00000000..cdb2f813 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_quantized/info.yaml @@ -0,0 +1,38 @@ +name: FFNet-78S-Quantized +# id must match with the model dir name in qai_hub_models +id: ffnet_78s_quantized +status: public +headline: Semantic segmentation for automotive street scenes. +domain: Computer Vision +description: FFNet-78S-Quantized is a "fuss-free network" that segments street scene + images with per-pixel classes like road, sidewalk, and pedestrian. Trained on the + Cityscapes dataset. +use_case: Semantic Segmentation +tags: + - quantized + - real-time +research_paper: https://arxiv.org/abs/2206.08236 +research_paper_title: Simple and Efficient Architectures for Semantic Segmentation +license: https://github.com/Qualcomm-AI-research/FFNet/blob/master/LICENSE +source_repo: https://github.com/Qualcomm-AI-research/FFNet +technical_details: + Model checkpoint: ffnet78S_dBBB_cityscapes_state_dict_quarts + Input resolution: 2048x1024 + Number of parameters: 27.5M + Model size: 26.7 MB +applicable_scenarios: + - Automotive + - Autonomous Driving + - Camera +related_models: + - ffnet_78s + - ffnet_40s_quantized + - ffnet_54s_quantized +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - cityscapes diff --git a/qai_hub_models/models/ffnet_78s_quantized/model.py b/qai_hub_models/models/ffnet_78s_quantized/model.py new file mode 100644 index 00000000..5437093d --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_quantized/model.py @@ -0,0 +1,30 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
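> Editor's note: the auto-generated export.py above can also be driven from Python rather than the CLI. A minimal sketch is shown below; it assumes Qualcomm AI Hub credentials are already configured (otherwise `export_model` falls back to `export_without_hub_access`), and the `output_dir` value is only an example.

```python
# Hypothetical programmatic use of the generated export script (sketch only).
from qai_hub_models.models.ffnet_78s_quantized.export import export_model

jobs = export_model(
    device="Samsung Galaxy S23",               # default device from the signature above
    skip_profiling=True,                       # compile and on-device inference only
    skip_inferencing=False,
    output_dir="build/ffnet_78s_quantized",    # example path
)
print(jobs)  # (CompileJob, None, InferenceJob) when Hub access is available
```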
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from qai_hub_models.models._shared.ffnet_quantized.model import FFNetQuantizable +from qai_hub_models.models.ffnet_78s.model import FFNet78S +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +DEFAULT_ENCODINGS = "encodings.json" + + +class FFNet78SQuantizable(FFNetQuantizable, FFNet78S): + @classmethod + def from_pretrained( + cls, + aimet_encodings: str | None = "DEFAULT", + ) -> FFNet78SQuantizable: + return FFNetQuantizable.from_pretrained.__func__( + cls, "segmentation_ffnet78S_dBBB_mobile", aimet_encodings=aimet_encodings + ) + + @classmethod + def default_aimet_encodings(cls) -> str: + return CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_ENCODINGS + ).fetch() diff --git a/qai_hub_models/models/ffnet_78s_quantized/perf.yaml b/qai_hub_models/models/ffnet_78s_quantized/perf.yaml new file mode 100644 index 00000000..17c2c4c9 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_quantized/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: FFNet-78S-Quantized + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 8362.0 + throughput: 119.58861516383641 + estimated_peak_memory_range: + min: 655360 + max: 2403480 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 154 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 154 + job_id: j1gly2oe5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:13:29.270963Z' diff --git a/qai_hub_models/models/ffnet_78s_quantized/test.py b/qai_hub_models/models/ffnet_78s_quantized/test.py new file mode 100644 index 00000000..1c3c8a51 --- /dev/null +++ b/qai_hub_models/models/ffnet_78s_quantized/test.py @@ -0,0 +1,21 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
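> Editor's note on the perf.yaml just above: the skipped QNN entry uses the quoted scalar `'null'`, which YAML parses as the string "null" rather than a real null, so downstream tooling has to treat it as a sentinel string. A small illustration, assuming PyYAML is available:

```python
# Illustration only: quoted 'null' in YAML is a string, not None.
import yaml  # PyYAML, assumed available

doc = yaml.safe_load("inference_time: 'null'\nthroughput: null\n")
print(doc["inference_time"], type(doc["inference_time"]))  # null <class 'str'>
print(doc["throughput"])                                   # None
```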
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.ffnet.test_utils import run_test_off_target_numerical +from qai_hub_models.models.ffnet_78s_quantized.demo import main as demo_main +from qai_hub_models.models.ffnet_78s_quantized.model import FFNet78SQuantizable +from qai_hub_models.utils.testing import skip_clone_repo_check + + +@skip_clone_repo_check +def test_off_target_numerical(): + run_test_off_target_numerical( + FFNet78SQuantizable, + "segmentation_ffnet78S_dBBB_mobile", + relax_numerics=True, + ) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/googlenet/README.md b/qai_hub_models/models/googlenet/README.md new file mode 100644 index 00000000..3ceaaa5a --- /dev/null +++ b/qai_hub_models/models/googlenet/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [GoogLeNet: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/googlenet) + +GoogLeNet is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of GoogLeNet found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/googlenet.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/googlenet). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.googlenet.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.googlenet.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of GoogLeNet can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). + + +## References +* [Going Deeper with Convolutions](https://arxiv.org/abs/1409.4842) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/googlenet.py) diff --git a/qai_hub_models/models/googlenet/__init__.py b/qai_hub_models/models/googlenet/__init__.py new file mode 100644 index 00000000..eb798baf --- /dev/null +++ b/qai_hub_models/models/googlenet/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import GoogLeNet as Model # noqa: F401 diff --git a/qai_hub_models/models/googlenet/demo.py b/qai_hub_models/models/googlenet/demo.py new file mode 100644 index 00000000..7be6a572 --- /dev/null +++ b/qai_hub_models/models/googlenet/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.googlenet.model import GoogLeNet + + +def main(is_test: bool = False): + imagenet_demo(GoogLeNet, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/googlenet/export.py b/qai_hub_models/models/googlenet/export.py new file mode 100644 index 00000000..42660252 --- /dev/null +++ b/qai_hub_models/models/googlenet/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.googlenet import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. 
+ skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "googlenet" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "googlenet", + "GoogLeNet", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/googlenet/info.yaml b/qai_hub_models/models/googlenet/info.yaml new file mode 100644 index 00000000..047a032f --- /dev/null +++ b/qai_hub_models/models/googlenet/info.yaml @@ -0,0 +1,39 @@ +name: GoogLeNet +# id must match with the model dir name in qai_hub_models +id: googlenet +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: GoogLeNet is a machine learning model that can classify images from the + Imagenet dataset. It can also be used as a backbone in building more complex models + for specific use cases. +use_case: Image Classification +tags: [] +research_paper: https://arxiv.org/abs/1409.4842 +research_paper_title: Going Deeper with Convolutions +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/googlenet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 6.62M + Model size: 25.3 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - squeezenet1_1 +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/googlenet/model.py b/qai_hub_models/models/googlenet/model.py new file mode 100644 index 00000000..4f1dcd5d --- /dev/null +++ b/qai_hub_models/models/googlenet/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
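> Editor's note on the channel-layout handling in the export scripts above: the PyTorch model is traced with NCHW inputs, while the compile options force a channel-last input (`--force_channel_last_input image_tensor`) and the sample inputs are transposed before on-device inference. A tiny illustration of that conversion; the repository's `transpose_channel_first_to_last` helper performs the equivalent step on the named sample-input dictionary:

```python
# Channel-first (PyTorch) vs. channel-last layout, illustrated.
import torch

image_tensor_nchw = torch.rand(1, 3, 224, 224)   # (N, C, H, W), per the 224x224 input resolution in info.yaml
image_tensor_nhwc = image_tensor_nchw.permute(0, 2, 3, 1).contiguous()
print(image_tensor_nhwc.shape)                   # torch.Size([1, 224, 224, 3])
```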
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class GoogLeNet(ImagenetClassifier): + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ImagenetClassifier: + net = tv_models.googlenet(weights=weights) + return cls(net) diff --git a/qai_hub_models/models/googlenet/perf.yaml b/qai_hub_models/models/googlenet/perf.yaml new file mode 100644 index 00000000..5a446a3a --- /dev/null +++ b/qai_hub_models/models/googlenet/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: GoogLeNet + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 1471.0 + throughput: 679.8096532970768 + estimated_peak_memory_range: + min: 16384 + max: 1850752 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 94 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 94 + job_id: jw568z3vg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 1808.0 + throughput: 553.0973451327434 + estimated_peak_memory_range: + min: 24576 + max: 31167584 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 156 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 156 + job_id: j1p3z14x5 + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:07:34.463888Z' diff --git a/qai_hub_models/models/googlenet/test.py b/qai_hub_models/models/googlenet/test.py new file mode 100644 index 00000000..6f0b2a66 --- /dev/null +++ b/qai_hub_models/models/googlenet/test.py @@ -0,0 +1,23 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
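> Editor's note: a short local-inference sketch for the GoogLeNet wrapper defined above. The direct forward call and the 1000-class output shape are assumptions based on the underlying torchvision classifier and the 224x224 input resolution listed in info.yaml.

```python
# Minimal local-inference sketch; not part of the repository.
import torch

from qai_hub_models.models.googlenet import Model as GoogLeNet

model = GoogLeNet.from_pretrained()              # torchvision IMAGENET1K_V1 weights by default
model.eval()
with torch.no_grad():
    logits = model(torch.rand(1, 3, 224, 224))   # assumed NCHW float input
print(logits.shape)                              # expected: torch.Size([1, 1000])
```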
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.googlenet.demo import main as demo_main +from qai_hub_models.models.googlenet.model import MODEL_ID, GoogLeNet + + +def test_task(): + run_imagenet_classifier_test(GoogLeNet.from_pretrained(), MODEL_ID) + + +def test_trace(): + run_imagenet_classifier_trace_test(GoogLeNet.from_pretrained()) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/googlenet_quantized/README.md b/qai_hub_models/models/googlenet_quantized/README.md new file mode 100644 index 00000000..fd3b845b --- /dev/null +++ b/qai_hub_models/models/googlenet_quantized/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [GoogLeNetQuantized: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/googlenet_quantized) + +GoogLeNet is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of GoogLeNetQuantized found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/googlenet.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/googlenet_quantized). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.googlenet_quantized.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.googlenet_quantized.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of GoogLeNetQuantized can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). + + +## References +* [Going Deeper with Convolutions](https://arxiv.org/abs/1409.4842) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/googlenet.py) diff --git a/qai_hub_models/models/googlenet_quantized/__init__.py b/qai_hub_models/models/googlenet_quantized/__init__.py new file mode 100644 index 00000000..4470c45f --- /dev/null +++ b/qai_hub_models/models/googlenet_quantized/__init__.py @@ -0,0 +1,11 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. 
All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) +from qai_hub_models.models.googlenet_quantized.model import MODEL_ID # noqa: F401 +from qai_hub_models.models.googlenet_quantized.model import ( # noqa: F401 + GoogLeNetQuantizable as Model, +) diff --git a/qai_hub_models/models/googlenet_quantized/demo.py b/qai_hub_models/models/googlenet_quantized/demo.py new file mode 100644 index 00000000..5f363507 --- /dev/null +++ b/qai_hub_models/models/googlenet_quantized/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.googlenet_quantized.model import GoogLeNetQuantizable + + +def main(is_test: bool = False): + imagenet_demo(GoogLeNetQuantizable, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/googlenet_quantized/export.py b/qai_hub_models/models/googlenet_quantized/export.py new file mode 100644 index 00000000..cf2be9a3 --- /dev/null +++ b/qai_hub_models/models/googlenet_quantized/export.py @@ -0,0 +1,199 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.googlenet_quantized import Model +from qai_hub_models.utils.args import ( + TargetRuntime, + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) +from qai_hub_models.utils.qnn_helpers import get_qnn_inputs + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. 
+ + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "googlenet_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "googlenet_quantized", + "GoogLeNetQuantized", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = model.convert_to_hub_source_model( + target_runtime, output_path, input_spec + ) + if target_runtime == TargetRuntime.TFLITE: + quant_calibration_data = None + else: + quant_calibration_data = model.get_calibration_data(target_runtime, input_spec) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + calibration_data=quant_calibration_data, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." 
+ ) + sample_inputs = model.sample_inputs(input_spec) + hub_inputs = sample_inputs + if target_runtime == TargetRuntime.QNN: + hub_inputs = get_qnn_inputs(compile_job, sample_inputs) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/googlenet_quantized/info.yaml b/qai_hub_models/models/googlenet_quantized/info.yaml new file mode 100644 index 00000000..c7f1e45c --- /dev/null +++ b/qai_hub_models/models/googlenet_quantized/info.yaml @@ -0,0 +1,40 @@ +name: GoogLeNetQuantized +# id must match with the model dir name in qai_hub_models +id: googlenet_quantized +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: GoogLeNet is a machine learning model that can classify images from the + Imagenet dataset. It can also be used as a backbone in building more complex models + for specific use cases. +use_case: Image Classification +tags: + - quantized +research_paper: https://arxiv.org/abs/1409.4842 +research_paper_title: Going Deeper with Convolutions +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/googlenet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 6.62M + Model size: 16.0 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - squeezenet1_1 +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/googlenet_quantized/model.py b/qai_hub_models/models/googlenet_quantized/model.py new file mode 100644 index 00000000..59323b6d --- /dev/null +++ b/qai_hub_models/models/googlenet_quantized/model.py @@ -0,0 +1,76 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
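> Editor's note: the quantized wrappers in this change (FFNet78SQuantizable above, GoogLeNetQuantizable in the model.py that follows) expose an `aimet_encodings` argument on `from_pretrained`. A minimal loading sketch under the semantics documented in that model.py; it assumes the AIMET dependencies are installed, and "my_encodings.json" is a hypothetical local path:

```python
# Loading a quantized wrapper with different encodings sources (sketch only).
from qai_hub_models.models.googlenet_quantized import Model as GoogLeNetQuantizable

model = GoogLeNetQuantizable.from_pretrained()                         # "DEFAULT": fetch pre-computed encodings
# model = GoogLeNetQuantizable.from_pretrained("my_encodings.json")    # hypothetical local encodings file
# model = GoogLeNetQuantizable.from_pretrained(aimet_encodings=None)   # skip loading encodings entirely
```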
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +# isort: off +# This verifies aimet is installed, and this must be included first. +from qai_hub_models.utils.quantization_aimet import ( + AIMETQuantizableMixin, +) + +# isort: on + +import torch +from aimet_torch.cross_layer_equalization import equalize_model +from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim + +from qai_hub_models.models.googlenet.model import GoogLeNet +from qai_hub_models.utils.aimet.config_loader import get_per_channel_aimet_config +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +DEFAULT_ENCODINGS = "googlenet_quantized_encodings.json" + + +class GoogLeNetQuantizable(AIMETQuantizableMixin, GoogLeNet): + """GoogleNet with post train quantization support. + + Supports only 8 bit weights and activations, and only loads pre-quantized checkpoints. + Support for quantizing using your own weights & data will come at a later date.""" + + def __init__( + self, + sim_model: QuantizationSimModel, + ) -> None: + GoogLeNet.__init__(self, sim_model.model) + AIMETQuantizableMixin.__init__( + self, sim_model, needs_onnx_direct_aimet_export=True + ) + + @classmethod + def from_pretrained( + cls, + aimet_encodings: str | None = "DEFAULT", + ) -> "GoogLeNet": + """ + Parameters: + aimet_encodings: + if "DEFAULT": Loads the model with aimet encodings calibrated on imagenette. + elif None: Doesn't load any encodings. Used when computing encodings. + else: Interprets as a filepath and loads the encodings stored there. + """ + model = GoogLeNet.from_pretrained() + input_shape = model.get_input_spec()["image_tensor"][0] + + equalize_model(model, input_shape) + sim = QuantizationSimModel( + model.net, + quant_scheme="tf_enhanced", + default_param_bw=8, + default_output_bw=8, + config_file=get_per_channel_aimet_config(), + dummy_input=torch.rand(input_shape), + ) + + if aimet_encodings: + if aimet_encodings == "DEFAULT": + aimet_encodings = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_ENCODINGS + ).fetch() + load_encodings_to_sim(sim, aimet_encodings) + + sim.model.eval() + return cls(sim) diff --git a/qai_hub_models/models/googlenet_quantized/perf.yaml b/qai_hub_models/models/googlenet_quantized/perf.yaml new file mode 100644 index 00000000..78455c1d --- /dev/null +++ b/qai_hub_models/models/googlenet_quantized/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: GoogLeNetQuantized + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 1026.0 + throughput: 974.6588693957115 + estimated_peak_memory_range: + min: 20480 + max: 1771688 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 183 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 183 + job_id: j2p0m2d2g + job_status: Passed + torchscript_onnx_qnn: 
+ inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:34:34.707459Z' diff --git a/qai_hub_models/models/googlenet_quantized/test.py b/qai_hub_models/models/googlenet_quantized/test.py new file mode 100644 index 00000000..65afe84d --- /dev/null +++ b/qai_hub_models/models/googlenet_quantized/test.py @@ -0,0 +1,40 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.googlenet_quantized.demo import main as demo_main +from qai_hub_models.models.googlenet_quantized.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + GoogLeNetQuantizable, +) + + +def test_task(): + run_imagenet_classifier_test( + GoogLeNetQuantizable.from_pretrained(), + MODEL_ID, + asset_version=MODEL_ASSET_VERSION, + diff_tol=0.005, + rtol=0.02, + atol=0.2, + ) + + +def test_trace(): + run_imagenet_classifier_trace_test( + GoogLeNetQuantizable.from_pretrained(), + diff_tol=0.01, + rtol=0.02, + atol=0.2, + is_quantized=True, + ) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/hrnet_pose/README.md b/qai_hub_models/models/hrnet_pose/README.md new file mode 100644 index 00000000..bae7d57c --- /dev/null +++ b/qai_hub_models/models/hrnet_pose/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [HRNetPose: Perform accurate human pose estimation](https://aihub.qualcomm.com/models/hrnet_pose) + +HRNet performs pose estimation in high-resolution representations. + +This is based on the implementation of HRNetPose found +[here](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/hrnet_posenet). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/hrnet_pose). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[hrnet_pose]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.hrnet_pose.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. 
This can be run as follows: + +```bash +python -m qai_hub_models.models.hrnet_pose.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of HRNetPose can be found + [here](https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf). + + +## References +* [Deep High-Resolution Representation Learning for Human Pose Estimation](https://arxiv.org/abs/1902.09212) +* [Source Model Implementation](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/hrnet_posenet) diff --git a/qai_hub_models/models/hrnet_pose/__init__.py b/qai_hub_models/models/hrnet_pose/__init__.py new file mode 100644 index 00000000..7e7281fd --- /dev/null +++ b/qai_hub_models/models/hrnet_pose/__init__.py @@ -0,0 +1,8 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models.hrnet_pose.app import HRNetPoseApp # noqa: F401 + +from .model import MODEL_ID # noqa: F401 +from .model import HRNetPose as Model # noqa: F401 diff --git a/qai_hub_models/models/hrnet_pose/app.py b/qai_hub_models/models/hrnet_pose/app.py new file mode 100644 index 00000000..d4a948f5 --- /dev/null +++ b/qai_hub_models/models/hrnet_pose/app.py @@ -0,0 +1,153 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
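> Editor's note: beyond the CLI demo described in the README above, the pose app (defined in app.py just below) can be used directly from Python. This rough sketch assumes the `from_pretrained` constructor used throughout this repository, the mmpose dependency pulled in by the `hrnet_pose` extra, and a placeholder image path:

```python
# Hypothetical end-to-end usage of HRNetPoseApp (sketch only).
from PIL import Image

from qai_hub_models.models.hrnet_pose import Model as HRNetPose
from qai_hub_models.models.hrnet_pose.app import HRNetPoseApp

app = HRNetPoseApp(HRNetPose.from_pretrained())
results = app.predict_pose_keypoints(Image.open("person.jpg"))  # list of PIL images with keypoints drawn
results[0].save("person_with_keypoints.png")
```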
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable, List, Tuple + +import numpy as np +import torch +from mmpose.apis import MMPoseInferencer +from mmpose.codecs.utils import refine_keypoints +from PIL.Image import Image, fromarray + +from qai_hub_models.utils.draw import draw_points +from qai_hub_models.utils.image_processing import app_to_net_image_inputs + +# More inferencer architectures for litehrnet can be found at +# https://github.com/open-mmlab/mmpose/tree/main/configs/body_2d_keypoint/topdown_heatmap/coco +DEFAULT_INFERENCER_ARCH = "td-hm_hrnet-w32_8xb64-210e_coco-256x192" + + +def get_max_preds(batch_heatmaps): + """ + get predictions from score maps + heatmaps: numpy.ndarray([batch_size, num_joints, height, width]) + """ + assert isinstance( + batch_heatmaps, np.ndarray + ), "batch_heatmaps should be numpy.ndarray" + assert batch_heatmaps.ndim == 4, "batch_images should be 4-ndim" + + batch_size = batch_heatmaps.shape[0] + num_joints = batch_heatmaps.shape[1] + width = batch_heatmaps.shape[3] + heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1)) + idx = np.argmax(heatmaps_reshaped, 2) + maxvals = np.amax(heatmaps_reshaped, 2) + + maxvals = maxvals.reshape((batch_size, num_joints, 1)) + idx = idx.reshape((batch_size, num_joints, 1)) + + preds = np.tile(idx, (1, 1, 2)).astype(np.float32) + + preds[:, :, 0] = (preds[:, :, 0]) % width + preds[:, :, 1] = np.floor((preds[:, :, 1]) / width) + + pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2)) + pred_mask = pred_mask.astype(np.float32) + + preds *= pred_mask + return preds, maxvals + + +class HRNetPoseApp: + """ + This class consists of light-weight "app code" that is required to perform end to end inference with LiteHRNet. + + The app uses 1 model: + * LiteHRNet + + For a given image input, the app will: + * pre-process the image + * Run LiteHRNet inference + * Convert the output into a list of keypoint coordiates + """ + + def __init__( + self, + model: Callable[ + [torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor] + ], + ): + self.model = model + # Use mmpose inferencer for example preprocessing + self.inferencer = MMPoseInferencer( + DEFAULT_INFERENCER_ARCH, device=torch.device(type="cpu") + ) + self.pre_processor = self.inferencer.inferencer.model.data_preprocessor + + def predict(self, *args, **kwargs): + # See predict_pose_keypoints. + return self.predict_pose_keypoints(*args, **kwargs) + + def predict_pose_keypoints( + self, + pixel_values_or_image: torch.Tensor | np.ndarray | Image | List[Image], + raw_output=False, + ) -> np.ndarray | List[Image]: + """ + Predicts pose keypoints for a person in the image. + + Parameters: + pixel_values_or_image + PIL image(s) + or + numpy array (N H W C x uint8) or (H W C x uint8) -- both RGB channel layout + or + pyTorch tensor (N C H W x fp32, value range is [0, 1]), RGB channel layout + + raw_output: bool + See "returns" doc section for details. + + Returns: + If raw_output is true, returns: + keypoints: np.ndarray, shape [B, N, 2] + Numpy array of keypoints within the images Each keypoint is an (x, y) pair of coordinates within the image. + + Otherwise, returns: + predicted_images: List[PIL.Image] + Images with keypoints drawn. 
+ """ + # Preprocess image to get data required for post processing + NHWC_int_numpy_frames, _ = app_to_net_image_inputs(pixel_values_or_image) + inputs = self.inferencer.preprocess(NHWC_int_numpy_frames, batch_size=1) + proc_inputs, _ = list(inputs)[0] + proc_inputs_ = proc_inputs["inputs"][0] + + x = proc_inputs_[[2, 1, 0], ...] + x = (x - self.pre_processor.mean) / self.pre_processor.std + x = torch.unsqueeze(x, 0) + + # run inference + heatmaps = self.model(x) + heatmaps = heatmaps.detach().numpy() + + # create predictions from heatmap + pred_kps, scores = get_max_preds(heatmaps) + + # get the bounding box center from the preprocessing + # In older versions of the MM modules the center is directly a member + # of gt_instances and does not need to be computed. + bbox = proc_inputs["data_samples"][0].gt_instances.bboxes[0] + center = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2] + + scale = proc_inputs["data_samples"][0].gt_instances.bbox_scales[0] + + # perform refinement + keypoints = refine_keypoints(pred_kps, np.squeeze(heatmaps)) + scale_factor = np.array([4.0, 4.0]) + keypoints = keypoints * scale_factor + input_size = proc_inputs["data_samples"][0].metainfo["input_size"] + keypoints = keypoints / input_size * scale + center - 0.5 * scale + keypoints = np.round(keypoints).astype(np.int32) + + if raw_output: + return keypoints + + predicted_images = [] + for i, img in enumerate(NHWC_int_numpy_frames): + draw_points(img, keypoints[i], color=(255, 0, 0), size=2) + predicted_images.append(fromarray(img)) + return predicted_images diff --git a/qai_hub_models/models/hrnet_pose/demo.py b/qai_hub_models/models/hrnet_pose/demo.py new file mode 100644 index 00000000..c5844da9 --- /dev/null +++ b/qai_hub_models/models/hrnet_pose/demo.py @@ -0,0 +1,53 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models.hrnet_pose.app import HRNetPoseApp +from qai_hub_models.models.hrnet_pose.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + HRNetPose, +) +from qai_hub_models.utils.args import ( + demo_model_from_cli_args, + get_model_cli_parser, + get_on_device_demo_parser, + validate_on_device_demo_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.display import display_or_save_image + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "hrnet_pose_demo.png" +) + + +# The demo will display a image with the predicted keypoints. 
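+# The --image argument accepts either a local file path or a URL (see the parser
+# below); the annotated result is then shown or saved via display_or_save_image.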
+def main(is_test: bool = False): + # Demo parameters + parser = get_model_cli_parser(HRNetPose) + parser = get_on_device_demo_parser(parser, add_output_dir=True) + parser.add_argument( + "--image", + type=str, + default=IMAGE_ADDRESS, + help="image file path or URL", + ) + args = parser.parse_args([] if is_test else None) + validate_on_device_demo_args(args, HRNetPose.get_model_id()) + + # Load image & model + model = demo_model_from_cli_args(HRNetPose, args) + image = load_image(args.image) + print("Model Loaded") + + app = HRNetPoseApp(model) + keypoints = app.predict_pose_keypoints(image)[0] + if not is_test: + display_or_save_image( + keypoints, args.output_dir, "hrnetpose_demo_output.png", "keypoints" + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/hrnet_pose/export.py b/qai_hub_models/models/hrnet_pose/export.py new file mode 100644 index 00000000..ffb8c70f --- /dev/null +++ b/qai_hub_models/models/hrnet_pose/export.py @@ -0,0 +1,197 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.hrnet_pose import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. 
+ output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "hrnet_pose" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "hrnet_pose", + "HRNetPose", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image_tensor" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/hrnet_pose/info.yaml b/qai_hub_models/models/hrnet_pose/info.yaml new file mode 100644 index 00000000..e70da7c1 --- /dev/null +++ b/qai_hub_models/models/hrnet_pose/info.yaml @@ -0,0 +1,33 @@ +name: HRNetPose +# id must match with the model dir name in qai_hub_models +id: hrnet_pose +status: public +headline: Perform accurate human pose estimation. +domain: Computer Vision +use_case: Pose Estimation +description: HRNet performs pose estimation in high-resolution representations. +tags: [] +research_paper: https://arxiv.org/abs/1902.09212 +research_paper_title: Deep High-Resolution Representation Learning for Human Pose + Estimation +license: https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf +source_repo: + https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/hrnet_posenet +technical_details: + Model checkpoint: hrnet_posenet_FP32_state_dict + Input resolution: 192x256 + Number of parameters: 28.5M + Model size: 109 MB +applicable_scenarios: + - Injury prevention training + - Sports performance analysis + - Posture recognition +form_factors: + - Phone + - Tablet + - IoT +related_models: [litehrnet, openpose] +has_static_banner: yes +has_animated_banner: no +license_type: other +dataset: [] diff --git a/qai_hub_models/models/hrnet_pose/model.py b/qai_hub_models/models/hrnet_pose/model.py new file mode 100644 index 00000000..065ecc06 --- /dev/null +++ b/qai_hub_models/models/hrnet_pose/model.py @@ -0,0 +1,63 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import sys + +import torch +import torch.nn as nn + +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, SourceAsRoot +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +# This model originally comes from https://github.com/leoxiaobin/deep-high-resolution-net.pytorch +# but we'll use the weights from AIMET +# Weights and config stored in S3 are sourced from +# https://github.com/quic/aimet-model-zoo/blob/develop/aimet_zoo_torch/hrnet_posenet/models/model_cards/hrnet_posenet_w8a8.json +# Weights are found here +# https://github.com/quic/aimet-model-zoo/releases/download/phase_2_march_artifacts/hrnet_posenet_FP32_state_dict.pth +DEFAULT_WEIGHTS = "hrnet_posenet_FP32_state_dict.pth" +SOURCE_REPOSITORY = "https://github.com/leoxiaobin/deep-high-resolution-net.pytorch" +COMMIT_HASH = "6f69e4676ad8d43d0d61b64b1b9726f0c369e7b1" +CONFIG_FILE = "experiments/coco/hrnet/w32_256x192_adam_lr1e-3.yaml" + + +class HRNetPose(BaseModel): + def __init__(self, model: nn.Module) -> None: + super().__init__() + self.model = model + + @classmethod + def from_pretrained(cls) -> HRNetPose: + + weights_file = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_WEIGHTS + ).fetch() + weights = torch.load(weights_file, map_location="cpu") + with SourceAsRoot( + SOURCE_REPOSITORY, COMMIT_HASH, MODEL_ID, MODEL_ASSET_VERSION + ): + sys.path.append("./lib") + from lib.config import cfg + from models.pose_hrnet import PoseHighResolutionNet + + cfg.merge_from_file(CONFIG_FILE) + cfg.freeze() + net = PoseHighResolutionNet(cfg) + net.load_state_dict(weights) + return cls(net).eval() + + def forward(self, image: torch.Tensor): + return self.model(image) + + @staticmethod + def get_input_spec( + height: int = 256, + width: int = 192, + ) -> InputSpec: + return {"image": ((1, 3, height, width), "float32")} diff --git a/qai_hub_models/models/hrnet_pose/perf.yaml b/qai_hub_models/models/hrnet_pose/perf.yaml new file mode 100644 index 00000000..bcbe5478 --- /dev/null +++ b/qai_hub_models/models/hrnet_pose/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: HRNetPose + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 2574.0 + throughput: 388.5003885003885 + estimated_peak_memory_range: + min: 16384 + max: 2027656 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 515 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 515 + job_id: jwgoln14g + job_status: Passed + torchscript_onnx_qnn: + inference_time: 2611.0 + throughput: 382.99502106472613 + estimated_peak_memory_range: + min: 12288 + max: 48352008 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 747 + layers_on_gpu: 0 + layers_on_cpu: 0 
+ total_layers: 747 + job_id: j1pvlr175 + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:31:51.091359Z' diff --git a/qai_hub_models/models/hrnet_pose/requirements.txt b/qai_hub_models/models/hrnet_pose/requirements.txt new file mode 100644 index 00000000..69edf6ae --- /dev/null +++ b/qai_hub_models/models/hrnet_pose/requirements.txt @@ -0,0 +1,4 @@ +yacs==0.1.8 +mmpose<=1.2.0 +mmcv==2.1.0 +mmdet<=3.2.0 diff --git a/qai_hub_models/models/hrnet_pose/test.py b/qai_hub_models/models/hrnet_pose/test.py new file mode 100644 index 00000000..358b0f0c --- /dev/null +++ b/qai_hub_models/models/hrnet_pose/test.py @@ -0,0 +1,43 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models.hrnet_pose.app import HRNetPoseApp +from qai_hub_models.models.hrnet_pose.demo import IMAGE_ADDRESS +from qai_hub_models.models.hrnet_pose.demo import main as demo_main +from qai_hub_models.models.hrnet_pose.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + HRNetPose, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import assert_most_close, skip_clone_repo_check + +OUTPUT_IMAGE_LOCAL_PATH = "hrnetpose_output.png" +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, OUTPUT_IMAGE_LOCAL_PATH +) + + +@skip_clone_repo_check +def test_task(): + image = load_image(IMAGE_ADDRESS) + model = HRNetPose.from_pretrained() + app = HRNetPoseApp(model=model) + output = app.predict(image)[0] + + output_image = load_image(OUTPUT_IMAGE_ADDRESS) + assert_most_close( + np.asarray(output, dtype=np.float32) / 255, + np.asarray(output_image, dtype=np.float32) / 255, + 0.005, + rtol=0.02, + atol=0.2, + ) + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/hrnet_pose_quantized/README.md b/qai_hub_models/models/hrnet_pose_quantized/README.md new file mode 100644 index 00000000..b6f64114 --- /dev/null +++ b/qai_hub_models/models/hrnet_pose_quantized/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [HRNetPoseQuantized: Perform accurate human pose estimation](https://aihub.qualcomm.com/models/hrnet_pose_quantized) + +HRNet performs pose estimation in high-resolution representations. + +This is based on the implementation of HRNetPoseQuantized found +[here](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/hrnet_posenet). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/hrnet_pose_quantized). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. 
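+
+The snippet below is an illustrative sketch (not a script shipped with the
+repository) of how the quantized checkpoint can be driven from Python using the
+shared `HRNetPoseApp`; the image path is a placeholder. It assumes the package
+and its AIMET dependencies are installed (see Example & Usage below).
+
+```python
+from qai_hub_models.models.hrnet_pose.app import HRNetPoseApp
+from qai_hub_models.models.hrnet_pose_quantized.model import HRNetPoseQuantizable
+from qai_hub_models.utils.asset_loaders import load_image
+
+# Load the pre-quantized (W8A8) checkpoint and wrap it in the pose app.
+model = HRNetPoseQuantizable.from_pretrained()
+app = HRNetPoseApp(model=model)
+
+# "person.jpg" is a placeholder; any RGB image containing a person works.
+image = load_image("person.jpg")
+annotated = app.predict_pose_keypoints(image)[0]  # PIL image with keypoints drawn
+annotated.save("person_keypoints.png")
+```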
+ + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[hrnet_pose_quantized]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.hrnet_pose_quantized.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.hrnet_pose_quantized.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of HRNetPoseQuantized can be found + [here](https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf). + + +## References +* [Deep High-Resolution Representation Learning for Human Pose Estimation](https://arxiv.org/abs/1902.09212) +* [Source Model Implementation](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/hrnet_posenet) diff --git a/qai_hub_models/models/hrnet_pose_quantized/__init__.py b/qai_hub_models/models/hrnet_pose_quantized/__init__.py new file mode 100644 index 00000000..26dbe409 --- /dev/null +++ b/qai_hub_models/models/hrnet_pose_quantized/__init__.py @@ -0,0 +1,8 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models.hrnet_pose.app import HRNetPoseApp # noqa: F401 + +from .model import MODEL_ID # noqa: F401 +from .model import HRNetPoseQuantizable as Model # noqa: F401 diff --git a/qai_hub_models/models/hrnet_pose_quantized/demo.py b/qai_hub_models/models/hrnet_pose_quantized/demo.py new file mode 100644 index 00000000..e17f276e --- /dev/null +++ b/qai_hub_models/models/hrnet_pose_quantized/demo.py @@ -0,0 +1,57 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models.hrnet_pose.app import HRNetPoseApp +from qai_hub_models.models.hrnet_pose_quantized.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + HRNetPoseQuantizable, +) +from qai_hub_models.utils.args import ( + demo_model_from_cli_args, + get_model_cli_parser, + get_on_device_demo_parser, + validate_on_device_demo_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.display import display_or_save_image + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "hrnet_pose_demo.png" +) + + +# The demo will display a image with the predicted keypoints. 
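+# This demo reuses HRNetPoseApp from the float hrnet_pose model; only the model
+# itself (the pre-quantized HRNetPoseQuantizable checkpoint and encodings) differs.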
+def main(is_test: bool = False): + # Demo parameters + parser = get_model_cli_parser(HRNetPoseQuantizable) + parser = get_on_device_demo_parser(parser, add_output_dir=True) + parser.add_argument( + "--image", + type=str, + default=IMAGE_ADDRESS, + help="image file path or URL", + ) + + args = parser.parse_args([] if is_test else None) + validate_on_device_demo_args(args, HRNetPoseQuantizable.get_model_id()) + + # Load image & model + model = demo_model_from_cli_args(HRNetPoseQuantizable, args) + image = load_image(args.image) + print("Model Loaded") + + app = HRNetPoseApp(model) + keypoints = app.predict_pose_keypoints(image)[0] + if not is_test: + display_or_save_image( + keypoints, + args.output_dir, + "hrnetpose_quantized_demo_output.png", + "keypoints", + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/hrnet_pose_quantized/export.py b/qai_hub_models/models/hrnet_pose_quantized/export.py new file mode 100644 index 00000000..cce76951 --- /dev/null +++ b/qai_hub_models/models/hrnet_pose_quantized/export.py @@ -0,0 +1,207 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.hrnet_pose_quantized import Model +from qai_hub_models.utils.args import ( + TargetRuntime, + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) +from qai_hub_models.utils.qnn_helpers import get_qnn_inputs + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. 
+ skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "hrnet_pose_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "hrnet_pose_quantized", + "HRNetPoseQuantized", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = model.convert_to_hub_source_model( + target_runtime, output_path, input_spec + ) + if target_runtime == TargetRuntime.TFLITE: + quant_calibration_data = None + else: + quant_calibration_data = model.get_calibration_data(target_runtime, input_spec) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image_tensor" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + calibration_data=quant_calibration_data, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + hub_inputs = sample_inputs + if target_runtime == TargetRuntime.QNN: + hub_inputs = get_qnn_inputs(compile_job, sample_inputs) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. 
Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/hrnet_pose_quantized/info.yaml b/qai_hub_models/models/hrnet_pose_quantized/info.yaml new file mode 100644 index 00000000..0870433d --- /dev/null +++ b/qai_hub_models/models/hrnet_pose_quantized/info.yaml @@ -0,0 +1,34 @@ +name: HRNetPoseQuantized +# id must match with the model dir name in qai_hub_models +id: hrnet_pose_quantized +status: public +headline: Perform accurate human pose estimation. +domain: Computer Vision +use_case: Pose Estimation +description: HRNet performs pose estimation in high-resolution representations. +tags: + - quantized +research_paper: https://arxiv.org/abs/1902.09212 +research_paper_title: Deep High-Resolution Representation Learning for Human Pose + Estimation +license: https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf +source_repo: + https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/hrnet_posenet +technical_details: + Model checkpoint: hrnet_posenet_FP32_state_dict + Input resolution: 192x256 + Number of parameters: 28.5M + Model size: 109 MB +applicable_scenarios: + - Injury prevention training + - Sports performance analysis + - Posture recognition +form_factors: + - Phone + - Tablet + - IoT +related_models: [litehrnet, hrnet_pose] +has_static_banner: yes +has_animated_banner: no +license_type: other +dataset: [] diff --git a/qai_hub_models/models/hrnet_pose_quantized/model.py b/qai_hub_models/models/hrnet_pose_quantized/model.py new file mode 100644 index 00000000..957ecff1 --- /dev/null +++ b/qai_hub_models/models/hrnet_pose_quantized/model.py @@ -0,0 +1,76 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch +from aimet_torch.cross_layer_equalization import equalize_model +from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim + +from qai_hub_models.models.hrnet_pose.model import HRNetPose +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +# This verifies aimet is installed, and this must be included first. 
+from qai_hub_models.utils.quantization_aimet import ( # isort: skip + AIMETQuantizableMixin, +) + + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +# Weights and config stored in S3 are sourced from +# https://github.com/quic/aimet-model-zoo/blob/develop/aimet_zoo_torch/hrnet_posenet/models/model_cards/hrnet_posenet_w8a8.json: +# https://github.com/quic/aimet-model-zoo/releases/download/phase_2_march_artifacts/hrnet_posenet_W8A8_state_dict.pth +# Encodings were generated with AIMET QuantSim export +QUANTIZED_WEIGHTS = "hrnet_posenet_W8A8_state_dict.pth" +AIMET_ENCODINGS = "hrnetpose_aimet_quantization_encodings.json" +AIMET_CONFIG = "default_config_per_channel.json" + + +class HRNetPoseQuantizable(AIMETQuantizableMixin, HRNetPose): + """HRNetPose with post training quantization suport + + Supports only 8 bit weights and activations, and only loads pre-quantized checkpoints. + Support for quantizing using your own weights & data will come at a later date.""" + + def __init__( + self, + hrnet_model: QuantizationSimModel, + ) -> None: + HRNetPose.__init__(self, hrnet_model.model) + AIMETQuantizableMixin.__init__( + self, hrnet_model, needs_onnx_direct_aimet_export=True + ) + + @classmethod + def from_pretrained(cls) -> HRNetPoseQuantizable: + model = HRNetPose.from_pretrained() + input_shape = HRNetPose.get_input_spec()["image"][0] + equalize_model(model, input_shape) + + weights = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, QUANTIZED_WEIGHTS + ).fetch() + aimet_config = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, AIMET_CONFIG + ).fetch() + aimet_encodings = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, AIMET_ENCODINGS + ).fetch() + + # Load the model weights and quantization parameters + state_dict = torch.load(weights, map_location=torch.device("cpu")) + new_state_dict = {"model." 
+ key: value for key, value in state_dict.items()} + model.load_state_dict(new_state_dict) + sim = QuantizationSimModel( + model, + quant_scheme="tf_enhanced", + default_param_bw=8, + default_output_bw=8, + config_file=aimet_config, + dummy_input=torch.rand(input_shape), + ) + load_encodings_to_sim(sim, aimet_encodings) + + return cls(sim) diff --git a/qai_hub_models/models/hrnet_pose_quantized/perf.yaml b/qai_hub_models/models/hrnet_pose_quantized/perf.yaml new file mode 100644 index 00000000..8e720351 --- /dev/null +++ b/qai_hub_models/models/hrnet_pose_quantized/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: HRNetPoseQuantized + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 2508.0 + throughput: 398.72408293460927 + estimated_peak_memory_range: + min: 16384 + max: 3642928 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 515 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 515 + job_id: jz57eljqp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:23:32.990808Z' diff --git a/qai_hub_models/models/hrnet_pose_quantized/requirements.txt b/qai_hub_models/models/hrnet_pose_quantized/requirements.txt new file mode 100644 index 00000000..69edf6ae --- /dev/null +++ b/qai_hub_models/models/hrnet_pose_quantized/requirements.txt @@ -0,0 +1,4 @@ +yacs==0.1.8 +mmpose<=1.2.0 +mmcv==2.1.0 +mmdet<=3.2.0 diff --git a/qai_hub_models/models/hrnet_pose_quantized/test.py b/qai_hub_models/models/hrnet_pose_quantized/test.py new file mode 100644 index 00000000..343af7bc --- /dev/null +++ b/qai_hub_models/models/hrnet_pose_quantized/test.py @@ -0,0 +1,46 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np +import torch + +from qai_hub_models.models.hrnet_pose.app import HRNetPoseApp +from qai_hub_models.models.hrnet_pose.demo import IMAGE_ADDRESS +from qai_hub_models.models.hrnet_pose.demo import main as demo_main +from qai_hub_models.models.hrnet_pose_quantized.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + HRNetPoseQuantizable, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import assert_most_close, skip_clone_repo_check + +OUTPUT_IMAGE_LOCAL_PATH = "hrnetpose_quantized_output.png" +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, OUTPUT_IMAGE_LOCAL_PATH +) + + +@skip_clone_repo_check +def test_task(): + # AIMET Quantization Simulator introduces randomness. Eliminate that for this test. + torch.manual_seed(0) + image = load_image(IMAGE_ADDRESS) + model = HRNetPoseQuantizable.from_pretrained() + app = HRNetPoseApp(model=model) + output = app.predict(image)[0] + + output_image = load_image(OUTPUT_IMAGE_ADDRESS) + assert_most_close( + np.asarray(output, dtype=np.float32) / 255, + np.asarray(output_image, dtype=np.float32) / 255, + 0.005, + rtol=0.02, + atol=0.2, + ) + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/huggingface_wavlm_base_plus/README.md b/qai_hub_models/models/huggingface_wavlm_base_plus/README.md new file mode 100644 index 00000000..170ac687 --- /dev/null +++ b/qai_hub_models/models/huggingface_wavlm_base_plus/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [HuggingFace-WavLM-Base-Plus: Real-time Speech processing](https://aihub.qualcomm.com/models/huggingface_wavlm_base_plus) + +HuggingFaceWavLMBasePlus is a real time speech processing backbone based on Microsoft's WavLM model. + +This is based on the implementation of HuggingFace-WavLM-Base-Plus found +[here](https://huggingface.co/patrickvonplaten/wavlm-libri-clean-100h-base-plus/tree/main). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/huggingface_wavlm_base_plus). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[huggingface_wavlm_base_plus]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.huggingface_wavlm_base_plus.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.huggingface_wavlm_base_plus.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. 
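+
+The snippet below is an illustrative sketch (not a script shipped with the
+repository) of programmatic feature extraction with this model; the silent
+20-second clip is a stand-in for real 16 kHz audio.
+
+```python
+import numpy as np
+
+from qai_hub_models.models.huggingface_wavlm_base_plus import App, Model
+
+model = Model.from_pretrained()
+app = App(model)
+
+# Placeholder waveform: 20 s of silence at 16 kHz (the app clips/pads to this length).
+audio = np.zeros(320000, dtype=np.float32)
+features = app.predict_features(audio, sampling_rate=16000.0)
+print([f.shape for f in features])  # two tensors, documented as 1x999x768 and 1x999x512
+```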
+ +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of HuggingFace-WavLM-Base-Plus can be found + [here](https://github.com/microsoft/unilm/blob/master/LICENSE). + + +## References +* [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) +* [Source Model Implementation](https://huggingface.co/patrickvonplaten/wavlm-libri-clean-100h-base-plus/tree/main) diff --git a/qai_hub_models/models/huggingface_wavlm_base_plus/__init__.py b/qai_hub_models/models/huggingface_wavlm_base_plus/__init__.py new file mode 100644 index 00000000..62b91be1 --- /dev/null +++ b/qai_hub_models/models/huggingface_wavlm_base_plus/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .app import HuggingFaceWavLMBasePlusApp as App # noqa: F401 +from .model import MODEL_ID # noqa: F401 +from .model import HuggingFaceWavLMBasePlus as Model # noqa: F401 diff --git a/qai_hub_models/models/huggingface_wavlm_base_plus/app.py b/qai_hub_models/models/huggingface_wavlm_base_plus/app.py new file mode 100644 index 00000000..039d570b --- /dev/null +++ b/qai_hub_models/models/huggingface_wavlm_base_plus/app.py @@ -0,0 +1,64 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import numpy as np +import torch + +from qai_hub_models.models.huggingface_wavlm_base_plus.model import ( + DEFAULT_INPUT_LENGTH_SECONDS, +) + + +class HuggingFaceWavLMBasePlusApp: + """ + This class consists of light-weight "app code" that is required to perform end to end inference with HuggingFaceWavLMBasePlus. + + The app uses 1 model: + * HuggingFaceWavLMBasePlus + + For a given audio input, the app will: + * Run HuggingFaceWavLMBasePlus inference on the input and return the output feature vectors + """ + + def __init__(self, wavlm_model): + self.model = wavlm_model + + def predict(self, *args, **kwargs): + # See predict_features. + return self.predict_features(*args, **kwargs) + + def predict_features( + self, input: np.ndarray, sampling_rate=16000.0 + ) -> torch.Tensor: + """ + Predict a feature vector from an audio sample + + Parameters: + input: a 1xn array representing an audio sample, where n is length. 
+ This will be clipped to the appropriate length if too long, + and padded if too short + sampling_rate: the sampling rate of the audio - default 16kHz + + Returns: + feature_vec: a tuple of tensors + 1x999x768 + 1x999x512 + features detected in the audio stream + """ + + # preprocess audio + input_len = int(DEFAULT_INPUT_LENGTH_SECONDS * sampling_rate) + x = input[:input_len] + x = torch.from_numpy(x).float() + x = torch.nn.functional.pad( + x, (0, input_len - x.shape[0]), mode="constant", value=0 + ) + audio_tensor = x.unsqueeze(0) + + # Run prediction + features = self.model(audio_tensor) + + return features diff --git a/qai_hub_models/models/huggingface_wavlm_base_plus/demo.py b/qai_hub_models/models/huggingface_wavlm_base_plus/demo.py new file mode 100644 index 00000000..8ed1ae2b --- /dev/null +++ b/qai_hub_models/models/huggingface_wavlm_base_plus/demo.py @@ -0,0 +1,45 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from datasets import load_dataset + +from qai_hub_models.models.huggingface_wavlm_base_plus.app import ( + HuggingFaceWavLMBasePlusApp, +) +from qai_hub_models.models.huggingface_wavlm_base_plus.model import ( + HuggingFaceWavLMBasePlus, +) +from qai_hub_models.utils.args import get_model_cli_parser, model_from_cli_args + +HUGGINGFACE_WAVLM_DATASET = "hf-internal-testing/librispeech_asr_demo" + + +# Run HuggingFace WavLM on a sample audio input, and produce +# a feature vector from the audio. The feature vector will be printed to terminal +def demo_main(is_test: bool = False): + # Demo parameters + parser = get_model_cli_parser(HuggingFaceWavLMBasePlus) + args = parser.parse_args([] if is_test else None) + + # load model + model = model_from_cli_args(HuggingFaceWavLMBasePlus, args) + + # Load Application + app = HuggingFaceWavLMBasePlusApp(model) + + # Load audio + dataset = load_dataset(HUGGINGFACE_WAVLM_DATASET, "clean", split="validation") + audio = [x["array"] for x in dataset[:2]["audio"]][0] + + feature_vec = app.predict_features(input=audio) + + # Get output from model + if not is_test: + print("Feature vec from audio:\n") + print(feature_vec) + print("\n") + + +if __name__ == "__main__": + demo_main() diff --git a/qai_hub_models/models/huggingface_wavlm_base_plus/export.py b/qai_hub_models/models/huggingface_wavlm_base_plus/export.py new file mode 100644 index 00000000..a21bb9c9 --- /dev/null +++ b/qai_hub_models/models/huggingface_wavlm_base_plus/export.py @@ -0,0 +1,181 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.huggingface_wavlm_base_plus import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "huggingface_wavlm_base_plus" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "huggingface_wavlm_base_plus", + "HuggingFace-WavLM-Base-Plus", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options + " --compute_unit gpu", + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=sample_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options + " --compute_unit gpu", + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/huggingface_wavlm_base_plus/info.yaml b/qai_hub_models/models/huggingface_wavlm_base_plus/info.yaml new file mode 100644 index 00000000..37a46dd9 --- /dev/null +++ b/qai_hub_models/models/huggingface_wavlm_base_plus/info.yaml @@ -0,0 +1,35 @@ +name: HuggingFace-WavLM-Base-Plus +# id must match with the model dir name in qai_hub_models +id: huggingface_wavlm_base_plus +status: public +headline: Real-time Speech processing. +domain: Audio +description: HuggingFaceWavLMBasePlus is a real time speech processing backbone based + on Microsoft's WavLM model. 
+use_case: Speech Recognition +tags: + - backbone +research_paper: https://arxiv.org/abs/2110.13900 +research_paper_title: 'WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack + Speech Processing' +license: https://github.com/microsoft/unilm/blob/master/LICENSE +source_repo: + https://huggingface.co/patrickvonplaten/wavlm-libri-clean-100h-base-plus/tree/main +technical_details: + Model checkpoint: wavlm-libri-clean-100h-base-plus + Input resolution: 1x320000 + Number of parameters: 95.1M + Model size: 363 MB +applicable_scenarios: + - Smart Home + - Accessibility +form_factors: + - Phone + - Tablet + - IoT +related_models: + - whisper_asr +has_static_banner: yes +has_animated_banner: yes +license_type: mit +dataset: [] diff --git a/qai_hub_models/models/huggingface_wavlm_base_plus/model.py b/qai_hub_models/models/huggingface_wavlm_base_plus/model.py new file mode 100644 index 00000000..6bdbbe6f --- /dev/null +++ b/qai_hub_models/models/huggingface_wavlm_base_plus/model.py @@ -0,0 +1,190 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import math +from typing import Tuple + +import torch +from transformers import WavLMModel +from transformers.models.wavlm.modeling_wavlm import WavLMGroupNormConvLayer + +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +OPENPOSE_SOURCE_REPOSITORY = ( + "https://huggingface.co/patrickvonplaten/wavlm-libri-clean-100h-base-plus/tree/main" +) +OPENPOSE_SOURCE_REPO_COMMIT = "02c289c4471cd1ba4b0ff3e7c304afe395c5026a" +DEFAULT_WEIGHTS = "patrickvonplaten/wavlm-libri-clean-100h-base-plus" +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 + +DEFAULT_INPUT_VEC_LENGTH = 320000 +DEFAULT_INPUT_LENGTH_SECONDS = 20 + + +class HuggingFaceWavLMBasePlus(BaseModel): + """Exportable Voice Recognition model""" + + def __init__( + self, wavlm_model: torch.nn.Module, apply_npu_opt: bool = False + ) -> None: + super().__init__() + + if apply_npu_opt: + wavlm_model = convert_to_wavlm_npu(wavlm_model) + + self.model = wavlm_model + + @classmethod + def from_pretrained( + cls, weights_path: str | None = None, apply_npu_opt: bool = False + ) -> HuggingFaceWavLMBasePlus: + """Load WavLM from a weightfile created by the source HUggingFaceWavLM repository.""" + if weights_path is None: + weights_path = "patrickvonplaten/wavlm-libri-clean-100h-base-plus" + + model = WavLMModel.from_pretrained(weights_path, torchscript=True) + + return cls(model, apply_npu_opt) + + def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Run WAvLM on `input`, and produce feature vector + + Parameters: + input: 1x320000 tensor + 20 seconds of audio sampled at 16kHz + + Returns: + feature_vec: a tuple of tensors + 1x999x768 + 1x999x512 + features detected in the audio stream + """ + return self.model(input) + + def get_input_spec( + self, + batch_size: int = 1, + sample_length: int = 80000, + ) -> InputSpec: + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. 
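+        # For example, the auto-generated export.py above passes this spec as
+        # `input_specs` to hub.submit_compile_job when tracing and compiling.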
+ return {"input": ((batch_size, sample_length), "float32")} + + +# Modules used to override Huggingface WavLM to be NPU friendly +class SliceConv1d(torch.nn.Module): + def __init__(self, orig_module: torch.nn.Conv1d, slice_size: int = 16000): + """Slice inputs to conv1d to limit the input size to any conv""" + super().__init__() + assert isinstance(orig_module, torch.nn.Conv1d) + self.orig_module = orig_module + self.slice_size = slice_size + + _, _, kernel_size_1d = orig_module.weight.shape + self.half_kernel_size = kernel_size_1d // 2 + self.stride = orig_module.stride[0] + + def forward(self, x: torch.Tensor): + num_slices = int(math.ceil(x.shape[-1] / self.slice_size)) + + xs = [] + for i in range(num_slices): + # align begin to stride boundary + begin = i * self.slice_size + begin = int(math.ceil(begin / self.stride)) * self.stride + end = min(begin + self.slice_size + self.half_kernel_size, x.shape[-1]) + conv_out = self.orig_module(x[:, :, begin:end]) + xs.append(conv_out) + return torch.concat(xs, dim=-1) + + +class WavLMGroupNormConvLayerNPU(torch.nn.Module): + def __init__(self, orig_module: WavLMGroupNormConvLayer): + """ + Apple NPU prefer spatial dim not much higher than 16000. We + wrap WavLMGroupNormConvLayer to adhere to that as much as + possible + """ + super().__init__() + assert isinstance(orig_module, WavLMGroupNormConvLayer) + self.orig_module = orig_module + # stack conv1d to conv2d to reduce input dim + conv1d = orig_module.conv + out_channels, in_channels, kernel_size_1d = conv1d.weight.shape + stride_1d = conv1d.stride[0] + self.stride_1d = stride_1d + assert kernel_size_1d % stride_1d == 0 + assert conv1d.padding == (0,) + kernel_size_2d = (stride_1d, kernel_size_1d // stride_1d) + self.conv2d = torch.nn.Conv2d( + in_channels, out_channels, kernel_size_2d, bias=conv1d.bias is not None + ) + self.conv2d.weight.data = ( + conv1d.weight.data.clone() + .view(out_channels, in_channels, kernel_size_1d // stride_1d, stride_1d) + .permute(0, 1, 3, 2) + ) + if conv1d.bias is not None: + assert self.conv2d.bias is not None # for mypy + self.conv2d.bias.data = conv1d.bias.data + self.half_kernel_size = kernel_size_2d[1] // 2 + + def forward(self, x): + # x: [1, 1, seq_len] (e.g. seq_len = 160000 for 10s audio) + seq_len = x.shape[-1] + assert seq_len % self.stride_1d == 0 + x = x.view(1, 1, seq_len // self.stride_1d, self.stride_1d).permute(0, 1, 3, 2) + # x has shape [1, 1, 5, 32000] + # divide it into segments of roughly 16000 + slice_size = 16000 + num_slices = x.shape[-1] // slice_size + xs = [] + for i in range(num_slices): + begin = i * slice_size + end = min(begin + slice_size + self.half_kernel_size, x.shape[-1]) + conv_out = self.conv2d(x[:, :, :, begin:end]) + if i == num_slices - 1: + # last slice can have 1 fewer element than previous + # slides. 
In order to stack it, we pad 1 + # (good apprxoimatino) + num_pad = slice_size - conv_out.shape[-1] + if num_pad > 1: + raise ValueError("Should only have 1 elem missing") + elif num_pad == 1: + conv_out = torch.nn.functional.pad(conv_out, (0, 1)) + # conv_out have shape [1, 512, 1, 16000] + xs.append(conv_out) + # x has shape [1, 512, 2, 16000] + x = torch.concat(xs, axis=2) + + # apply group norm + x = self.orig_module.layer_norm(x) + x = self.orig_module.activation(x) + x = torch.concat(torch.unbind(x, axis=2), axis=-1) + return x[:, :, :-1] + + +def convert_to_wavlm_npu(model: WavLMModel): + """ + Apply changes to make model NPU friendly + """ + assert isinstance(model, WavLMModel) + conv_layer = model.feature_extractor.conv_layers[0] + assert isinstance(conv_layer, WavLMGroupNormConvLayer) + # Replace with NPU friendly implementation + conv_layer_npu = WavLMGroupNormConvLayerNPU(conv_layer).eval() + model.feature_extractor.conv_layers[0] = conv_layer_npu + + conv_layer1 = model.feature_extractor.conv_layers[1].conv + assert isinstance(conv_layer1, torch.nn.Conv1d) + # Replace with NPU friendly implementation + conv_layer1_npu = SliceConv1d(conv_layer1).eval() + model.feature_extractor.conv_layers[1].conv = conv_layer1_npu + + return model diff --git a/qai_hub_models/models/huggingface_wavlm_base_plus/perf.yaml b/qai_hub_models/models/huggingface_wavlm_base_plus/perf.yaml new file mode 100644 index 00000000..3391d06e --- /dev/null +++ b/qai_hub_models/models/huggingface_wavlm_base_plus/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: HuggingFace-WavLM-Base-Plus + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 463847.0 + throughput: 2.1558832977253277 + estimated_peak_memory_range: + min: 10719232 + max: 13863736 + primary_compute_unit: CPU + precision: fp32 + layer_info: + layers_on_npu: 0 + layers_on_gpu: 88 + layers_on_cpu: 748 + total_layers: 836 + job_id: jo5m06wyg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:32:02.862530Z' diff --git a/qai_hub_models/models/huggingface_wavlm_base_plus/requirements.txt b/qai_hub_models/models/huggingface_wavlm_base_plus/requirements.txt new file mode 100644 index 00000000..0e2962fb --- /dev/null +++ b/qai_hub_models/models/huggingface_wavlm_base_plus/requirements.txt @@ -0,0 +1,4 @@ +transformers>=4.31.0 +soundfile>=0.12.1 +librosa>=0.10.1 +datasets>=2.14.5 diff --git a/qai_hub_models/models/huggingface_wavlm_base_plus/test.py b/qai_hub_models/models/huggingface_wavlm_base_plus/test.py new file mode 100644 index 
00000000..f1267acd --- /dev/null +++ b/qai_hub_models/models/huggingface_wavlm_base_plus/test.py @@ -0,0 +1,79 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np +import torch +from datasets import load_dataset + +from qai_hub_models.models.huggingface_wavlm_base_plus.app import ( + HuggingFaceWavLMBasePlusApp, +) +from qai_hub_models.models.huggingface_wavlm_base_plus.demo import demo_main +from qai_hub_models.models.huggingface_wavlm_base_plus.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + HuggingFaceWavLMBasePlus, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset +from qai_hub_models.utils.testing import skip_clone_repo_check + +OUTPUT_TENSOR_1 = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "wavlm_output_tensor_1.pth" +) +OUTPUT_TENSOR_2 = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "wavlm_output_tensor_2.pth" +) + + +def _test_impl(app: HuggingFaceWavLMBasePlusApp): + # Load input data + dataset = load_dataset( + "hf-internal-testing/librispeech_asr_demo", "clean", split="validation" + ) + dataset = dataset.sort("id") + x = dataset[0]["audio"]["array"] + sampling_rate = dataset.features["audio"].sampling_rate + + # Load expected output data + first_output_tensor = torch.load(OUTPUT_TENSOR_1.fetch()) + output_array1 = first_output_tensor.detach().numpy() + second_output_tensor = torch.load(OUTPUT_TENSOR_2.fetch()) + output_array2 = second_output_tensor.detach().numpy() + + # Run inference + app_output_features = app.predict_features(x, sampling_rate) + + # Compare outputs + np.testing.assert_allclose( + np.asarray(app_output_features[0].detach().numpy(), dtype=np.float32), + np.asarray(output_array1, dtype=np.float32), + rtol=0.02, + atol=0.2, + ) + + np.testing.assert_allclose( + np.asarray(app_output_features[1].detach().numpy(), dtype=np.float32), + np.asarray(output_array2, dtype=np.float32), + rtol=0.02, + atol=0.2, + ) + + +@skip_clone_repo_check +def test_task(): + _test_impl(HuggingFaceWavLMBasePlusApp(HuggingFaceWavLMBasePlus.from_pretrained())) + + +@skip_clone_repo_check +def test_trace(): + _test_impl( + HuggingFaceWavLMBasePlusApp( + HuggingFaceWavLMBasePlus.from_pretrained().convert_to_torchscript() + ) + ) + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/inception_v3/README.md b/qai_hub_models/models/inception_v3/README.md new file mode 100644 index 00000000..af6c9e68 --- /dev/null +++ b/qai_hub_models/models/inception_v3/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [Inception-v3: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/inception_v3) + +InceptionNetV3 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of Inception-v3 found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/inception.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. 
More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/inception_v3). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.inception_v3.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.inception_v3.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of Inception-v3 can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). + + +## References +* [Rethinking the Inception Architecture for Computer Vision](http://arxiv.org/abs/1512.00567) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/inception.py) diff --git a/qai_hub_models/models/inception_v3/__init__.py b/qai_hub_models/models/inception_v3/__init__.py new file mode 100644 index 00000000..444e7d3b --- /dev/null +++ b/qai_hub_models/models/inception_v3/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import InceptionNetV3 as Model # noqa: F401 diff --git a/qai_hub_models/models/inception_v3/demo.py b/qai_hub_models/models/inception_v3/demo.py new file mode 100644 index 00000000..487dadc1 --- /dev/null +++ b/qai_hub_models/models/inception_v3/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.inception_v3.model import InceptionNetV3 + + +def main(is_test: bool = False): + imagenet_demo(InceptionNetV3, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/inception_v3/export.py b/qai_hub_models/models/inception_v3/export.py new file mode 100644 index 00000000..e9f2ff1b --- /dev/null +++ b/qai_hub_models/models/inception_v3/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. 
DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.inception_v3 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "inception_v3" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "inception_v3", + "Inception-v3", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/inception_v3/info.yaml b/qai_hub_models/models/inception_v3/info.yaml new file mode 100644 index 00000000..58042435 --- /dev/null +++ b/qai_hub_models/models/inception_v3/info.yaml @@ -0,0 +1,40 @@ +name: Inception-v3 +# id must match with the model dir name in qai_hub_models +id: inception_v3 +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: InceptionNetV3 is a machine learning model that can classify images from + the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. 
+use_case: Image Classification +tags: + - backbone +research_paper: http://arxiv.org/abs/1512.00567 +research_paper_title: Rethinking the Inception Architecture for Computer Vision +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/inception.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 23.8M + Model size: 90.9 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - inception_v3_quantized + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/inception_v3/model.py b/qai_hub_models/models/inception_v3/model.py new file mode 100644 index 00000000..66e94e93 --- /dev/null +++ b/qai_hub_models/models/inception_v3/model.py @@ -0,0 +1,17 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class InceptionNetV3(ImagenetClassifier): + model_builder = tv_models.inception_v3 + DEFAULT_WEIGHTS = DEFAULT_WEIGHTS diff --git a/qai_hub_models/models/inception_v3/perf.yaml b/qai_hub_models/models/inception_v3/perf.yaml new file mode 100644 index 00000000..cbb39be7 --- /dev/null +++ b/qai_hub_models/models/inception_v3/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: Inception-v3 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 1944.0 + throughput: 514.40329218107 + estimated_peak_memory_range: + min: 24576 + max: 2564456 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 141 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 141 + job_id: j1p8em8zp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 2266.0 + throughput: 441.306266548985 + estimated_peak_memory_range: + min: 360448 + max: 133509928 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 232 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 232 + job_id: jogk2qdyg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:21:24.010787Z' diff --git a/qai_hub_models/models/inception_v3/test.py b/qai_hub_models/models/inception_v3/test.py new file mode 100644 index 00000000..95c0dc88 --- /dev/null +++ 
b/qai_hub_models/models/inception_v3/test.py @@ -0,0 +1,23 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.inception_v3.demo import main as demo_main +from qai_hub_models.models.inception_v3.model import MODEL_ID, InceptionNetV3 + + +def test_task(): + run_imagenet_classifier_test(InceptionNetV3.from_pretrained(), MODEL_ID) + + +def test_trace(): + run_imagenet_classifier_trace_test(InceptionNetV3.from_pretrained()) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/inception_v3_quantized/README.md b/qai_hub_models/models/inception_v3_quantized/README.md new file mode 100644 index 00000000..6198c751 --- /dev/null +++ b/qai_hub_models/models/inception_v3_quantized/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [Inception-v3Quantized: Quantized Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/inception_v3_quantized) + +InceptionNetV3 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. This model is post-training quantized to int8 using samples from [Google's open images dataset](https://storage.googleapis.com/openimages/web/index.html). + +This is based on the implementation of Inception-v3Quantized found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/inception.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/inception_v3_quantized). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.inception_v3_quantized.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.inception_v3_quantized.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of Inception-v3Quantized can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). 
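+
+## Programmatic export (sketch)
+
+The export script can also be driven from Python rather than the CLI. A minimal,
+hypothetical sketch (assumes Qualcomm® AI Hub access is configured; the device name
+and flags below are illustrative only):
+
+```python
+from qai_hub_models.models.inception_v3_quantized.export import export_model
+
+# Submit compile/profile/inference jobs for a hosted device; skip the local download.
+compile_job, profile_job, inference_job = export_model(
+    device="Samsung Galaxy S23",  # run `hub.get_devices()` to list available devices
+    skip_downloading=True,
+    dst_runtime="TFLITE",         # default on-device runtime
+)
+```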
+ + +## References +* [Rethinking the Inception Architecture for Computer Vision](http://arxiv.org/abs/1512.00567) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/inception.py) diff --git a/qai_hub_models/models/inception_v3_quantized/__init__.py b/qai_hub_models/models/inception_v3_quantized/__init__.py new file mode 100644 index 00000000..dcec5c4d --- /dev/null +++ b/qai_hub_models/models/inception_v3_quantized/__init__.py @@ -0,0 +1,11 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) +from qai_hub_models.models.inception_v3_quantized.model import MODEL_ID # noqa: F401 +from qai_hub_models.models.inception_v3_quantized.model import ( # noqa: F401 + InceptionNetV3Quantizable as Model, +) diff --git a/qai_hub_models/models/inception_v3_quantized/demo.py b/qai_hub_models/models/inception_v3_quantized/demo.py new file mode 100644 index 00000000..e17bb3be --- /dev/null +++ b/qai_hub_models/models/inception_v3_quantized/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.inception_v3_quantized.model import InceptionNetV3Quantizable + + +def main(is_test: bool = False): + imagenet_demo(InceptionNetV3Quantizable, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/inception_v3_quantized/export.py b/qai_hub_models/models/inception_v3_quantized/export.py new file mode 100644 index 00000000..569e757c --- /dev/null +++ b/qai_hub_models/models/inception_v3_quantized/export.py @@ -0,0 +1,199 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
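+# Note: unlike the floating-point export scripts, this flow converts the AIMET
+# quantization-sim model with convert_to_hub_source_model() and, for non-TFLITE
+# targets, submits calibration data alongside the compile job.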
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.inception_v3_quantized import Model +from qai_hub_models.utils.args import ( + TargetRuntime, + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) +from qai_hub_models.utils.qnn_helpers import get_qnn_inputs + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "inception_v3_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "inception_v3_quantized", + "Inception-v3Quantized", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = model.convert_to_hub_source_model( + target_runtime, output_path, input_spec + ) + if target_runtime == TargetRuntime.TFLITE: + quant_calibration_data = None + else: + quant_calibration_data = model.get_calibration_data(target_runtime, input_spec) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + calibration_data=quant_calibration_data, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + hub_inputs = sample_inputs + if target_runtime == TargetRuntime.QNN: + hub_inputs = get_qnn_inputs(compile_job, sample_inputs) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/inception_v3_quantized/info.yaml b/qai_hub_models/models/inception_v3_quantized/info.yaml new file mode 100644 index 00000000..e7da6c74 --- /dev/null +++ b/qai_hub_models/models/inception_v3_quantized/info.yaml @@ -0,0 +1,43 @@ +name: Inception-v3Quantized +# id must match with the model dir name in qai_hub_models +id: inception_v3_quantized +status: public +headline: Quantized Imagenet classifier and general purpose backbone. 
+domain: Computer Vision +description: InceptionNetV3 is a machine learning model that can classify images from + the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. This model is post-training quantized to int8 using + samples from [Google's open images dataset](https://storage.googleapis.com/openimages/web/index.html). +use_case: Image Classification +tags: + - backbone + - quantized +research_paper: http://arxiv.org/abs/1512.00567 +research_paper_title: Rethinking the Inception Architecture for Computer Vision +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/inception.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 23.8M + Model size: 65.6 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - inception_v3 + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: no +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/inception_v3_quantized/model.py b/qai_hub_models/models/inception_v3_quantized/model.py new file mode 100644 index 00000000..ef630f60 --- /dev/null +++ b/qai_hub_models/models/inception_v3_quantized/model.py @@ -0,0 +1,76 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +# isort: off +# This verifies aimet is installed, and this must be included first. +from qai_hub_models.utils.quantization_aimet import ( + AIMETQuantizableMixin, +) + +# isort: on + +import torch +from aimet_torch.cross_layer_equalization import equalize_model +from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim + +from qai_hub_models.models.inception_v3.model import InceptionNetV3 +from qai_hub_models.utils.aimet.config_loader import get_per_channel_aimet_config +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 3 +DEFAULT_ENCODINGS = "inception_v3_quantized_encodings.json" + + +class InceptionNetV3Quantizable(AIMETQuantizableMixin, InceptionNetV3): + """InceptionNetV3 with post train quantization support. + + Supports only 8 bit weights and activations, and only loads pre-quantized checkpoints. + Support for quantizing using your own weights & data will come at a later date.""" + + def __init__( + self, + sim_model: QuantizationSimModel, + ) -> None: + InceptionNetV3.__init__(self, sim_model.model) + AIMETQuantizableMixin.__init__( + self, sim_model, needs_onnx_direct_aimet_export=True + ) + + @classmethod + def from_pretrained( + cls, + aimet_encodings: str | None = "DEFAULT", + ) -> "InceptionNetV3": + """ + Parameters: + aimet_encodings: + if "DEFAULT": Loads the model with aimet encodings calibrated on imagenette. + elif None: Doesn't load any encodings. Used when computing encodings. + else: Interprets as a filepath and loads the encodings stored there. 
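+
+            Example (the local file name is hypothetical):
+                InceptionNetV3Quantizable.from_pretrained()                      # calibrated encodings
+                InceptionNetV3Quantizable.from_pretrained(aimet_encodings=None)  # no encodings loaded
+                InceptionNetV3Quantizable.from_pretrained(aimet_encodings="my_encodings.json")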
+ """ + model = InceptionNetV3.from_pretrained() + input_shape = model.get_input_spec()["image_tensor"][0] + + equalize_model(model, input_shape) + sim = QuantizationSimModel( + model.net, + quant_scheme="tf_enhanced", + default_param_bw=8, + default_output_bw=8, + config_file=get_per_channel_aimet_config(), + dummy_input=torch.rand(input_shape), + ) + + if aimet_encodings: + if aimet_encodings == "DEFAULT": + aimet_encodings = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_ENCODINGS + ).fetch() + load_encodings_to_sim(sim, aimet_encodings) + + sim.model.eval() + return cls(sim) diff --git a/qai_hub_models/models/inception_v3_quantized/perf.yaml b/qai_hub_models/models/inception_v3_quantized/perf.yaml new file mode 100644 index 00000000..5d7edfbb --- /dev/null +++ b/qai_hub_models/models/inception_v3_quantized/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: Inception-v3Quantized + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 1772.0 + throughput: 564.3340857787811 + estimated_peak_memory_range: + min: 28672 + max: 2456712 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 394 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 394 + job_id: jep2r9jxg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:37:27.025184Z' diff --git a/qai_hub_models/models/inception_v3_quantized/test.py b/qai_hub_models/models/inception_v3_quantized/test.py new file mode 100644 index 00000000..ce2cba52 --- /dev/null +++ b/qai_hub_models/models/inception_v3_quantized/test.py @@ -0,0 +1,40 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.inception_v3_quantized.demo import main as demo_main +from qai_hub_models.models.inception_v3_quantized.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + InceptionNetV3Quantizable, +) + + +def test_task(): + run_imagenet_classifier_test( + InceptionNetV3Quantizable.from_pretrained(), + MODEL_ID, + diff_tol=0.005, + rtol=0.02, + atol=0.2, + asset_version=MODEL_ASSET_VERSION, + ) + + +def test_trace(): + run_imagenet_classifier_trace_test( + InceptionNetV3Quantizable.from_pretrained(), + diff_tol=0.01, + rtol=0.02, + atol=0.2, + is_quantized=True, + ) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/lama_dilated/README.md b/qai_hub_models/models/lama_dilated/README.md new file mode 100644 index 00000000..28f6ea2e --- /dev/null +++ b/qai_hub_models/models/lama_dilated/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [LaMa-Dilated: High resolution image in-painting on-device](https://aihub.qualcomm.com/models/lama_dilated) + +LaMa-Dilated is a machine learning model that allows to erase and in-paint part of given input image. + +This is based on the implementation of LaMa-Dilated found +[here](https://github.com/advimman/lama). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/lama_dilated). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[lama_dilated]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.lama_dilated.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.lama_dilated.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of LaMa-Dilated can be found + [here](https://github.com/advimman/lama/blob/main/LICENSE). 
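+
+## Python usage (sketch)
+
+A hypothetical Python snippet mirroring what `test.py` in this model directory does;
+the image and mask file names below are placeholders:
+
+```python
+from qai_hub_models.models.lama_dilated import App, Model  # RepaintMaskApp, LamaDilated
+from qai_hub_models.utils.asset_loaders import load_image
+
+model = Model.from_pretrained()
+app = App(model)
+
+image = load_image("test_input_image.png")  # RGB image to repair
+mask = load_image("test_input_mask.png")    # single-channel mask: 1 where pixels should be inpainted
+
+# Returns the repainted image(s); index 0 is the result for the single input image.
+inpainted = app.paint_mask_on_image(image, mask)[0]
+```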
+ + +## References +* [Resolution-robust Large Mask Inpainting with Fourier Convolutions](https://arxiv.org/abs/2109.07161) +* [Source Model Implementation](https://github.com/advimman/lama) diff --git a/qai_hub_models/models/lama_dilated/__init__.py b/qai_hub_models/models/lama_dilated/__init__.py new file mode 100644 index 00000000..7feae952 --- /dev/null +++ b/qai_hub_models/models/lama_dilated/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.repaint.app import ( # noqa: F401 + RepaintMaskApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import LamaDilated as Model # noqa: F401 diff --git a/qai_hub_models/models/lama_dilated/demo.py b/qai_hub_models/models/lama_dilated/demo.py new file mode 100644 index 00000000..adabbf3f --- /dev/null +++ b/qai_hub_models/models/lama_dilated/demo.py @@ -0,0 +1,26 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.repaint.demo import repaint_demo +from qai_hub_models.models.lama_dilated.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + LamaDilated, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "test_images/test_input_image.png" +) +MASK_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "test_images/test_input_mask.png" +) + + +def main(is_test: bool = False): + repaint_demo(LamaDilated, IMAGE_ADDRESS, MASK_ADDRESS, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/lama_dilated/export.py b/qai_hub_models/models/lama_dilated/export.py new file mode 100644 index 00000000..488b05f1 --- /dev/null +++ b/qai_hub_models/models/lama_dilated/export.py @@ -0,0 +1,197 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
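+# Note: this export forces channel-last layout for both inputs ("image,mask") at
+# compile time and transposes the on-device output ("output_0") back to
+# channel-first before comparing against the PyTorch reference.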
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.lama_dilated import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "lama_dilated" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "lama_dilated", + "LaMa-Dilated", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image,mask" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image,mask", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/lama_dilated/info.yaml b/qai_hub_models/models/lama_dilated/info.yaml new file mode 100644 index 00000000..54e07b78 --- /dev/null +++ b/qai_hub_models/models/lama_dilated/info.yaml @@ -0,0 +1,31 @@ +name: LaMa-Dilated +# id must match with the model dir name in qai_hub_models +id: lama_dilated +status: public +headline: High resolution image in-painting on-device. +domain: Computer Vision +description: LaMa-Dilated is a machine learning model that allows to erase and in-paint + part of given input image. 
+use_case: Image Editing +tags: + - backbone +research_paper: https://arxiv.org/abs/2109.07161 +research_paper_title: Resolution-robust Large Mask Inpainting with Fourier Convolutions +license: https://github.com/advimman/lama/blob/main/LICENSE +source_repo: https://github.com/advimman/lama +technical_details: + Model checkpoint: Dilated CelebAHQ + Input resolution: 512x512 + Number of parameters: 45.6M + Model size: 174 MB +applicable_scenarios: + - Image editing +related_models: + - aotgan +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: yes +license_type: apache-2.0 +dataset: [] diff --git a/qai_hub_models/models/lama_dilated/model.py b/qai_hub_models/models/lama_dilated/model.py new file mode 100644 index 00000000..fff9bf32 --- /dev/null +++ b/qai_hub_models/models/lama_dilated/model.py @@ -0,0 +1,129 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch +from omegaconf import OmegaConf + +from qai_hub_models.utils.asset_loaders import ( + CachedWebModelAsset, + SourceAsRoot, + load_json, + load_torch, +) +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +LAMA_SOURCE_REPOSITORY = "https://github.com/advimman/lama" +LAMA_SOURCE_REPO_COMMIT = "7dee0e4a3cf5f73f86a820674bf471454f52b74f" +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +DEFAULT_WEIGHTS = "lama-dilated_celeba-hq" +MODEL_ASSET_VERSION = 1 + + +class LamaDilated(BaseModel): + """Exportable LamaDilated inpainting algorithm by Samsung Research.""" + + def __init__( + self, + lama_dilated_model: torch.nn.Module, + ) -> None: + super().__init__() + self.model = lama_dilated_model + + @staticmethod + def from_pretrained(weights_name: str = DEFAULT_WEIGHTS) -> LamaDilated: + """Load LamaDilated from a weights file created by the source LaMa repository.""" + + # Load PyTorch model from disk + lama_dilated_model = _load_lama_dilated_source_model_from_weights(weights_name) + + return LamaDilated(lama_dilated_model) + + def forward(self, image: torch.Tensor, mask: torch.Tensor) -> torch.Tensor: + """ + Run LamaDilated on `image` and `mask`, and produce an image with mask area inpainted. + + Parameters: + image: Pixel values pre-processed for encoder consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + mask: Pixel values pre-processed to have have mask values either 0. or 1. + Range: float[0, 1] and only values of 0. or 1. + 1-channel binary image. + + Returns: + inpainted_image: Pixel values + Range: float[0, 1] + 3-channel Color Space: RGB + """ + + masked_img = image * (1 - mask) + + if self.model.concat_mask: + masked_img = torch.cat([masked_img, mask], dim=1) + + predicted_image = self.model.generator(masked_img) + inpainted = mask * predicted_image + (1 - mask) * image + return inpainted + + def get_input_spec( + self, + batch_size: int = 1, + num_channels: int = 3, + height: int = 512, + width: int = 512, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. 
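+ # With the defaults this resolves to
+ # {"image": ((1, 3, 512, 512), "float32"), "mask": ((1, 1, 512, 512), "float32")},
+ # matching the (image, mask) tensors consumed by forward() above.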
+ return { + "image": ((batch_size, num_channels, height, width), "float32"), + "mask": ((batch_size, 1, height, width), "float32"), + } + + +def _get_weightsfile_from_name(weights_name: str): + """Convert from names of weights files to the url for the weights file""" + return CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, f"checkpoints/{weights_name}.ckpt" + ) + + +def _get_config_url(): + """Get the url for the config file""" + return CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "checkpoints/training_config.json" + ) + + +def _load_lama_dilated_source_model_from_weights(weights_name: str) -> torch.nn.Module: + # Load LamaDilated model from the source repository using the given weights. + weights_url = _get_weightsfile_from_name(weights_name) + config_url = _get_config_url() + + with SourceAsRoot( + LAMA_SOURCE_REPOSITORY, LAMA_SOURCE_REPO_COMMIT, MODEL_ID, MODEL_ASSET_VERSION + ): + # Import module + from saicinpainting.training.trainers.default import ( + DefaultInpaintingTrainingModule, + ) + + # Pass config as needed to create the module for tracing. + config = load_json(config_url) + config = OmegaConf.create(config) + kwargs = dict(config.training_model) + kwargs.pop("kind") + kwargs["use_ddp"] = True + state = load_torch(weights_url) + lama_dilated_model = DefaultInpaintingTrainingModule(config, **kwargs) + lama_dilated_model.load_state_dict(state["state_dict"], strict=False) + lama_dilated_model.on_load_checkpoint(state) + lama_dilated_model.freeze() + return lama_dilated_model diff --git a/qai_hub_models/models/lama_dilated/perf.yaml b/qai_hub_models/models/lama_dilated/perf.yaml new file mode 100644 index 00000000..f951db3e --- /dev/null +++ b/qai_hub_models/models/lama_dilated/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: LaMa-Dilated + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 88596.0 + throughput: 11.287191295318072 + estimated_peak_memory_range: + min: 3289088 + max: 139215624 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 346 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 346 + job_id: jqpyojvr5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 84076.0 + throughput: 11.894000666064038 + estimated_peak_memory_range: + min: 4313088 + max: 34733320 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 333 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 333 + job_id: j2p0m2e2g + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:27:42.653097Z' diff --git a/qai_hub_models/models/lama_dilated/requirements.txt b/qai_hub_models/models/lama_dilated/requirements.txt new file mode 100644 index 00000000..a21b654f --- /dev/null +++ b/qai_hub_models/models/lama_dilated/requirements.txt @@ -0,0 +1,10 @@ +matplotlib 
+pandas +albumentations==0.5.2 +pytorch-lightning==1.6.0 +webdataset +easydict==1.10 +kornia==0.5.0 +hydra-core==1.3.0 +omegaconf==2.3.0 +scikit-learn==1.3.0 diff --git a/qai_hub_models/models/lama_dilated/test.py b/qai_hub_models/models/lama_dilated/test.py new file mode 100644 index 00000000..47b13144 --- /dev/null +++ b/qai_hub_models/models/lama_dilated/test.py @@ -0,0 +1,65 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models._shared.repaint.app import RepaintMaskApp +from qai_hub_models.models.lama_dilated.demo import IMAGE_ADDRESS, MASK_ADDRESS +from qai_hub_models.models.lama_dilated.demo import main as demo_main +from qai_hub_models.models.lama_dilated.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + LamaDilated, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import assert_most_close, skip_clone_repo_check + +OUTPUT_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "test_images/test_output.png" +) + + +@skip_clone_repo_check +def test_task(): + app = RepaintMaskApp(LamaDilated.from_pretrained()) + + img = load_image(IMAGE_ADDRESS) + mask_image = load_image(MASK_ADDRESS) + + out_img = app.paint_mask_on_image(img, mask_image) + expected_out = load_image(OUTPUT_ADDRESS) + assert_most_close( + np.asarray(out_img[0], dtype=np.float32), + np.asarray(expected_out, dtype=np.float32), + 0.005, + rtol=0.02, + atol=1.5, + ) + + +@skip_clone_repo_check +def test_trace(): + net = LamaDilated.from_pretrained() + input_spec = net.get_input_spec() + trace = net.convert_to_torchscript(input_spec) + + img = load_image(IMAGE_ADDRESS) + mask_image = load_image(MASK_ADDRESS) + app = RepaintMaskApp(trace) + + out_imgs = app.paint_mask_on_image(img, mask_image) + expected_out = load_image(OUTPUT_ADDRESS) + assert_most_close( + np.asarray(out_imgs[0], dtype=np.float32), + np.asarray(expected_out, dtype=np.float32), + 0.005, + rtol=0.02, + atol=1.5, + ) + + +@skip_clone_repo_check +def test_demo(): + # Run demo and verify it does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/litehrnet/README.md b/qai_hub_models/models/litehrnet/README.md new file mode 100644 index 00000000..fe566187 --- /dev/null +++ b/qai_hub_models/models/litehrnet/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [LiteHRNet: Human pose estimation](https://aihub.qualcomm.com/models/litehrnet) + +LiteHRNet is a machine learning model that detects human pose and returns a location and confidence for each of 17 joints. + +This is based on the implementation of LiteHRNet found +[here](https://github.com/HRNet/Lite-HRNet). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/litehrnet). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. 
+ + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[litehrnet]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.litehrnet.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.litehrnet.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of LiteHRNet can be found + [here](https://github.com/HRNet/Lite-HRNet/blob/hrnet/LICENSE). + + +## References +* [Lite-HRNet: A Lightweight High-Resolution Network](https://arxiv.org/abs/2104.06403) +* [Source Model Implementation](https://github.com/HRNet/Lite-HRNet) diff --git a/qai_hub_models/models/litehrnet/__init__.py b/qai_hub_models/models/litehrnet/__init__.py new file mode 100644 index 00000000..3199aaa5 --- /dev/null +++ b/qai_hub_models/models/litehrnet/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .app import LiteHRNetApp as App # noqa: F401 +from .model import MODEL_ID # noqa: F401 +from .model import LiteHRNet as Model # noqa: F401 diff --git a/qai_hub_models/models/litehrnet/app.py b/qai_hub_models/models/litehrnet/app.py new file mode 100644 index 00000000..f2f714f4 --- /dev/null +++ b/qai_hub_models/models/litehrnet/app.py @@ -0,0 +1,108 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Any, Callable, List, Tuple + +import numpy as np +import torch +from mmpose.codecs.utils import refine_keypoints +from PIL.Image import Image, fromarray + +from qai_hub_models.utils.draw import draw_points +from qai_hub_models.utils.image_processing import app_to_net_image_inputs + + +class LiteHRNetApp: + """ + This class consists of light-weight "app code" that is required to perform end to end inference with LiteHRNet. + + The app uses 1 model: + * LiteHRNet + + For a given image input, the app will: + * pre-process the image + * Run LiteHRNet inference + * Convert the output into a list of keypoint coordiates + """ + + def __init__( + self, + model: Callable[ + [torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor] + ], + inferencer: Any, + ): + self.inferencer = inferencer + self.model = model + + def predict(self, *args, **kwargs): + # See predict_pose_keypoints. 
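+        #
+        # Minimal usage sketch (mirrors demo.py/test.py; `image` is a PIL image
+        # loaded by the caller):
+        #   model = LiteHRNet.from_pretrained()
+        #   app = LiteHRNetApp(model, model.inferencer)
+        #   keypoints = app.predict_pose_keypoints(image, raw_output=True)  # shape [B, 17, 2]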
+ return self.predict_pose_keypoints(*args, **kwargs) + + def predict_pose_keypoints( + self, + pixel_values_or_image: torch.Tensor | np.ndarray | Image | List[Image], + raw_output=False, + ) -> np.ndarray | List[Image]: + """ + Predicts pose keypoints for a person in the image. + + Parameters: + pixel_values_or_image + PIL image(s) + or + numpy array (N H W C x uint8) or (H W C x uint8) -- both RGB channel layout + or + pyTorch tensor (N C H W x fp32, value range is [0, 1]), RGB channel layout + + raw_output: bool + See "returns" doc section for details. + + Returns: + If raw_output is true, returns: + keypoints: np.ndarray, shape [B, N, 2] + Numpy array of keypoints within the images Each keypoint is an (x, y) pair of coordinates within the image. + + Otherwise, returns: + predicted_images: List[PIL.Image] + Images with keypoints drawn. + """ + # Preprocess image to get data required for post processing + NHWC_int_numpy_frames, _ = app_to_net_image_inputs(pixel_values_or_image) + inputs = self.inferencer.preprocess(NHWC_int_numpy_frames, batch_size=1) + proc_inputs, _ = list(inputs)[0] + proc_inputs_ = proc_inputs["inputs"][0] + + # run inference + input = proc_inputs_.to(torch.float32) + predictions, _, heatmaps = self.model(input) + + # get the bounding box center from the preprocessing + # In older versions of the MM modules the center is directly a member + # of gt_instances and does not need to be computed. + bbox = proc_inputs["data_samples"][0].gt_instances.bboxes[0] + center = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2] + + scale = proc_inputs["data_samples"][0].gt_instances.bbox_scales[0] + + # perform refinement + keypoints = refine_keypoints( + predictions.unsqueeze(0).detach().numpy(), heatmaps.detach().numpy() + ) + scale_factor = np.array([4.0, 4.0]) + keypoints = keypoints * scale_factor + input_size = proc_inputs["data_samples"][0].metainfo["input_size"] + keypoints = keypoints / input_size * scale + center - 0.5 * scale + keypoints = np.round(keypoints).astype(np.int32) + + if raw_output: + return keypoints + + predicted_images = [] + for i, img in enumerate(NHWC_int_numpy_frames): + draw_points(img, keypoints[i], color=(255, 0, 0), size=2) + predicted_images.append(fromarray(img)) + return predicted_images diff --git a/qai_hub_models/models/litehrnet/demo.py b/qai_hub_models/models/litehrnet/demo.py new file mode 100644 index 00000000..8ad0ad05 --- /dev/null +++ b/qai_hub_models/models/litehrnet/demo.py @@ -0,0 +1,56 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models.litehrnet.app import LiteHRNetApp +from qai_hub_models.models.litehrnet.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + LiteHRNet, +) +from qai_hub_models.utils.args import ( + demo_model_from_cli_args, + get_model_cli_parser, + get_on_device_demo_parser, + model_from_cli_args, + validate_on_device_demo_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.display import display_or_save_image + +IA_HELP_MSG = "More inferencer architectures for litehrnet can be found at https://github.com/open-mmlab/mmpose/tree/main/configs/body_2d_keypoint/topdown_heatmap/coco" +IMAGE_LOCAL_PATH = "litehrnet_demo.png" +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, IMAGE_LOCAL_PATH +) + + +# Run LiteHRNet end-to-end on a sample image. +# The demo will display a image with the predicted keypoints. +def main(is_test: bool = False): + # Demo parameters + parser = get_model_cli_parser(LiteHRNet) + parser = get_on_device_demo_parser(parser, add_output_dir=True) + parser.add_argument( + "--image", + type=str, + default=IMAGE_ADDRESS, + help="image file path or URL", + ) + args = parser.parse_args([] if is_test else None) + litehrnet_model = model_from_cli_args(LiteHRNet, args) + hub_model = demo_model_from_cli_args(LiteHRNet, args) + validate_on_device_demo_args(args, LiteHRNet.get_model_id()) + + # Load image & model + image = load_image(args.image) + print("Model Loaded") + + app = LiteHRNetApp(hub_model, litehrnet_model.inferencer) + keypoints = app.predict_pose_keypoints(image)[0] + if not is_test: + display_or_save_image(keypoints, args.output_dir, "litehrnet_demo_output.png") + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/litehrnet/export.py b/qai_hub_models/models/litehrnet/export.py new file mode 100644 index 00000000..5e45b9d7 --- /dev/null +++ b/qai_hub_models/models/litehrnet/export.py @@ -0,0 +1,184 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
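+#
+# Typical invocation (CLI flags are generated by export_parser, so run with
+# --help for the authoritative list):
+#   python -m qai_hub_models.models.litehrnet.export
+# This compiles the traced model for the target device and, unless skipped,
+# profiles it, runs sample inference, and downloads build/litehrnet/litehrnet.tflite.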
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.litehrnet import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "litehrnet" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "litehrnet", + "LiteHRNet", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=sample_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/litehrnet/info.yaml b/qai_hub_models/models/litehrnet/info.yaml new file mode 100644 index 00000000..9e62d950 --- /dev/null +++ b/qai_hub_models/models/litehrnet/info.yaml @@ -0,0 +1,31 @@ +name: LiteHRNet +# id must match with the model dir name in qai_hub_models +id: litehrnet +status: public +headline: Human pose estimation. +domain: Computer Vision +description: LiteHRNet is a machine learning model that detects human pose and returns + a location and confidence for each of 17 joints. 
+use_case: Pose Estimation +tags: [] +research_paper: https://arxiv.org/abs/2104.06403 +research_paper_title: 'Lite-HRNet: A Lightweight High-Resolution Network' +license: https://github.com/HRNet/Lite-HRNet/blob/hrnet/LICENSE +source_repo: https://github.com/HRNet/Lite-HRNet +technical_details: + Input resolution: 256x192 + Number of parameters: 1.11M + Model size: 4.56 MB +applicable_scenarios: + - Injury prevention training + - Sports performance analysis + - Posture recognition +form_factors: + - Phone + - Tablet + - IoT +related_models: [openpose, hrnet_pose] +has_static_banner: yes +has_animated_banner: no +license_type: apache-2.0 +dataset: [] diff --git a/qai_hub_models/models/litehrnet/model.py b/qai_hub_models/models/litehrnet/model.py new file mode 100644 index 00000000..e2c31db2 --- /dev/null +++ b/qai_hub_models/models/litehrnet/model.py @@ -0,0 +1,93 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Tuple + +import torch +from mmpose.apis import MMPoseInferencer + +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 + +# More inferencer architectures for litehrnet can be found here +# https://github.com/open-mmlab/mmpose/tree/main/configs/body_2d_keypoint/topdown_heatmap/coco +DEFAULT_INFERENCER_ARCH = "td-hm_litehrnet-18_8xb64-210e_coco-256x192" + + +class LiteHRNet(BaseModel): + """Exportable LiteHRNet pose joint detector, end-to-end.""" + + def __init__(self, inferencer) -> None: + super().__init__() + + self.inferencer = inferencer + self.model = self.inferencer.inferencer.model + self.pre_processor = self.inferencer.inferencer.model.data_preprocessor + self.H, self.W = self.inferencer.inferencer.model.head.decoder.heatmap_size + self.K = self.inferencer.inferencer.model.head.out_channels + self.B = 1 + + @classmethod + def from_pretrained(cls, inferencer_arch=DEFAULT_INFERENCER_ARCH) -> LiteHRNet: + """LiteHRNet comes from the MMPose library, so we load using an internal config + rather than a public weights file""" + inferencer = MMPoseInferencer(inferencer_arch, device=torch.device(type="cpu")) + return cls(inferencer) + + def forward( + self, image: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Run LiteHRNet on `image`, and produce an upscaled image + + Parameters: + image: Pixel values pre-processed for encoder consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + keypoints: 1x17x2 array of coordinate pairs (in x,y format) denoting joint keypoints in the original image + scores: 1x17 array of float[0,1] denoting the score of each corresponding keypoint + heatmaps: 1x17 array of 64x48 heatmaps. These hold the raw confidence values of the locations + of each joint in the image. The keypoints and scores are derived from this + """ + # Preprocess + x = image[[2, 1, 0], ...] 
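+        # (The fancy index above reverses the channel order, RGB -> BGR, before
+        # the data_preprocessor mean/std normalization applied below.)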
+ x = (x - self.pre_processor.mean) / self.pre_processor.std + x = torch.unsqueeze(x, 0) + + # Model prediction + heatmaps = self.model._forward(x) + + # Convert from heatmap to keypoints and scores + # heatmap is 1 x 17 x 64 x 48, BxKxHxW + heatmaps = torch.squeeze(heatmaps) + heatmaps_flatten = heatmaps.flatten(1) + indices = torch.argmax(heatmaps_flatten, dim=1) + # get the (x,y) coords of the maxes in the original heatmap shape - (H, W) + y_locs = (indices // self.H).type(torch.float32) + x_locs = (indices % self.H).type(torch.float32) + + # get the max scores and corresponding keypoints + scores, _ = torch.max(heatmaps_flatten, dim=1) + keypoints = torch.stack((x_locs, y_locs), dim=-1) + + return keypoints, scores, heatmaps + + def get_input_spec( + self, + num_channels: int = 3, + height: int = 256, + width: int = 192, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. + return {"image": ((num_channels, height, width), "float32")} diff --git a/qai_hub_models/models/litehrnet/perf.yaml b/qai_hub_models/models/litehrnet/perf.yaml new file mode 100644 index 00000000..2b7cc7c9 --- /dev/null +++ b/qai_hub_models/models/litehrnet/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: LiteHRNet + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 15966.0 + throughput: 62.63309532757109 + estimated_peak_memory_range: + min: 6561792 + max: 13503904 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 1226 + layers_on_gpu: 0 + layers_on_cpu: 10 + total_layers: 1236 + job_id: jqp4ydwqp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:25:31.033915Z' diff --git a/qai_hub_models/models/litehrnet/requirements.txt b/qai_hub_models/models/litehrnet/requirements.txt new file mode 100644 index 00000000..048feb99 --- /dev/null +++ b/qai_hub_models/models/litehrnet/requirements.txt @@ -0,0 +1,3 @@ +mmpose<=1.2.0 +mmcv==2.1.0 +mmdet<=3.2.0 diff --git a/qai_hub_models/models/litehrnet/test.py b/qai_hub_models/models/litehrnet/test.py new file mode 100644 index 00000000..3fe634ef --- /dev/null +++ b/qai_hub_models/models/litehrnet/test.py @@ -0,0 +1,61 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models.litehrnet.app import LiteHRNetApp +from qai_hub_models.models.litehrnet.demo import IMAGE_ADDRESS +from qai_hub_models.models.litehrnet.demo import main as demo_main +from qai_hub_models.models.litehrnet.model import LiteHRNet +from qai_hub_models.utils.asset_loaders import load_image + +EXPECTED_KEYPOINTS = np.array( + [ + [ + [70, 34], + [77, 32], + [72, 30], + [91, 37], + [72, 32], + [109, 67], + [67, 67], + [130, 104], + [63, 104], + [112, 125], + [40, 102], + [105, 144], + [77, 144], + [119, 202], + [81, 190], + [142, 251], + [88, 230], + ] + ] +) + + +def _test_impl(app: LiteHRNetApp): + image = load_image(IMAGE_ADDRESS) + keypoints = app.predict_pose_keypoints(image, True) + + np.testing.assert_allclose( + np.asarray(EXPECTED_KEYPOINTS, dtype=np.float32), + np.asarray(keypoints, dtype=np.float32), + rtol=0.02, + atol=1.5, + ) + + +def test_task(): + litehrnet = LiteHRNet.from_pretrained() + _test_impl(LiteHRNetApp(litehrnet, litehrnet.inferencer)) + + +def test_trace(): + litehrnet = LiteHRNet.from_pretrained() + _test_impl(LiteHRNetApp(litehrnet.convert_to_torchscript(), litehrnet.inferencer)) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/mediapipe_face/README.md b/qai_hub_models/models/mediapipe_face/README.md new file mode 100644 index 00000000..01c85c99 --- /dev/null +++ b/qai_hub_models/models/mediapipe_face/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [MediaPipe-Face-Detection: Detect faces and locate facial features in real-time video and image streams](https://aihub.qualcomm.com/models/mediapipe_face) + +Designed for sub-millisecond processing, this model predicts bounding boxes and pose skeletons (left eye, right eye, nose tip, mouth, left eye tragion, and right eye tragion) of faces in an image. + +This is based on the implementation of MediaPipe-Face-Detection found +[here](https://github.com/zmurez/MediaPipePyTorch/). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/mediapipe_face). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[mediapipe_face]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.mediapipe_face.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.mediapipe_face.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. 
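+
+For programmatic use, here is a minimal Python sketch mirroring `demo.py` (the
+image path is a placeholder you supply):
+
+```python
+from qai_hub_models.models.mediapipe_face import App, Model
+from qai_hub_models.utils.asset_loaders import load_image
+
+# Build the two-stage face detector + landmark app with default thresholds.
+app = App(Model.from_pretrained())
+
+# Any RGB image works; predict_landmarks_from_image returns numpy frames with
+# the detected boxes, keypoints, and landmarks drawn on them.
+image = load_image("path/to/face_photo.jpg").convert("RGB")
+annotated = app.predict_landmarks_from_image(image)[0]
+```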
+ +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of MediaPipe-Face-Detection can be found + [here](https://github.com/zmurez/MediaPipePyTorch/blob/master/LICENSE). + + +## References +* [BlazeFace: Sub-millisecond Neural Face Detection on Mobile GPUs](https://arxiv.org/abs/1907.05047) +* [Source Model Implementation](https://github.com/zmurez/MediaPipePyTorch/) diff --git a/qai_hub_models/models/mediapipe_face/__init__.py b/qai_hub_models/models/mediapipe_face/__init__.py new file mode 100644 index 00000000..4dd5d1bf --- /dev/null +++ b/qai_hub_models/models/mediapipe_face/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .app import MediaPipeFaceApp as App # noqa: F401 +from .model import MODEL_ID # noqa: F401 +from .model import MediaPipeFace as Model # noqa: F401 diff --git a/qai_hub_models/models/mediapipe_face/app.py b/qai_hub_models/models/mediapipe_face/app.py new file mode 100644 index 00000000..81136db5 --- /dev/null +++ b/qai_hub_models/models/mediapipe_face/app.py @@ -0,0 +1,63 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from qai_hub_models.models._shared.mediapipe.app import MediaPipeApp +from qai_hub_models.models.mediapipe_face.model import ( + DETECT_DSCALE, + DETECT_DXY, + DETECT_SCORE_SLIPPING_THRESHOLD, + FACE_LANDMARK_CONNECTIONS, + LEFT_EYE_KEYPOINT_INDEX, + RIGHT_EYE_KEYPOINT_INDEX, + ROTATION_VECTOR_OFFSET_RADS, + MediaPipeFace, +) + + +class MediaPipeFaceApp(MediaPipeApp): + """ + This class consists of light-weight "app code" that is required to perform end to end inference with MediaPipe's hand landmark detector. + + The app uses 2 models: + * MediaPipeFaceDetector + * MediaPipeFaceLandmark + + See the class comment for the parent class for details. + """ + + def __init__( + self, + model: MediaPipeFace, + min_detector_face_box_score: float = 0.75, + nms_iou_threshold: float = 0.3, + min_landmark_score: float = 0.5, + ): + """ + Construct a mediapipe face application. + + Inputs: + model: MediaPipeFace model + Face detection & landmark model container. + + See parent initializer for further parameter documentation. 
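+
+        Example (as used in demo.py and test.py):
+            MediaPipeFaceApp(MediaPipeFace.from_pretrained())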
+ """ + super().__init__( + model.face_detector, + model.face_detector.anchors, + model.face_landmark_detector, + model.face_detector.get_input_spec()["image"][0][-2:], + model.face_landmark_detector.get_input_spec()["image"][0][-2:], + RIGHT_EYE_KEYPOINT_INDEX, + LEFT_EYE_KEYPOINT_INDEX, + ROTATION_VECTOR_OFFSET_RADS, + DETECT_DXY, + DETECT_DSCALE, + min_detector_face_box_score, + DETECT_SCORE_SLIPPING_THRESHOLD, + nms_iou_threshold, + min_landmark_score, + FACE_LANDMARK_CONNECTIONS, + ) diff --git a/qai_hub_models/models/mediapipe_face/demo.py b/qai_hub_models/models/mediapipe_face/demo.py new file mode 100644 index 00000000..53310133 --- /dev/null +++ b/qai_hub_models/models/mediapipe_face/demo.py @@ -0,0 +1,91 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import argparse + +import numpy as np +from PIL import Image + +from qai_hub_models.models.mediapipe_face.app import MediaPipeFaceApp +from qai_hub_models.models.mediapipe_face.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + MediaPipeFace, +) +from qai_hub_models.utils.args import add_output_dir_arg +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.camera_capture import capture_and_display_processed_frames +from qai_hub_models.utils.display import display_or_save_image + +INPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "face.jpeg" +) + + +# Run Mediapipe Face landmark detection end-to-end on a sample image or camera stream. +# The demo will display output with the predicted landmarks & bounding boxes drawn. +def main(is_test: bool = False): + # Demo parameters + parser = argparse.ArgumentParser() + parser.add_argument( + "--image", + type=str, + default=None, + help="image file path or URL", + ) + parser.add_argument( + "--camera", + type=int, + default=0, + help="Camera Input ID", + ) + parser.add_argument( + "--score-threshold", + type=float, + default=0.75, + help="Score threshold for NonMaximumSuppression", + ) + parser.add_argument( + "--iou-threshold", + type=float, + default=0.3, + help="Intersection over Union (IoU) threshold for NonMaximumSuppression", + ) + add_output_dir_arg(parser) + + print( + "Note: This readme is running through torch, and not meant to be real-time without dedicated ML hardware." 
+ ) + print("Use Ctrl+C in your terminal to exit.") + + args = parser.parse_args([] if is_test else None) + if is_test: + args.image = INPUT_IMAGE_ADDRESS + + # Load app + app = MediaPipeFaceApp( + MediaPipeFace.from_pretrained(), + args.score_threshold, + args.iou_threshold, + ) + print("Model and App Loaded") + + if args.image: + image = load_image(args.image).convert("RGB") + pred_image = app.predict_landmarks_from_image(image) + out_image = Image.fromarray(pred_image[0], "RGB") + if not is_test: + display_or_save_image(out_image, args.output_dir) + else: + + def frame_processor(frame: np.ndarray) -> np.ndarray: + return app.predict_landmarks_from_image(frame)[0] # type: ignore + + capture_and_display_processed_frames( + frame_processor, "QAIHM Mediapipe Face Demo", args.camera + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mediapipe_face/export.py b/qai_hub_models/models/mediapipe_face/export.py new file mode 100644 index 00000000..59733441 --- /dev/null +++ b/qai_hub_models/models/mediapipe_face/export.py @@ -0,0 +1,219 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Mapping, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.mediapipe_face import Model +from qai_hub_models.utils.args import ( + export_parser, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + +ALL_COMPONENTS = ["MediaPipeFaceDetector", "MediaPipeFaceLandmarkDetector"] + + +def export_model( + device: str = "Samsung Galaxy S23", + components: Optional[List[str]] = None, + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Mapping[ + str, Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] +] | List[str]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + components: List of sub-components of the model that will be exported. + Each component is compiled and profiled separately. + Defaults to ALL_COMPONENTS if not specified. 
+ skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` + + Returns: + A Mapping from component_name to a 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "mediapipe_face" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + component_arg = components + components = components or ALL_COMPONENTS + for component in components: + if component not in ALL_COMPONENTS: + raise ValueError(f"Invalid component {component}.") + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "mediapipe_face", + "MediaPipe-Face-Detection", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + component_arg, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + components_dict = {} + if "MediaPipeFaceDetector" in components: + components_dict["MediaPipeFaceDetector"] = model.face_detector + if "MediaPipeFaceLandmarkDetector" in components: + components_dict["MediaPipeFaceLandmarkDetector"] = model.face_landmark_detector + + compile_jobs = {} + for component_name, component in components_dict.items(): + # Trace the model + input_spec = component.get_input_spec() + source_model = torch.jit.trace(component, make_torch_inputs(input_spec)) + + # 2. Compile the models to an on-device asset + model_compile_options = component.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image" + ) + print(f"Optimizing model {component_name} to run on-device.") + compile_jobs[component_name] = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=f"{component_name}", + options=model_compile_options, + ) + + # 3. Profile the model assets on real devices + profile_jobs = {} + if not skip_profiling: + for component_name in components: + print(f"Profiling model {component_name} on a hosted device.") + profile_jobs[component_name] = hub.submit_profile_job( + model=compile_jobs[component_name].get_target_model(), + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_jobs = {} + if not skip_inferencing: + for component_name in components: + print( + f"Running inference for {component_name} on a hosted device with example inputs." 
+ ) + sample_inputs = components_dict[component_name].sample_inputs() + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_jobs[component_name] = hub.submit_inference_job( + model=compile_jobs[component_name].get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 5. Download the model assets to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + for component_name, compile_job in compile_jobs.items(): + target_model = compile_job.get_target_model() + target_model.download( + str(output_path / f"{model_name}_{component_name}.tflite") + ) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + for component_name in components: + profile_job = profile_jobs[component_name] + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + for component_name in components: + inference_job = inference_jobs[component_name] + sample_inputs = components_dict[component_name].sample_inputs() + torch_out = torch_inference(components_dict[component_name], sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + return { + component_name: ( + compile_jobs[component_name], + profile_jobs.get(component_name, None), + inference_jobs.get(component_name, None), + ) + for component_name in components + } + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, components=ALL_COMPONENTS) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mediapipe_face/info.yaml b/qai_hub_models/models/mediapipe_face/info.yaml new file mode 100644 index 00000000..6f80819c --- /dev/null +++ b/qai_hub_models/models/mediapipe_face/info.yaml @@ -0,0 +1,39 @@ +name: MediaPipe-Face-Detection +# id must match with the model dir name in qai_hub_models +id: mediapipe_face +status: public +headline: Detect faces and locate facial features in real-time video and image streams. +domain: Computer Vision +description: Designed for sub-millisecond processing, this model predicts bounding + boxes and pose skeletons (left eye, right eye, nose tip, mouth, left eye tragion, + and right eye tragion) of faces in an image. 
+use_case: Object Detection +tags: + - real-time +research_paper: https://arxiv.org/abs/1907.05047 +research_paper_title: 'BlazeFace: Sub-millisecond Neural Face Detection on Mobile + GPUs' +license: https://github.com/zmurez/MediaPipePyTorch/blob/master/LICENSE +source_repo: https://github.com/zmurez/MediaPipePyTorch/ +technical_details: + Input resolution: 256x256 + Number of parameters (MediaPipeFaceDetector): 135K + Model size (MediaPipeFaceDetector): 565 KB + Number of parameters (MediaPipeFaceLandmarkDetector): 603K + Model size (MediaPipeFaceLandmarkDetector): 2.34 MB +applicable_scenarios: + - Accessibility + - Augmented Reality + - Gaming +related_models: + - mediapipe_hand + - mediapipe_pose + - mediapipe_selfie +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: apache-2.0 +dataset: [] diff --git a/qai_hub_models/models/mediapipe_face/model.py b/qai_hub_models/models/mediapipe_face/model.py new file mode 100644 index 00000000..89844824 --- /dev/null +++ b/qai_hub_models/models/mediapipe_face/model.py @@ -0,0 +1,282 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable, Tuple + +import torch + +from qai_hub_models.models._shared.mediapipe.utils import MediaPipePyTorchAsRoot +from qai_hub_models.utils.base_model import BaseModel, CollectionModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 + +# Vertex indices can be found in +# https://github.com/google/mediapipe/blob/0.8.1/mediapipe/modules/face_geometry/data/canonical_face_model_uv_visualization.png +# Found in https://github.com/google/mediapipe/blob/v0.10.3/mediapipe/python/solutions/face_mesh.py +FACE_LANDMARK_CONNECTIONS = [ + # Lips. + (61, 146), + (146, 91), + (91, 181), + (181, 84), + (84, 17), + (17, 314), + (314, 405), + (405, 321), + (321, 375), + (375, 291), + (61, 185), + (185, 40), + (40, 39), + (39, 37), + (37, 0), + (0, 267), + (267, 269), + (269, 270), + (270, 409), + (409, 291), + (78, 95), + (95, 88), + (88, 178), + (178, 87), + (87, 14), + (14, 317), + (317, 402), + (402, 318), + (318, 324), + (324, 308), + (78, 191), + (191, 80), + (80, 81), + (81, 82), + (82, 13), + (13, 312), + (312, 311), + (311, 310), + (310, 415), + (415, 308), + # Left eye. + (263, 249), + (249, 390), + (390, 373), + (373, 374), + (374, 380), + (380, 381), + (381, 382), + (382, 362), + (263, 466), + (466, 388), + (388, 387), + (387, 386), + (386, 385), + (385, 384), + (384, 398), + (398, 362), + # Left eyebrow. + (276, 283), + (283, 282), + (282, 295), + (295, 285), + (300, 293), + (293, 334), + (334, 296), + (296, 336), + # Right eye. + (33, 7), + (7, 163), + (163, 144), + (144, 145), + (145, 153), + (153, 154), + (154, 155), + (155, 133), + (33, 246), + (246, 161), + (161, 160), + (160, 159), + (159, 158), + (158, 157), + (157, 173), + (173, 133), + # Right eyebrow. + (46, 53), + (53, 52), + (52, 65), + (65, 55), + (70, 63), + (63, 105), + (105, 66), + (66, 107), + # Face oval. 
+ (10, 338), + (338, 297), + (297, 332), + (332, 284), + (284, 251), + (251, 389), + (389, 356), + (356, 454), + (454, 323), + (323, 361), + (361, 288), + (288, 397), + (397, 365), + (365, 379), + (379, 378), + (378, 400), + (400, 377), + (377, 152), + (152, 148), + (148, 176), + (176, 149), + (149, 150), + (150, 136), + (136, 172), + (172, 58), + (58, 132), + (132, 93), + (93, 234), + (234, 127), + (127, 162), + (162, 21), + (21, 54), + (54, 103), + (103, 67), + (67, 109), + (109, 10), +] + + +# Face detector model parameters. +BATCH_SIZE = 1 +DETECT_SCORE_SLIPPING_THRESHOLD = 100 # Clip output scores to this maximum value. +DETECT_DXY, DETECT_DSCALE = ( + 0, + 1.1, +) # Modifiers applied to face detector output bounding box to encapsulate the entire face. +LEFT_EYE_KEYPOINT_INDEX = 0 # The face detector outputs several keypoints. This is the keypoint index for the left eye. +RIGHT_EYE_KEYPOINT_INDEX = 1 # The face detector outputs several keypoints. This is the keypoint index for the right eye. +ROTATION_VECTOR_OFFSET_RADS = ( + 0 # Offset required when computing rotation of the detected face. +) + + +class MediaPipeFace(CollectionModel): + def __init__( + self, + face_detector: FaceDetector, + face_landmark_detector: FaceLandmarkDetector, + ) -> None: + """ + Construct a mediapipe face model. + + Inputs: + face_detector: Callable[[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]] + Face detection model. Input is an image, output is + [bounding boxes & keypoints, box & kp scores] + + face_landmark_detector + Face landmark detector model. Input is an image cropped to the face. The face must be upright + and un-tilted in the frame. Returns [landmark_scores, landmarks] + """ + super().__init__() + self.face_detector = face_detector + self.face_landmark_detector = face_landmark_detector + + @classmethod + def from_pretrained( + cls, + detector_weights: str = "blazefaceback.pth", + detector_anchors: str = "anchors_face_back.npy", + landmark_detector_weights: str = "blazeface_landmark.pth", + ) -> MediaPipeFace: + """ + Load mediapipe models from the source repository. 
+ Returns tuple[ + .blazeface.BlazeFace, + BlazeFace Anchors, + .blazeface_landmark.BlazeFaceLandmark, + ] + """ + with MediaPipePyTorchAsRoot(): + from blazeface import BlazeFace + from blazeface_landmark import BlazeFaceLandmark + + face_detector = BlazeFace(back_model=True) + face_detector.load_weights(detector_weights) + face_detector.load_anchors(detector_anchors) + face_regressor = BlazeFaceLandmark() + face_regressor.load_weights(landmark_detector_weights) + + return cls( + FaceDetector(face_detector, face_detector.anchors), + FaceLandmarkDetector(face_regressor), + ) + + +class FaceDetector(BaseModel): + def __init__( + self, + detector: Callable[[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]], + anchors: torch.Tensor, + ): + super().__init__() + self.detector = detector + self.anchors = anchors + + def forward(self, image: torch.Tensor): + return self.detector(image) + + @classmethod + def from_pretrained( + cls, + detector_weights: str = "blazefaceback.pth", + detector_anchors: str = "anchors_face_back.npy", + ): + with MediaPipePyTorchAsRoot(): + from blazeface import BlazeFace + + face_detector = BlazeFace(back_model=True) + face_detector.load_weights(detector_weights) + face_detector.load_anchors(detector_anchors) + return cls(face_detector, face_detector.anchors) + + def get_input_spec(self, batch_size: int = BATCH_SIZE) -> InputSpec: + """ + Returns the input specification (name -> (shape, type) of the face detector. + This can be used to submit profiling job on Qualcomm AI Hub. + """ + return {"image": ((batch_size, 3, 256, 256), "float32")} + + +class FaceLandmarkDetector(BaseModel): + def __init__( + self, + detector: Callable[[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]], + ): + super().__init__() + self.detector = detector + + def forward(self, image: torch.Tensor): + return self.detector(image) + + @classmethod + def from_pretrained(cls, landmark_detector_weights: str = "blazeface_landmark.pth"): + with MediaPipePyTorchAsRoot(): + from blazeface_landmark import BlazeFaceLandmark + + face_regressor = BlazeFaceLandmark() + face_regressor.load_weights(landmark_detector_weights) + return cls(face_regressor) + + def get_input_spec(self, batch_size: int = BATCH_SIZE) -> InputSpec: + """ + Returns the input specification (name -> (shape, type) of the face landmark detector. + This can be used to submit profiling job on Qualcomm AI Hub. 
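+
+        For example, export.py traces each component with
+        make_torch_inputs(input_spec) and passes this spec to the compile job.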
+ """ + return {"image": ((batch_size, 3, 192, 192), "float32")} diff --git a/qai_hub_models/models/mediapipe_face/perf.yaml b/qai_hub_models/models/mediapipe_face/perf.yaml new file mode 100644 index 00000000..3df1ee28 --- /dev/null +++ b/qai_hub_models/models/mediapipe_face/perf.yaml @@ -0,0 +1,107 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: MediaPipeFaceDetector + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 536.0 + throughput: 1865.6716417910447 + estimated_peak_memory_range: + min: 12288 + max: 1539856 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 111 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 111 + job_id: jqp4ydjqp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 592.0 + throughput: 1689.1891891891892 + estimated_peak_memory_range: + min: 802816 + max: 57565728 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 147 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 147 + job_id: jo5m06vyg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:08:54.792595Z' +- name: MediaPipeFaceLandmarkDetector + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 209.0 + throughput: 4784.688995215311 + estimated_peak_memory_range: + min: 24576 + max: 1806472 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 100 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 100 + job_id: j0pxl6ejp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 286.0 + throughput: 3496.5034965034965 + estimated_peak_memory_range: + min: 462848 + max: 8766648 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 106 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 106 + job_id: jegnzmxvg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:12:20.881454Z' diff --git a/qai_hub_models/models/mediapipe_face/requirements.txt b/qai_hub_models/models/mediapipe_face/requirements.txt new file mode 100644 index 00000000..9c11ddeb --- /dev/null +++ b/qai_hub_models/models/mediapipe_face/requirements.txt @@ -0,0 +1,2 @@ +opencv-python +requests diff --git a/qai_hub_models/models/mediapipe_face/test.py b/qai_hub_models/models/mediapipe_face/test.py new file mode 100644 index 00000000..616b7158 --- /dev/null +++ b/qai_hub_models/models/mediapipe_face/test.py @@ -0,0 +1,41 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models.mediapipe_face.app import MediaPipeFaceApp +from qai_hub_models.models.mediapipe_face.demo import INPUT_IMAGE_ADDRESS +from qai_hub_models.models.mediapipe_face.demo import main as demo_main +from qai_hub_models.models.mediapipe_face.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + MediaPipeFace, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import skip_clone_repo_check + +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "face_output.png" +) + + +# Because we have not made a modification to the pytorch source network, +# no numerical tests are included for the model; only for the app. +@skip_clone_repo_check +def test_face_app(): + input = load_image( + INPUT_IMAGE_ADDRESS, + ) + expected_output = load_image( + OUTPUT_IMAGE_ADDRESS, + ).convert("RGB") + app = MediaPipeFaceApp(MediaPipeFace.from_pretrained()) + assert np.allclose( + app.predict_landmarks_from_image(input)[0], np.asarray(expected_output) + ) + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/mediapipe_hand/README.md b/qai_hub_models/models/mediapipe_hand/README.md new file mode 100644 index 00000000..7ad1da5e --- /dev/null +++ b/qai_hub_models/models/mediapipe_hand/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [MediaPipe-Hand-Detection: Real-time hand detection optimized for mobile and edge](https://aihub.qualcomm.com/models/mediapipe_hand) + +The MediaPipe Hand Landmark Detector is a machine learning pipeline that predicts bounding boxes and pose skeletons of hands in an image. + +This is based on the implementation of MediaPipe-Hand-Detection found +[here](https://github.com/zmurez/MediaPipePyTorch/). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/mediapipe_hand). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[mediapipe_hand]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.mediapipe_hand.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.mediapipe_hand.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. 
+- The license for the original implementation of MediaPipe-Hand-Detection can be found + [here](https://github.com/zmurez/MediaPipePyTorch/blob/master/LICENSE). + + +## References +* [MediaPipe Hands: On-device Real-time Hand Tracking](https://arxiv.org/abs/2006.10214) +* [Source Model Implementation](https://github.com/zmurez/MediaPipePyTorch/) diff --git a/qai_hub_models/models/mediapipe_hand/__init__.py b/qai_hub_models/models/mediapipe_hand/__init__.py new file mode 100644 index 00000000..87329fcb --- /dev/null +++ b/qai_hub_models/models/mediapipe_hand/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .app import MediaPipeHandApp as App # noqa: F401 +from .model import MODEL_ID # noqa: F401 +from .model import MediaPipeHand as Model # noqa: F401 diff --git a/qai_hub_models/models/mediapipe_hand/app.py b/qai_hub_models/models/mediapipe_hand/app.py new file mode 100644 index 00000000..5495fc11 --- /dev/null +++ b/qai_hub_models/models/mediapipe_hand/app.py @@ -0,0 +1,248 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import List, Tuple + +import cv2 +import numpy as np +import torch +from PIL.Image import Image + +from qai_hub_models.models._shared.mediapipe.app import MediaPipeApp +from qai_hub_models.models.mediapipe_hand.model import ( + DETECT_DSCALE, + DETECT_DXY, + DETECT_SCORE_SLIPPING_THRESHOLD, + HAND_LANDMARK_CONNECTIONS, + MIDDLE_FINDER_KEYPOINT_INDEX, + ROTATION_VECTOR_OFFSET_RADS, + WRIST_CENTER_KEYPOINT_INDEX, + MediaPipeHand, +) +from qai_hub_models.utils.bounding_box_processing import ( + compute_box_affine_crop_resize_matrix, +) +from qai_hub_models.utils.draw import draw_connections, draw_points +from qai_hub_models.utils.image_processing import ( + apply_affine_to_coordinates, + apply_batched_affines_to_frame, + numpy_image_to_torch, +) + + +class MediaPipeHandApp(MediaPipeApp): + """ + This class consists of light-weight "app code" that is required to perform end to end inference with MediaPipe's hand landmark detector. + + The app uses 2 models: + * MediaPipeHandDetector + * MediaPipeHandLandmark + + See the class comment for the parent class for details. + """ + + def __init__( + self, + model: MediaPipeHand, + min_detector_hand_box_score: float = 0.95, + nms_iou_threshold: float = 0.3, + min_landmark_score: float = 0.5, + ): + """ + Construct a mediapipe hand application. + + Inputs: + model: MediaPipeHand model + Hand detection & landmark model container. + + See parent initializer for further parameter documentation. 
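+        Illustrative usage (a sketch mirroring demo.py and test.py in this
+        model directory; the `image` variable is assumed to be a PIL Image or
+        an RGB numpy frame):
+
+            app = MediaPipeHandApp(MediaPipeHand.from_pretrained())
+            annotated = app.predict_landmarks_from_image(image)[0]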
+ """ + super().__init__( + model.hand_detector, + model.hand_detector.anchors, + model.hand_landmark_detector, + model.hand_detector.get_input_spec()["image"][0][-2:], + model.hand_landmark_detector.get_input_spec()["image"][0][-2:], + WRIST_CENTER_KEYPOINT_INDEX, + MIDDLE_FINDER_KEYPOINT_INDEX, + ROTATION_VECTOR_OFFSET_RADS, + DETECT_DXY, + DETECT_DSCALE, + min_detector_hand_box_score, + DETECT_SCORE_SLIPPING_THRESHOLD, + nms_iou_threshold, + min_landmark_score, + HAND_LANDMARK_CONNECTIONS, + ) + + def predict_landmarks_from_image( + self, + pixel_values_or_image: torch.Tensor | np.ndarray | Image | List[Image], + raw_output: bool = False, + ) -> Tuple[ + List[torch.Tensor | None], + List[torch.Tensor | None], + List[torch.Tensor | None], + List[List[bool] | None], + ] | List[np.ndarray]: + """ + From the provided image or tensor, predict the bounding boxes & classes of the hand detected within. + + Parameters: + See parent function documentation. + + Returns: + See parent function documentation for generic return values. + + If raw_output is false, returns an additional output: + + batched_is_right_hand: List[List[bool] | None]] + Whether each landmark represents a right (True) or left (False) hand. + Organized like the following: + [ + # Batch 0 (for Input Image 0) + [ + True (for Selected Landmark 1) + False (Selected Landmark 2) + ... + ] + # Batch 1 (for Input Image 1) + None # (this image has no detected palm) + ... + ] + """ + return super().predict_landmarks_from_image(pixel_values_or_image, raw_output) # type: ignore + + def _draw_predictions( + self, + NHWC_int_numpy_frames: List[np.ndarray], + batched_selected_boxes: List[torch.Tensor | None], + batched_selected_keypoints: List[torch.Tensor | None], + batched_roi_4corners: List[torch.Tensor | None], + batched_selected_landmarks: List[torch.Tensor | None], + batched_is_right_hand: List[List[bool] | None], + ): + """ + Override of mediapipe::app.py::MediaPipeApp::draw_outputs + Also draws whether the detection is a right or left hand. + + Additional inputs: + batched_is_right_hand: List[List[bool] | None] + True if the detection is a right hand, false if it's a left hand. None if no hand detected. + """ + for batch_idx in range(len(NHWC_int_numpy_frames)): + image = NHWC_int_numpy_frames[batch_idx] + ld = batched_selected_landmarks[batch_idx] + box = batched_selected_boxes[batch_idx] + kp = batched_selected_keypoints[batch_idx] + roi_4corners = batched_roi_4corners[batch_idx] + irh = batched_is_right_hand[batch_idx] + + if box is not None and kp is not None and roi_4corners is not None: + self._draw_box_and_roi(image, box, kp, roi_4corners) + if ld is not None and irh is not None: + self._draw_landmarks(image, ld, irh) + + def _draw_landmarks( + self, + NHWC_int_numpy_frame: np.ndarray, + landmarks: torch.Tensor, + is_right_hand: List[bool], + ): + """ + Override of mediapipe::app.py::MediaPipeApp::draw_landmarks + Also draws whether the detection is a right or left hand. 
+ """ + for ldm, irh in zip(landmarks, is_right_hand): + # Draw landmark points + draw_points(NHWC_int_numpy_frame, ldm[:, :2], (0, 255, 0)) + # Draw connections between landmark points + if self.landmark_connections: + draw_connections( + NHWC_int_numpy_frame, + ldm[:, :2], + self.landmark_connections, + (255 if irh else 0, 0, 0 if irh else 255), + 2, + ) + + def _run_landmark_detector( + self, + NHWC_int_numpy_frames: List[np.ndarray], + batched_roi_4corners: List[torch.Tensor | None], + ) -> Tuple[List[torch.Tensor | None], List[List[bool] | None]]: + """ + Override of mediapipe::app.py::MediaPipeApp::run_landmark_detector + Additionally returns whether the detection is a right or left hand. + """ + + # selected landmarks for the ROI (if any) + # List[torch.Tensor(shape=[Num Selected Landmarks, K, 3])], + # where K == number of landmark keypoints, 3 == (x, y, p) + # + # A list element will be None if there is no ROI. + batched_selected_landmarks: List[torch.Tensor | None] = [] + + # whether the selected landmarks for the ROI (if applicable) are for a left or right hand + # + # A list element will be None if there is no ROI. + batched_is_right_hand: List[List[bool] | None] = [] + + # For each input image... + for batch_idx, roi_4corners in enumerate(batched_roi_4corners): + if roi_4corners is None: + continue + affines = compute_box_affine_crop_resize_matrix( + roi_4corners[:, :3], self.landmark_input_dims + ) + + # Create input images by applying the affine transforms. + keypoint_net_inputs = numpy_image_to_torch( + apply_batched_affines_to_frame( + NHWC_int_numpy_frames[batch_idx], affines, self.landmark_input_dims + ) + ) + + # Compute hand landmarks. + ld_scores, lr, landmarks = self.landmark_detector( # type: ignore + keypoint_net_inputs + ) + + # Convert [0-1] ranged values of landmarks to integer pixel space. + landmarks[:, :, 0] *= self.landmark_input_dims[0] + landmarks[:, :, 1] *= self.landmark_input_dims[1] + + # 1 landmark is predicted for each ROI of each input image. + # For each region of interest & associated predicted landmarks... + all_landmarks = [] + all_lr = [] + for ld_batch_idx in range(landmarks.shape[0]): + # Exclude landmarks that don't meet the appropriate score threshold. + if ld_scores[ld_batch_idx] >= self.min_detector_box_score: + # Apply the inverse of affine transform used above to the landmark coordinates. + # This will convert the coordinates to their locations in the original input image. + inverted_affine = torch.from_numpy( + cv2.invertAffineTransform(affines[ld_batch_idx]) + ).float() + landmarks[ld_batch_idx][:, :2] = apply_affine_to_coordinates( + landmarks[ld_batch_idx][:, :2], inverted_affine + ) + + # Add the predicted landmarks to our list. + all_landmarks.append(landmarks[ld_batch_idx]) + all_lr.append(torch.round(lr[ld_batch_idx]).item() == 1) + + # Add this batch of landmarks to the output list. + batched_selected_landmarks.append( + torch.stack(all_landmarks, dim=0) if all_landmarks else None + ) + batched_is_right_hand.append(all_lr) + else: + # Add None for these lists, since this batch has no predicted bounding boxes. 
+ batched_selected_landmarks.append(None) + batched_is_right_hand.append(None) + + return (batched_selected_landmarks, batched_is_right_hand) diff --git a/qai_hub_models/models/mediapipe_hand/demo.py b/qai_hub_models/models/mediapipe_hand/demo.py new file mode 100644 index 00000000..f0ba8fbf --- /dev/null +++ b/qai_hub_models/models/mediapipe_hand/demo.py @@ -0,0 +1,89 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import argparse + +import numpy as np +from PIL import Image + +from qai_hub_models.models.mediapipe_hand.app import MediaPipeHandApp +from qai_hub_models.models.mediapipe_hand.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + MediaPipeHand, +) +from qai_hub_models.utils.args import add_output_dir_arg +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.camera_capture import capture_and_display_processed_frames +from qai_hub_models.utils.display import display_or_save_image + +INPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "hand.jpeg" +) + + +# Run Mediapipe Hand landmark detection end-to-end on a sample image or camera stream. +# The demo will display output with the predicted landmarks & bounding boxes drawn. +def main(is_test: bool = False): + # Demo parameters + parser = argparse.ArgumentParser() + parser.add_argument( + "--image", + type=str, + required=False, + help="image file path or URL", + ) + parser.add_argument( + "--camera", + type=int, + default=0, + help="Camera Input ID", + ) + parser.add_argument( + "--score-threshold", + type=float, + default=0.95, + help="Score threshold for NonMaximumSuppression", + ) + parser.add_argument( + "--iou-threshold", + type=float, + default=0.3, + help="Intersection over Union (IoU) threshold for NonMaximumSuppression", + ) + add_output_dir_arg(parser) + + print( + "Note: This readme is running through torch, and not meant to be real-time without dedicated ML hardware." + ) + print("Use Ctrl+C in your terminal to exit.") + + args = parser.parse_args([] if is_test else None) + if is_test: + args.image = INPUT_IMAGE_ADDRESS + + # Load app + app = MediaPipeHandApp( + MediaPipeHand.from_pretrained(), args.score_threshold, args.iou_threshold + ) + print("Model and App Loaded") + + if args.image: + image = load_image(args.image) + pred_image = app.predict_landmarks_from_image(image) + out_image = Image.fromarray(pred_image[0], "RGB") + if not is_test: + display_or_save_image(out_image, args.output_dir) + else: + + def frame_processor(frame: np.ndarray) -> np.ndarray: + return app.predict_landmarks_from_image(frame)[0] # type: ignore + + capture_and_display_processed_frames( + frame_processor, "QAIHM Mediapipe Hand Demo", args.camera + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mediapipe_hand/export.py b/qai_hub_models/models/mediapipe_hand/export.py new file mode 100644 index 00000000..8d8cae99 --- /dev/null +++ b/qai_hub_models/models/mediapipe_hand/export.py @@ -0,0 +1,219 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. 
DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Mapping, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.mediapipe_hand import Model +from qai_hub_models.utils.args import ( + export_parser, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + +ALL_COMPONENTS = ["MediaPipeHandDetector", "MediaPipeHandLandmarkDetector"] + + +def export_model( + device: str = "Samsung Galaxy S23", + components: Optional[List[str]] = None, + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Mapping[ + str, Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] +] | List[str]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + components: List of sub-components of the model that will be exported. + Each component is compiled and profiled separately. + Defaults to ALL_COMPONENTS if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` + + Returns: + A Mapping from component_name to a 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). 
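+        Example (an illustrative sketch only; it requires Qualcomm AI Hub
+        access and simply uses the default device together with one of the
+        components listed in ALL_COMPONENTS):
+
+            jobs = export_model(
+                device="Samsung Galaxy S23",
+                components=["MediaPipeHandDetector"],
+                skip_inferencing=True,
+            )
+            compile_job, profile_job, _ = jobs["MediaPipeHandDetector"]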
+ """ + model_name = "mediapipe_hand" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + component_arg = components + components = components or ALL_COMPONENTS + for component in components: + if component not in ALL_COMPONENTS: + raise ValueError(f"Invalid component {component}.") + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "mediapipe_hand", + "MediaPipe-Hand-Detection", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + component_arg, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + components_dict = {} + if "MediaPipeHandDetector" in components: + components_dict["MediaPipeHandDetector"] = model.hand_detector + if "MediaPipeHandLandmarkDetector" in components: + components_dict["MediaPipeHandLandmarkDetector"] = model.hand_landmark_detector + + compile_jobs = {} + for component_name, component in components_dict.items(): + # Trace the model + input_spec = component.get_input_spec() + source_model = torch.jit.trace(component, make_torch_inputs(input_spec)) + + # 2. Compile the models to an on-device asset + model_compile_options = component.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image" + ) + print(f"Optimizing model {component_name} to run on-device.") + compile_jobs[component_name] = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=f"{component_name}", + options=model_compile_options, + ) + + # 3. Profile the model assets on real devices + profile_jobs = {} + if not skip_profiling: + for component_name in components: + print(f"Profiling model {component_name} on a hosted device.") + profile_jobs[component_name] = hub.submit_profile_job( + model=compile_jobs[component_name].get_target_model(), + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_jobs = {} + if not skip_inferencing: + for component_name in components: + print( + f"Running inference for {component_name} on a hosted device with example inputs." + ) + sample_inputs = components_dict[component_name].sample_inputs() + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_jobs[component_name] = hub.submit_inference_job( + model=compile_jobs[component_name].get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 5. Download the model assets to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + for component_name, compile_job in compile_jobs.items(): + target_model = compile_job.get_target_model() + target_model.download( + str(output_path / f"{model_name}_{component_name}.tflite") + ) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + for component_name in components: + profile_job = profile_jobs[component_name] + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + for component_name in components: + inference_job = inference_jobs[component_name] + sample_inputs = components_dict[component_name].sample_inputs() + torch_out = torch_inference(components_dict[component_name], sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + return { + component_name: ( + compile_jobs[component_name], + profile_jobs.get(component_name, None), + inference_jobs.get(component_name, None), + ) + for component_name in components + } + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, components=ALL_COMPONENTS) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mediapipe_hand/info.yaml b/qai_hub_models/models/mediapipe_hand/info.yaml new file mode 100644 index 00000000..32d75fb9 --- /dev/null +++ b/qai_hub_models/models/mediapipe_hand/info.yaml @@ -0,0 +1,37 @@ +name: MediaPipe-Hand-Detection +# id must match with the model dir name in qai_hub_models +id: mediapipe_hand +status: public +headline: Real-time hand detection optimized for mobile and edge. +domain: Computer Vision +description: The MediaPipe Hand Landmark Detector is a machine learning pipeline that + predicts bounding boxes and pose skeletons of hands in an image. +use_case: Object Detection +tags: + - real-time +research_paper: https://arxiv.org/abs/2006.10214 +research_paper_title: 'MediaPipe Hands: On-device Real-time Hand Tracking' +license: https://github.com/zmurez/MediaPipePyTorch/blob/master/LICENSE +source_repo: https://github.com/zmurez/MediaPipePyTorch/ +technical_details: + Input resolution: 256x256 + Number of parameters (MediaPipeHandDetector): 1.76M + Model size (MediaPipeHandDetector): 6.76 MB + Number of parameters (MediaPipeHandLandmarkDetector): 2.01M + Model size (MediaPipeHandLandmarkDetector): 7.71 MB +applicable_scenarios: + - Gesture Control + - Virtual Reality + - Gaming +related_models: + - mediapipe_face + - mediapipe_pose + - mediapipe_selfie +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: apache-2.0 +dataset: [] diff --git a/qai_hub_models/models/mediapipe_hand/model.py b/qai_hub_models/models/mediapipe_hand/model.py new file mode 100644 index 00000000..b41542ef --- /dev/null +++ b/qai_hub_models/models/mediapipe_hand/model.py @@ -0,0 +1,178 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable, Tuple + +import numpy as np +import torch + +from qai_hub_models.models._shared.mediapipe.utils import MediaPipePyTorchAsRoot +from qai_hub_models.utils.base_model import BaseModel, CollectionModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 + +# https://github.com/metalwhale/hand_tracking/blob/b2a650d61b4ab917a2367a05b85765b81c0564f2/run.py +# 8 12 16 20 +# | | | | +# 7 11 15 19 +# 4 | | | | +# | 6 10 14 18 +# 3 | | | | +# | 5---9---13--17 +# 2 \ / +# \ \ / +# 1 \ / +# \ \ / +# ------0- +HAND_LANDMARK_CONNECTIONS = ( + [ # Landmark model will output 18 points. They map to the points above. + (0, 1), + (1, 2), + (2, 3), + (3, 4), + (5, 6), + (6, 7), + (7, 8), + (9, 10), + (10, 11), + (11, 12), + (13, 14), + (14, 15), + (15, 16), + (17, 18), + (18, 19), + (19, 20), + (0, 5), + (5, 9), + (9, 13), + (13, 17), + (0, 17), + ] +) + +# Palm detector model parameters. +BATCH_SIZE = 1 +DETECT_SCORE_SLIPPING_THRESHOLD = 100 # Clip output scores to this maximum value. +DETECT_DXY, DETECT_DSCALE = ( + 0.5, + 2.5, +) # Modifiers applied to palm detector output bounding box to encapsulate the entire hand. +WRIST_CENTER_KEYPOINT_INDEX = 0 # The palm detector outputs several keypoints. This is the keypoint index for the wrist center. +MIDDLE_FINDER_KEYPOINT_INDEX = 2 # The palm detector outputs several keypoints. This is the keypoint index for the bottom of the middle finger. +ROTATION_VECTOR_OFFSET_RADS = ( + np.pi / 2 +) # Offset required when computing rotation of the detected palm. + + +class MediaPipeHand(CollectionModel): + def __init__( + self, + hand_detector: HandDetector, + hand_landmark_detector: HandLandmarkDetector, + ) -> None: + """ + Construct a mediapipe hand model. + + Inputs: + hand_detector: Callable[[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]] + Hand detection model. Input is an image, output is + [bounding boxes & keypoints, box & keypoint scores] + + hand_landmark_detector + Hand landmark detector model. Input is an image cropped to the hand. The hand must be upright + and un-tilted in the frame. 
Returns [landmark_scores, prob_is_right_hand, landmarks] + """ + super().__init__() + self.hand_detector = hand_detector + self.hand_landmark_detector = hand_landmark_detector + + @classmethod + def from_pretrained( + cls, + detector_weights: str = "blazepalm.pth", + detector_anchors: str = "anchors_palm.npy", + landmark_detector_weights: str = "blazehand_landmark.pth", + ) -> MediaPipeHand: + with MediaPipePyTorchAsRoot(): + from blazehand_landmark import BlazeHandLandmark + from blazepalm import BlazePalm + + palm_detector = BlazePalm() + palm_detector.load_weights(detector_weights) + palm_detector.load_anchors(detector_anchors) + palm_detector.min_score_thresh = 0.75 + hand_regressor = BlazeHandLandmark() + hand_regressor.load_weights(landmark_detector_weights) + + return cls( + HandDetector(palm_detector, palm_detector.anchors), + HandLandmarkDetector(hand_regressor), + ) + + +class HandDetector(BaseModel): + def __init__( + self, + detector: Callable[[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]], + anchors: torch.Tensor, + ): + super().__init__() + self.detector = detector + self.anchors = anchors + + def forward(self, image: torch.Tensor): + return self.detector(image) + + @classmethod + def from_pretrained( + cls, + detector_weights: str = "blazepalm.pth", + detector_anchors: str = "anchors_palm.npy", + ): + with MediaPipePyTorchAsRoot(): + from blazepalm import BlazePalm + + hand_detector = BlazePalm(back_model=True) + hand_detector.load_weights(detector_weights) + hand_detector.load_anchors(detector_anchors) + return cls(hand_detector, hand_detector.anchors) + + def get_input_spec(self, batch_size: int = BATCH_SIZE) -> InputSpec: + """ + Returns the input specification (name -> (shape, type) of the hand detector. + This can be used to submit profiling job on Qualcomm AI Hub. + """ + return {"image": ((batch_size, 3, 256, 256), "float32")} + + +class HandLandmarkDetector(BaseModel): + def __init__( + self, + detector: Callable[[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]], + ): + super().__init__() + self.detector = detector + + def forward(self, image: torch.Tensor): + return self.detector(image) + + @classmethod + def from_pretrained(cls, landmark_detector_weights: str = "blazehand_landmark.pth"): + with MediaPipePyTorchAsRoot(): + from blazehand_landmark import BlazeHandLandmark + + hand_regressor = BlazeHandLandmark() + hand_regressor.load_weights(landmark_detector_weights) + cls(hand_regressor) + + def get_input_spec(self, batch_size: int = BATCH_SIZE) -> InputSpec: + """ + Returns the input specification (name -> (shape, type) of the hand landmark detector. + This can be used to submit profiling job on Qualcomm AI Hub. 
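+        With the default batch size of 1 this evaluates to
+        {"image": ((1, 3, 256, 256), "float32")}, i.e. a single 3-channel
+        256x256 crop of the hand region in NCHW layout.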
+ """ + return {"image": ((batch_size, 3, 256, 256), "float32")} diff --git a/qai_hub_models/models/mediapipe_hand/perf.yaml b/qai_hub_models/models/mediapipe_hand/perf.yaml new file mode 100644 index 00000000..f79bb0af --- /dev/null +++ b/qai_hub_models/models/mediapipe_hand/perf.yaml @@ -0,0 +1,107 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: MediaPipeHandDetector + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 762.0 + throughput: 1312.3359580052493 + estimated_peak_memory_range: + min: 12288 + max: 3281536 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 151 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 151 + job_id: jwgolne4g + job_status: Passed + torchscript_onnx_qnn: + inference_time: 820.0 + throughput: 1219.5121951219512 + estimated_peak_memory_range: + min: 806912 + max: 6264240 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 196 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 196 + job_id: j7gjr2k7p + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:08:53.710000Z' +- name: MediaPipeHandLandmarkDetector + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 1017.0 + throughput: 983.284169124877 + estimated_peak_memory_range: + min: 24576 + max: 2409872 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 158 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 158 + job_id: j1pvlrz75 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 1088.0 + throughput: 919.1176470588235 + estimated_peak_memory_range: + min: 577536 + max: 53567440 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 209 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 209 + job_id: jlpe7w475 + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:12:22.243551Z' diff --git a/qai_hub_models/models/mediapipe_hand/requirements.txt b/qai_hub_models/models/mediapipe_hand/requirements.txt new file mode 100644 index 00000000..9c11ddeb --- /dev/null +++ b/qai_hub_models/models/mediapipe_hand/requirements.txt @@ -0,0 +1,2 @@ +opencv-python +requests diff --git a/qai_hub_models/models/mediapipe_hand/test.py b/qai_hub_models/models/mediapipe_hand/test.py new file mode 100644 index 00000000..f53eb193 --- /dev/null +++ b/qai_hub_models/models/mediapipe_hand/test.py @@ -0,0 +1,42 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models.mediapipe_hand.app import MediaPipeHandApp +from qai_hub_models.models.mediapipe_hand.demo import INPUT_IMAGE_ADDRESS +from qai_hub_models.models.mediapipe_hand.demo import main as demo_main +from qai_hub_models.models.mediapipe_hand.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + MediaPipeHand, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import skip_clone_repo_check + +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "hand_output.png" +) + +# Because we have not made a modification to the pytorch source network, +# no numerical tests are included for the model; only for the app. + + +@skip_clone_repo_check +def test_hand_app(): + input = load_image( + INPUT_IMAGE_ADDRESS, + ) + expected_output = load_image( + OUTPUT_IMAGE_ADDRESS, + ).convert("RGB") + app = MediaPipeHandApp(MediaPipeHand.from_pretrained()) + assert np.allclose( + app.predict_landmarks_from_image(input)[0], np.asarray(expected_output) + ) + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/mediapipe_pose/README.md b/qai_hub_models/models/mediapipe_pose/README.md new file mode 100644 index 00000000..19eb21a0 --- /dev/null +++ b/qai_hub_models/models/mediapipe_pose/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [MediaPipe-Pose-Estimation: Detect and track human body poses in real-time images and video streams](https://aihub.qualcomm.com/models/mediapipe_pose) + +The MediaPipe Pose Landmark Detector is a machine learning pipeline that predicts bounding boxes and pose skeletons of poses in an image. + +This is based on the implementation of MediaPipe-Pose-Estimation found +[here](https://github.com/zmurez/MediaPipePyTorch/). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/mediapipe_pose). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[mediapipe_pose]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.mediapipe_pose.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.mediapipe_pose.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. 
+- The license for the original implementation of MediaPipe-Pose-Estimation can be found + [here](https://github.com/zmurez/MediaPipePyTorch/blob/master/LICENSE). + + +## References +* [BlazePose: On-device Real-time Body Pose tracking](https://arxiv.org/abs/2006.10204) +* [Source Model Implementation](https://github.com/zmurez/MediaPipePyTorch/) diff --git a/qai_hub_models/models/mediapipe_pose/__init__.py b/qai_hub_models/models/mediapipe_pose/__init__.py new file mode 100644 index 00000000..394ab086 --- /dev/null +++ b/qai_hub_models/models/mediapipe_pose/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .app import MediaPipePoseApp as App # noqa: F401 +from .model import MODEL_ID # noqa: F401 +from .model import MediaPipePose as Model # noqa: F401 diff --git a/qai_hub_models/models/mediapipe_pose/app.py b/qai_hub_models/models/mediapipe_pose/app.py new file mode 100644 index 00000000..67f5b995 --- /dev/null +++ b/qai_hub_models/models/mediapipe_pose/app.py @@ -0,0 +1,121 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import List, Tuple + +import torch + +from qai_hub_models.models._shared.mediapipe.app import MediaPipeApp +from qai_hub_models.models.mediapipe_pose.model import ( + DETECT_DSCALE, + DETECT_DXY, + DETECT_SCORE_SLIPPING_THRESHOLD, + POSE_KEYPOINT_INDEX_END, + POSE_KEYPOINT_INDEX_START, + POSE_LANDMARK_CONNECTIONS, + ROTATION_VECTOR_OFFSET_RADS, + MediaPipePose, +) +from qai_hub_models.utils.bounding_box_processing import ( + compute_box_corners_with_rotation, +) +from qai_hub_models.utils.image_processing import compute_vector_rotation + + +class MediaPipePoseApp(MediaPipeApp): + """ + This class consists of light-weight "app code" that is required to perform end to end inference with MediaPipe's pose landmark detector. + + The app uses 2 models: + * MediaPipePoseDetector + * MediaPipePoseLandmark + + See the class comment for the parent class for details. + """ + + def __init__( + self, + model: MediaPipePose, + min_detector_pose_box_score: float = 0.75, + nms_iou_threshold: float = 0.3, + min_landmark_score: float = 0.5, + ): + """ + Construct a mediapipe pose application. + + Inputs: + model: MediaPipePose model + Pose detection & landmark model container. + + See parent initializer for further parameter documentation. + """ + + def _landmark_detector_ignore_third_output( + x: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + The Last landmark detector output ("mask") is not used by the demo application. + Wrap the detector in a function that discards the mask. 
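+            Only the first two outputs ([landmark_scores, landmarks]) are
+            forwarded to the shared MediaPipeApp pipeline.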
+ """ + out0, out1, _ = model.pose_landmark_detector(x) + return out0, out1 + + super().__init__( + model.pose_detector, + model.pose_detector.anchors, + _landmark_detector_ignore_third_output, + model.pose_detector.get_input_spec()["image"][0][-2:], + model.pose_landmark_detector.get_input_spec()["image"][0][-2:], + POSE_KEYPOINT_INDEX_START, + POSE_KEYPOINT_INDEX_END, + ROTATION_VECTOR_OFFSET_RADS, + DETECT_DXY, + DETECT_DSCALE, + min_detector_pose_box_score, + DETECT_SCORE_SLIPPING_THRESHOLD, + nms_iou_threshold, + min_landmark_score, + POSE_LANDMARK_CONNECTIONS, + ) + + def _compute_object_roi( + self, + batched_selected_boxes: List[torch.Tensor | None], + batched_selected_keypoints: List[torch.Tensor | None], + ) -> List[torch.Tensor | None]: + """ + See parent function for base functionality and parameter documentation. + + The MediaPipe pose pipeline computes the ROI not from the detector bounding box, + but from specific detected keypoints. This override implements that behavior. + """ + batched_selected_roi = [] + for boxes, keypoints in zip(batched_selected_boxes, batched_selected_keypoints): + if boxes is None or keypoints is None: + batched_selected_roi.append(None) + continue + + # Compute bounding box center and rotation + theta = compute_vector_rotation( + keypoints[:, self.keypoint_rotation_vec_start_idx, ...], + keypoints[:, self.keypoint_rotation_vec_end_idx, ...], + self.rotation_offset_rads, + ) + xc = keypoints[..., self.keypoint_rotation_vec_start_idx, 0] + yc = keypoints[..., self.keypoint_rotation_vec_start_idx, 1] + x1 = keypoints[..., self.keypoint_rotation_vec_end_idx, 0] + y1 = keypoints[..., self.keypoint_rotation_vec_end_idx, 1] + + # Square box always + w = ((xc - x1) ** 2 + (yc - y1) ** 2).sqrt() * 2 * self.detect_box_scale + h = w + + # Compute box corners from box center, width, height + batched_selected_roi.append( + compute_box_corners_with_rotation(xc, yc, w, h, theta) + ) + + return batched_selected_roi diff --git a/qai_hub_models/models/mediapipe_pose/demo.py b/qai_hub_models/models/mediapipe_pose/demo.py new file mode 100644 index 00000000..a59ffffd --- /dev/null +++ b/qai_hub_models/models/mediapipe_pose/demo.py @@ -0,0 +1,89 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import argparse + +import numpy as np +from PIL import Image + +from qai_hub_models.models.mediapipe_pose.app import MediaPipePoseApp +from qai_hub_models.models.mediapipe_pose.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + MediaPipePose, +) +from qai_hub_models.utils.args import add_output_dir_arg +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.camera_capture import capture_and_display_processed_frames +from qai_hub_models.utils.display import display_or_save_image + +INPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "pose.jpeg" +) + + +# Run Mediapipe Pose landmark detection end-to-end on a sample image or camera stream. +# The demo will display output with the predicted landmarks & bounding boxes drawn. +def main(is_test: bool = False): + # Demo parameters + parser = argparse.ArgumentParser() + parser.add_argument( + "--image", + type=str, + required=False, + help="image file path or URL. 
Image spatial dimensions (x and y) must be multiples", + ) + add_output_dir_arg(parser) + parser.add_argument( + "--camera", + type=int, + default=0, + help="Camera Input ID", + ) + parser.add_argument( + "--score-threshold", + type=float, + default=0.75, + help="Score threshold for NonMaximumSuppression", + ) + parser.add_argument( + "--iou-threshold", + type=float, + default=0.3, + help="Intersection over Union (IoU) threshold for NonMaximumSuppression", + ) + + args = parser.parse_args([] if is_test else None) + if is_test: + args.image = INPUT_IMAGE_ADDRESS + + print( + "Note: This readme is running through torch, and not meant to be real-time without dedicated ML hardware." + ) + print("Use Ctrl+C in your terminal to exit.") + + # Load app + app = MediaPipePoseApp( + MediaPipePose.from_pretrained(), args.score_threshold, args.iou_threshold + ) + print("Model and App Loaded") + + if args.image: + image = load_image(args.image).convert("RGB") + pred_image = app.predict_landmarks_from_image(image) + out = Image.fromarray(pred_image[0], "RGB") + if not is_test: + display_or_save_image(out, args.output_dir) + else: + + def frame_processor(frame: np.ndarray) -> np.ndarray: + return app.predict_landmarks_from_image(frame)[0] # type: ignore + + capture_and_display_processed_frames( + frame_processor, "QAIHM Mediapipe Pose Demo", args.camera + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mediapipe_pose/export.py b/qai_hub_models/models/mediapipe_pose/export.py new file mode 100644 index 00000000..4a363c31 --- /dev/null +++ b/qai_hub_models/models/mediapipe_pose/export.py @@ -0,0 +1,219 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Mapping, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.mediapipe_pose import Model +from qai_hub_models.utils.args import ( + export_parser, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + +ALL_COMPONENTS = ["MediaPipePoseDetector", "MediaPipePoseLandmarkDetector"] + + +def export_model( + device: str = "Samsung Galaxy S23", + components: Optional[List[str]] = None, + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Mapping[ + str, Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] +] | List[str]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. 
Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + components: List of sub-components of the model that will be exported. + Each component is compiled and profiled separately. + Defaults to ALL_COMPONENTS if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` + + Returns: + A Mapping from component_name to a 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "mediapipe_pose" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + component_arg = components + components = components or ALL_COMPONENTS + for component in components: + if component not in ALL_COMPONENTS: + raise ValueError(f"Invalid component {component}.") + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "mediapipe_pose", + "MediaPipe-Pose-Estimation", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + component_arg, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + components_dict = {} + if "MediaPipePoseDetector" in components: + components_dict["MediaPipePoseDetector"] = model.pose_detector + if "MediaPipePoseLandmarkDetector" in components: + components_dict["MediaPipePoseLandmarkDetector"] = model.pose_landmark_detector + + compile_jobs = {} + for component_name, component in components_dict.items(): + # Trace the model + input_spec = component.get_input_spec() + source_model = torch.jit.trace(component, make_torch_inputs(input_spec)) + + # 2. Compile the models to an on-device asset + model_compile_options = component.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image" + ) + print(f"Optimizing model {component_name} to run on-device.") + compile_jobs[component_name] = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=f"{component_name}", + options=model_compile_options, + ) + + # 3. 
Profile the model assets on real devices + profile_jobs = {} + if not skip_profiling: + for component_name in components: + print(f"Profiling model {component_name} on a hosted device.") + profile_jobs[component_name] = hub.submit_profile_job( + model=compile_jobs[component_name].get_target_model(), + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_jobs = {} + if not skip_inferencing: + for component_name in components: + print( + f"Running inference for {component_name} on a hosted device with example inputs." + ) + sample_inputs = components_dict[component_name].sample_inputs() + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_jobs[component_name] = hub.submit_inference_job( + model=compile_jobs[component_name].get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 5. Download the model assets to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + for component_name, compile_job in compile_jobs.items(): + target_model = compile_job.get_target_model() + target_model.download( + str(output_path / f"{model_name}_{component_name}.tflite") + ) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + for component_name in components: + profile_job = profile_jobs[component_name] + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + for component_name in components: + inference_job = inference_jobs[component_name] + sample_inputs = components_dict[component_name].sample_inputs() + torch_out = torch_inference(components_dict[component_name], sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + return { + component_name: ( + compile_jobs[component_name], + profile_jobs.get(component_name, None), + inference_jobs.get(component_name, None), + ) + for component_name in components + } + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, components=ALL_COMPONENTS) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mediapipe_pose/info.yaml b/qai_hub_models/models/mediapipe_pose/info.yaml new file mode 100644 index 00000000..23e2045b --- /dev/null +++ b/qai_hub_models/models/mediapipe_pose/info.yaml @@ -0,0 +1,37 @@ +name: MediaPipe-Pose-Estimation +# id must match with the model dir name in qai_hub_models +id: mediapipe_pose +status: public +headline: Detect and track human body poses in real-time images and video streams. +domain: Computer Vision +description: The MediaPipe Pose Landmark Detector is a machine learning pipeline that + predicts bounding boxes and pose skeletons of poses in an image. 
+use_case: Pose Estimation +tags: + - real-time +research_paper: https://arxiv.org/abs/2006.10204 +research_paper_title: 'BlazePose: On-device Real-time Body Pose tracking' +license: https://github.com/zmurez/MediaPipePyTorch/blob/master/LICENSE +source_repo: https://github.com/zmurez/MediaPipePyTorch/ +technical_details: + Input resolution: 256x256 + Number of parameters (MediaPipePoseDetector): 815K + Model size (MediaPipePoseDetector): 3.14 MB + Number of parameters (MediaPipePoseLandmarkDetector): 3.37M + Model size (MediaPipePoseLandmarkDetector): 12.9 MB +applicable_scenarios: + - Accessibility + - Augmented Reality + - ARVR +related_models: + - mediapipe_hand + - mediapipe_face + - mediapipe_selfie +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: apache-2.0 +dataset: [] diff --git a/qai_hub_models/models/mediapipe_pose/model.py b/qai_hub_models/models/mediapipe_pose/model.py new file mode 100644 index 00000000..ad32a893 --- /dev/null +++ b/qai_hub_models/models/mediapipe_pose/model.py @@ -0,0 +1,174 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable, Tuple + +import torch + +from qai_hub_models.models._shared.mediapipe.utils import MediaPipePyTorchAsRoot +from qai_hub_models.utils.base_model import BaseModel, CollectionModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 + +POSE_LANDMARK_CONNECTIONS = [ + (0, 1), + (1, 2), + (2, 3), + (3, 7), + (0, 4), + (4, 5), + (5, 6), + (6, 8), + (9, 10), + (11, 13), + (13, 15), + (15, 17), + (17, 19), + (19, 15), + (15, 21), + (12, 14), + (14, 16), + (16, 18), + (18, 20), + (20, 16), + (16, 22), + (11, 12), + (12, 24), + (24, 23), + (23, 11), +] + + +# pose detector model parameters. +BATCH_SIZE = 1 +DETECT_SCORE_SLIPPING_THRESHOLD = 100 # Clip output scores to this maximum value. +DETECT_DXY, DETECT_DSCALE = ( + 0, + 1.5, +) # Modifiers applied to pose detector output bounding box to encapsulate the entire pose. +POSE_KEYPOINT_INDEX_START = 2 # The pose detector outputs several keypoints. This is the keypoint index for the bottom. +POSE_KEYPOINT_INDEX_END = 3 # The pose detector outputs several keypoints. This is the keypoint index for the top. +ROTATION_VECTOR_OFFSET_RADS = ( + torch.pi / 2 +) # Offset required when computing rotation of the detected pose. + + +class MediaPipePose(CollectionModel): + def __init__( + self, + pose_detector: PoseDetector, + pose_landmark_detector: PoseLandmarkDetector, + ) -> None: + """ + Construct a mediapipe pose model. + + Inputs: + pose_detector: Callable[[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]] + Pose detection model. Input is an image, output is + [bounding boxes & keypoints, box & kp scores] + + pose_landmark_detector + Pose landmark detector model. Input is an image cropped to the posing object. The pose must be upright + and un-tilted in the frame. Returns [landmark_scores, landmarks, mask] + + Note that although the landmark detector returns 3 values, + the third output (mask) is unused by this application. 
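+        Typical construction, as used by the demo and tests in this model
+        directory:
+
+            model = MediaPipePose.from_pretrained()
+            app = MediaPipePoseApp(model)  # defined in mediapipe_pose/app.py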
+ + """ + super().__init__() + self.pose_detector = pose_detector + self.pose_landmark_detector = pose_landmark_detector + + @classmethod + def from_pretrained( + cls, + detector_weights: str = "blazepose.pth", + detector_anchors: str = "anchors_pose.npy", + landmark_detector_weights: str = "blazepose_landmark.pth", + ) -> MediaPipePose: + """ + Load mediapipe models from the source repository. + Returns tuple[.blazepose.BlazePose, BlazePose Anchors, .blazepose_landmark.BlazePoseLandmark] + """ + with MediaPipePyTorchAsRoot(): + from blazepose import BlazePose + from blazepose_landmark import BlazePoseLandmark + + pose_detector = BlazePose() + pose_detector.load_weights(detector_weights) + pose_detector.load_anchors(detector_anchors) + pose_regressor = BlazePoseLandmark() + pose_regressor.load_weights(landmark_detector_weights) + + return cls( + PoseDetector(pose_detector, pose_detector.anchors), + PoseLandmarkDetector(pose_regressor), + ) + + +class PoseDetector(BaseModel): + def __init__( + self, + detector: Callable[[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]], + anchors: torch.Tensor, + ): + super().__init__() + self.detector = detector + self.anchors = anchors + + def forward(self, image: torch.Tensor): + return self.detector(image) + + @classmethod + def from_pretrained( + cls, + detector_weights: str = "blazepose.pth", + detector_anchors: str = "anchors_pose.npy", + ): + with MediaPipePyTorchAsRoot(): + from blazepose import BlazePose + + pose_detector = BlazePose(back_model=True) + pose_detector.load_weights(detector_weights) + pose_detector.load_anchors(detector_anchors) + return cls(pose_detector, pose_detector.anchors) + + def get_input_spec(self, batch_size: int = BATCH_SIZE) -> InputSpec: + """ + Returns the input specification (name -> (shape, type) of the pose detector. + This can be used to submit profiling job on Qualcomm AI Hub. + """ + return {"image": ((batch_size, 3, 128, 128), "float32")} + + +class PoseLandmarkDetector(BaseModel): + def __init__( + self, + detector: Callable[[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]], + ): + super().__init__() + self.detector = detector + + def forward(self, image: torch.Tensor): + return self.detector(image) + + @classmethod + def from_pretrained(cls, landmark_detector_weights: str = "blazepose_landmark.pth"): + with MediaPipePyTorchAsRoot(): + from blazepose_landmark import BlazePoseLandmark + + pose_regressor = BlazePoseLandmark() + pose_regressor.load_weights(landmark_detector_weights) + cls(pose_regressor) + + def get_input_spec(self, batch_size: int = BATCH_SIZE) -> InputSpec: + """ + Returns the input specification (name -> (shape, type) of the pose landmark detector. + This can be used to submit profiling job on Qualcomm AI Hub. 
+ """ + return {"image": ((batch_size, 3, 256, 256), "float32")} diff --git a/qai_hub_models/models/mediapipe_pose/perf.yaml b/qai_hub_models/models/mediapipe_pose/perf.yaml new file mode 100644 index 00000000..7b7ebe7c --- /dev/null +++ b/qai_hub_models/models/mediapipe_pose/perf.yaml @@ -0,0 +1,107 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: MediaPipePoseDetector + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 807.0 + throughput: 1239.1573729863692 + estimated_peak_memory_range: + min: 28672 + max: 1641432 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 106 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 106 + job_id: j1p3z1wz5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 865.0 + throughput: 1156.0693641618498 + estimated_peak_memory_range: + min: 212992 + max: 66280848 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 139 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 139 + job_id: j1pvlr9m5 + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:22:09.229999Z' +- name: MediaPipePoseLandmarkDetector + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 1023.0 + throughput: 977.5171065493646 + estimated_peak_memory_range: + min: 12288 + max: 3253904 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 229 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 229 + job_id: jwgoln4dg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 1101.0 + throughput: 908.2652134423251 + estimated_peak_memory_range: + min: 20480 + max: 149395360 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 305 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 305 + job_id: j7gjr2w8p + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:29:24.657545Z' diff --git a/qai_hub_models/models/mediapipe_pose/requirements.txt b/qai_hub_models/models/mediapipe_pose/requirements.txt new file mode 100644 index 00000000..9c11ddeb --- /dev/null +++ b/qai_hub_models/models/mediapipe_pose/requirements.txt @@ -0,0 +1,2 @@ +opencv-python +requests diff --git a/qai_hub_models/models/mediapipe_pose/test.py b/qai_hub_models/models/mediapipe_pose/test.py new file mode 100644 index 00000000..13a48320 --- /dev/null +++ b/qai_hub_models/models/mediapipe_pose/test.py @@ -0,0 +1,43 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models.mediapipe_pose.app import MediaPipePoseApp +from qai_hub_models.models.mediapipe_pose.demo import INPUT_IMAGE_ADDRESS +from qai_hub_models.models.mediapipe_pose.demo import main as demo_main +from qai_hub_models.models.mediapipe_pose.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + MediaPipePose, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import skip_clone_repo_check + +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "pose_output.png" +) + + +# Because we have not made a modification to the pytorch source network, +# no numerical tests are included for the model; only for the app. + + +@skip_clone_repo_check +def test_pose_app(): + input = load_image( + INPUT_IMAGE_ADDRESS, + ) + expected_output = load_image( + OUTPUT_IMAGE_ADDRESS, + ).convert("RGB") + app = MediaPipePoseApp(MediaPipePose.from_pretrained()) + assert np.allclose( + app.predict_landmarks_from_image(input)[0], np.asarray(expected_output) + ) + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/mediapipe_selfie/README.md b/qai_hub_models/models/mediapipe_selfie/README.md new file mode 100644 index 00000000..70de5777 --- /dev/null +++ b/qai_hub_models/models/mediapipe_selfie/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [MediaPipe-Selfie-Segmentation: Segments the person from background in a selfie image and realtime background segmentation in video conferencing](https://aihub.qualcomm.com/models/mediapipe_selfie) + +Light-weight model that segments a person from the background in square or landscape selfie and video conference imagery. + +This is based on the implementation of MediaPipe-Selfie-Segmentation found +[here](https://github.com/google/mediapipe/tree/master/mediapipe/modules/selfie_segmentation). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/mediapipe_selfie). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[mediapipe_selfie]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.mediapipe_selfie.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.mediapipe_selfie.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. 
+- The license for the original implementation of MediaPipe-Selfie-Segmentation can be found + [here](https://github.com/google/mediapipe/blob/master/LICENSE). + + +## References +* [Image segmentation guide](https://developers.google.com/mediapipe/solutions/vision/image_segmenter/) +* [Source Model Implementation](https://github.com/google/mediapipe/tree/master/mediapipe/modules/selfie_segmentation) diff --git a/qai_hub_models/models/mediapipe_selfie/__init__.py b/qai_hub_models/models/mediapipe_selfie/__init__.py new file mode 100644 index 00000000..566c8afc --- /dev/null +++ b/qai_hub_models/models/mediapipe_selfie/__init__.py @@ -0,0 +1,6 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .model import MODEL_ID # noqa: F401 +from .model import SelfieSegmentation as Model # noqa: F401 diff --git a/qai_hub_models/models/mediapipe_selfie/app.py b/qai_hub_models/models/mediapipe_selfie/app.py new file mode 100644 index 00000000..7ecd8723 --- /dev/null +++ b/qai_hub_models/models/mediapipe_selfie/app.py @@ -0,0 +1,44 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from typing import Callable + +import numpy as np +import torch +from PIL.Image import Image + +from qai_hub_models.utils.image_processing import preprocess_PIL_image + +RESIZE_SHAPE = (256, 256) + + +class SelfieSegmentationApp: + """ + This class consists of light-weight "app code" that is required to + perform end to end inference with UNet. + + For a given image input, the app will: + * Pre-process the image (resize and normalize) + * Run Selfie Segmentation model inference + * Convert the raw output into segmented image. + """ + + def __init__(self, model: Callable[[torch.Tensor], torch.Tensor]): + self.model = model + + def predict(self, image: Image) -> np.ndarray: + """ + From the provided image or tensor, generate the segmented mask. + + Parameters: + image: A PIL Image in RGB format. + + Returns: + mask: Segmented mask as np.array. + """ + image_tensor = preprocess_PIL_image(image.resize(RESIZE_SHAPE)) + output = self.model(image_tensor) + output = np.clip(np.reshape(output[0].detach().numpy(), (256, 256)), 0, 1) + + return output diff --git a/qai_hub_models/models/mediapipe_selfie/demo.py b/qai_hub_models/models/mediapipe_selfie/demo.py new file mode 100644 index 00000000..38c43043 --- /dev/null +++ b/qai_hub_models/models/mediapipe_selfie/demo.py @@ -0,0 +1,79 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Type + +from PIL.Image import fromarray + +from qai_hub_models.models.mediapipe_selfie.app import SelfieSegmentationApp +from qai_hub_models.models.mediapipe_selfie.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + SelfieSegmentation, +) +from qai_hub_models.utils.args import ( + add_output_dir_arg, + get_model_cli_parser, + model_from_cli_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.display import display_or_save_image + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "selfie.jpg" +) + + +# Run selfie segmentation app end-to-end on a sample image. +# The demo will display the predicted mask in a window. +def mediapipe_selfie_demo( + model_cls: Type[BaseModel], + default_image: str | CachedWebModelAsset, + is_test: bool = False, +): + # Demo parameters + parser = get_model_cli_parser(model_cls) + parser.add_argument( + "--image", + type=str, + default=default_image, + help="File path or URL to an input image to use for the demo.", + ) + add_output_dir_arg(parser) + args = parser.parse_args([] if is_test else None) + + # Load image & model + model = model_from_cli_args(model_cls, args) + print("Model loaded from pre-trained weights.") + image = load_image(args.image, verbose=True, desc="sample input image") + + # Run app + app = SelfieSegmentationApp(model) + mask = app.predict(image) * 255.0 + mask = fromarray(mask).convert("L") + if not is_test: + # Make sure the input image and mask are resized so the demo can visually + # show the images in the same resolution. + image = image.resize(mask.size) + display_or_save_image( + image, args.output_dir, "mediapipe_selfie_image.png", "sample input image" + ) + display_or_save_image( + mask, args.output_dir, "mediapipe_selfie_mask.png", "predicted mask" + ) + + +def main(is_test: bool = False): + mediapipe_selfie_demo( + SelfieSegmentation, + IMAGE_ADDRESS, + is_test, + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mediapipe_selfie/export.py b/qai_hub_models/models/mediapipe_selfie/export.py new file mode 100644 index 00000000..a4bdd82f --- /dev/null +++ b/qai_hub_models/models/mediapipe_selfie/export.py @@ -0,0 +1,194 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
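+#
+# Example (illustrative sketch only; assumes Qualcomm AI Hub access is configured,
+# otherwise export_model returns a list of strings instead of job handles):
+#
+#     from qai_hub_models.models.mediapipe_selfie.export import export_model
+#
+#     compile_job, profile_job, inference_job = export_model(
+#         device="Samsung Galaxy S23", skip_inferencing=True
+#     )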
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.mediapipe_selfie import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "mediapipe_selfie" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "mediapipe_selfie", + "MediaPipe-Selfie-Segmentation", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mediapipe_selfie/info.yaml b/qai_hub_models/models/mediapipe_selfie/info.yaml new file mode 100644 index 00000000..3c85bfbc --- /dev/null +++ b/qai_hub_models/models/mediapipe_selfie/info.yaml @@ -0,0 +1,39 @@ +name: MediaPipe-Selfie-Segmentation +# id must match with the model dir name in qai_hub_models +id: mediapipe_selfie +status: public +headline: Segments the person from background in a selfie image and realtime background + segmentation in video conferencing. +domain: Computer Vision +description: Light-weight model that segments a person from the background in square + or landscape selfie and video conference imagery. 
+use_case: Semantic Segmentation +tags: [] +research_paper: https://developers.google.com/mediapipe/solutions/vision/image_segmenter/ +research_paper_title: Image segmentation guide +license: https://github.com/google/mediapipe/blob/master/LICENSE +source_repo: + https://github.com/google/mediapipe/tree/master/mediapipe/modules/selfie_segmentation +technical_details: + Model checkpoint: Square + Input resolution (Square): 256x256 + Input resolution (Landscape): 144x256 + Number of parameters: 106K + Model size: 454 KB +applicable_scenarios: + - Camera + - Instant Photo Studio + - Video Conferencing + - Personalized Marketing Content + - Interactive Gaming Avatar + - Real-time Portrait Editor + - Dynamic Wallpaper Generator +related_models: + - sam +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: yes +license_type: apache-2.0 +dataset: [] diff --git a/qai_hub_models/models/mediapipe_selfie/model.py b/qai_hub_models/models/mediapipe_selfie/model.py new file mode 100644 index 00000000..b9c65235 --- /dev/null +++ b/qai_hub_models/models/mediapipe_selfie/model.py @@ -0,0 +1,334 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from tflite import Model +from torch import nn + +from qai_hub_models.models.mediapipe_selfie.utils import ( + build_state_dict, + get_convert, + get_probable_names, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +MEDIAPIPE_SELFIE_CKPT_MAP = dict( + square=CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "weights/selfie_segmentation.tflite" + ), + landscape=CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "weights/selfie_segmentation_landscape.tflite" + ), +) +DEFAULT_IMAGE_TYPE = "square" + + +class DepthwiseConv2d(nn.Module): + def __init__(self, in_channels, kernel_size=3, stride=2, padding=1): + super(DepthwiseConv2d, self).__init__() + self.depthwise = nn.Conv2d( + in_channels, + in_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=in_channels, + ) + + def forward(self, x): + x = self.depthwise(x) + return x + + +class SelfieSegmentation(BaseModel): + """Reconstruct the selfie segmentation graph for square as well as landscape image.""" + + def __init__(self, image_type: str = "square"): + """ + Parameters: + image_type: str (choices: square or landscape) + Instance of two model variations can be created: + * One for square images (H=W) + * One for rectangle images (landscape format) + + Returns: + graph: Based on the image type, torch.nn.Module is returned. + The only difference in architectures is that global average pool + is only present in the model trained for landscape images. 
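+
+        Example (illustrative sketch of the two variants' input shapes):
+            >>> SelfieSegmentation("square").get_input_spec()
+            {'image': ((1, 3, 256, 256), 'float32')}
+            >>> SelfieSegmentation("landscape").get_input_spec()
+            {'image': ((1, 3, 144, 256), 'float32')}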
+ + """ + if image_type not in ["square", "landscape"]: + raise ValueError(f"Unsupported image type {image_type}") + + super(SelfieSegmentation, self).__init__() + self.image_type = image_type + self.allow_avg = image_type != "landscape" + self.relu = nn.ReLU(inplace=True) + self.hardswish = nn.Hardswish() + self.sigmoid = nn.Sigmoid() + + self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1) + self.conv2 = nn.Conv2d(16, 16, 1) + self.depthwise1 = DepthwiseConv2d(16, 3, 2, 1) + if self.allow_avg: + self.avgpool1 = nn.AvgPool2d(kernel_size=64, stride=64, padding=0) + self.conv3 = nn.Conv2d(16, 8, kernel_size=1, stride=1, padding=0) + self.conv4 = nn.Conv2d(8, 16, kernel_size=1, stride=1, padding=0) + + self.conv5 = nn.Conv2d(16, 16, 1) + self.conv6 = nn.Conv2d(16, 72, 1) + self.depthwise2 = DepthwiseConv2d(72, 3, 2, 1) + + self.conv7 = nn.Conv2d(72, 24, 1) + self.conv8 = nn.Conv2d(24, 88, 1) + self.depthwise3 = DepthwiseConv2d(88, 3, 1, 1) + self.conv9 = nn.Conv2d(88, 24, 1) + + self.conv10 = nn.Conv2d(24, 96, kernel_size=1, stride=1, padding=0) + self.depthwise4 = DepthwiseConv2d(96, 5, 2, 2) + + if self.allow_avg: + self.avgpool2 = nn.AvgPool2d(kernel_size=16, stride=16, padding=0) + self.conv11 = nn.Conv2d(96, 24, kernel_size=1, stride=1, padding=0) + self.conv12 = nn.Conv2d(24, 96, kernel_size=1, stride=1, padding=0) + self.conv13 = nn.Conv2d(96, 32, 1) + + self.conv14 = nn.Conv2d(32, 128, kernel_size=1, stride=1, padding=0) + self.depthwise5 = DepthwiseConv2d(128, 5, 1, 2) + if self.allow_avg: + self.avgpool3 = nn.AvgPool2d(kernel_size=16, stride=16, padding=0) + self.conv15 = nn.Conv2d(128, 32, kernel_size=1, stride=1, padding=0) + self.conv16 = nn.Conv2d(32, 128, kernel_size=1, stride=1, padding=0) + + self.conv17 = nn.Conv2d(128, 32, 1) + + self.conv18 = nn.Conv2d(32, 128, kernel_size=1, stride=1, padding=0) + self.depthwise6 = DepthwiseConv2d(128, 5, 1, 2) + self.avgpool4 = nn.AvgPool2d(kernel_size=16, stride=16, padding=0) + self.conv19 = nn.Conv2d(128, 32, kernel_size=1, stride=1, padding=0) + self.conv20 = nn.Conv2d(32, 128, kernel_size=1, stride=1, padding=0) + + self.conv21 = nn.Conv2d(128, 32, 1) + + self.conv22 = nn.Conv2d(32, 96, kernel_size=1, stride=1, padding=0) + self.depthwise7 = DepthwiseConv2d(96, 5, 1, 2) + if self.allow_avg: + self.avgpool5 = nn.AvgPool2d(kernel_size=16, stride=16, padding=0) + self.conv23 = nn.Conv2d(96, 24, kernel_size=1, stride=1, padding=0) + self.conv24 = nn.Conv2d(24, 96, kernel_size=1, stride=1, padding=0) + + self.conv25 = nn.Conv2d(96, 32, 1) + + self.conv26 = nn.Conv2d(32, 96, kernel_size=1, stride=1, padding=0) + self.depthwise8 = DepthwiseConv2d(96, 5, 1, 2) + self.avgpool6 = nn.AvgPool2d(kernel_size=16, stride=16, padding=0) + self.conv27 = nn.Conv2d(96, 24, kernel_size=1, stride=1, padding=0) + self.conv28 = nn.Conv2d(24, 96, kernel_size=1, stride=1, padding=0) + + self.conv29 = nn.Conv2d(96, 32, 1) + + self.conv30 = nn.Conv2d(32, 128, 1) + if self.allow_avg: + self.avgpool6 = nn.AvgPool2d(kernel_size=16, stride=16, padding=0) + self.conv31 = nn.Conv2d(32, 128, kernel_size=1, stride=1, padding=0) + self.conv32 = nn.Conv2d(128, 24, 1) + if self.allow_avg: + self.avgpool7 = nn.AvgPool2d(kernel_size=32, stride=32, padding=0) + self.conv33 = nn.Conv2d(24, 24, 1) + self.conv34 = nn.Conv2d(24, 24, 1) + self.conv35 = nn.Conv2d(24, 24, 1) + self.depthwise9 = DepthwiseConv2d(24, 3, 1, 1) + + self.conv36 = nn.Conv2d(24, 16, 1) + self.avgpool8 = nn.AvgPool2d(kernel_size=64, stride=64, padding=0) + self.conv37 = nn.Conv2d(16, 16, 1) + 
self.conv38 = nn.Conv2d(16, 16, 1) + self.conv39 = nn.Conv2d(16, 16, 1) + self.depthwise10 = DepthwiseConv2d(16, 3, 1, 1) + + self.conv40 = nn.Conv2d(16, 16, 1) + if self.allow_avg: + self.avgpool9 = nn.AvgPool2d(kernel_size=128, stride=128, padding=0) + self.conv41 = nn.Conv2d(16, 16, 1) + self.conv42 = nn.Conv2d(16, 16, 1) + self.conv43 = nn.Conv2d(16, 16, 1) + self.depthwise11 = DepthwiseConv2d(16, 3, 1, 1) + self.transpose_conv = nn.ConvTranspose2d(16, 1, 2, 2, 0) + + self.upsample = nn.Upsample(scale_factor=2, mode="bilinear") + + @classmethod + def from_pretrained(cls, image_type: str = DEFAULT_IMAGE_TYPE): + """ + Load the TFLite weights and convert them to PyTorch checkpoint. + Weights for square input are different from landscape input. + Hence, based on image_type different weights are loaded and + different model instance is returned. + + Parameters: + image_type: str (choices: square or landscape) + Instance of two model variations can be created: + * One for square images (H=W) + * One for rectangle images (landscape format) + Returns: + Torch model with pretrained weights loaded. + """ + front_net = cls(image_type) + destination_path = MEDIAPIPE_SELFIE_CKPT_MAP[image_type].fetch() + front_data = open(destination_path, "rb").read() + front_model = Model.GetRootAsModel(front_data, 0) + front_subgraph = front_model.Subgraphs(0) + front_tensor_dict = { + (front_subgraph.Tensors(i).Name().decode("utf8")): i + for i in range(front_subgraph.TensorsLength()) + } + + front_probable_names = get_probable_names(front_subgraph) + front_convert = get_convert(front_net, front_probable_names) + front_state_dict = build_state_dict( + front_model, front_subgraph, front_tensor_dict, front_net, front_convert + ) + front_net.load_state_dict(front_state_dict, strict=True) + return front_net + + def get_input_spec(self, batch_size: int = 1) -> InputSpec: + if self.image_type == "square": + height, width = 256, 256 + else: + height, width = 144, 256 + return {"image": ((batch_size, 3, height, width), "float32")} + + def forward(self, image): + """ + Parameters: + image: Input image to be segmented. + Square: Shape [1, 3, 256, 256] + Landscape: Shape [1, 3, 144, 256] + Channel layout: RGB + + Returns: + output (mask): Mask with person and the background segmented. 
+ Square: Shape [1, 256, 256] + Landscape: Shape [1, 144, 256] + + """ + x = self.hardswish(self.conv1(image)) + x1 = x + x = self.relu(self.conv2(x)) + x = self.relu(self.depthwise1(x)) + x1_1 = x + if self.allow_avg: + x = self.avgpool1(x) + x = self.relu(self.conv3(x)) + x = self.sigmoid(self.conv4(x)) * x1_1 + x = self.conv5(x) + x3 = x + x = self.relu(self.conv6(x)) + x = self.relu(self.depthwise2(x)) + x = self.conv7(x) + x4 = x + x = self.relu(self.conv8(x)) + x = self.relu(self.depthwise3(x)) + x = self.conv9(x) + x = x + x4 + x2 = x + x = self.hardswish(self.conv10(x)) + x = self.hardswish(self.depthwise4(x)) + x2_2 = x + if self.allow_avg: + x = self.avgpool2(x) + x = self.relu(self.conv11(x)) + x = self.sigmoid(self.conv12(x)) * x2_2 + x = self.conv13(x) + + x5 = x + x = self.hardswish(self.conv14(x)) + x = self.hardswish(self.depthwise5(x)) + x3_3 = x + if self.allow_avg: + x = self.avgpool3(x) + x = self.relu(self.conv15(x)) + x = self.sigmoid(self.conv16(x)) * x3_3 + x = self.conv17(x) + x = x + x5 + + x5 = x + x = self.hardswish(self.conv18(x)) + x = self.hardswish(self.depthwise6(x)) + x4_4 = x + if self.allow_avg: + x = self.avgpool4(x) + x = self.relu(self.conv19(x)) + x = self.sigmoid(self.conv20(x)) * x4_4 + x = self.conv21(x) + x = x + x5 + + x5 = x + x = self.hardswish(self.conv22(x)) + x = self.hardswish(self.depthwise7(x)) + x5_5 = x + if self.allow_avg: + x = self.avgpool5(x) + x = self.relu(self.conv23(x)) + x = self.sigmoid(self.conv24(x)) * x5_5 + x = self.conv25(x) + x = x + x5 + + x5 = x + x = self.hardswish(self.conv26(x)) + x = self.hardswish(self.depthwise8(x)) + x6_6 = x + if self.allow_avg: + x = self.avgpool6(x) + x = self.relu(self.conv27(x)) + x = self.sigmoid(self.conv28(x)) * x6_6 + x = self.conv29(x) + x = x + x5 + + x7_7 = x + + x = self.relu(self.conv30(x)) + if self.allow_avg: + x7_7 = self.avgpool6(x7_7) + + x = x * self.sigmoid(self.conv31(x7_7)) + + x = self.upsample(x) + x6 = self.conv32(x) + x = x2 + x6 + if self.allow_avg: + x = self.avgpool7(x) + x = x6 + x2 * self.sigmoid(self.conv34(self.relu(self.conv33(x)))) + x7 = self.relu(self.conv35(x)) + x = x7 + self.relu(self.depthwise9(x7)) + + x = self.upsample(x) + x = self.conv36(x) + x8 = x + x = x3 + x8 + if self.allow_avg: + x = self.avgpool8(x) + x = x8 + x3 * self.sigmoid(self.conv38(self.relu(self.conv37(x)))) + x = self.relu(self.conv39(x)) + x9 = x + x = x9 + self.relu(self.depthwise10(x9)) + + x = self.upsample(x) + x = self.conv40(x) + x10 = x + x = x10 + x1 + if self.allow_avg: + x = self.avgpool9(x) + x = x10 + x1 * self.sigmoid(self.conv42(self.relu(self.conv41(x)))) + x11 = self.relu(self.conv43(x)) + x = x11 + self.relu(self.depthwise11(x11)) + + x = self.sigmoid(self.transpose_conv(x)) + + return x diff --git a/qai_hub_models/models/mediapipe_selfie/perf.yaml b/qai_hub_models/models/mediapipe_selfie/perf.yaml new file mode 100644 index 00000000..f2615794 --- /dev/null +++ b/qai_hub_models/models/mediapipe_selfie/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 
Gen 2 + - Snapdragon® 888 +models: +- name: MediaPipe-Selfie-Segmentation + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 817.0 + throughput: 1223.9902080783354 + estimated_peak_memory_range: + min: 12288 + max: 1802840 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 118 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 118 + job_id: jygzljvz5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 801.0 + throughput: 1248.4394506866417 + estimated_peak_memory_range: + min: 811008 + max: 91168416 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 139 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 139 + job_id: jz5wl3mzp + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:39:49.005922Z' diff --git a/qai_hub_models/models/mediapipe_selfie/requirements.txt b/qai_hub_models/models/mediapipe_selfie/requirements.txt new file mode 100644 index 00000000..a97d948f --- /dev/null +++ b/qai_hub_models/models/mediapipe_selfie/requirements.txt @@ -0,0 +1 @@ +tflite==2.10.0 diff --git a/qai_hub_models/models/mediapipe_selfie/test.py b/qai_hub_models/models/mediapipe_selfie/test.py new file mode 100644 index 00000000..2f62a966 --- /dev/null +++ b/qai_hub_models/models/mediapipe_selfie/test.py @@ -0,0 +1,42 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models.mediapipe_selfie.app import SelfieSegmentationApp +from qai_hub_models.models.mediapipe_selfie.demo import IMAGE_ADDRESS +from qai_hub_models.models.mediapipe_selfie.demo import main as demo_main +from qai_hub_models.models.mediapipe_selfie.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + SelfieSegmentation, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image + +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "selfie_output.jpg" +) + + +def test_output(): + input_img = load_image( + IMAGE_ADDRESS, + ) + model = SelfieSegmentation.from_pretrained() + output = SelfieSegmentationApp(model).predict(input_img) + expected_output = load_image( + OUTPUT_IMAGE_ADDRESS, + ).convert("L") + + expected_output = np.array(expected_output) + np.testing.assert_allclose( + np.round(np.asarray(expected_output, dtype=np.float32) / 255, 2), + np.round(np.asarray(output, dtype=np.float32), 2), + rtol=0.1, + atol=0.1, + ) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/mediapipe_selfie/utils.py b/qai_hub_models/models/mediapipe_selfie/utils.py new file mode 100644 index 00000000..4b542f5b --- /dev/null +++ b/qai_hub_models/models/mediapipe_selfie/utils.py @@ -0,0 +1,77 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# Source: https://github.com/hollance/BlazeFace-PyTorch/blob/master/Convert.ipynb +from collections import OrderedDict + +import numpy as np +import torch + + +def get_shape(tensor): + """Get shape for a TFLIte tensor.""" + return [tensor.Shape(i) for i in range(tensor.ShapeLength())] + + +def get_parameters(graph): + """Get parameters for a TFLite graph.""" + parameters = {} + for i in range(graph.TensorsLength()): + tensor = graph.Tensors(i) + if tensor.Buffer() > 0: + name = tensor.Name().decode("utf8") + parameters[name] = tensor.Buffer() + return parameters + + +def get_weights(model, graph, tensor_dict, tensor_name): + """Get weights using tensor name.""" + i = tensor_dict[tensor_name] + tensor = graph.Tensors(i) + buffer = tensor.Buffer() + shape = get_shape(tensor) + assert tensor.Type() == 1 + W = model.Buffers(buffer).DataAsNumpy() + W = W.view(dtype=np.float16) + W = W.reshape(shape) + return W + + +def get_probable_names(graph): + """Get the probable names for nodes in a graph.""" + probable_names = [] + for i in range(0, graph.TensorsLength()): + tensor = graph.Tensors(i) + if tensor.Buffer() > 0 and (tensor.Type() == 0 or tensor.Type() == 1): + probable_names.append(tensor.Name().decode("utf-8")) + return probable_names + + +def get_convert(net, probable_names): + """Convert state dict using probable node names.""" + convert = {} + i = 0 + for name, params in net.state_dict().items(): + convert[name] = probable_names[i] + i += 1 + return convert + + +def build_state_dict(model, graph, tensor_dict, net, convert): + """ + Building the state dict for PyTorch graph. A few layers + will need their weights to be transformed like Convolutions + and Depthwise Convolutions. + """ + new_state_dict = OrderedDict() + for dst, src in convert.items(): + W = get_weights(model, graph, tensor_dict, src) + if W.ndim == 4: + if W.shape[0] == 1: + W = W.transpose((3, 0, 1, 2)) # depthwise conv + else: + W = W.transpose((0, 3, 1, 2)) # regular conv + + new_state_dict[dst] = torch.from_numpy(np.array(W)) + return new_state_dict diff --git a/qai_hub_models/models/mnasnet05/README.md b/qai_hub_models/models/mnasnet05/README.md new file mode 100644 index 00000000..0db50a4c --- /dev/null +++ b/qai_hub_models/models/mnasnet05/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [MNASNet05: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/mnasnet05) + +MNASNet05 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of MNASNet05 found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/mnasnet05). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.mnasnet05.demo +``` +More details on the CLI tool can be found with the `--help` option. 
See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.mnasnet05.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of MNASNet05 can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). + + +## References +* [MnasNet: Platform-Aware Neural Architecture Search for Mobile](https://arxiv.org/abs/1807.11626) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py) diff --git a/qai_hub_models/models/mnasnet05/__init__.py b/qai_hub_models/models/mnasnet05/__init__.py new file mode 100644 index 00000000..8a4d509d --- /dev/null +++ b/qai_hub_models/models/mnasnet05/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import MNASNet05 as Model # noqa: F401 diff --git a/qai_hub_models/models/mnasnet05/demo.py b/qai_hub_models/models/mnasnet05/demo.py new file mode 100644 index 00000000..f674fb1c --- /dev/null +++ b/qai_hub_models/models/mnasnet05/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.mnasnet05.model import MNASNet05 + + +def main(is_test: bool = False): + imagenet_demo(MNASNet05, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mnasnet05/export.py b/qai_hub_models/models/mnasnet05/export.py new file mode 100644 index 00000000..48bc5bfe --- /dev/null +++ b/qai_hub_models/models/mnasnet05/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
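+#
+# Example (illustrative sketch only; assumes Qualcomm AI Hub access is configured):
+#
+#     from qai_hub_models.models.mnasnet05.export import export_model
+#
+#     jobs = export_model(device="Samsung Galaxy S23", output_dir="build/mnasnet05")
+#     # With downloading enabled (the default), the compiled asset is written to
+#     # build/mnasnet05/mnasnet05.tflite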
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.mnasnet05 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "mnasnet05" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "mnasnet05", + "MNASNet05", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mnasnet05/info.yaml b/qai_hub_models/models/mnasnet05/info.yaml new file mode 100644 index 00000000..be98a382 --- /dev/null +++ b/qai_hub_models/models/mnasnet05/info.yaml @@ -0,0 +1,40 @@ +name: MNASNet05 +# id must match with the model dir name in qai_hub_models +id: mnasnet05 +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: MNASNet05 is a machine learning model that can classify images from the + Imagenet dataset. It can also be used as a backbone in building more complex models + for specific use cases. 
+use_case: Image Classification +tags: + - backbone +research_paper: https://arxiv.org/abs/1807.11626 +research_paper_title: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile' +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 2.21M + Model size: 8.45 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/mnasnet05/model.py b/qai_hub_models/models/mnasnet05/model.py new file mode 100644 index 00000000..0562410a --- /dev/null +++ b/qai_hub_models/models/mnasnet05/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class MNASNet05(ImagenetClassifier): + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ImagenetClassifier: + net = tv_models.mnasnet0_5(weights=weights) + return cls(net) diff --git a/qai_hub_models/models/mnasnet05/perf.yaml b/qai_hub_models/models/mnasnet05/perf.yaml new file mode 100644 index 00000000..8282b9b2 --- /dev/null +++ b/qai_hub_models/models/mnasnet05/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: MNASNet05 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 370.0 + throughput: 2702.7027027027025 + estimated_peak_memory_range: + min: 12288 + max: 8955784 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 69 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 69 + job_id: jmg9zyxvp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 367.0 + throughput: 2724.7956403269754 + estimated_peak_memory_range: + min: 196608 + max: 36330664 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 102 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 102 + job_id: jnp1nwvlg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:13:59.738307Z' diff --git a/qai_hub_models/models/mnasnet05/test.py b/qai_hub_models/models/mnasnet05/test.py new file mode 100644 index 
00000000..e3758c14 --- /dev/null +++ b/qai_hub_models/models/mnasnet05/test.py @@ -0,0 +1,25 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.mnasnet05.demo import main as demo_main +from qai_hub_models.models.mnasnet05.model import MODEL_ID, MNASNet05 + + +def test_task(): + run_imagenet_classifier_test( + MNASNet05.from_pretrained(), MODEL_ID, probability_threshold=0.69 + ) + + +def test_trace(): + run_imagenet_classifier_trace_test(MNASNet05.from_pretrained()) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/mobilenet_v2/README.md b/qai_hub_models/models/mobilenet_v2/README.md new file mode 100644 index 00000000..a207207f --- /dev/null +++ b/qai_hub_models/models/mobilenet_v2/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [MobileNet-v2: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/mobilenet_v2) + +MobileNetV2 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of MobileNet-v2 found +[here](https://github.com/tonylins/pytorch-mobilenet-v2/tree/master). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/mobilenet_v2). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.mobilenet_v2.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.mobilenet_v2.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of MobileNet-v2 can be found + [here](https://github.com/tonylins/pytorch-mobilenet-v2/blob/master/LICENSE). 
+ + +## References +* [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) +* [Source Model Implementation](https://github.com/tonylins/pytorch-mobilenet-v2/tree/master) diff --git a/qai_hub_models/models/mobilenet_v2/__init__.py b/qai_hub_models/models/mobilenet_v2/__init__.py new file mode 100644 index 00000000..a9334a68 --- /dev/null +++ b/qai_hub_models/models/mobilenet_v2/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import MobileNetV2 as Model # noqa: F401 diff --git a/qai_hub_models/models/mobilenet_v2/demo.py b/qai_hub_models/models/mobilenet_v2/demo.py new file mode 100644 index 00000000..b2100921 --- /dev/null +++ b/qai_hub_models/models/mobilenet_v2/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.mobilenet_v2.model import MobileNetV2 + + +def main(is_test: bool = False): + imagenet_demo(MobileNetV2, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mobilenet_v2/export.py b/qai_hub_models/models/mobilenet_v2/export.py new file mode 100644 index 00000000..c3eda9f4 --- /dev/null +++ b/qai_hub_models/models/mobilenet_v2/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.mobilenet_v2 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. 
Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "mobilenet_v2" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "mobilenet_v2", + "MobileNet-v2", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." 
+ ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mobilenet_v2/info.yaml b/qai_hub_models/models/mobilenet_v2/info.yaml new file mode 100644 index 00000000..977fd7ed --- /dev/null +++ b/qai_hub_models/models/mobilenet_v2/info.yaml @@ -0,0 +1,42 @@ +name: MobileNet-v2 +# id must match with the model dir name in qai_hub_models +id: mobilenet_v2 +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: MobileNetV2 is a machine learning model that can classify images from + the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. +use_case: Image Classification +tags: + - backbone + - real-time +research_paper: https://arxiv.org/abs/1801.04381 +research_paper_title: 'MobileNetV2: Inverted Residuals and Linear Bottlenecks' +license: https://github.com/tonylins/pytorch-mobilenet-v2/blob/master/LICENSE +source_repo: https://github.com/tonylins/pytorch-mobilenet-v2/tree/master +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 3.49M + Model size: 13.3 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2_quantized + - squeezenet1_1 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/mobilenet_v2/model.py b/qai_hub_models/models/mobilenet_v2/model.py new file mode 100644 index 00000000..b8ba2bc4 --- /dev/null +++ b/qai_hub_models/models/mobilenet_v2/model.py @@ -0,0 +1,70 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from __future__ import annotations
+
+import json
+
+import torch
+
+from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier
+from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, SourceAsRoot
+
+MODEL_ID = __name__.split(".")[-2]
+MODEL_ASSET_VERSION = 1
+MOBILENETV2_WEIGHTS = "mobilenet_v2.pth.tar"
+# MOBILENETV2_WEIGHTS = "torch_mobilenetv2_w8a8_state_dict.pth"
+# from https://github.com/quic/aimet-model-zoo/blob/d09d2b0404d10f71a7640a87e9d5e5257b028802/aimet_zoo_torch/mobilenetv2/model/model_cards/mobilenetv2_w8a8.json
+MOBILENETV2_CFG = "mobilenetv2_w8a8.json"
+MOBILENETV2_SOURCE_REPOSITORY = "https://github.com/tonylins/pytorch-mobilenet-v2"
+MOBILENETV2_SOURCE_REPO_COMMIT = "99f213657e97de463c11c9e0eaca3bda598e8b3f"
+
+
+class MobileNetV2(ImagenetClassifier):
+    def __init__(
+        self,
+        mobilenet_v2_model: torch.nn.Module,
+    ) -> None:
+        super().__init__(mobilenet_v2_model)
+
+    @classmethod
+    def from_pretrained(cls) -> MobileNetV2:
+        model = _load_mobilenet_v2_source_model()
+        checkpoint_path = CachedWebModelAsset.from_asset_store(
+            MODEL_ID, MODEL_ASSET_VERSION, MOBILENETV2_WEIGHTS
+        ).fetch()
+        checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu"))
+        # rename classifier.1.weight -> classifier.weight, and bias similarly
+        state_dict = {
+            k.replace("classifier.1", "classifier"): v for k, v in checkpoint.items()
+        }
+        model.load_state_dict(state_dict)
+        model.eval()
+
+        return cls(model)
+
+
+def _load_mobilenet_v2_source_model(
+    keep_sys_path=False,
+) -> torch.nn.Module:
+    cfg_path = CachedWebModelAsset.from_asset_store(
+        MODEL_ID, MODEL_ASSET_VERSION, MOBILENETV2_CFG
+    ).fetch()
+    with open(cfg_path, "r") as f:
+        cfg = json.load(f)
+    with SourceAsRoot(
+        MOBILENETV2_SOURCE_REPOSITORY,
+        MOBILENETV2_SOURCE_REPO_COMMIT,
+        MODEL_ID,
+        MODEL_ASSET_VERSION,
+        keep_sys_path=keep_sys_path,
+    ):
+        # Necessary import: `MobileNetV2` is defined in the pytorch-mobilenet-v2 source repo.
+ from MobileNetV2 import MobileNetV2 as _MobileNetV2 + + return _MobileNetV2( + n_class=cfg["model_args"]["num_classes"], + input_size=cfg["model_args"]["input_size"], + width_mult=cfg["model_args"]["width_mult"], + ) diff --git a/qai_hub_models/models/mobilenet_v2/perf.yaml b/qai_hub_models/models/mobilenet_v2/perf.yaml new file mode 100644 index 00000000..7aa2220c --- /dev/null +++ b/qai_hub_models/models/mobilenet_v2/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: MobileNet-v2 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 533.0 + throughput: 1876.172607879925 + estimated_peak_memory_range: + min: 20480 + max: 1466112 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 70 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 70 + job_id: jep2r9vmg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 809.0 + throughput: 1236.0939431396787 + estimated_peak_memory_range: + min: 618496 + max: 5733064 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 104 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 104 + job_id: jqpyoj745 + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:29:39.371442Z' diff --git a/qai_hub_models/models/mobilenet_v2/test.py b/qai_hub_models/models/mobilenet_v2/test.py new file mode 100644 index 00000000..3b688c68 --- /dev/null +++ b/qai_hub_models/models/mobilenet_v2/test.py @@ -0,0 +1,35 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.imagenet_classifier.test_utils import (
+    run_imagenet_classifier_test,
+    run_imagenet_classifier_trace_test,
+)
+from qai_hub_models.models.mobilenet_v2.demo import main as demo_main
+from qai_hub_models.models.mobilenet_v2.model import (
+    MODEL_ASSET_VERSION,
+    MODEL_ID,
+    MobileNetV2,
+)
+from qai_hub_models.utils.testing import skip_clone_repo_check
+
+
+@skip_clone_repo_check
+def test_task():
+    run_imagenet_classifier_test(
+        MobileNetV2.from_pretrained(),
+        MODEL_ID,
+        asset_version=MODEL_ASSET_VERSION,
+        probability_threshold=0.39,
+    )
+
+
+@skip_clone_repo_check
+def test_trace():
+    run_imagenet_classifier_trace_test(MobileNetV2.from_pretrained())
+
+
+@skip_clone_repo_check
+def test_demo():
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/mobilenet_v2_quantized/README.md b/qai_hub_models/models/mobilenet_v2_quantized/README.md
new file mode 100644
index 00000000..b95cd4f0
--- /dev/null
+++ b/qai_hub_models/models/mobilenet_v2_quantized/README.md
@@ -0,0 +1,50 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [MobileNet-v2-Quantized: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/mobilenet_v2_quantized)
+
+MobileNetV2 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases.
+
+This is based on the implementation of MobileNet-v2-Quantized found
+[here](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/mobilenetv2). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/mobilenet_v2_quantized).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.mobilenet_v2_quantized.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.mobilenet_v2_quantized.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of MobileNet-v2-Quantized can be found
+  [here](https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf).
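+
+## Example: scripted export
+
+The export script above can also be driven from Python. The sketch below is
+illustrative rather than normative: the device name and output directory are
+placeholders, and the call requires Qualcomm® AI Hub access (without it,
+`export_model` returns the offline fallback from `export_without_hub_access`
+instead of job handles).
+
+```python
+from qai_hub_models.models.mobilenet_v2_quantized.export import export_model
+
+# Compile for a hosted device and download the .tflite asset, skipping the
+# profiling and inference stages (see the skip_* flags documented in export.py).
+compile_job, profile_job, inference_job = export_model(
+    device="Samsung Galaxy S23",                # any device listed by hub.get_devices()
+    output_dir="build/mobilenet_v2_quantized",  # defaults to <cwd>/build/<model_name>
+    skip_profiling=True,
+    skip_inferencing=True,
+)
+print(compile_job)  # profile_job and inference_job are None when skipped
+```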
+ + +## References +* [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) +* [Source Model Implementation](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/mobilenetv2) diff --git a/qai_hub_models/models/mobilenet_v2_quantized/__init__.py b/qai_hub_models/models/mobilenet_v2_quantized/__init__.py new file mode 100644 index 00000000..c84ab215 --- /dev/null +++ b/qai_hub_models/models/mobilenet_v2_quantized/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import MobileNetV2Quantizable as Model # noqa: F401 diff --git a/qai_hub_models/models/mobilenet_v2_quantized/demo.py b/qai_hub_models/models/mobilenet_v2_quantized/demo.py new file mode 100644 index 00000000..89bb7cce --- /dev/null +++ b/qai_hub_models/models/mobilenet_v2_quantized/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.mobilenet_v2_quantized.model import MobileNetV2Quantizable + + +def main(is_test: bool = False): + imagenet_demo(MobileNetV2Quantizable, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mobilenet_v2_quantized/export.py b/qai_hub_models/models/mobilenet_v2_quantized/export.py new file mode 100644 index 00000000..08b14483 --- /dev/null +++ b/qai_hub_models/models/mobilenet_v2_quantized/export.py @@ -0,0 +1,199 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.mobilenet_v2_quantized import Model +from qai_hub_models.utils.args import ( + TargetRuntime, + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) +from qai_hub_models.utils.qnn_helpers import get_qnn_inputs + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "mobilenet_v2_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "mobilenet_v2_quantized", + "MobileNet-v2-Quantized", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = model.convert_to_hub_source_model( + target_runtime, output_path, input_spec + ) + if target_runtime == TargetRuntime.TFLITE: + quant_calibration_data = None + else: + quant_calibration_data = model.get_calibration_data(target_runtime, input_spec) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + calibration_data=quant_calibration_data, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + hub_inputs = sample_inputs + if target_runtime == TargetRuntime.QNN: + hub_inputs = get_qnn_inputs(compile_job, sample_inputs) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mobilenet_v2_quantized/info.yaml b/qai_hub_models/models/mobilenet_v2_quantized/info.yaml new file mode 100644 index 00000000..21ac3a89 --- /dev/null +++ b/qai_hub_models/models/mobilenet_v2_quantized/info.yaml @@ -0,0 +1,42 @@ +name: MobileNet-v2-Quantized +id: mobilenet_v2_quantized +status: public +headline: Imagenet classifier and general purpose backbone. 
+domain: Computer Vision +description: MobileNetV2 is a machine learning model that can classify images from + the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. +use_case: Image Classification +tags: + - backbone + - real-time + - quantized +research_paper: https://arxiv.org/abs/1801.04381 +research_paper_title: 'MobileNetV2: Inverted Residuals and Linear Bottlenecks' +license: https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf +source_repo: https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/mobilenetv2 +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 3.49M + Model size: 3.42 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - squeezenet1_1 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/mobilenet_v2_quantized/model.py b/qai_hub_models/models/mobilenet_v2_quantized/model.py new file mode 100644 index 00000000..8ab961bb --- /dev/null +++ b/qai_hub_models/models/mobilenet_v2_quantized/model.py @@ -0,0 +1,114 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +# isort: off +# This verifies aimet is installed, and this must be included first. +from qai_hub_models.utils.quantization_aimet import ( + AIMETQuantizableMixin, +) + +# isort: on + +import torch +from aimet_torch.cross_layer_equalization import equalize_model +from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim + +from qai_hub_models.models.mobilenet_v2.model import ( + MobileNetV2, + _load_mobilenet_v2_source_model, +) +from qai_hub_models.utils.aimet.config_loader import get_default_aimet_config +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset +from qai_hub_models.utils.base_model import SourceModelFormat, TargetRuntime + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 2 + +# Weights downloaded from https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/torch_mobilenetv2_w8a8_state_dict.pth +QUANTIZED_WEIGHTS = "torch_mobilenetv2_w8a8_state_dict.pth" +DEFAULT_ENCODINGS = "encodings.json" + + +class MobileNetV2Quantizable(AIMETQuantizableMixin, MobileNetV2): + """MobileNetV2 with post train quantization support.""" + + def __init__( + self, + quant_sim_model: QuantizationSimModel, + ) -> None: + MobileNetV2.__init__(self, quant_sim_model.model) + AIMETQuantizableMixin.__init__( + self, + quant_sim_model, + ) + + def preferred_hub_source_model_format( + self, target_runtime: TargetRuntime + ) -> SourceModelFormat: + return SourceModelFormat.ONNX + + @classmethod + def from_pretrained( + cls, + aimet_encodings: str | None = "DEFAULT", + ) -> "MobileNetV2Quantizable": + """ + Parameters: + aimet_encodings: + if "DEFAULT": Loads the model with aimet encodings calibrated on imagenette. + elif None: Doesn't load any encodings. Used when computing encodings. + else: Interprets as a filepath and loads the encodings stored there. 
+ """ + # Load Model + model_fp32 = _load_mobilenet_v2_source_model( + keep_sys_path=True, + ) + input_shape = MobileNetV2(None).get_input_spec()["image_tensor"][0] + # Following + # https://github.com/quic/aimet-model-zoo/blob/develop/aimet_zoo_torch/mobilenetv2/model/model_definition.py#L64 + equalize_model(model_fp32, input_shape) + + # Download weights and quantization parameters + weights = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, QUANTIZED_WEIGHTS + ).fetch() + aimet_config = get_default_aimet_config() + + # Load the QAT/PTQ tuned model_fp32 weights + checkpoint = torch.load(weights, map_location=torch.device("cpu")) + state_dict = { + k.replace("classifier.1", "classifier"): v + for k, v in checkpoint["state_dict"].items() + } + model_fp32.load_state_dict(state_dict) + sim = QuantizationSimModel( + model_fp32, + quant_scheme="tf_enhanced", + default_param_bw=8, + default_output_bw=8, + config_file=aimet_config, + dummy_input=torch.rand(input_shape), + ) + + if aimet_encodings: + if aimet_encodings == "DEFAULT": + aimet_encodings = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_ENCODINGS + ).fetch() + load_encodings_to_sim(sim, aimet_encodings) + + sim.model.eval() + return cls(sim) + + def get_hub_compile_options( + self, + target_runtime: TargetRuntime, + other_compile_options: str = "", + ) -> str: + compile_options = super().get_hub_compile_options( + target_runtime, other_compile_options + ) + return compile_options + " --quantize_full_type int8 --quantize_io" diff --git a/qai_hub_models/models/mobilenet_v2_quantized/perf.yaml b/qai_hub_models/models/mobilenet_v2_quantized/perf.yaml new file mode 100644 index 00000000..9521afec --- /dev/null +++ b/qai_hub_models/models/mobilenet_v2_quantized/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: MobileNet-v2-Quantized + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 240.0 + throughput: 4166.666666666667 + estimated_peak_memory_range: + min: 12288 + max: 1557248 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 70 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 70 + job_id: j1p8em3zp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:15:21.382192Z' diff --git a/qai_hub_models/models/mobilenet_v2_quantized/test.py b/qai_hub_models/models/mobilenet_v2_quantized/test.py new file mode 100644 index 00000000..b6c94ba4 --- /dev/null +++ b/qai_hub_models/models/mobilenet_v2_quantized/test.py @@ -0,0 +1,41 @@ +# 
---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.imagenet_classifier.test_utils import (
+    run_imagenet_classifier_test,
+    run_imagenet_classifier_trace_test,
+)
+from qai_hub_models.models.mobilenet_v2_quantized.demo import main as demo_main
+from qai_hub_models.models.mobilenet_v2_quantized.model import (
+    MODEL_ASSET_VERSION,
+    MODEL_ID,
+    MobileNetV2Quantizable,
+)
+from qai_hub_models.utils.testing import skip_clone_repo_check
+
+
+@skip_clone_repo_check
+def test_task():
+    run_imagenet_classifier_test(
+        MobileNetV2Quantizable.from_pretrained(),
+        MODEL_ID,
+        asset_version=MODEL_ASSET_VERSION,
+        probability_threshold=0.56,
+        diff_tol=0.06,
+    )
+
+
+@skip_clone_repo_check
+def test_trace():
+    run_imagenet_classifier_trace_test(
+        MobileNetV2Quantizable.from_pretrained(),
+        is_quantized=True,
+        atol=0.03,
+    )
+
+
+@skip_clone_repo_check
+def test_demo():
+    # Verify demo does not crash
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/mobilenet_v3_large/README.md b/qai_hub_models/models/mobilenet_v3_large/README.md
new file mode 100644
index 00000000..130a4e3c
--- /dev/null
+++ b/qai_hub_models/models/mobilenet_v3_large/README.md
@@ -0,0 +1,50 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [MobileNet-v3-Large: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/mobilenet_v3_large)
+
+MobileNetV3Large is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases.
+
+This is based on the implementation of MobileNet-v3-Large found
+[here](https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/mobilenet_v3_large).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.mobilenet_v3_large.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.mobilenet_v3_large.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of MobileNet-v3-Large can be found
+  [here](https://github.com/pytorch/vision/blob/main/LICENSE).
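+
+## Example: using the model in Python
+
+The model class can also be used directly as a regular PyTorch module. The sketch
+below is illustrative: the `[1, 3, 224, 224]` input shape matches the input spec the
+export script traces with, and the exact output post-processing is defined by the
+shared `ImagenetClassifier` base class rather than by this README.
+
+```python
+import torch
+
+from qai_hub_models.models.mobilenet_v3_large import Model
+
+# Load the torchvision-backed classifier with its default ImageNet weights.
+model = Model.from_pretrained()
+model.eval()
+
+# Quick smoke test with a random image-shaped tensor.
+with torch.no_grad():
+    scores = model(torch.rand(1, 3, 224, 224))
+
+print(scores.shape)  # one score per ImageNet class
+```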
+ + +## References +* [Searching for MobileNetV3](https://arxiv.org/abs/1905.02244) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py) diff --git a/qai_hub_models/models/mobilenet_v3_large/__init__.py b/qai_hub_models/models/mobilenet_v3_large/__init__.py new file mode 100644 index 00000000..dc63af42 --- /dev/null +++ b/qai_hub_models/models/mobilenet_v3_large/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import MobileNetV3Large as Model # noqa: F401 diff --git a/qai_hub_models/models/mobilenet_v3_large/demo.py b/qai_hub_models/models/mobilenet_v3_large/demo.py new file mode 100644 index 00000000..46a8418d --- /dev/null +++ b/qai_hub_models/models/mobilenet_v3_large/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.mobilenet_v3_large.model import MobileNetV3Large + + +def main(is_test: bool = False): + imagenet_demo(MobileNetV3Large, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mobilenet_v3_large/export.py b/qai_hub_models/models/mobilenet_v3_large/export.py new file mode 100644 index 00000000..4e364b1d --- /dev/null +++ b/qai_hub_models/models/mobilenet_v3_large/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.mobilenet_v3_large import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "mobilenet_v3_large" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "mobilenet_v3_large", + "MobileNet-v3-Large", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mobilenet_v3_large/info.yaml b/qai_hub_models/models/mobilenet_v3_large/info.yaml new file mode 100644 index 00000000..d733455e --- /dev/null +++ b/qai_hub_models/models/mobilenet_v3_large/info.yaml @@ -0,0 +1,41 @@ +name: MobileNet-v3-Large +# id must match with the model dir name in qai_hub_models +id: mobilenet_v3_large +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: MobileNetV3Large is a machine learning model that can classify images + from the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. 
+use_case: Image Classification +tags: + - backbone + - real-time +research_paper: https://arxiv.org/abs/1905.02244 +research_paper_title: Searching for MobileNetV3 +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 5.47M + Model size: 20.9 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/mobilenet_v3_large/model.py b/qai_hub_models/models/mobilenet_v3_large/model.py new file mode 100644 index 00000000..578f0355 --- /dev/null +++ b/qai_hub_models/models/mobilenet_v3_large/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class MobileNetV3Large(ImagenetClassifier): + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ImagenetClassifier: + net = tv_models.mobilenet_v3_large(weights=weights) + return cls(net) diff --git a/qai_hub_models/models/mobilenet_v3_large/perf.yaml b/qai_hub_models/models/mobilenet_v3_large/perf.yaml new file mode 100644 index 00000000..5907cc30 --- /dev/null +++ b/qai_hub_models/models/mobilenet_v3_large/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: MobileNet-v3-Large + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 600.0 + throughput: 1666.6666666666667 + estimated_peak_memory_range: + min: 32768 + max: 17746392 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 134 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 134 + job_id: j1gly2ee5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:19:38.868341Z' diff --git a/qai_hub_models/models/mobilenet_v3_large/test.py 
b/qai_hub_models/models/mobilenet_v3_large/test.py
new file mode 100644
index 00000000..60de58c0
--- /dev/null
+++ b/qai_hub_models/models/mobilenet_v3_large/test.py
@@ -0,0 +1,23 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.imagenet_classifier.test_utils import (
+    run_imagenet_classifier_test,
+    run_imagenet_classifier_trace_test,
+)
+from qai_hub_models.models.mobilenet_v3_large.demo import main as demo_main
+from qai_hub_models.models.mobilenet_v3_large.model import MODEL_ID, MobileNetV3Large
+
+
+def test_task():
+    run_imagenet_classifier_test(MobileNetV3Large.from_pretrained(), MODEL_ID)
+
+
+def test_trace():
+    run_imagenet_classifier_trace_test(MobileNetV3Large.from_pretrained())
+
+
+def test_demo():
+    # Verify demo does not crash
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/mobilenet_v3_small/README.md b/qai_hub_models/models/mobilenet_v3_small/README.md
new file mode 100644
index 00000000..f11cb04d
--- /dev/null
+++ b/qai_hub_models/models/mobilenet_v3_small/README.md
@@ -0,0 +1,50 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [MobileNet-v3-Small: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/mobilenet_v3_small)
+
+MobileNetV3Small is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases.
+
+This is based on the implementation of MobileNet-v3-Small found
+[here](https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/mobilenet_v3_small).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.mobilenet_v3_small.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.mobilenet_v3_small.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of MobileNet-v3-Small can be found
+  [here](https://github.com/pytorch/vision/blob/main/LICENSE).
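+
+## Example: tracing the model locally
+
+Under the hood, the export script traces the model with `torch.jit.trace` before
+submitting a compile job. The standalone sketch below mirrors that step with the
+default input spec and simply saves the TorchScript asset locally; the output
+filename is arbitrary.
+
+```python
+import torch
+
+from qai_hub_models.models.mobilenet_v3_small import Model
+from qai_hub_models.utils.input_spec import make_torch_inputs
+
+# Load pretrained weights and build example inputs from the model's input spec,
+# just as export.py does before calling hub.submit_compile_job.
+model = Model.from_pretrained()
+input_spec = model.get_input_spec()
+traced_model = torch.jit.trace(model, make_torch_inputs(input_spec))
+
+traced_model.save("mobilenet_v3_small_traced.pt")
+```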
+ + +## References +* [Searching for MobileNetV3](https://arxiv.org/abs/1905.02244) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py) diff --git a/qai_hub_models/models/mobilenet_v3_small/__init__.py b/qai_hub_models/models/mobilenet_v3_small/__init__.py new file mode 100644 index 00000000..71613adf --- /dev/null +++ b/qai_hub_models/models/mobilenet_v3_small/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import MobileNetV3Small as Model # noqa: F401 diff --git a/qai_hub_models/models/mobilenet_v3_small/demo.py b/qai_hub_models/models/mobilenet_v3_small/demo.py new file mode 100644 index 00000000..b603d666 --- /dev/null +++ b/qai_hub_models/models/mobilenet_v3_small/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.mobilenet_v3_small.model import MobileNetV3Small + + +def main(is_test: bool = False): + imagenet_demo(MobileNetV3Small, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mobilenet_v3_small/export.py b/qai_hub_models/models/mobilenet_v3_small/export.py new file mode 100644 index 00000000..efbf745e --- /dev/null +++ b/qai_hub_models/models/mobilenet_v3_small/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.mobilenet_v3_small import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "mobilenet_v3_small" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "mobilenet_v3_small", + "MobileNet-v3-Small", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/mobilenet_v3_small/info.yaml b/qai_hub_models/models/mobilenet_v3_small/info.yaml new file mode 100644 index 00000000..e26f6a3a --- /dev/null +++ b/qai_hub_models/models/mobilenet_v3_small/info.yaml @@ -0,0 +1,41 @@ +name: MobileNet-v3-Small +# id must match with the model dir name in qai_hub_models +id: mobilenet_v3_small +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +use_case: Image Classification +description: MobileNetV3Small is a machine learning model that can classify images + from the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. 
+tags: + - backbone + - real-time +research_paper: https://arxiv.org/abs/1905.02244 +research_paper_title: Searching for MobileNetV3 +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 2.54M + Model size: 9.72 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/mobilenet_v3_small/model.py b/qai_hub_models/models/mobilenet_v3_small/model.py new file mode 100644 index 00000000..2eb733dc --- /dev/null +++ b/qai_hub_models/models/mobilenet_v3_small/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class MobileNetV3Small(ImagenetClassifier): + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ImagenetClassifier: + net = tv_models.mobilenet_v3_small(weights=weights) + return cls(net) diff --git a/qai_hub_models/models/mobilenet_v3_small/perf.yaml b/qai_hub_models/models/mobilenet_v3_small/perf.yaml new file mode 100644 index 00000000..65b78de3 --- /dev/null +++ b/qai_hub_models/models/mobilenet_v3_small/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: MobileNet-v3-Small + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 427.0 + throughput: 2341.92037470726 + estimated_peak_memory_range: + min: 12288 + max: 1724768 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 122 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 122 + job_id: j1gly20e5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:09:16.610887Z' diff --git a/qai_hub_models/models/mobilenet_v3_small/test.py b/qai_hub_models/models/mobilenet_v3_small/test.py new file mode 100644 
index 00000000..45f656b0 --- /dev/null +++ b/qai_hub_models/models/mobilenet_v3_small/test.py @@ -0,0 +1,23 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.mobilenet_v3_small.demo import main as demo_main +from qai_hub_models.models.mobilenet_v3_small.model import MODEL_ID, MobileNetV3Small + + +def test_task(): + run_imagenet_classifier_test(MobileNetV3Small.from_pretrained(), MODEL_ID) + + +def test_trace(): + run_imagenet_classifier_trace_test(MobileNetV3Small.from_pretrained()) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/openai_clip/README.md b/qai_hub_models/models/openai_clip/README.md new file mode 100644 index 00000000..a334800c --- /dev/null +++ b/qai_hub_models/models/openai_clip/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [OpenAI-Clip: Multi-modal foundational model for vision and language tasks like image/text similarity and for zero-shot image classification](https://aihub.qualcomm.com/models/openai_clip) + +Contrastive Language-Image Pre-Training (CLIP) uses a ViT like transformer to get visual features and a causal language model to get the text features. Both the text and visual features can then be used for a variety of zero-shot learning tasks. + +This is based on the implementation of OpenAI-Clip found +[here](https://github.com/openai/CLIP/). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/openai_clip). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[openai_clip]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.openai_clip.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.openai_clip.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of OpenAI-Clip can be found + [here](https://github.com/openai/CLIP/blob/main/LICENSE). 
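Beyond the CLI demo above, the model can also be driven from Python through the `ClipApp` wrapper added in `app.py` later in this change. A minimal sketch, assuming the `openai_clip` extra is installed; `photo.jpg` and the text prompt are placeholder inputs.

```python
from PIL import Image

from qai_hub_models.models.openai_clip import App as ClipApp, Model as Clip

# Load the pretrained ViT-B/16 CLIP wrapper (weights are fetched on first use).
app = ClipApp(Clip.from_pretrained())

image = app.process_image(Image.open("photo.jpg"))  # -> [1, 3, 224, 224] tensor
text = app.process_text("a photo of a dog")         # -> [1, 77] token ids

# Cosine-similarity logits (scaled by 100) between each image and each prompt.
print(app.predict_similarity(image, text))
```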
+ + +## References +* [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) +* [Source Model Implementation](https://github.com/openai/CLIP/) diff --git a/qai_hub_models/models/openai_clip/__init__.py b/qai_hub_models/models/openai_clip/__init__.py new file mode 100644 index 00000000..d477cf93 --- /dev/null +++ b/qai_hub_models/models/openai_clip/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .app import ClipApp as App # noqa: F401 +from .model import MODEL_ID # noqa: F401 +from .model import Clip as Model # noqa: F401 diff --git a/qai_hub_models/models/openai_clip/app.py b/qai_hub_models/models/openai_clip/app.py new file mode 100644 index 00000000..92ffd8c3 --- /dev/null +++ b/qai_hub_models/models/openai_clip/app.py @@ -0,0 +1,114 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Tuple + +import torch +from PIL.Image import Image + +from qai_hub_models.utils.input_spec import InputSpec + + +class ClipApp: + """ + This class consists of light-weight "app code" that is required to perform end to end inference with Clip. + + The app uses 1 model: + * Clip + + For a given image input, the app will: + * pre-process the image + * pre-process the text + * Run Clip inference + """ + + def __init__( + self, + clip_model: torch.nn.Module, + ): + # Open AI Clip + self.text_encoder = clip_model.text_encoder + self.image_encoder = clip_model.image_encoder + # Preprocess Compose function from Open AI clip + self.preprocess = clip_model.preprocess + self.tokenizer = clip_model.tokenizer_func + + def predict(self, *args, **kwargs): + # See predict_similarity. + return self.predict_similarity(*args, **kwargs) + + def predict_similarity( + self, image: torch.Tensor, text: torch.Tensor + ) -> torch.Tensor: + """ + Inputs: + image: torch.Tensor (Shape: [1, 3, 224, 224]) + Processed image tensor with values normalized to be between 0-1. + text: torch.Tensor (Shape: [1, 77]) + Processed text tensor to be tokenized. + + Outputs: + logits_per_image: torch.Tensor (Shape: [num_images, num_text_prompts]) + + Given a batch of images and a batch of text tokens, returns a tensor, + containing the logit scores corresponding to each image per text input. + The values are cosine similarities between the corresponding image and + text features, times 100. The logits of text per image can be computed + by doing a transpose. + + """ + with torch.no_grad(): + image_features = self.image_encoder(image) + text_features = self.text_encoder(text) + logits_per_image = image_features @ text_features.t() + return logits_per_image.cpu().numpy() + + def process_image(self, image: Image) -> torch.Tensor: + """Process image before calling forward. + + Inputs: + image: PIL.Image + Image loaded by Pillow must be provided. + Example: image = Image.open('') + + Outputs: + processed_image: torch.Tensor (shape [1, 3, 224, 224]) + Layout: RGB + The image is converted to torch tensor and normalized + to be in the range of 0-1. 
+ """ + return self.preprocess(image).unsqueeze(0) + + def process_text(self, text: str) -> torch.Tensor: + """Process text into tokens for forward call. + + Input: + text: str + Text prompt intended for inference. + Example: "golden hour" + + Output: + tokenized_tensor: torch.Tensor (shape: [1, 77]) + Example: tensor([[49406, 3878, 2232, 49407, 0, 0...]]) + + """ + return self.tokenizer(text) + + def get_input_spec( + self, + image_size: Tuple[int, int] = (224, 224), + text_size: Tuple[int, int] = (3, 77), + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. + if isinstance(image_size, int): + image_size = (image_size, image_size) + return { + "image": ((1, 3, *image_size), "float32"), + "text": (text_size, "int32"), + } diff --git a/qai_hub_models/models/openai_clip/demo.py b/qai_hub_models/models/openai_clip/demo.py new file mode 100644 index 00000000..2299ab4e --- /dev/null +++ b/qai_hub_models/models/openai_clip/demo.py @@ -0,0 +1,98 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import argparse +import os + +import numpy as np +import torch + +from qai_hub_models.models.openai_clip.app import ClipApp +from qai_hub_models.models.openai_clip.model import MODEL_ASSET_VERSION, MODEL_ID, Clip +from qai_hub_models.utils.args import add_output_dir_arg +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.display import display_or_save_image + + +# Run Clip on a directory of images with a query text. +# The demo will display similarity score for each image. +def main(is_test: bool = False): + # Demo parameters + parser = argparse.ArgumentParser() + parser.add_argument( + "--image-dir", + type=str, + default=None, + help="Path to image directory", + ) + parser.add_argument( + "--image-names", + type=str, + default="image1.jpg,image2.jpg,image3.jpg", + help="Specify names of the images in the folder.", + ) + parser.add_argument( + "--text", + type=str, + default="camping under the stars", + help="Text prompt for image search", + ) + add_output_dir_arg(parser) + args = parser.parse_args([] if is_test else None) + + # Load model + clip_model = Clip.from_pretrained() + app = ClipApp(clip_model=clip_model) + + image_names = args.image_names.split(",") + text = app.process_text(args.text) + images = [] + + # Iterate through images and text provided by user + for filename in image_names: + # Make sure the file is an image + if os.path.splitext(filename)[1].lower() in [".jpg", ".jpeg", ".png"]: + if not args.image_dir: + image = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, filename + ) + else: + image = os.path.join(args.image_dir, filename) + # Preprocess image and text pair + image = app.process_image(load_image(image)) + images.append(image) + + else: + print(f"Skipping file {filename}") + + images = torch.stack(images).squeeze(1) + + # Compute similarity + predictions = app.predict_similarity(images, text).flatten() + + # Display all the images and their score wrt to the text prompt provided. 
+ print(f"Searching images by prompt: {args.text}") + for i in range(len(predictions)): + print( + f"\t Image with name: {image_names[i]} has a similarity score={predictions[i]}" + ) + + # Show image + print("Displaying the most relevant image") + + selected_image = image_names[np.argmax(predictions)] + if not args.image_dir: + selected_image = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, selected_image + ) + else: + selected_image = os.path.join(args.image_dir, selected_image) + most_relevant_image = load_image(selected_image) + + if not is_test: + display_or_save_image(most_relevant_image, args.output_dir) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/openai_clip/export.py b/qai_hub_models/models/openai_clip/export.py new file mode 100644 index 00000000..54990d0b --- /dev/null +++ b/qai_hub_models/models/openai_clip/export.py @@ -0,0 +1,219 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Mapping, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.openai_clip import Model +from qai_hub_models.utils.args import ( + export_parser, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + +ALL_COMPONENTS = ["CLIPTextEncoder", "CLIPImageEncoder"] + + +def export_model( + device: str = "Samsung Galaxy S23", + components: Optional[List[str]] = None, + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Mapping[ + str, Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] +] | List[str]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + components: List of sub-components of the model that will be exported. + Each component is compiled and profiled separately. + Defaults to ALL_COMPONENTS if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. 
+ skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` + + Returns: + A Mapping from component_name to a 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "openai_clip" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + component_arg = components + components = components or ALL_COMPONENTS + for component in components: + if component not in ALL_COMPONENTS: + raise ValueError(f"Invalid component {component}.") + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "openai_clip", + "OpenAI-Clip", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + component_arg, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + components_dict = {} + if "CLIPTextEncoder" in components: + components_dict["CLIPTextEncoder"] = model.text_encoder + if "CLIPImageEncoder" in components: + components_dict["CLIPImageEncoder"] = model.image_encoder + + compile_jobs = {} + for component_name, component in components_dict.items(): + # Trace the model + input_spec = component.get_input_spec() + source_model = torch.jit.trace(component, make_torch_inputs(input_spec)) + + # 2. Compile the models to an on-device asset + model_compile_options = component.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image" + ) + print(f"Optimizing model {component_name} to run on-device.") + compile_jobs[component_name] = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=f"{component_name}", + options=model_compile_options, + ) + + # 3. Profile the model assets on real devices + profile_jobs = {} + if not skip_profiling: + for component_name in components: + print(f"Profiling model {component_name} on a hosted device.") + profile_jobs[component_name] = hub.submit_profile_job( + model=compile_jobs[component_name].get_target_model(), + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_jobs = {} + if not skip_inferencing: + for component_name in components: + print( + f"Running inference for {component_name} on a hosted device with example inputs." 
+ ) + sample_inputs = components_dict[component_name].sample_inputs() + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_jobs[component_name] = hub.submit_inference_job( + model=compile_jobs[component_name].get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 5. Download the model assets to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + for component_name, compile_job in compile_jobs.items(): + target_model = compile_job.get_target_model() + target_model.download( + str(output_path / f"{model_name}_{component_name}.tflite") + ) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + for component_name in components: + profile_job = profile_jobs[component_name] + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + for component_name in components: + inference_job = inference_jobs[component_name] + sample_inputs = components_dict[component_name].sample_inputs() + torch_out = torch_inference(components_dict[component_name], sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + return { + component_name: ( + compile_jobs[component_name], + profile_jobs.get(component_name, None), + inference_jobs.get(component_name, None), + ) + for component_name in components + } + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, components=ALL_COMPONENTS) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/openai_clip/info.yaml b/qai_hub_models/models/openai_clip/info.yaml new file mode 100644 index 00000000..2e439649 --- /dev/null +++ b/qai_hub_models/models/openai_clip/info.yaml @@ -0,0 +1,38 @@ +name: OpenAI-Clip +# id must match with the model dir name in qai_hub_models +id: openai_clip +status: public +headline: Multi-modal foundational model for vision and language tasks like image/text + similarity and for zero-shot image classification. +domain: Multimodal +description: Contrastive Language-Image Pre-Training (CLIP) uses a ViT like transformer + to get visual features and a causal language model to get the text features. Both + the text and visual features can then be used for a variety of zero-shot learning + tasks. 
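The auto-generated `export.py` above can also be driven programmatically via `export_model()`. A minimal sketch, assuming Qualcomm® AI Hub access is configured; the device name matches the default used throughout these scripts, and only the text encoder is exported here for brevity.

```python
from qai_hub_models.models.openai_clip.export import export_model

# Compile and profile only the text encoder; skip on-device inference.
# Artifacts are written under ./build/openai_clip by default.
jobs = export_model(
    device="Samsung Galaxy S23",
    components=["CLIPTextEncoder"],
    skip_inferencing=True,
)
compile_job, profile_job, inference_job = jobs["CLIPTextEncoder"]
```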
+use_case: Image Classification +tags: + - foundation +research_paper: https://arxiv.org/abs/2103.00020 +research_paper_title: Learning Transferable Visual Models From Natural Language Supervision +license: https://github.com/openai/CLIP/blob/main/LICENSE +source_repo: https://github.com/openai/CLIP/ +technical_details: + Model checkpoint: ViT-B/16 + Image input resolution: 224x224 + Text context length: 77 + Number of parameters (CLIPTextEncoder): 76.0M + Model size (CLIPTextEncoder): 290 MB + Number of parameters (CLIPImageEncoder): 115M + Model size (CLIPImageEncoder): 437 MB +applicable_scenarios: + - Image Search + - Content Moderation + - Caption Creation +related_models: [] +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: yes +license_type: mit +dataset: [] diff --git a/qai_hub_models/models/openai_clip/model.py b/qai_hub_models/models/openai_clip/model.py new file mode 100644 index 00000000..9e4c452d --- /dev/null +++ b/qai_hub_models/models/openai_clip/model.py @@ -0,0 +1,153 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable + +import torch +import torchvision + +from qai_hub_models.utils.asset_loaders import SourceAsRoot, callback_with_retry +from qai_hub_models.utils.base_model import BaseModel, CollectionModel +from qai_hub_models.utils.input_spec import InputSpec + +PRETRAINED_WEIGHTS = "ViT-B/16" +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +OPENAI_CLIP_SOURCE_REPOSITORY = "https://github.com/openai/CLIP" +OPENAI_CLIP_SOURCE_REPO_COMMIT = "a1d071733d7111c9c014f024669f959182114e33" + + +def load_clip_and_tokenizer(): + """Downloading pretrained weights via OpenAI and loading them.""" + with SourceAsRoot( + OPENAI_CLIP_SOURCE_REPOSITORY, + OPENAI_CLIP_SOURCE_REPO_COMMIT, + MODEL_ID, + MODEL_ASSET_VERSION, + ): + import clip + + tokenizer_func = clip.tokenize + net, preprocess = clip.load(PRETRAINED_WEIGHTS) + return net, preprocess, tokenizer_func + + +class Clip(CollectionModel): + def __init__( + self, + text_encoder: torch.nn.Module, + image_encoder: torch.nn.Module, + preprocess: torchvision.transforms.transforms.Compose, + tokenizer_func: Callable, + ): + super().__init__() + self.text_encoder = text_encoder + self.image_encoder = image_encoder + self.preprocess = preprocess + self.tokenizer_func = tokenizer_func + + @staticmethod + def from_pretrained(): + net, preprocess, tokenizer_func = callback_with_retry( + num_retries=5, callback=load_clip_and_tokenizer + ) + return Clip.from_source_model(net, preprocess, tokenizer_func) + + @staticmethod + def from_source_model(net, preprocess, tokenizer_func): + net = net.eval() + text_encoder = ClipTextEncoder(net) + image_encoder = ClipImageEncoder(net) + return Clip(text_encoder, image_encoder, preprocess, tokenizer_func) + + +class ClipTextEncoder(BaseModel): + def __init__(self, net: torch.nn.Module): + super().__init__() + """ Wrapper for OpenAI CLIP.""" + self.net = net + self.eot_token = 49407 + + def forward(self, text: torch.Tensor): + """Forward call on Open AI CLIP model. + + Inputs: + text: torch.Tensor (Shape: [1, 77] context_length=77) + Processed text tensor to be tokenized. 
+ + Outputs: + text_features: torch.Tensor [512 (transformer_width), num_text_prompts] + Raw text features are returned. When multiplied to image features, + you can obtain a matrix of cosine similarities between the + corresponding image and text input. + + """ + clipped_text = torch.clip(text, min=0, max=self.eot_token) + text_features = self.net.encode_text(clipped_text) + text_features = text_features / text_features.norm(dim=1, keepdim=True) + return text_features + + def get_input_spec( + self, + batch_size: int = 1, + text_length: int = 77, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. + return { + "text": ((batch_size, text_length), "int32"), + } + + @classmethod + def from_pretrained(cls): + return Clip.from_pretrained().text_encoder + + +class ClipImageEncoder(BaseModel): + def __init__(self, net: torch.nn.Module): + super().__init__() + """ Wrapper for OpenAI Clip.""" + self.net = net + self.eot_token = 49407 + + def forward(self, image: torch.Tensor): + """Forward call on Open AI Clip model. + + Inputs: + image: torch.Tensor (Shape: [1, 3, 224, 224]) + Processed image tensor with values normalized to be between 0-1. + Channel Layout: RGB + + Outputs: + image_features: torch.Tensor [num_images, 512 (transformer_width)] + Raw image features (multiplied to 100) are returned. + When multiplied to text features, you can obtain a + matrix of cosine similarities between the corresponding image and + text input. + + """ + image_features = self.net.encode_image(image) + image_features = image_features / image_features.norm(dim=1, keepdim=True) + return self.net.logit_scale.exp() * image_features + + def get_input_spec( + self, + height: int = 224, + width: int = 224, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. 
+ return { + "image": ((1, 3, height, width), "float32"), + } + + @classmethod + def from_pretrained(cls): + return Clip.from_pretrained().image_encoder diff --git a/qai_hub_models/models/openai_clip/perf.yaml b/qai_hub_models/models/openai_clip/perf.yaml new file mode 100644 index 00000000..2a6ddaed --- /dev/null +++ b/qai_hub_models/models/openai_clip/perf.yaml @@ -0,0 +1,107 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: CLIPTextEncoder + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 15528.0 + throughput: 64.39979392065945 + estimated_peak_memory_range: + min: 40960 + max: 3106072 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 574 + layers_on_gpu: 0 + layers_on_cpu: 2 + total_layers: 576 + job_id: j2p0m2veg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 8149.0 + throughput: 122.71444348999877 + estimated_peak_memory_range: + min: 40960 + max: 23728064 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 377 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 377 + job_id: jogk2q9og + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:25:08.294036Z' +- name: CLIPImageEncoder + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 127729.0 + throughput: 7.829075621041424 + estimated_peak_memory_range: + min: 159744 + max: 3867320 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 575 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 575 + job_id: j1p8em48p + job_status: Passed + torchscript_onnx_qnn: + inference_time: 50903.0 + throughput: 19.645207551617784 + estimated_peak_memory_range: + min: 86016 + max: 59741752 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 370 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 370 + job_id: jn5qlrmmp + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:30:00.084732Z' diff --git a/qai_hub_models/models/openai_clip/requirements.txt b/qai_hub_models/models/openai_clip/requirements.txt new file mode 100644 index 00000000..4245d2f9 --- /dev/null +++ b/qai_hub_models/models/openai_clip/requirements.txt @@ -0,0 +1,3 @@ +torchvision +ftfy==6.1.1 +regex==2023.10.3 diff --git a/qai_hub_models/models/openai_clip/test.py b/qai_hub_models/models/openai_clip/test.py new file mode 100644 index 00000000..83dae200 --- /dev/null +++ b/qai_hub_models/models/openai_clip/test.py @@ -0,0 +1,52 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models.openai_clip.app import ClipApp +from qai_hub_models.models.openai_clip.demo import main as demo_main +from qai_hub_models.models.openai_clip.model import MODEL_ASSET_VERSION, MODEL_ID, Clip +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import skip_clone_repo_check + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "image1.jpg" +) +TEXT = "pyramid in desert" + + +@skip_clone_repo_check +def test_prediction(): + """Verify our driver produces the correct score given image and text pair.""" + source_clip_model = Clip.from_pretrained() + clip_app = ClipApp(source_clip_model) + processed_sample_image = clip_app.process_image(load_image(IMAGE_ADDRESS)) + processed_sample_text = clip_app.process_text(TEXT) + assert clip_app.predict_similarity(processed_sample_image, processed_sample_text) + + +@skip_clone_repo_check +def test_task(): + """Verify that raw (numeric) outputs of both networks are the same.""" + source_clip_model = Clip.from_pretrained() + clip_app = ClipApp(source_clip_model) + processed_sample_image = clip_app.process_image(load_image(IMAGE_ADDRESS)) + processed_sample_text = clip_app.process_text(TEXT) + source_clip_text_model, source_clip_image_model = ( + source_clip_model.text_encoder, + source_clip_model.image_encoder, + ) + text_features = source_clip_text_model(processed_sample_text) + image_features = source_clip_image_model(processed_sample_image) + source_out = image_features @ text_features.t() + qaihm_out = clip_app.predict_similarity( + processed_sample_image, processed_sample_text + ) + + assert np.allclose(source_out.detach().numpy(), qaihm_out) + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/openpose/README.md b/qai_hub_models/models/openpose/README.md new file mode 100644 index 00000000..1ae81491 --- /dev/null +++ b/qai_hub_models/models/openpose/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [OpenPose: Human pose estimation](https://aihub.qualcomm.com/models/openpose) + +OpenPose is a machine learning model that estimates body and hand pose in an image and returns location and confidence for each of 19 joints. + +This is based on the implementation of OpenPose found +[here](https://github.com/CMU-Perceptual-Computing-Lab/openpose). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/openpose). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[openpose]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.openpose.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. 
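In addition to the CLI demo, the `OpenPoseApp` wrapper added in `app.py` below can be used directly from Python. A minimal sketch; `person.jpg` is a placeholder path, and the pretrained body-pose weights are downloaded on first use.

```python
from PIL import Image

from qai_hub_models.models.openpose import App as OpenPoseApp, Model as OpenPose

app = OpenPoseApp(OpenPose.from_pretrained())

# Run pose estimation and save the input image annotated with detected joints.
annotated = app.estimate_pose(Image.open("person.jpg"))
annotated.save("person_keypoints.png")
```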
+ +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.openpose.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of OpenPose can be found + [here](https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/LICENSE). + + +## References +* [OpenPose: Realtime Multi-Person 2D Pose Estimation using Part Affinity Fields](https://arxiv.org/abs/1812.08008) +* [Source Model Implementation](https://github.com/CMU-Perceptual-Computing-Lab/openpose) diff --git a/qai_hub_models/models/openpose/__init__.py b/qai_hub_models/models/openpose/__init__.py new file mode 100644 index 00000000..6956b47d --- /dev/null +++ b/qai_hub_models/models/openpose/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .app import OpenPoseApp as App # noqa: F401 +from .model import MODEL_ID # noqa: F401 +from .model import OpenPose as Model # noqa: F401 diff --git a/qai_hub_models/models/openpose/app.py b/qai_hub_models/models/openpose/app.py new file mode 100644 index 00000000..de4cf2b6 --- /dev/null +++ b/qai_hub_models/models/openpose/app.py @@ -0,0 +1,350 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import math +from typing import Tuple + +import numpy as np +import PIL +import torch +import torch.nn.functional as F +from PIL.Image import Image +from scipy.ndimage.filters import gaussian_filter + +from qai_hub_models.utils.image_processing import preprocess_PIL_image + + +class OpenPoseApp: + """ + This class consists of light-weight "app code" that is required to perform end to end inference with OpenPose. + + The app uses 1 model: + * OpenPose + + For a given image input, the app will: + * Run OpenPose inference on the image + * display the output keypoints drawn over the input image + """ + + def __init__(self, openpose_model): + self.model = openpose_model + + def predict(self, *args, **kwargs): + # See estimate_pose. 
+ return self.estimate_pose(*args, **kwargs) + + def estimate_pose( + self, + image: Image, + ) -> Image: + """ + Perform pose estimate on provided images + + Parameters: + image: Input PIL image + + Returns: + keypoints: List[] + A list of keypoints of joints in the image + """ + + # preprocess + pixel_values = preprocess_PIL_image(image) + + # Run prediction + paf, heatmap = self.model(pixel_values) + + # post process heatmaps and paf to get keypoints + keypoints, subset = getKeypointsFromPredictions( + paf, heatmap, pixel_values.shape[2], pixel_values.shape[3] + ) + + output_image = draw_keypoints(image, keypoints, radius=4, alpha=0.8) + + return output_image + + +def getKeypointsFromPredictions( + paf: torch.Tensor, heatmap: torch.Tensor, h, w +) -> Tuple[np.ndarray, np.ndarray]: + # upsample the PAF and heatmap to be the same size as the original image + target_size = (h, w) + upsampled_paf = ( + F.interpolate(paf, size=target_size, mode="bicubic", align_corners=False) + .detach() + .numpy() + ) + heatmap = ( + F.interpolate(heatmap, size=target_size, mode="bicubic", align_corners=False) + .detach() + .numpy() + ) + + # reshape for post processing + heatmap = np.transpose(heatmap.squeeze(), (1, 2, 0)) + paf = np.transpose(upsampled_paf.squeeze(), (1, 2, 0)) + + """ + The following post-processing code comes from the pytorch openpose repo, at + https://github.com/Hzzone/pytorch-openpose/blob/5ee71dc10020403dc3def2bb68f9b77c40337ae2/src/body.py#L67C9-L67C9 + """ + + all_peaks = [] + peak_counter = 0 + thre1 = 0.1 + thre2 = 0.05 + + for part in range(18): + map_ori = heatmap[:, :, part] + one_heatmap = gaussian_filter(map_ori, sigma=3) + + map_left = np.zeros(one_heatmap.shape) + map_left[1:, :] = one_heatmap[:-1, :] + map_right = np.zeros(one_heatmap.shape) + map_right[:-1, :] = one_heatmap[1:, :] + map_up = np.zeros(one_heatmap.shape) + map_up[:, 1:] = one_heatmap[:, :-1] + map_down = np.zeros(one_heatmap.shape) + map_down[:, :-1] = one_heatmap[:, 1:] + + peaks_binary = np.logical_and.reduce( + ( + one_heatmap >= map_left, + one_heatmap >= map_right, + one_heatmap >= map_up, + one_heatmap >= map_down, + one_heatmap > thre1, + ) + ) + peaks = list( + zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]) + ) # note reverse + peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks] + peak_id = range(peak_counter, peak_counter + len(peaks)) + peaks_with_score_and_id = [ + peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id)) + ] + + all_peaks.append(peaks_with_score_and_id) + peak_counter += len(peaks) + + # find connection in the specified sequence, center 29 is in the position 15 + limbSeq = [ + [2, 3], + [2, 6], + [3, 4], + [4, 5], + [6, 7], + [7, 8], + [2, 9], + [9, 10], + [10, 11], + [2, 12], + [12, 13], + [13, 14], + [2, 1], + [1, 15], + [15, 17], + [1, 16], + [16, 18], + [3, 17], + [6, 18], + ] + # the middle joints heatmap correpondence + mapIdx = [ + [31, 32], + [39, 40], + [33, 34], + [35, 36], + [41, 42], + [43, 44], + [19, 20], + [21, 22], + [23, 24], + [25, 26], + [27, 28], + [29, 30], + [47, 48], + [49, 50], + [53, 54], + [51, 52], + [55, 56], + [37, 38], + [45, 46], + ] + + connection_all = [] + special_k = [] + mid_num = 10 + + for k in range(len(mapIdx)): + score_mid = paf[:, :, [x - 19 for x in mapIdx[k]]] + candA = all_peaks[limbSeq[k][0] - 1] + candB = all_peaks[limbSeq[k][1] - 1] + nA = len(candA) + nB = len(candB) + indexA, indexB = limbSeq[k] + if nA != 0 and nB != 0: + connection_candidate = [] + for i in range(nA): + for j in range(nB): + 
vec = np.subtract(candB[j][:2], candA[i][:2]) + norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1]) + norm = max(0.001, norm) + vec = np.divide(vec, norm) + + startend = list( + zip( + np.linspace(candA[i][0], candB[j][0], num=mid_num), + np.linspace(candA[i][1], candB[j][1], num=mid_num), + ) + ) + + vec_x = np.array( + [ + score_mid[ + int(round(startend[index][1])), + int(round(startend[index][0])), + 0, + ] + for index in range(len(startend)) + ] + ) + vec_y = np.array( + [ + score_mid[ + int(round(startend[index][1])), + int(round(startend[index][0])), + 1, + ] + for index in range(len(startend)) + ] + ) + + score_midpts = np.multiply(vec_x, vec[0]) + np.multiply( + vec_y, vec[1] + ) + score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min( + 0.5 * h / norm - 1, 0 + ) + criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len( + score_midpts + ) + criterion2 = score_with_dist_prior > 0 + if criterion1 and criterion2: + connection_candidate.append( + [ + i, + j, + score_with_dist_prior, + score_with_dist_prior + candA[i][2] + candB[j][2], + ] + ) + + connection_candidate = sorted( + connection_candidate, key=lambda x: x[2], reverse=True + ) + connection = np.zeros((0, 5)) + for c in range(len(connection_candidate)): + i, j, s = connection_candidate[c][0:3] + if i not in connection[:, 3] and j not in connection[:, 4]: + connection = np.vstack( + [connection, [candA[i][3], candB[j][3], s, i, j]] + ) + if len(connection) >= min(nA, nB): + break + + connection_all.append(connection) + else: + special_k.append(k) + connection_all.append([]) + + # last number in each row is the total parts number of that person + # the second last number in each row is the score of the overall configuration + subset = -1 * np.ones((0, 20)) + candidate = np.array([item for sublist in all_peaks for item in sublist]) + + for k in range(len(mapIdx)): + if k not in special_k: + partAs = connection_all[k][:, 0] + partBs = connection_all[k][:, 1] + indexA, indexB = np.array(limbSeq[k]) - 1 + + for i in range(len(connection_all[k])): # = 1:size(temp,1) + found = 0 + subset_idx = [-1, -1] + for j in range(len(subset)): # 1:size(subset,1): + if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]: + subset_idx[found] = j + found += 1 + + if found == 1: + j = subset_idx[0] + if subset[j][indexB] != partBs[i]: + subset[j][indexB] = partBs[i] + subset[j][-1] += 1 + subset[j][-2] += ( + candidate[partBs[i].astype(int), 2] + + connection_all[k][i][2] + ) + elif found == 2: # if found 2 and disjoint, merge them + j1, j2 = subset_idx + membership = ( + (subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int) + )[:-2] + if len(np.nonzero(membership == 2)[0]) == 0: # merge + subset[j1][:-2] += subset[j2][:-2] + 1 + subset[j1][-2:] += subset[j2][-2:] + subset[j1][-2] += connection_all[k][i][2] + subset = np.delete(subset, j2, 0) + else: # as like found == 1 + subset[j1][indexB] = partBs[i] + subset[j1][-1] += 1 + subset[j1][-2] += ( + candidate[partBs[i].astype(int), 2] + + connection_all[k][i][2] + ) + + # if find no partA in the subset, create a new subset + elif not found and k < 17: + row = -1 * np.ones(20) + row[indexA] = partAs[i] + row[indexB] = partBs[i] + row[-1] = 2 + row[-2] = ( + sum(candidate[connection_all[k][i, :2].astype(int), 2]) + + connection_all[k][i][2] + ) + subset = np.vstack([subset, row]) + # delete some rows of subset which has few parts occur + deleteIdx = [] + for i in range(len(subset)): + if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4: + 
deleteIdx.append(i) + subset = np.delete(subset, deleteIdx, axis=0) + + # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts + # candidate: x, y, score, id + return candidate, subset + + +def draw_keypoints(image: Image, keypoints: np.ndarray, radius=1, alpha=1.0): + overlay = image.copy() + draw = PIL.ImageDraw.Draw(overlay) + confidence_threshold = 0.8 + for kp in keypoints: + x, y, v, i = kp + if v > confidence_threshold: + draw.ellipse( + ( + (int(x - radius), int(y - radius)), + (int(x + radius), int(y + radius)), + ), + outline=(0, 255, 0), + fill=(0, 255, 0), + ) + + return PIL.Image.blend(overlay, image, alpha) diff --git a/qai_hub_models/models/openpose/demo.py b/qai_hub_models/models/openpose/demo.py new file mode 100644 index 00000000..885d7631 --- /dev/null +++ b/qai_hub_models/models/openpose/demo.py @@ -0,0 +1,42 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import argparse + +from qai_hub_models.models.openpose.app import OpenPoseApp +from qai_hub_models.models.openpose.model import MODEL_ASSET_VERSION, MODEL_ID, OpenPose +from qai_hub_models.utils.args import add_output_dir_arg +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.display import display_or_save_image + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "openpose_demo.png" +) + + +# Run OpenPose end-to-end on a sample image. +# The demo will display the input image with circles drawn over the estimated joint positions. +def main(is_test: bool = False): + # Demo parameters + parser = argparse.ArgumentParser() + parser.add_argument( + "--image", + type=str, + default=IMAGE_ADDRESS, + help="image file path or URL.", + ) + add_output_dir_arg(parser) + + args = parser.parse_args([] if is_test else None) + + # Load image & model + app = OpenPoseApp(OpenPose.from_pretrained()) + image = load_image(args.image) + pred_image = app.estimate_pose(image) + if not is_test: + display_or_save_image(pred_image, args.output_dir) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/openpose/export.py b/qai_hub_models/models/openpose/export.py new file mode 100644 index 00000000..8f29e78f --- /dev/null +++ b/qai_hub_models/models/openpose/export.py @@ -0,0 +1,194 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
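A note on the pose post-processing in `app.py` above, before the auto-generated export script continues below: `getKeypointsFromPredictions` returns `candidate` (one row per detected joint: x, y, score, id) and `subset` (one row per detected person: indices 0-17 point into `candidate`, index 18 is the total score, index 19 the number of detected parts). A small illustrative sketch of recovering per-person joint coordinates from these arrays:

```python
import numpy as np


def joints_per_person(candidate: np.ndarray, subset: np.ndarray):
    """Yield {joint_index: (x, y, score)} for each detected person."""
    for person in subset:
        joints = {}
        for joint_idx in range(18):
            cand_idx = int(person[joint_idx])
            if cand_idx == -1:  # this joint was not detected for this person
                continue
            x, y, score = candidate[cand_idx, :3]
            joints[joint_idx] = (float(x), float(y), float(score))
        yield joints
```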
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.openpose import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "openpose" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "openpose", + "OpenPose", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0,output_1", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0,output_1", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/openpose/info.yaml b/qai_hub_models/models/openpose/info.yaml new file mode 100644 index 00000000..1941c9ef --- /dev/null +++ b/qai_hub_models/models/openpose/info.yaml @@ -0,0 +1,35 @@ +name: OpenPose +# id must match with the model dir name in qai_hub_models +id: openpose +status: public +headline: Human pose estimation. +domain: Computer Vision +description: OpenPose is a machine learning model that estimates body and hand pose + in an image and returns location and confidence for each of 19 joints. 
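Per the `forward()` docstring in `model.py` below, the network returns a part-affinity field (PAF) and a joint heatmap, both spatially downsampled by a factor of 8. A quick shape check as a sketch, using the default 224x224 input spec and a dummy input (weights are downloaded on first run):

```python
import torch

from qai_hub_models.models.openpose import Model as OpenPose

model = OpenPose.from_pretrained()
image = torch.rand(1, 3, 224, 224)  # dummy RGB input in [0, 1]

paf, heatmap = model(image)
print(paf.shape)      # expected: [1, 38, 28, 28] -- 2x the 19 joint channels
print(heatmap.shape)  # expected: [1, 19, 28, 28] -- one channel per joint
```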
+use_case: Pose Estimation +tags: [] +research_paper: https://arxiv.org/abs/1812.08008 +research_paper_title: 'OpenPose: Realtime Multi-Person 2D Pose Estimation using Part + Affinity Fields' +license: https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/LICENSE +source_repo: https://github.com/CMU-Perceptual-Computing-Lab/openpose +technical_details: + Model checkpoint: body_pose_model.pth + Input resolution: 240x320 + Number of parameters: 52.3M + Model size: 200 MB +applicable_scenarios: + - Injury prevention training + - Sports performance analysis + - Posture recognition +form_factors: + - Phone + - Tablet + - IoT +related_models: + - litehrnet + - mediapipe_pose +has_static_banner: yes +has_animated_banner: no +license_type: other +dataset: [] diff --git a/qai_hub_models/models/openpose/model.py b/qai_hub_models/models/openpose/model.py new file mode 100644 index 00000000..f2749ddf --- /dev/null +++ b/qai_hub_models/models/openpose/model.py @@ -0,0 +1,139 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Tuple + +import torch + +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, SourceAsRoot +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +OPENPOSE_SOURCE_REPOSITORY = "https://github.com/CMU-Perceptual-Computing-Lab/openpose" +OPENPOSE_SOURCE_REPO_COMMIT = "80d4c5f7b25ba4c3bf5745ab7d0e6ccd3db8b242" +OPENPOSE_PROXY_REPOSITORY = "https://github.com/Hzzone/pytorch-openpose" +OPENPOSE_PROXY_REPO_COMMIT = "5ee71dc10020403dc3def2bb68f9b77c40337ae2" +# Originally from https://drive.google.com/file/d/1EULkcH_hhSU28qVc1jSJpCh2hGOrzpjK/view +DEFAULT_WEIGHTS = "body_pose_model.pth" +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 + + +class OpenPose(BaseModel): + """Exportable OpenPose pose estimation""" + + def __init__( + self, + openpose_model: torch.nn.Module, + ) -> None: + super().__init__() + self.model = openpose_model + + @classmethod + def from_pretrained(cls, weights_path: str | None = None) -> OpenPose: + """Load OpenPose from a weightfile created by the source OpenPose repository.""" + + # Load PyTorch model from disk + openpose_model = _load_openpose_source_model_from_weights(weights_path) + + return cls(openpose_model) + + def forward(self, image: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Run OpenPose on `image`, and produce keypoints for pose estimation + + Parameters: + image: Pixel values for model consumption. + Range: float32[0-1] + 3-channel Color Space: RGB + Shape: 1xCxHxW + + Returns: + PAF: 1x38xH/8xW/8 (2x number of joints) + Range: float[0, 1] + 2-dimensional relations between different indices that represent body parts + heatmap: 1x19xH/8xW/8 (i value per joint per pixel) + Range: float[0, 1] + 2 dimensional heatmaps representing probabilities for each joint across the image + + The output width and height are downsampled from the input width and height by a factor of 8. 
+ """ + + img_padded = image.squeeze().permute(1, 2, 0) + h = img_padded.shape[0] + w = img_padded.shape[1] + padValue = 128 + stride = 8 + pad = [ + 0, + 0, + 0 if (h % stride == 0) else stride - (h % stride), + 0 if (w % stride == 0) else stride - (w % stride), + ] + # Pad up + pad_up = torch.full((pad[0], w, 3), padValue, dtype=img_padded.dtype) + img_padded = torch.cat((pad_up, img_padded), dim=0) + + # Pad left + pad_left = torch.full((h, pad[1], 3), padValue, dtype=img_padded.dtype) + img_padded = torch.cat((pad_left, img_padded), dim=1) + + # Pad down + pad_down = torch.full((pad[2], w, 3), padValue, dtype=img_padded.dtype) + img_padded = torch.cat((img_padded, pad_down), dim=0) + + # Pad right + pad_right = torch.full((h, pad[3], 3), padValue, dtype=img_padded.dtype) + img_padded = torch.cat((img_padded, pad_right), dim=1) + + # reshape + im = img_padded.permute(2, 0, 1).unsqueeze(0) - 0.5 + + # Run the model + with torch.no_grad(): + paf, heatmap = self.model(im) + + return paf, heatmap + + def get_input_spec( + self, + batch_size: int = 1, + num_channels: int = 3, + height: int = 224, + width: int = 224, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. + return {"image": ((batch_size, num_channels, height, width), "float32")} + + +def _load_openpose_source_model_from_weights( + weights_path_body: str | None = None, +) -> torch.nn.Module: + # Load OpenPose model from the source repository using the given weights. + + # OpenPose exists as a Caffe model or Windows binaries in the original repository. + # The proxy repository contains a pytorch implementation, converted from the caffe model + with SourceAsRoot( + OPENPOSE_PROXY_REPOSITORY, + OPENPOSE_PROXY_REPO_COMMIT, + MODEL_ID, + MODEL_ASSET_VERSION, + ): + # download the weights file + if not weights_path_body: + weights_path_body = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_WEIGHTS + ).fetch() + + # Import model files from pytorch openpose repo + from src.body import Body + + body_estimation = Body(weights_path_body) + + return body_estimation.model.eval() diff --git a/qai_hub_models/models/openpose/perf.yaml b/qai_hub_models/models/openpose/perf.yaml new file mode 100644 index 00000000..29999a3f --- /dev/null +++ b/qai_hub_models/models/openpose/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: OpenPose + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 11747.0 + throughput: 85.12811781731506 + estimated_peak_memory_range: + min: 229376 + max: 2462464 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 103 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 103 + job_id: jnp1nw3kg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 11820.0 + throughput: 84.60236886632826 + 
estimated_peak_memory_range: + min: 622592 + max: 241891488 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 187 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 187 + job_id: jvgddq0kg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:07:34.029953Z' diff --git a/qai_hub_models/models/openpose/requirements.txt b/qai_hub_models/models/openpose/requirements.txt new file mode 100644 index 00000000..ab4e628e --- /dev/null +++ b/qai_hub_models/models/openpose/requirements.txt @@ -0,0 +1,2 @@ +scipy +matplotlib diff --git a/qai_hub_models/models/openpose/test.py b/qai_hub_models/models/openpose/test.py new file mode 100644 index 00000000..644be16c --- /dev/null +++ b/qai_hub_models/models/openpose/test.py @@ -0,0 +1,35 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models.openpose.app import OpenPoseApp +from qai_hub_models.models.openpose.demo import IMAGE_ADDRESS +from qai_hub_models.models.openpose.demo import main as demo_main +from qai_hub_models.models.openpose.model import MODEL_ASSET_VERSION, MODEL_ID, OpenPose +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import skip_clone_repo_check + +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "openpose_output.png" +) + + +@skip_clone_repo_check +def test_openpose_app(): + image = load_image(IMAGE_ADDRESS) + output_image = load_image(OUTPUT_IMAGE_ADDRESS) + app = OpenPoseApp(OpenPose.from_pretrained()) + app_output_image = app.estimate_pose(image) + np.testing.assert_allclose( + np.asarray(app_output_image, dtype=np.float32) / 255, + np.asarray(output_image, dtype=np.float32) / 255, + rtol=0.02, + atol=0.2, + ) + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/quicksrnetlarge/README.md b/qai_hub_models/models/quicksrnetlarge/README.md new file mode 100644 index 00000000..31d3723e --- /dev/null +++ b/qai_hub_models/models/quicksrnetlarge/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [QuickSRNetLarge: Upscale images and remove image noise](https://aihub.qualcomm.com/models/quicksrnetlarge) + +QuickSRNet Large is designed for upscaling images on mobile platforms to sharpen in real-time. + +This is based on the implementation of QuickSRNetLarge found +[here](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/quicksrnet). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +across various devices can be found [here](https://aihub.qualcomm.com/models/quicksrnetlarge). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.quicksrnetlarge.demo +``` +More details on the CLI tool can be found with the `--help` option.
See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.quicksrnetlarge.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of QuickSRNetLarge can be found + [here](https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf). + + +## References +* [QuickSRNet: Plain Single-Image Super-Resolution Architecture for Faster Inference on Mobile Platforms](https://arxiv.org/abs/2303.04336) +* [Source Model Implementation](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/quicksrnet) diff --git a/qai_hub_models/models/quicksrnetlarge/__init__.py b/qai_hub_models/models/quicksrnetlarge/__init__.py new file mode 100644 index 00000000..186e4441 --- /dev/null +++ b/qai_hub_models/models/quicksrnetlarge/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 + SuperResolutionApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import QuickSRNetLarge as Model # noqa: F401 diff --git a/qai_hub_models/models/quicksrnetlarge/demo.py b/qai_hub_models/models/quicksrnetlarge/demo.py new file mode 100644 index 00000000..77a29a77 --- /dev/null +++ b/qai_hub_models/models/quicksrnetlarge/demo.py @@ -0,0 +1,29 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.demo import super_resolution_demo +from qai_hub_models.models.quicksrnetlarge.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + QuickSRNetLarge, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "quicksrnetlarge_demo.jpg" +) + + +# Run QuickSRNet end-to-end on a sample image. +# The demo will display an upscaled image +def main(is_test: bool = False): + super_resolution_demo( + model_cls=QuickSRNetLarge, + default_image=IMAGE_ADDRESS, + is_test=is_test, + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/quicksrnetlarge/export.py b/qai_hub_models/models/quicksrnetlarge/export.py new file mode 100644 index 00000000..d6e2239f --- /dev/null +++ b/qai_hub_models/models/quicksrnetlarge/export.py @@ -0,0 +1,194 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.quicksrnetlarge import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). 
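+
+    Example (illustrative; assumes Qualcomm® AI Hub access is configured,
+    otherwise the function falls back to export_without_hub_access):
+        compile_job, profile_job, inference_job = export_model(device="Samsung Galaxy S23")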
+ """ + model_name = "quicksrnetlarge" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "quicksrnetlarge", + "QuickSRNetLarge", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/quicksrnetlarge/info.yaml b/qai_hub_models/models/quicksrnetlarge/info.yaml new file mode 100644 index 00000000..3bb825dc --- /dev/null +++ b/qai_hub_models/models/quicksrnetlarge/info.yaml @@ -0,0 +1,32 @@ +name: QuickSRNetLarge +# id must match with the model dir name in qai_hub_models +id: quicksrnetlarge +status: public +headline: Upscale images and remove image noise. +domain: Computer Vision +description: QuickSRNet Large is designed for upscaling images on mobile platforms + to sharpen in real-time. +use_case: Super Resolution +tags: [] +research_paper: https://arxiv.org/abs/2303.04336 +research_paper_title: 'QuickSRNet: Plain Single-Image Super-Resolution Architecture + for Faster Inference on Mobile Platforms' +license: https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf +source_repo: https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/quicksrnet +technical_details: + Model checkpoint: quicksrnet_large_4x_checkpoint_float32 + Input resolution: 128x128 + Number of parameters: 436K + Model size: 1.67 MB +applicable_scenarios: + - Virtual Real Estate Tours + - Gaming + - ARVR +form_factors: + - Phone + - Tablet +related_models: [xlsr, esrgan, quicksrnetlarge_quantized] +has_static_banner: yes +has_animated_banner: yes +license_type: other +dataset: [] diff --git a/qai_hub_models/models/quicksrnetlarge/model.py b/qai_hub_models/models/quicksrnetlarge/model.py new file mode 100644 index 00000000..609b9163 --- /dev/null +++ b/qai_hub_models/models/quicksrnetlarge/model.py @@ -0,0 +1,90 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator +from qai_hub_models.evaluators.superres_evaluator import SuperResolutionOutputEvaluator +from qai_hub_models.models._shared.quicksrnet.common import ( + _load_quicksrnet_source_model, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 2 +# Weights and config stored in S3 are sourced from +# https://github.com/quic/aimet-model-zoo/blob/develop/aimet_zoo_torch/quicksrnet/model/model_cards/quicksrnet_large_4x_w8a8.json +# https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/quicksrnet_large_4x_checkpoint_float32.pth.tar +QUICKSRNET_WEIGHTS = "quicksrnet_large_4x_checkpoint_float32.pth.tar" +SCALING_FACTOR = 4 +NUM_CHANNELS = 64 +NUM_INTERMEDIATE_LAYERS = 11 +USE_ITO_CONNECTION = True + + +class QuickSRNetLarge(BaseModel): + """Exportable QuickSRNet-Large upscaler, end-to-end.""" + + def __init__( + self, + quicksrnet_model: torch.nn.Module, + ) -> None: + super().__init__() + self.model = quicksrnet_model + + @classmethod + def from_pretrained(cls) -> QuickSRNetLarge: + model = _load_quicksrnet_source_model( + MODEL_ID, + MODEL_ASSET_VERSION, + SCALING_FACTOR, + NUM_CHANNELS, + NUM_INTERMEDIATE_LAYERS, + USE_ITO_CONNECTION, + ) + dst = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, QUICKSRNET_WEIGHTS + ).fetch() + checkpoint = torch.load(dst, map_location=torch.device("cpu")) + model.load_state_dict(checkpoint["state_dict"]) + model.eval() + + return cls(model) + + def get_evaluator(self) -> BaseEvaluator: + return SuperResolutionOutputEvaluator() + + def forward(self, image: torch.Tensor) -> torch.Tensor: + """ + Run QuickSRNet-Large on `image`, and produce an upscaled image + + Parameters: + image: Pixel values pre-processed for model consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + image: Pixel values + Range: float[0, 1] + 3-channel Color Space: RGB + """ + + return self.model(image) + + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 128, + width: int = 128, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. 
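+        #
+        # For example, the auto-generated export.py in this directory passes this spec
+        # as `input_specs` to `hub.submit_compile_job` when compiling the traced model.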
+ return {"image": ((batch_size, num_channels, height, width), "float32")} diff --git a/qai_hub_models/models/quicksrnetlarge/perf.yaml b/qai_hub_models/models/quicksrnetlarge/perf.yaml new file mode 100644 index 00000000..1b0102ea --- /dev/null +++ b/qai_hub_models/models/quicksrnetlarge/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: QuickSRNetLarge + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 2532.0 + throughput: 394.9447077409163 + estimated_peak_memory_range: + min: 16384 + max: 8035880 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 28 + layers_on_gpu: 0 + layers_on_cpu: 3 + total_layers: 31 + job_id: jz57el4rp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 2106.0 + throughput: 474.8338081671415 + estimated_peak_memory_range: + min: 212992 + max: 76319976 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 32 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 32 + job_id: jqp4yd1lp + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:38:01.534196Z' diff --git a/qai_hub_models/models/quicksrnetlarge/test.py b/qai_hub_models/models/quicksrnetlarge/test.py new file mode 100644 index 00000000..66f6fc53 --- /dev/null +++ b/qai_hub_models/models/quicksrnetlarge/test.py @@ -0,0 +1,39 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models._shared.super_resolution.app import SuperResolutionApp +from qai_hub_models.models.quicksrnetlarge.demo import IMAGE_ADDRESS +from qai_hub_models.models.quicksrnetlarge.demo import main as demo_main +from qai_hub_models.models.quicksrnetlarge.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + QuickSRNetLarge, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import assert_most_same, skip_clone_repo_check + +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "quicksrnetlarge_demo_output.png" +) + + +@skip_clone_repo_check +def test_task(): + image = load_image(IMAGE_ADDRESS) + model = QuickSRNetLarge.from_pretrained() + app = SuperResolutionApp(model=model) + output_img = app.upscale_image(image)[0] + + expected_output_image = load_image(OUTPUT_IMAGE_ADDRESS) + assert_most_same( + np.asarray(expected_output_image, dtype=np.float32), + np.array(output_img).astype(np.float32), + diff_tol=0.01, + ) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/quicksrnetmedium/README.md b/qai_hub_models/models/quicksrnetmedium/README.md new file mode 100644 index 00000000..0c2a24e4 --- /dev/null +++ b/qai_hub_models/models/quicksrnetmedium/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [QuickSRNetMedium: Upscale images and remove image noise](https://aihub.qualcomm.com/models/quicksrnetmedium) + +QuickSRNet Medium is designed for upscaling images on mobile platforms to sharpen in real-time. + +This is based on the implementation of QuickSRNetMedium found +[here](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/quicksrnet). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/quicksrnetmedium). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.quicksrnetmedium.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.quicksrnetmedium.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of QuickSRNetMedium can be found + [here](https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf). 
+ + +## References +* [QuickSRNet: Plain Single-Image Super-Resolution Architecture for Faster Inference on Mobile Platforms](https://arxiv.org/abs/2303.04336) +* [Source Model Implementation](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/quicksrnet) diff --git a/qai_hub_models/models/quicksrnetmedium/__init__.py b/qai_hub_models/models/quicksrnetmedium/__init__.py new file mode 100644 index 00000000..e3dd0c8f --- /dev/null +++ b/qai_hub_models/models/quicksrnetmedium/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 + SuperResolutionApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import QuickSRNetMedium as Model # noqa: F401 diff --git a/qai_hub_models/models/quicksrnetmedium/demo.py b/qai_hub_models/models/quicksrnetmedium/demo.py new file mode 100644 index 00000000..55e5d7a1 --- /dev/null +++ b/qai_hub_models/models/quicksrnetmedium/demo.py @@ -0,0 +1,29 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.demo import super_resolution_demo +from qai_hub_models.models.quicksrnetmedium.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + QuickSRNetMedium, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "quicksrnetmedium_demo.jpg" +) + + +# Run QuickSRNet end-to-end on a sample image. +# The demo will display an upscaled image +def main(is_test: bool = False): + super_resolution_demo( + model_cls=QuickSRNetMedium, + default_image=IMAGE_ADDRESS, + is_test=is_test, + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/quicksrnetmedium/export.py b/qai_hub_models/models/quicksrnetmedium/export.py new file mode 100644 index 00000000..978d5e70 --- /dev/null +++ b/qai_hub_models/models/quicksrnetmedium/export.py @@ -0,0 +1,194 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
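+#
+# Example CLI usage (documented in this model's README; `--help` lists all options):
+#   python -m qai_hub_models.models.quicksrnetmedium.export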
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.quicksrnetmedium import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "quicksrnetmedium" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "quicksrnetmedium", + "QuickSRNetMedium", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/quicksrnetmedium/info.yaml b/qai_hub_models/models/quicksrnetmedium/info.yaml new file mode 100644 index 00000000..aaa2691e --- /dev/null +++ b/qai_hub_models/models/quicksrnetmedium/info.yaml @@ -0,0 +1,32 @@ +name: QuickSRNetMedium +# id must match with the model dir name in qai_hub_models +id: quicksrnetmedium +status: public +headline: Upscale images and remove image noise. +domain: Computer Vision +description: QuickSRNet Medium is designed for upscaling images on mobile platforms + to sharpen in real-time. 
+use_case: Super Resolution +tags: [] +research_paper: https://arxiv.org/abs/2303.04336 +research_paper_title: 'QuickSRNet: Plain Single-Image Super-Resolution Architecture + for Faster Inference on Mobile Platforms' +license: https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf +source_repo: https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/quicksrnet +technical_details: + Model checkpoint: quicksrnet_medium_4x_checkpoint_float32 + Input resolution: 128x128 + Number of parameters: 61.0K + Model size: 244 KB +applicable_scenarios: + - Virtual Real Estate Tours + - Gaming + - ARVR +form_factors: + - Phone + - Tablet +related_models: [xlsr, esrgan, quicksrnetlarge] +has_static_banner: yes +has_animated_banner: yes +license_type: other +dataset: [] diff --git a/qai_hub_models/models/quicksrnetmedium/model.py b/qai_hub_models/models/quicksrnetmedium/model.py new file mode 100644 index 00000000..24c4345c --- /dev/null +++ b/qai_hub_models/models/quicksrnetmedium/model.py @@ -0,0 +1,90 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator +from qai_hub_models.evaluators.superres_evaluator import SuperResolutionOutputEvaluator +from qai_hub_models.models._shared.quicksrnet.common import ( + _load_quicksrnet_source_model, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 2 +# Weights and config stored in S3 are sourced from +# https://github.com/quic/aimet-model-zoo/blob/develop/aimet_zoo_torch/quicksrnet/model/model_cards/quicksrnet_medium_4x_w8a8.json +# https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/quicksrnet_medium_4x_checkpoint_float32.pth.tar +QUICKSRNET_WEIGHTS = "quicksrnet_medium_4x_checkpoint_float32.pth.tar" +SCALING_FACTOR = 4 +NUM_CHANNELS = 32 +NUM_INTERMEDIATE_LAYERS = 5 +USE_ITO_CONNECTION = False + + +class QuickSRNetMedium(BaseModel): + """Exportable QuickSRNet-Medium upscaler, end-to-end.""" + + def __init__( + self, + quicksrnet_model: torch.nn.Module, + ) -> None: + super().__init__() + self.model = quicksrnet_model + + @classmethod + def from_pretrained(cls) -> QuickSRNetMedium: + model = _load_quicksrnet_source_model( + MODEL_ID, + MODEL_ASSET_VERSION, + SCALING_FACTOR, + NUM_CHANNELS, + NUM_INTERMEDIATE_LAYERS, + USE_ITO_CONNECTION, + ) + dst = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, QUICKSRNET_WEIGHTS + ).fetch() + checkpoint = torch.load(dst, map_location=torch.device("cpu")) + model.load_state_dict(checkpoint["state_dict"]) + model.eval() + + return cls(model) + + def get_evaluator(self) -> BaseEvaluator: + return SuperResolutionOutputEvaluator() + + def forward(self, image: torch.Tensor) -> torch.Tensor: + """ + Run QuickSRNet-Medium on `image`, and produce an upscaled image + + Parameters: + image: Pixel values pre-processed for model consumption. 
+ Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + image: Pixel values + Range: float[0, 1] + 3-channel Color Space: RGB + """ + + return self.model(image) + + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 128, + width: int = 128, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. + return {"image": ((batch_size, num_channels, height, width), "float32")} diff --git a/qai_hub_models/models/quicksrnetmedium/perf.yaml b/qai_hub_models/models/quicksrnetmedium/perf.yaml new file mode 100644 index 00000000..5851a60e --- /dev/null +++ b/qai_hub_models/models/quicksrnetmedium/perf.yaml @@ -0,0 +1,137 @@ +models: +- name: QuickSRNetMedium + performance_metrics: + - reference_device_info: + name: Samsung Galaxy S23 + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-08T22:33:17.244157Z' + torchscript_onnx_tflite: + inference_time: 1407.0 + throughput: 710.7320540156361 + estimated_peak_memory_range: + min: 32768 + max: 8364248 + layer_info: + layers_on_npu: 14 + layers_on_gpu: 0 + layers_on_cpu: 3 + total_layers: 17 + precision: fp16 + primary_compute_unit: NPU + job_id: jvgd2x1z5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 992.0 + throughput: 1008.0645161290323 + estimated_peak_memory_range: + min: 217088 + max: 28908792 + layer_info: + layers_on_npu: 18 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 18 + precision: fp16 + primary_compute_unit: NPU + job_id: j1gllveeg + job_status: Passed + torchscript_onnx_ort_qnn_htp: + inference_time: 17078.0 + throughput: 58.55486590935707 + estimated_peak_memory_range: + min: 15241216 + max: 26970304 + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 8 + total_layers: 8 + precision: fp32 + primary_compute_unit: CPU + job_id: j0pxxkv3p + job_status: Passed + torchscript_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + layer_info: + layers_on_npu: 'null' + layers_on_gpu: 'null' + layers_on_cpu: 'null' + total_layers: 'null' + precision: 'null' + primary_compute_unit: 'null' + job_id: 'null' + job_status: 'null' +aggregated: + supported_devices: + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 Pro + - Xiaomi 13 + - Xiaomi 13 Pro + supported_oses: + - Android + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 + performance_metrics: + - reference_device_info: + name: Samsung Galaxy S23 + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-08T22:33:19.043922Z' + torchscript_onnx_tflite: + inference_time: 1407.0 + throughput: 710.7320540156361 + estimated_peak_memory_range: + min: 32768 + max: 8364248 + precision: fp16 + primary_compute_unit: NPU + job_status: Passed + torchscript_onnx_qnn: + inference_time: 992.0 + throughput: 1008.0645161290323 + estimated_peak_memory_range: + min: 217088 + max: 28908792 + precision: fp16 + primary_compute_unit: NPU + job_status: Passed + torchscript_onnx_ort_qnn_htp: + 
inference_time: 17078.0 + throughput: 58.55486590935707 + estimated_peak_memory_range: + min: 15241216 + max: 26970304 + precision: fp32 + primary_compute_unit: CPU + job_status: Passed + torchscript_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0.0 + max: 0.0 + precision: 'null' + primary_compute_unit: 'null' + job_status: 'null' diff --git a/qai_hub_models/models/quicksrnetmedium/test.py b/qai_hub_models/models/quicksrnetmedium/test.py new file mode 100644 index 00000000..aca388ad --- /dev/null +++ b/qai_hub_models/models/quicksrnetmedium/test.py @@ -0,0 +1,39 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models._shared.super_resolution.app import SuperResolutionApp +from qai_hub_models.models.quicksrnetmedium.demo import IMAGE_ADDRESS +from qai_hub_models.models.quicksrnetmedium.demo import main as demo_main +from qai_hub_models.models.quicksrnetmedium.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + QuickSRNetMedium, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import assert_most_same, skip_clone_repo_check + +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "quicksrnetmedium_demo_output.png" +) + + +@skip_clone_repo_check +def test_task(): + image = load_image(IMAGE_ADDRESS) + model = QuickSRNetMedium.from_pretrained() + app = SuperResolutionApp(model=model) + output_img = app.upscale_image(image)[0] + + expected_output_image = load_image(OUTPUT_IMAGE_ADDRESS) + assert_most_same( + np.asarray(expected_output_image, dtype=np.float32), + np.array(output_img).astype(np.float32), + diff_tol=0.01, + ) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/quicksrnetsmall/README.md b/qai_hub_models/models/quicksrnetsmall/README.md new file mode 100644 index 00000000..4cf6e004 --- /dev/null +++ b/qai_hub_models/models/quicksrnetsmall/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [QuickSRNetSmall: Upscale images and remove image noise](https://aihub.qualcomm.com/models/quicksrnetsmall) + +QuickSRNet Small is designed for upscaling images on mobile platforms to sharpen in real-time. + +This is based on the implementation of QuickSRNetSmall found +[here](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/quicksrnet). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/quicksrnetsmall). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.quicksrnetsmall.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. 
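+
+The model can also be driven directly from Python. The sketch below mirrors the
+calls made in this model's `test.py`; the local image path is a placeholder, and
+it assumes `load_image` accepts local file paths as well as hosted assets:
+
+```python
+from qai_hub_models.models._shared.super_resolution.app import SuperResolutionApp
+from qai_hub_models.models.quicksrnetsmall.model import QuickSRNetSmall
+from qai_hub_models.utils.asset_loaders import load_image
+
+# Load the pretrained float32 checkpoint and wrap it in the shared super-resolution app
+app = SuperResolutionApp(model=QuickSRNetSmall.from_pretrained())
+
+# "my_image.jpg" is a placeholder path; any RGB image should work
+image = load_image("my_image.jpg")
+
+# upscale_image returns a list of outputs; the first element is the 4x-upscaled image
+upscaled_image = app.upscale_image(image)[0]
+```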
+ +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.quicksrnetsmall.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of QuickSRNetSmall can be found + [here](https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf). + + +## References +* [QuickSRNet: Plain Single-Image Super-Resolution Architecture for Faster Inference on Mobile Platforms](https://arxiv.org/abs/2303.04336) +* [Source Model Implementation](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/quicksrnet) diff --git a/qai_hub_models/models/quicksrnetsmall/__init__.py b/qai_hub_models/models/quicksrnetsmall/__init__.py new file mode 100644 index 00000000..c84111f6 --- /dev/null +++ b/qai_hub_models/models/quicksrnetsmall/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 + SuperResolutionApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import QuickSRNetSmall as Model # noqa: F401 diff --git a/qai_hub_models/models/quicksrnetsmall/demo.py b/qai_hub_models/models/quicksrnetsmall/demo.py new file mode 100644 index 00000000..880f23bd --- /dev/null +++ b/qai_hub_models/models/quicksrnetsmall/demo.py @@ -0,0 +1,29 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.demo import super_resolution_demo +from qai_hub_models.models.quicksrnetsmall.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + QuickSRNetSmall, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "quicksrnetsmall_demo.jpg" +) + + +# Run QuickSRNet end-to-end on a sample image. +# The demo will display an upscaled image +def main(is_test: bool = False): + super_resolution_demo( + model_cls=QuickSRNetSmall, + default_image=IMAGE_ADDRESS, + is_test=is_test, + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/quicksrnetsmall/export.py b/qai_hub_models/models/quicksrnetsmall/export.py new file mode 100644 index 00000000..080fc777 --- /dev/null +++ b/qai_hub_models/models/quicksrnetsmall/export.py @@ -0,0 +1,194 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
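+#
+# Example CLI usage (documented in this model's README; `--help` lists all options):
+#   python -m qai_hub_models.models.quicksrnetsmall.export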
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.quicksrnetsmall import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "quicksrnetsmall" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "quicksrnetsmall", + "QuickSRNetSmall", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/quicksrnetsmall/info.yaml b/qai_hub_models/models/quicksrnetsmall/info.yaml new file mode 100644 index 00000000..91aff954 --- /dev/null +++ b/qai_hub_models/models/quicksrnetsmall/info.yaml @@ -0,0 +1,32 @@ +name: QuickSRNetSmall +# id must match with the model dir name in qai_hub_models +id: quicksrnetsmall +status: public +headline: Upscale images and remove image noise. +domain: Computer Vision +description: QuickSRNet Small is designed for upscaling images on mobile platforms + to sharpen in real-time. 
+use_case: Super Resolution +tags: [] +research_paper: https://arxiv.org/abs/2303.04336 +research_paper_title: 'QuickSRNet: Plain Single-Image Super-Resolution Architecture + for Faster Inference on Mobile Platforms' +license: https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf +source_repo: https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/quicksrnet +technical_details: + Model checkpoint: quicksrnet_small_4x_checkpoint_float32 + Input resolution: 128x128 + Number of parameters: 76.0M + Model size: 290 MB +applicable_scenarios: + - Virtual Real Estate Tours + - Gaming + - ARVR +form_factors: + - Phone + - Tablet +related_models: [xlsr, esrgan, quicksrnetlarge] +has_static_banner: yes +has_animated_banner: yes +license_type: other +dataset: [] diff --git a/qai_hub_models/models/quicksrnetsmall/model.py b/qai_hub_models/models/quicksrnetsmall/model.py new file mode 100644 index 00000000..92ad52d1 --- /dev/null +++ b/qai_hub_models/models/quicksrnetsmall/model.py @@ -0,0 +1,90 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator +from qai_hub_models.evaluators.superres_evaluator import SuperResolutionOutputEvaluator +from qai_hub_models.models._shared.quicksrnet.common import ( + _load_quicksrnet_source_model, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 2 +# Weights and config stored in S3 are sourced from +# https://github.com/quic/aimet-model-zoo/blob/develop/aimet_zoo_torch/quicksrnet/model/model_cards/quicksrnet_small_4x_w8a8.json +# https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/quicksrnet_small_4x_checkpoint_float32.pth.tar +QUICKSRNET_WEIGHTS = "quicksrnet_small_4x_checkpoint_float32.pth.tar" +SCALING_FACTOR = 4 +NUM_CHANNELS = 32 +NUM_INTERMEDIATE_LAYERS = 2 +USE_ITO_CONNECTION = False + + +class QuickSRNetSmall(BaseModel): + """Exportable QuickSRNet-Small upscaler, end-to-end.""" + + def __init__( + self, + quicksrnet_model: torch.nn.Module, + ) -> None: + super().__init__() + self.model = quicksrnet_model + + @classmethod + def from_pretrained(cls) -> QuickSRNetSmall: + model = _load_quicksrnet_source_model( + MODEL_ID, + MODEL_ASSET_VERSION, + SCALING_FACTOR, + NUM_CHANNELS, + NUM_INTERMEDIATE_LAYERS, + USE_ITO_CONNECTION, + ) + dst = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, QUICKSRNET_WEIGHTS + ).fetch() + checkpoint = torch.load(dst, map_location=torch.device("cpu")) + model.load_state_dict(checkpoint["state_dict"]) + model.eval() + + return cls(model) + + def get_evaluator(self) -> BaseEvaluator: + return SuperResolutionOutputEvaluator() + + def forward(self, image: torch.Tensor) -> torch.Tensor: + """ + Run QuickSRNet-Small on `image`, and produce an upscaled image + + Parameters: + image: Pixel values pre-processed for model consumption. 
+ Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + image: Pixel values + Range: float[0, 1] + 3-channel Color Space: RGB + """ + + return self.model(image) + + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 128, + width: int = 128, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. + return {"image": ((batch_size, num_channels, height, width), "float32")} diff --git a/qai_hub_models/models/quicksrnetsmall/perf.yaml b/qai_hub_models/models/quicksrnetsmall/perf.yaml new file mode 100644 index 00000000..3cf63319 --- /dev/null +++ b/qai_hub_models/models/quicksrnetsmall/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: QuickSRNetSmall + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 1333.0 + throughput: 750.1875468867216 + estimated_peak_memory_range: + min: 16384 + max: 8022608 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 8 + layers_on_gpu: 0 + layers_on_cpu: 3 + total_layers: 11 + job_id: j1pvl9zr5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 1017.0 + throughput: 983.284169124877 + estimated_peak_memory_range: + min: 212992 + max: 64518392 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 12 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 12 + job_id: jep2rv3qg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-22T22:36:34.984329Z' diff --git a/qai_hub_models/models/quicksrnetsmall/test.py b/qai_hub_models/models/quicksrnetsmall/test.py new file mode 100644 index 00000000..d7645b6f --- /dev/null +++ b/qai_hub_models/models/quicksrnetsmall/test.py @@ -0,0 +1,39 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models._shared.super_resolution.app import SuperResolutionApp +from qai_hub_models.models.quicksrnetsmall.demo import IMAGE_ADDRESS +from qai_hub_models.models.quicksrnetsmall.demo import main as demo_main +from qai_hub_models.models.quicksrnetsmall.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + QuickSRNetSmall, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import assert_most_same, skip_clone_repo_check + +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "quicksrnetsmall_demo_output.png" +) + + +@skip_clone_repo_check +def test_task(): + image = load_image(IMAGE_ADDRESS) + model = QuickSRNetSmall.from_pretrained() + app = SuperResolutionApp(model=model) + output_img = app.upscale_image(image)[0] + + expected_output_image = load_image(OUTPUT_IMAGE_ADDRESS) + assert_most_same( + np.asarray(expected_output_image, dtype=np.float32), + np.array(output_img).astype(np.float32), + diff_tol=0.01, + ) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/real_esrgan_general_x4v3/README.md b/qai_hub_models/models/real_esrgan_general_x4v3/README.md new file mode 100644 index 00000000..5729860d --- /dev/null +++ b/qai_hub_models/models/real_esrgan_general_x4v3/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [Real-ESRGAN-General-x4v3: Upscale images and remove image noise](https://aihub.qualcomm.com/models/real_esrgan_general_x4v3) + +Real-ESRGAN is a machine learning model that upscales an image with minimal loss in quality. + +This is based on the implementation of Real-ESRGAN-General-x4v3 found +[here](https://github.com/xinntao/Real-ESRGAN/tree/master). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/real_esrgan_general_x4v3). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[real_esrgan_general_x4v3]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.real_esrgan_general_x4v3.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.real_esrgan_general_x4v3.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. 
+- The license for the original implementation of Real-ESRGAN-General-x4v3 can be found + [here](https://github.com/xinntao/Real-ESRGAN/blob/master/LICENSE). + + +## References +* [Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data](https://arxiv.org/abs/2107.10833) +* [Source Model Implementation](https://github.com/xinntao/Real-ESRGAN/tree/master) diff --git a/qai_hub_models/models/real_esrgan_general_x4v3/__init__.py b/qai_hub_models/models/real_esrgan_general_x4v3/__init__.py new file mode 100644 index 00000000..5d2485c0 --- /dev/null +++ b/qai_hub_models/models/real_esrgan_general_x4v3/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 + SuperResolutionApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import Real_ESRGAN_General_x4v3 as Model # noqa: F401 diff --git a/qai_hub_models/models/real_esrgan_general_x4v3/demo.py b/qai_hub_models/models/real_esrgan_general_x4v3/demo.py new file mode 100644 index 00000000..ae8541e7 --- /dev/null +++ b/qai_hub_models/models/real_esrgan_general_x4v3/demo.py @@ -0,0 +1,30 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.demo import super_resolution_demo +from qai_hub_models.models.real_esrgan_general_x4v3.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + Real_ESRGAN_General_x4v3, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +WEIGHTS_HELP_MSG = "RealESRGAN checkpoint `.pth` name from the Real-ESRGAN repo. Can be set to any of the model names defined here: https://github.com/xinntao/Real-ESRGAN/blob/master/docs/model_zoo.md to automatically download the file instead." +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "real_esrgan_general_x4v3_demo.jpg" +) + + +# Run Real-ESRGAN end-to-end on a sample image. +# The demo will display a image with the predicted bounding boxes. +def main(is_test: bool = False): + super_resolution_demo( + model_cls=Real_ESRGAN_General_x4v3, + default_image=IMAGE_ADDRESS, + is_test=is_test, + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/real_esrgan_general_x4v3/export.py b/qai_hub_models/models/real_esrgan_general_x4v3/export.py new file mode 100644 index 00000000..78f52202 --- /dev/null +++ b/qai_hub_models/models/real_esrgan_general_x4v3/export.py @@ -0,0 +1,194 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
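+# (Typical invocation, per this model's README; requires Qualcomm AI Hub access:
+#    python -m qai_hub_models.models.real_esrgan_general_x4v3.export
+#  pass --help to list the generated CLI options.)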
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.real_esrgan_general_x4v3 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "real_esrgan_general_x4v3" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "real_esrgan_general_x4v3", + "Real-ESRGAN-General-x4v3", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/real_esrgan_general_x4v3/info.yaml b/qai_hub_models/models/real_esrgan_general_x4v3/info.yaml new file mode 100644 index 00000000..f2dbd297 --- /dev/null +++ b/qai_hub_models/models/real_esrgan_general_x4v3/info.yaml @@ -0,0 +1,36 @@ +name: Real-ESRGAN-General-x4v3 +# id must match with the model dir name in qai_hub_models +id: real_esrgan_general_x4v3 +status: public +headline: Upscale images and remove image noise. +domain: Computer Vision +description: Real-ESRGAN is a machine learning model that upscales an image with minimal + loss in quality. 
+use_case: Super Resolution +tags: [] +research_paper: https://arxiv.org/abs/2107.10833 +research_paper_title: 'Real-ESRGAN: Training Real-World Blind Super-Resolution with + Pure Synthetic Data' +license: https://github.com/xinntao/Real-ESRGAN/blob/master/LICENSE +source_repo: https://github.com/xinntao/Real-ESRGAN/tree/master +technical_details: + Model checkpoint: realesr-general-x4v3 + Input resolution: 128x128 + Number of parameters: 1.21M + Model size: 4.66 MB +applicable_scenarios: + - Virtual Real Estate Tours + - Gaming + - ARVR +related_models: + - esrgan + - real_esrgan_x4plus +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: [] diff --git a/qai_hub_models/models/real_esrgan_general_x4v3/model.py b/qai_hub_models/models/real_esrgan_general_x4v3/model.py new file mode 100644 index 00000000..e74e63a8 --- /dev/null +++ b/qai_hub_models/models/real_esrgan_general_x4v3/model.py @@ -0,0 +1,141 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os + +import torch + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator +from qai_hub_models.evaluators.superres_evaluator import SuperResolutionOutputEvaluator +from qai_hub_models.utils.asset_loaders import SourceAsRoot +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +REALESRGAN_SOURCE_REPOSITORY = "https://github.com/xinntao/Real-ESRGAN" +REALESRGAN_SOURCE_REPO_COMMIT = "5ca1078535923d485892caee7d7804380bfc87fd" +REALESRGAN_SOURCE_VERSION = 1 +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 2 +DEFAULT_WEIGHTS = "realesr-general-x4v3" +PRE_PAD = 10 +SCALING_FACTOR = 4 + + +class Real_ESRGAN_General_x4v3(BaseModel): + """Exportable RealESRGAN upscaler, end-to-end.""" + + def __init__( + self, + realesrgan_model: torch.nn.Module, + ) -> None: + super().__init__() + self.model = realesrgan_model + + @classmethod + def from_pretrained( + cls, + weight_path: str = DEFAULT_WEIGHTS, + ) -> Real_ESRGAN_General_x4v3: + """Load Real_ESRGAN_General_x4v3 from a weightfile created by the source RealESRGAN repository.""" + + # Load PyTorch model from disk + realesrgan_model = _load_realesrgan_source_model_from_weights(weight_path) + + return Real_ESRGAN_General_x4v3(realesrgan_model) + + def get_evaluator(self) -> BaseEvaluator: + return SuperResolutionOutputEvaluator() + + def forward(self, image: torch.Tensor) -> torch.Tensor: + """ + Run RealESRGAN on `image`, and produce an upscaled image + Parameters: + image: Pixel values pre-processed for GAN consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + Returns: + image: Pixel values + Range: float[0, 1] + 3-channel Color Space: RGB + """ + return self.model(image) + + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 128, + width: int = 128, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. 
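+        # Illustrative sketch (not exercised by this module): the spec returned below
+        # can be passed to a compile job, mirroring export.py in this directory:
+        #   input_spec = Real_ESRGAN_General_x4v3.get_input_spec()
+        #   hub.submit_compile_job(model=traced_model, input_specs=input_spec,
+        #                          device=hub.Device("Samsung Galaxy S23"))
+        # where `traced_model` is assumed to be a torch.jit.trace of this model.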
+ return {"image": ((batch_size, num_channels, height, width), "float32")} + + +def _get_weightsfile_from_name(weights_name: str = DEFAULT_WEIGHTS): + """Convert from names of weights files to the url for the weights file""" + if weights_name == DEFAULT_WEIGHTS: + return "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth" + return "" + + +def _load_realesrgan_source_model_from_weights( + weights_name_or_path: str, +) -> torch.nn.Module: + with SourceAsRoot( + REALESRGAN_SOURCE_REPOSITORY, + REALESRGAN_SOURCE_REPO_COMMIT, + MODEL_ID, + REALESRGAN_SOURCE_VERSION, + ): + # Patch path for this load only, since the model source + # code references modules via a global scope. + # CWD should be the repository path now + realesrgan_repo_path = os.getcwd() + # The official repo omits this folder, which causes import issues + version_dir = os.path.join(realesrgan_repo_path, "realesrgan/version") + if not os.path.exists(version_dir): + os.makedirs(version_dir) + + if os.path.exists(os.path.expanduser(weights_name_or_path)): + weights_path = os.path.expanduser(weights_name_or_path) + else: + weights_path = os.path.join(os.getcwd(), weights_name_or_path + ".pth") + if not os.path.exists(weights_path): + # Load RealESRGAN model from the source repository using the given weights. + # Returns .realesrgan.archs.srvgg_arch + weights_url = _get_weightsfile_from_name(weights_name_or_path) + + # download the weights file + import requests + + response = requests.get(weights_url) + with open(weights_path, "wb") as file: + file.write(response.content) + print(f"Weights file downloaded as {weights_path}") + + # necessary import. `archs` comes from the realesrgan repo. + from realesrgan.archs.srvgg_arch import SRVGGNetCompact + + realesrgan_model = SRVGGNetCompact( + num_in_ch=3, + num_out_ch=3, + num_feat=64, + num_conv=32, + upscale=4, + act_type="prelu", + ) + pretrained_dict = torch.load(weights_path, map_location=torch.device("cpu")) + + if "params_ema" in pretrained_dict: + keyname = "params_ema" + else: + keyname = "params" + realesrgan_model.load_state_dict(pretrained_dict[keyname], strict=True) + + return realesrgan_model diff --git a/qai_hub_models/models/real_esrgan_general_x4v3/perf.yaml b/qai_hub_models/models/real_esrgan_general_x4v3/perf.yaml new file mode 100644 index 00000000..5305963f --- /dev/null +++ b/qai_hub_models/models/real_esrgan_general_x4v3/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: Real-ESRGAN-General-x4v3 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 7168.0 + throughput: 139.50892857142858 + estimated_peak_memory_range: + min: 15761408 + max: 27106520 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 69 + layers_on_gpu: 0 + layers_on_cpu: 3 + total_layers: 72 + job_id: jmg9zy3qp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 6995.0 + throughput: 142.9592566118656 + estimated_peak_memory_range: + min: 45056 + max: 
67127640 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 73 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 73 + job_id: jnp1nwdkg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:15:20.798589Z' diff --git a/qai_hub_models/models/real_esrgan_general_x4v3/requirements.txt b/qai_hub_models/models/real_esrgan_general_x4v3/requirements.txt new file mode 100644 index 00000000..80ca5630 --- /dev/null +++ b/qai_hub_models/models/real_esrgan_general_x4v3/requirements.txt @@ -0,0 +1,6 @@ +opencv-python +PyYAML +requests +scipy +seaborn +basicsr diff --git a/qai_hub_models/models/real_esrgan_general_x4v3/test.py b/qai_hub_models/models/real_esrgan_general_x4v3/test.py new file mode 100644 index 00000000..599f41b9 --- /dev/null +++ b/qai_hub_models/models/real_esrgan_general_x4v3/test.py @@ -0,0 +1,40 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models._shared.super_resolution.app import SuperResolutionApp +from qai_hub_models.models.real_esrgan_general_x4v3.demo import IMAGE_ADDRESS +from qai_hub_models.models.real_esrgan_general_x4v3.demo import main as demo_main +from qai_hub_models.models.real_esrgan_general_x4v3.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + Real_ESRGAN_General_x4v3, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import skip_clone_repo_check + +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "real_esrgan_general_x4v3_demo_output.png" +) + + +@skip_clone_repo_check +def test_realesrgan_app(): + image = load_image(IMAGE_ADDRESS) + output_image = load_image(OUTPUT_IMAGE_ADDRESS) + model = Real_ESRGAN_General_x4v3.from_pretrained() + app = SuperResolutionApp(model) + app_output_image = app.upscale_image(image)[0] + np.testing.assert_allclose( + np.asarray(app_output_image, dtype=np.float32), + np.asarray(output_image, dtype=np.float32), + rtol=0.02, + atol=1.5, + ) + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/real_esrgan_x4plus/README.md b/qai_hub_models/models/real_esrgan_x4plus/README.md new file mode 100644 index 00000000..b4bb3d0d --- /dev/null +++ b/qai_hub_models/models/real_esrgan_x4plus/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [Real-ESRGAN-x4plus: Upscale images and remove image noise](https://aihub.qualcomm.com/models/real_esrgan_x4plus) + +Real-ESRGAN is a machine learning model that upscales an image with minimal loss in quality. The implementation is a derivative of the Real-ESRGAN-x4plus architecture, a larger and more powerful version compared to the Real-ESRGAN-general-x4v3 architecture. + +This is based on the implementation of Real-ESRGAN-x4plus found +[here](https://github.com/xinntao/Real-ESRGAN). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. 
More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/real_esrgan_x4plus). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[real_esrgan_x4plus]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.real_esrgan_x4plus.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.real_esrgan_x4plus.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of Real-ESRGAN-x4plus can be found + [here](https://github.com/xinntao/Real-ESRGAN/blob/master/LICENSE). + + +## References +* [Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data](https://arxiv.org/abs/2107.10833) +* [Source Model Implementation](https://github.com/xinntao/Real-ESRGAN) diff --git a/qai_hub_models/models/real_esrgan_x4plus/__init__.py b/qai_hub_models/models/real_esrgan_x4plus/__init__.py new file mode 100644 index 00000000..eff072be --- /dev/null +++ b/qai_hub_models/models/real_esrgan_x4plus/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 + SuperResolutionApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import Real_ESRGAN_x4plus as Model # noqa: F401 diff --git a/qai_hub_models/models/real_esrgan_x4plus/demo.py b/qai_hub_models/models/real_esrgan_x4plus/demo.py new file mode 100644 index 00000000..5eeb17e2 --- /dev/null +++ b/qai_hub_models/models/real_esrgan_x4plus/demo.py @@ -0,0 +1,30 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.demo import super_resolution_demo +from qai_hub_models.models.real_esrgan_x4plus.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + Real_ESRGAN_x4plus, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "real_esrgan_x4plus_demo.jpg" +) +WEIGHTS_HELP_MSG = "RealESRGAN checkpoint `.pth` name from the Real-ESRGAN repo. Can be set to any of the model names defined here: https://github.com/xinntao/Real-ESRGAN/blob/master/docs/model_zoo.md to automatically download the file instead." 
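+# (Per the model README, this demo can be launched from the command line with:
+#    python -m qai_hub_models.models.real_esrgan_x4plus.demo
+#  pass --help for the available options.)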
+ + +# Run Real-ESRGAN end-to-end on a sample image. +# The demo will display a image with the predicted bounding boxes. +def main(is_test: bool = False): + super_resolution_demo( + model_cls=Real_ESRGAN_x4plus, + default_image=IMAGE_ADDRESS, + is_test=is_test, + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/real_esrgan_x4plus/export.py b/qai_hub_models/models/real_esrgan_x4plus/export.py new file mode 100644 index 00000000..b37ecdda --- /dev/null +++ b/qai_hub_models/models/real_esrgan_x4plus/export.py @@ -0,0 +1,181 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.real_esrgan_x4plus import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. 
+ **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "real_esrgan_x4plus" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "real_esrgan_x4plus", + "Real-ESRGAN-x4plus", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=sample_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/real_esrgan_x4plus/info.yaml b/qai_hub_models/models/real_esrgan_x4plus/info.yaml new file mode 100644 index 00000000..3b9bc903 --- /dev/null +++ b/qai_hub_models/models/real_esrgan_x4plus/info.yaml @@ -0,0 +1,30 @@ +name: Real-ESRGAN-x4plus +# id must match with the model dir name in qai_hub_models +id: real_esrgan_x4plus +status: public +headline: Upscale images and remove image noise. +domain: Computer Vision +description: Real-ESRGAN is a machine learning model that upscales an image with minimal loss in quality. The implementation is a derivative of the Real-ESRGAN-x4plus architecture, a larger and more powerful version compared to the Real-ESRGAN-general-x4v3 architecture. +use_case: Super Resolution +tags: [] +research_paper: https://arxiv.org/abs/2107.10833 +research_paper_title: "Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data" +license: https://github.com/xinntao/Real-ESRGAN/blob/master/LICENSE +source_repo: https://github.com/xinntao/Real-ESRGAN +technical_details: + Number of parameters: 16.7M + Model size: 67.1 MB + Model checkpoint: RealESRGAN_x4plus + Input resolution: 128x128 +applicable_scenarios: + - Virtual Real Estate Tours + - Gaming + - ARVR +form_factors: + - Phone + - Tablet +related_models: ['esrgan', 'real_esrgan_general_x4v3'] +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: [] diff --git a/qai_hub_models/models/real_esrgan_x4plus/model.py b/qai_hub_models/models/real_esrgan_x4plus/model.py new file mode 100644 index 00000000..965ce49b --- /dev/null +++ b/qai_hub_models/models/real_esrgan_x4plus/model.py @@ -0,0 +1,136 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator +from qai_hub_models.evaluators.superres_evaluator import SuperResolutionOutputEvaluator +from qai_hub_models.utils.asset_loaders import ( + CachedWebModelAsset, + SourceAsRoot, + load_torch, +) +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +# The architecture for this RealESRGAN model comes from the original ESRGAN repo +REALESRGAN_SOURCE_REPOSITORY = "https://github.com/xinntao/ESRGAN" +REALESRGAN_SOURCE_REPO_COMMIT = "73e9b634cf987f5996ac2dd33f4050922398a921" +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 4 +DEFAULT_WEIGHTS = "RealESRGAN_x4plus" +DEFAULT_WEIGHTS_URL = CachedWebModelAsset( + "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth", + MODEL_ID, + MODEL_ASSET_VERSION, + "RealESRGAN_x4plus.pth", +) +PRE_PAD = 10 +SCALING_FACTOR = 4 + + +class Real_ESRGAN_x4plus(BaseModel): + """Exportable RealESRGAN upscaler, end-to-end.""" + + def __init__( + self, + realesrgan_model: torch.nn.Module, + ) -> None: + super().__init__() + self.model = realesrgan_model + + @classmethod + def from_pretrained( + cls, + weight_path: str = DEFAULT_WEIGHTS, + ) -> Real_ESRGAN_x4plus: + """Load RealESRGAN from a weightfile created by the source RealESRGAN repository.""" + + # Load PyTorch model from disk + realesrgan_model = _load_realesrgan_source_model_from_weights( + weight_path + ).eval() + + return cls(realesrgan_model) + + def get_evaluator(self) -> BaseEvaluator: + return SuperResolutionOutputEvaluator() + + def forward(self, image: torch.Tensor) -> torch.Tensor: + """ + Run RealESRGAN on `image`, and produce an upscaled image + + Parameters: + image: Pixel values pre-processed for GAN consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + image: Pixel values + Range: float[0, 1] + 3-channel Color Space: RGB + """ + + with torch.no_grad(): + # upscale + output = self.model(image) + + output_img = output.squeeze().float().cpu().clamp_(0, 1) + + return output_img + + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 128, + width: int = 128, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. + return {"image": ((batch_size, num_channels, height, width), "float32")} + + +def _get_weightsfile_from_name(weights_name: str = DEFAULT_WEIGHTS): + """Convert from names of weights files to the url for the weights file""" + if weights_name == DEFAULT_WEIGHTS: + return DEFAULT_WEIGHTS_URL + return "" + + +def _load_realesrgan_source_model_from_weights(weights_name: str) -> torch.nn.Module: + # Load RealESRGAN model from the source repository using the given weights. + # Returns .realesrgan.archs.srvgg_arch + weights_url = _get_weightsfile_from_name(weights_name) + + with SourceAsRoot( + REALESRGAN_SOURCE_REPOSITORY, + REALESRGAN_SOURCE_REPO_COMMIT, + MODEL_ID, + MODEL_ASSET_VERSION, + ): + # necessary import. `archs` comes from the realesrgan repo. 
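+        # (Note: RRDBNet is imported from the `basicsr` package listed in this
+        # model's requirements.txt, not from the repository checked out above.)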
+ from basicsr.archs.rrdbnet_arch import RRDBNet + + realesrgan_model = RRDBNet( + num_in_ch=3, + num_out_ch=3, + num_feat=64, + num_block=23, + num_grow_ch=32, + scale=SCALING_FACTOR, + ) + pretrained_dict = load_torch(weights_url) + + if "params_ema" in pretrained_dict: + keyname = "params_ema" + else: + keyname = "params" + realesrgan_model.load_state_dict(pretrained_dict[keyname], strict=True) + + return realesrgan_model diff --git a/qai_hub_models/models/real_esrgan_x4plus/perf.yaml b/qai_hub_models/models/real_esrgan_x4plus/perf.yaml new file mode 100644 index 00000000..d059c356 --- /dev/null +++ b/qai_hub_models/models/real_esrgan_x4plus/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: Real-ESRGAN-x4plus + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 69426.0 + throughput: 14.40382565609426 + estimated_peak_memory_range: + min: 3272704 + max: 6458720 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 1028 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 1028 + job_id: jygzl8665 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 67244.0 + throughput: 14.87121527571233 + estimated_peak_memory_range: + min: 102400 + max: 106071688 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 1031 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 1031 + job_id: jygzljxz5 + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-22T18:50:48.142201Z' diff --git a/qai_hub_models/models/real_esrgan_x4plus/requirements.txt b/qai_hub_models/models/real_esrgan_x4plus/requirements.txt new file mode 100644 index 00000000..6292b978 --- /dev/null +++ b/qai_hub_models/models/real_esrgan_x4plus/requirements.txt @@ -0,0 +1,4 @@ +opencv-python +scipy +seaborn +basicsr diff --git a/qai_hub_models/models/real_esrgan_x4plus/test.py b/qai_hub_models/models/real_esrgan_x4plus/test.py new file mode 100644 index 00000000..905f473c --- /dev/null +++ b/qai_hub_models/models/real_esrgan_x4plus/test.py @@ -0,0 +1,39 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models._shared.super_resolution.app import SuperResolutionApp +from qai_hub_models.models.real_esrgan_x4plus.demo import IMAGE_ADDRESS +from qai_hub_models.models.real_esrgan_x4plus.demo import main as demo_main +from qai_hub_models.models.real_esrgan_x4plus.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + Real_ESRGAN_x4plus, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import assert_most_same, skip_clone_repo_check + +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "real_esrgan_x4plus_demo_output.png" +) + + +@skip_clone_repo_check +def test_task(): + image = load_image(IMAGE_ADDRESS) + model = Real_ESRGAN_x4plus.from_pretrained() + app = SuperResolutionApp(model=model) + output_img = app.upscale_image(image)[0] + + expected_output_image = load_image(OUTPUT_IMAGE_ADDRESS) + assert_most_same( + np.asarray(expected_output_image, dtype=np.float32), + np.array(output_img).astype(np.float32), + diff_tol=0.01, + ) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/regnet/README.md b/qai_hub_models/models/regnet/README.md new file mode 100644 index 00000000..2a1a1082 --- /dev/null +++ b/qai_hub_models/models/regnet/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [RegNet: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/regnet) + +RegNet is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of RegNet found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/regnet). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.regnet.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.regnet.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of RegNet can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). 
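+
+For reference, a minimal Python sketch for loading this model locally (illustrative;
+`Model` here is the `RegNet` class re-exported by this package's `__init__.py`, which
+wraps torchvision's `regnet_x_3_2gf`):
+
+```python
+from qai_hub_models.models.regnet import Model
+
+model = Model.from_pretrained()  # loads the default IMAGENET1K_V1 weights
+spec = model.get_input_spec()    # expected: {"image_tensor": ((1, 3, 224, 224), "float32")}
+```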
+ + +## References +* [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py) diff --git a/qai_hub_models/models/regnet/__init__.py b/qai_hub_models/models/regnet/__init__.py new file mode 100644 index 00000000..f10a7964 --- /dev/null +++ b/qai_hub_models/models/regnet/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import RegNet as Model # noqa: F401 diff --git a/qai_hub_models/models/regnet/demo.py b/qai_hub_models/models/regnet/demo.py new file mode 100644 index 00000000..3fe6310f --- /dev/null +++ b/qai_hub_models/models/regnet/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.regnet.model import RegNet + + +def main(is_test: bool = False): + imagenet_demo(RegNet, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/regnet/export.py b/qai_hub_models/models/regnet/export.py new file mode 100644 index 00000000..abc1ad63 --- /dev/null +++ b/qai_hub_models/models/regnet/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.regnet import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. 
+ 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "regnet" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "regnet", + "RegNet", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." 
+ ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/regnet/info.yaml b/qai_hub_models/models/regnet/info.yaml new file mode 100644 index 00000000..457a281f --- /dev/null +++ b/qai_hub_models/models/regnet/info.yaml @@ -0,0 +1,40 @@ +name: RegNet +# id must match with the model dir name in qai_hub_models +id: regnet +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: RegNet is a machine learning model that can classify images from the + Imagenet dataset. It can also be used as a backbone in building more complex models + for specific use cases. +use_case: Image Classification +tags: + - backbone +research_paper: https://arxiv.org/abs/2003.13678 +research_paper_title: Designing Network Design Spaces +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 15.3M + Model size: 58.3 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/regnet/model.py b/qai_hub_models/models/regnet/model.py new file mode 100644 index 00000000..a71cf873 --- /dev/null +++ b/qai_hub_models/models/regnet/model.py @@ -0,0 +1,18 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" +MODEL_ASSET_VERSION = 3 + + +class RegNet(ImagenetClassifier): + model_builder = tv_models.regnet_x_3_2gf + DEFAULT_WEIGHTS = DEFAULT_WEIGHTS diff --git a/qai_hub_models/models/regnet/perf.yaml b/qai_hub_models/models/regnet/perf.yaml new file mode 100644 index 00000000..896bd9cb --- /dev/null +++ b/qai_hub_models/models/regnet/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: RegNet + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 1921.0 + throughput: 520.5622071837585 + estimated_peak_memory_range: + min: 16384 + max: 1931624 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 112 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 112 + job_id: jogk2q8og + job_status: Passed + torchscript_onnx_qnn: + inference_time: 1659.0 + throughput: 602.7727546714889 + estimated_peak_memory_range: + min: 237568 + max: 59498896 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 187 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 187 + job_id: jn5qlrvmp + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:36:39.546315Z' diff --git a/qai_hub_models/models/regnet/test.py b/qai_hub_models/models/regnet/test.py new file mode 100644 index 00000000..f6ebd563 --- /dev/null +++ b/qai_hub_models/models/regnet/test.py @@ -0,0 +1,30 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.imagenet_classifier.test_utils import (
+    run_imagenet_classifier_test,
+    run_imagenet_classifier_trace_test,
+)
+from qai_hub_models.models.regnet.demo import main as demo_main
+from qai_hub_models.models.regnet.model import MODEL_ASSET_VERSION, MODEL_ID, RegNet
+
+
+def test_task():
+    run_imagenet_classifier_test(
+        RegNet.from_pretrained(),
+        MODEL_ID,
+        probability_threshold=0.45,
+        atol=0.2,
+        rtol=0.2,
+        asset_version=MODEL_ASSET_VERSION,
+    )
+
+
+def test_trace():
+    run_imagenet_classifier_trace_test(RegNet.from_pretrained())
+
+
+def test_demo():
+    # Verify demo does not crash
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/resnet101/README.md b/qai_hub_models/models/resnet101/README.md
new file mode 100644
index 00000000..f4701eef
--- /dev/null
+++ b/qai_hub_models/models/resnet101/README.md
@@ -0,0 +1,50 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [ResNet101: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/resnet101)
+
+ResNet101 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases.
+
+This is based on the implementation of ResNet101 found
+[here](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/resnet101).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once the `qai_hub_models` package is installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.resnet101.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.resnet101.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of ResNet101 can be found
+  [here](https://github.com/pytorch/vision/blob/main/LICENSE).
+
+
+## References
+* [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
+* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py)
diff --git a/qai_hub_models/models/resnet101/__init__.py b/qai_hub_models/models/resnet101/__init__.py
new file mode 100644
index 00000000..95e6809b
--- /dev/null
+++ b/qai_hub_models/models/resnet101/__init__.py
@@ -0,0 +1,10 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import ResNet101 as Model # noqa: F401 diff --git a/qai_hub_models/models/resnet101/demo.py b/qai_hub_models/models/resnet101/demo.py new file mode 100644 index 00000000..1b1a3524 --- /dev/null +++ b/qai_hub_models/models/resnet101/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.resnet101.model import ResNet101 + + +def main(is_test: bool = False): + imagenet_demo(ResNet101, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnet101/export.py b/qai_hub_models/models/resnet101/export.py new file mode 100644 index 00000000..7b7ea302 --- /dev/null +++ b/qai_hub_models/models/resnet101/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.resnet101 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. 
+ skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "resnet101" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "resnet101", + "ResNet101", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnet101/info.yaml b/qai_hub_models/models/resnet101/info.yaml new file mode 100644 index 00000000..7d37336b --- /dev/null +++ b/qai_hub_models/models/resnet101/info.yaml @@ -0,0 +1,40 @@ +name: ResNet101 +# id must match with the model dir name in qai_hub_models +id: resnet101 +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +use_case: Image Classification +description: ResNet101 is a machine learning model that can classify images from the + Imagenet dataset. It can also be used as a backbone in building more complex models + for specific use cases. +tags: + - backbone +research_paper: https://arxiv.org/abs/1512.03385 +research_paper_title: Deep Residual Learning for Image Recognition +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 44.5M + Model size: 170 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/resnet101/model.py b/qai_hub_models/models/resnet101/model.py new file mode 100644 index 00000000..4cc43210 --- /dev/null +++ b/qai_hub_models/models/resnet101/model.py @@ -0,0 +1,17 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class ResNet101(ImagenetClassifier): + model_builder = tv_models.resnet101 + DEFAULT_WEIGHTS = DEFAULT_WEIGHTS diff --git a/qai_hub_models/models/resnet101/perf.yaml b/qai_hub_models/models/resnet101/perf.yaml new file mode 100644 index 00000000..3baa5107 --- /dev/null +++ b/qai_hub_models/models/resnet101/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: ResNet101 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 3008.0 + throughput: 332.4468085106383 + estimated_peak_memory_range: + min: 28672 + max: 1505496 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 145 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 145 + job_id: jnp1nw6lg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 2895.0 + throughput: 345.4231433506045 + estimated_peak_memory_range: + min: 622592 + max: 226606408 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 244 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 244 + job_id: jvgddq2lg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:20:33.212112Z' diff --git a/qai_hub_models/models/resnet101/test.py b/qai_hub_models/models/resnet101/test.py new file mode 100644 index 00000000..f27696cd --- /dev/null +++ b/qai_hub_models/models/resnet101/test.py @@ -0,0 +1,30 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.imagenet_classifier.test_utils import (
+    run_imagenet_classifier_test,
+    run_imagenet_classifier_trace_test,
+)
+from qai_hub_models.models.resnet101.demo import main as demo_main
+from qai_hub_models.models.resnet101.model import MODEL_ID, ResNet101
+
+
+def test_task():
+    run_imagenet_classifier_test(
+        ResNet101.from_pretrained(),
+        MODEL_ID,
+        probability_threshold=0.45,
+        diff_tol=0.005,
+        rtol=0.02,
+        atol=0.02,
+    )
+
+
+def test_trace():
+    run_imagenet_classifier_trace_test(ResNet101.from_pretrained())
+
+
+def test_demo():
+    # Verify demo does not crash
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/resnet101_quantized/README.md b/qai_hub_models/models/resnet101_quantized/README.md
new file mode 100644
index 00000000..4f165dfb
--- /dev/null
+++ b/qai_hub_models/models/resnet101_quantized/README.md
@@ -0,0 +1,50 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [ResNet101Quantized: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/resnet101_quantized)
+
+ResNet101 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases.
+
+This is based on the implementation of ResNet101Quantized found
+[here](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/resnet101_quantized).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once the `qai_hub_models` package is installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.resnet101_quantized.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions. A short Python usage
+sketch also appears after the License section below.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.resnet101_quantized.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of ResNet101Quantized can be found
+  [here](https://github.com/pytorch/vision/blob/main/LICENSE).
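+
+## Usage in Python (sketch)
+
+The snippet below is a minimal, illustrative sketch of loading the pre-quantized
+model from Python rather than the CLI. It assumes the optional AIMET dependencies
+required by this model are installed; the `aimet_encodings` values mirror the
+options documented in `model.py`, and the file path shown is only a placeholder.
+
+```python
+from qai_hub_models.models.resnet101_quantized import Model
+
+# The default (equivalent to aimet_encodings="DEFAULT") fetches pre-computed
+# AIMET encodings; a file path loads your own; None skips encodings entirely.
+quantized = Model.from_pretrained()
+# quantized = Model.from_pretrained(aimet_encodings="path/to/encodings.json")
+# quantized = Model.from_pretrained(aimet_encodings=None)
+
+# The expected input layout comes from the model's input spec, e.g. (1, 3, 224, 224).
+input_shape = quantized.get_input_spec()["image_tensor"][0]
+print("Expected input shape:", input_shape)
+```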
+ + +## References +* [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py) diff --git a/qai_hub_models/models/resnet101_quantized/__init__.py b/qai_hub_models/models/resnet101_quantized/__init__.py new file mode 100644 index 00000000..fd2b29c7 --- /dev/null +++ b/qai_hub_models/models/resnet101_quantized/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import ResNet101Quantizable as Model # noqa: F401 diff --git a/qai_hub_models/models/resnet101_quantized/demo.py b/qai_hub_models/models/resnet101_quantized/demo.py new file mode 100644 index 00000000..a3c5dff7 --- /dev/null +++ b/qai_hub_models/models/resnet101_quantized/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.resnet101_quantized.model import ResNet101Quantizable + + +def main(is_test: bool = False): + imagenet_demo(ResNet101Quantizable, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnet101_quantized/export.py b/qai_hub_models/models/resnet101_quantized/export.py new file mode 100644 index 00000000..5ddadd22 --- /dev/null +++ b/qai_hub_models/models/resnet101_quantized/export.py @@ -0,0 +1,199 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.resnet101_quantized import Model +from qai_hub_models.utils.args import ( + TargetRuntime, + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) +from qai_hub_models.utils.qnn_helpers import get_qnn_inputs + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "resnet101_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "resnet101_quantized", + "ResNet101Quantized", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = model.convert_to_hub_source_model( + target_runtime, output_path, input_spec + ) + if target_runtime == TargetRuntime.TFLITE: + quant_calibration_data = None + else: + quant_calibration_data = model.get_calibration_data(target_runtime, input_spec) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + calibration_data=quant_calibration_data, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + hub_inputs = sample_inputs + if target_runtime == TargetRuntime.QNN: + hub_inputs = get_qnn_inputs(compile_job, sample_inputs) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnet101_quantized/info.yaml b/qai_hub_models/models/resnet101_quantized/info.yaml new file mode 100644 index 00000000..e9f4491f --- /dev/null +++ b/qai_hub_models/models/resnet101_quantized/info.yaml @@ -0,0 +1,41 @@ +name: ResNet101Quantized +# id must match with the model dir name in qai_hub_models +id: resnet101_quantized +status: public +headline: Imagenet classifier and general purpose backbone. 
+domain: Computer Vision +use_case: Image Classification +description: ResNet101 is a machine learning model that can classify images from the + Imagenet dataset. It can also be used as a backbone in building more complex models + for specific use cases. +tags: + - backbone + - quantized +research_paper: https://arxiv.org/abs/1512.03385 +research_paper_title: Deep Residual Learning for Image Recognition +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 44.5M + Model size: 43.9 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/resnet101_quantized/model.py b/qai_hub_models/models/resnet101_quantized/model.py new file mode 100644 index 00000000..1248f5a1 --- /dev/null +++ b/qai_hub_models/models/resnet101_quantized/model.py @@ -0,0 +1,81 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +# isort: off +# This verifies aimet is installed, and this must be included first. +from qai_hub_models.utils.quantization_aimet import ( + AIMETQuantizableMixin, +) + +# isort: on + +import torch +from aimet_torch.cross_layer_equalization import ( + equalize_bn_folded_model, + fold_all_batch_norms, +) +from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim + +from qai_hub_models.models.resnet101.model import ResNet101 +from qai_hub_models.utils.aimet.config_loader import get_per_channel_aimet_config +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 3 +DEFAULT_ENCODINGS = "resnet101_quantized_encodings.json" + + +class ResNet101Quantizable(AIMETQuantizableMixin, ResNet101): + """ResNet101 with post train quantization support. + + Supports only 8 bit weights and activations, and only loads pre-quantized checkpoints. + Support for quantizing using your own weights & data will come at a later date.""" + + def __init__( + self, + sim_model: QuantizationSimModel, + ) -> None: + ResNet101.__init__(self, sim_model.model) + AIMETQuantizableMixin.__init__( + self, sim_model, needs_onnx_direct_aimet_export=False + ) + + @classmethod + def from_pretrained( + cls, + aimet_encodings: str | None = "DEFAULT", + ) -> "ResNet101Quantizable": + """ + Parameters: + aimet_encodings: + if "DEFAULT": Loads the model with aimet encodings calibrated on imagenette. + elif None: Doesn't load any encodings. Used when computing encodings. + else: Interprets as a filepath and loads the encodings stored there. 
+ """ + model = ResNet101.from_pretrained() + input_shape = model.get_input_spec()["image_tensor"][0] + dummy_input = torch.rand(input_shape) + pairs = fold_all_batch_norms(model, input_shape, dummy_input) + equalize_bn_folded_model(model, input_shape, pairs, dummy_input) + + sim = QuantizationSimModel( + model.net, + quant_scheme="tf_enhanced", + default_param_bw=8, + default_output_bw=8, + config_file=get_per_channel_aimet_config(), + dummy_input=torch.rand(input_shape), + ) + + if aimet_encodings: + if aimet_encodings == "DEFAULT": + aimet_encodings = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_ENCODINGS + ).fetch() + load_encodings_to_sim(sim, aimet_encodings) + + sim.model.eval() + return cls(sim) diff --git a/qai_hub_models/models/resnet101_quantized/perf.yaml b/qai_hub_models/models/resnet101_quantized/perf.yaml new file mode 100644 index 00000000..b8f30516 --- /dev/null +++ b/qai_hub_models/models/resnet101_quantized/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: ResNet101Quantized + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 74926.0 + throughput: 13.346501881856765 + estimated_peak_memory_range: + min: 151552 + max: 2762960 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 149 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 149 + job_id: joprl2nep + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:31:35.238685Z' diff --git a/qai_hub_models/models/resnet101_quantized/test.py b/qai_hub_models/models/resnet101_quantized/test.py new file mode 100644 index 00000000..fb9b6b7c --- /dev/null +++ b/qai_hub_models/models/resnet101_quantized/test.py @@ -0,0 +1,41 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.imagenet_classifier.test_utils import (
+    run_imagenet_classifier_test,
+    run_imagenet_classifier_trace_test,
+)
+from qai_hub_models.models.resnet101_quantized.demo import main as demo_main
+from qai_hub_models.models.resnet101_quantized.model import (
+    MODEL_ASSET_VERSION,
+    MODEL_ID,
+    ResNet101Quantizable,
+)
+
+
+def test_task():
+    run_imagenet_classifier_test(
+        ResNet101Quantizable.from_pretrained(),
+        MODEL_ID,
+        probability_threshold=0.45,
+        diff_tol=0.005,
+        rtol=0.02,
+        atol=0.2,
+        asset_version=MODEL_ASSET_VERSION,
+    )
+
+
+def test_trace():
+    run_imagenet_classifier_trace_test(
+        ResNet101Quantizable.from_pretrained(),
+        is_quantized=True,
+        diff_tol=0.005,
+        rtol=0.02,
+        atol=0.2,
+    )
+
+
+def test_demo():
+    # Verify demo does not crash
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/resnet18/README.md b/qai_hub_models/models/resnet18/README.md
new file mode 100644
index 00000000..f80fd950
--- /dev/null
+++ b/qai_hub_models/models/resnet18/README.md
@@ -0,0 +1,50 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [ResNet18: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/resnet18)
+
+ResNet18 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases.
+
+This is based on the implementation of ResNet18 found
+[here](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/resnet18).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once the `qai_hub_models` package is installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.resnet18.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions. A short Python usage
+sketch also appears after the License section below.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.resnet18.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of ResNet18 can be found
+  [here](https://github.com/pytorch/vision/blob/main/LICENSE).
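+
+## Usage in Python (sketch)
+
+The snippet below is a small, illustrative sanity check that runs the float
+ResNet18 wrapper locally in PyTorch, with no Qualcomm® AI Hub access required.
+The input-spec lookup mirrors how `export.py` builds its dummy inputs; the random
+input is for shape checking only and does not produce a meaningful prediction.
+
+```python
+import torch
+
+from qai_hub_models.models.resnet18 import Model
+
+model = Model.from_pretrained()  # default torchvision weights per model.py
+shape = model.get_input_spec()["image_tensor"][0]  # e.g. (1, 3, 224, 224)
+
+with torch.no_grad():
+    out = model(torch.rand(*shape))
+
+# A random input gives an arbitrary class, but confirms shapes and wiring.
+print("Output shape:", tuple(out.shape), "argmax:", int(out.argmax(dim=-1)))
+```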
+ + +## References +* [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py) diff --git a/qai_hub_models/models/resnet18/__init__.py b/qai_hub_models/models/resnet18/__init__.py new file mode 100644 index 00000000..1f435f6f --- /dev/null +++ b/qai_hub_models/models/resnet18/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import ResNet18 as Model # noqa: F401 diff --git a/qai_hub_models/models/resnet18/demo.py b/qai_hub_models/models/resnet18/demo.py new file mode 100644 index 00000000..8d7dcc1b --- /dev/null +++ b/qai_hub_models/models/resnet18/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.resnet18.model import ResNet18 + + +def main(is_test: bool = False): + imagenet_demo(ResNet18, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnet18/export.py b/qai_hub_models/models/resnet18/export.py new file mode 100644 index 00000000..66132e7a --- /dev/null +++ b/qai_hub_models/models/resnet18/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.resnet18 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. 
Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "resnet18" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "resnet18", + "ResNet18", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." 
+ ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnet18/info.yaml b/qai_hub_models/models/resnet18/info.yaml new file mode 100644 index 00000000..91be8429 --- /dev/null +++ b/qai_hub_models/models/resnet18/info.yaml @@ -0,0 +1,40 @@ +name: ResNet18 +# id must match with the model dir name in qai_hub_models +id: resnet18 +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +use_case: Image Classification +description: ResNet18 is a machine learning model that can classify images from the + Imagenet dataset. It can also be used as a backbone in building more complex models + for specific use cases. +tags: + - backbone +research_paper: https://arxiv.org/abs/1512.03385 +research_paper_title: Deep Residual Learning for Image Recognition +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 11.7M + Model size: 44.6 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/resnet18/model.py b/qai_hub_models/models/resnet18/model.py new file mode 100644 index 00000000..ef618b19 --- /dev/null +++ b/qai_hub_models/models/resnet18/model.py @@ -0,0 +1,17 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class ResNet18(ImagenetClassifier): + model_builder = tv_models.resnet18 + DEFAULT_WEIGHTS = DEFAULT_WEIGHTS diff --git a/qai_hub_models/models/resnet18/perf.yaml b/qai_hub_models/models/resnet18/perf.yaml new file mode 100644 index 00000000..f58a8d78 --- /dev/null +++ b/qai_hub_models/models/resnet18/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: ResNet18 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 1054.0 + throughput: 948.7666034155598 + estimated_peak_memory_range: + min: 12288 + max: 1722456 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 36 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 36 + job_id: j1p3z1xx5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 980.0 + throughput: 1020.4081632653061 + estimated_peak_memory_range: + min: 16384 + max: 84353688 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 52 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 52 + job_id: jwgolno4g + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:25:13.005640Z' diff --git a/qai_hub_models/models/resnet18/test.py b/qai_hub_models/models/resnet18/test.py new file mode 100644 index 00000000..591e93e3 --- /dev/null +++ b/qai_hub_models/models/resnet18/test.py @@ -0,0 +1,30 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.imagenet_classifier.test_utils import (
+    run_imagenet_classifier_test,
+    run_imagenet_classifier_trace_test,
+)
+from qai_hub_models.models.resnet18.demo import main as demo_main
+from qai_hub_models.models.resnet18.model import MODEL_ID, ResNet18
+
+
+def test_task():
+    run_imagenet_classifier_test(
+        ResNet18.from_pretrained(),
+        MODEL_ID,
+        probability_threshold=0.45,
+        diff_tol=0.005,
+        atol=0.02,
+        rtol=0.2,
+    )
+
+
+def test_trace():
+    run_imagenet_classifier_trace_test(ResNet18.from_pretrained())
+
+
+def test_demo():
+    # Verify demo does not crash
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/resnet18_quantized/README.md b/qai_hub_models/models/resnet18_quantized/README.md
new file mode 100644
index 00000000..5b2a2518
--- /dev/null
+++ b/qai_hub_models/models/resnet18_quantized/README.md
@@ -0,0 +1,50 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [ResNet18Quantized: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/resnet18_quantized)
+
+ResNet18 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases.
+
+This is based on the implementation of ResNet18Quantized found
+[here](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/resnet18_quantized).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once the `qai_hub_models` package is installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.resnet18_quantized.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions. A programmatic export
+sketch also appears after the License section below.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.resnet18_quantized.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of ResNet18Quantized can be found
+  [here](https://github.com/pytorch/vision/blob/main/LICENSE).
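+
+## Programmatic export (sketch)
+
+The export script can also be driven from Python. The illustrative call below
+mirrors the CLI flags of `export.py`; it assumes Qualcomm® AI Hub access is
+already configured, and the device name and output directory are only examples.
+
+```python
+from qai_hub_models.models.resnet18_quantized.export import export_model
+
+# Compile and download the .tflite asset only; the profile and inference jobs
+# are skipped, so the corresponding return values are None.
+compile_job, profile_job, inference_job = export_model(
+    device="Samsung Galaxy S23",
+    skip_profiling=True,
+    skip_inferencing=True,
+    output_dir="build",
+)
+print("Compiled model job:", compile_job)
+```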
+ + +## References +* [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py) diff --git a/qai_hub_models/models/resnet18_quantized/__init__.py b/qai_hub_models/models/resnet18_quantized/__init__.py new file mode 100644 index 00000000..2cfdb31c --- /dev/null +++ b/qai_hub_models/models/resnet18_quantized/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import ResNet18Quantizable as Model # noqa: F401 diff --git a/qai_hub_models/models/resnet18_quantized/demo.py b/qai_hub_models/models/resnet18_quantized/demo.py new file mode 100644 index 00000000..bc848d5c --- /dev/null +++ b/qai_hub_models/models/resnet18_quantized/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.resnet18_quantized.model import ResNet18Quantizable + + +def main(is_test: bool = False): + imagenet_demo(ResNet18Quantizable, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnet18_quantized/export.py b/qai_hub_models/models/resnet18_quantized/export.py new file mode 100644 index 00000000..47adb241 --- /dev/null +++ b/qai_hub_models/models/resnet18_quantized/export.py @@ -0,0 +1,199 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.resnet18_quantized import Model +from qai_hub_models.utils.args import ( + TargetRuntime, + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) +from qai_hub_models.utils.qnn_helpers import get_qnn_inputs + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "resnet18_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "resnet18_quantized", + "ResNet18Quantized", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = model.convert_to_hub_source_model( + target_runtime, output_path, input_spec + ) + if target_runtime == TargetRuntime.TFLITE: + quant_calibration_data = None + else: + quant_calibration_data = model.get_calibration_data(target_runtime, input_spec) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + calibration_data=quant_calibration_data, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + hub_inputs = sample_inputs + if target_runtime == TargetRuntime.QNN: + hub_inputs = get_qnn_inputs(compile_job, sample_inputs) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnet18_quantized/info.yaml b/qai_hub_models/models/resnet18_quantized/info.yaml new file mode 100644 index 00000000..20fcdfc5 --- /dev/null +++ b/qai_hub_models/models/resnet18_quantized/info.yaml @@ -0,0 +1,41 @@ +name: ResNet18Quantized +# id must match with the model dir name in qai_hub_models +id: resnet18_quantized +status: public +headline: Imagenet classifier and general purpose backbone. 
+domain: Computer Vision +use_case: Image Classification +description: ResNet18 is a machine learning model that can classify images from the + Imagenet dataset. It can also be used as a backbone in building more complex models + for specific use cases. +tags: + - backbone + - quantized +research_paper: https://arxiv.org/abs/1512.03385 +research_paper_title: Deep Residual Learning for Image Recognition +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 11.7M + Model size: 11.3 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/resnet18_quantized/model.py b/qai_hub_models/models/resnet18_quantized/model.py new file mode 100644 index 00000000..8a650fd3 --- /dev/null +++ b/qai_hub_models/models/resnet18_quantized/model.py @@ -0,0 +1,76 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +# isort: off +# This verifies aimet is installed, and this must be included first. +from qai_hub_models.utils.quantization_aimet import ( + AIMETQuantizableMixin, +) + +# isort: on + +import torch +from aimet_torch.cross_layer_equalization import equalize_model +from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim + +from qai_hub_models.models.resnet18.model import ResNet18 +from qai_hub_models.utils.aimet.config_loader import get_per_channel_aimet_config +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 5 +DEFAULT_ENCODINGS = "resnet18_quantized_encodings.json" + + +class ResNet18Quantizable(AIMETQuantizableMixin, ResNet18): + """ResNet with post train quantization support. + + Supports only 8 bit weights and activations, and only loads pre-quantized checkpoints. + Support for quantizing using your own weights & data will come at a later date.""" + + def __init__( + self, + resnet18_model: QuantizationSimModel, + ) -> None: + ResNet18.__init__(self, resnet18_model.model) + AIMETQuantizableMixin.__init__( + self, resnet18_model, needs_onnx_direct_aimet_export=False + ) + + @classmethod + def from_pretrained( + cls, + aimet_encodings: str | None = "DEFAULT", + ) -> "ResNet18Quantizable": + """ + Parameters: + aimet_encodings: + if "DEFAULT": Loads the model with aimet encodings calibrated on imagenette. + elif None: Doesn't load any encodings. Used when computing encodings. + else: Interprets as a filepath and loads the encodings stored there. 
+ """ + resnet18 = ResNet18.from_pretrained() + input_shape = resnet18.get_input_spec()["image_tensor"][0] + + equalize_model(resnet18, input_shape) + sim = QuantizationSimModel( + resnet18.net, + quant_scheme="tf_enhanced", + default_param_bw=8, + default_output_bw=8, + config_file=get_per_channel_aimet_config(), + dummy_input=torch.rand(input_shape), + ) + + if aimet_encodings: + if aimet_encodings == "DEFAULT": + aimet_encodings = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_ENCODINGS + ).fetch() + load_encodings_to_sim(sim, aimet_encodings) + + sim.model.eval() + return cls(sim) diff --git a/qai_hub_models/models/resnet18_quantized/perf.yaml b/qai_hub_models/models/resnet18_quantized/perf.yaml new file mode 100644 index 00000000..ba508ccb --- /dev/null +++ b/qai_hub_models/models/resnet18_quantized/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: ResNet18Quantized + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 50502.0 + throughput: 19.801195992237933 + estimated_peak_memory_range: + min: 49152 + max: 14562768 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 40 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 40 + job_id: jegnzmrvg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:33:04.350551Z' diff --git a/qai_hub_models/models/resnet18_quantized/test.py b/qai_hub_models/models/resnet18_quantized/test.py new file mode 100644 index 00000000..88db0ac9 --- /dev/null +++ b/qai_hub_models/models/resnet18_quantized/test.py @@ -0,0 +1,41 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.resnet18_quantized.demo import main as demo_main +from qai_hub_models.models.resnet18_quantized.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + ResNet18Quantizable, +) + + +def test_task(): + run_imagenet_classifier_test( + ResNet18Quantizable.from_pretrained(), + MODEL_ID, + probability_threshold=0.45, + diff_tol=0.005, + rtol=0.02, + atol=0.2, + asset_version=MODEL_ASSET_VERSION, + ) + + +def test_trace(): + run_imagenet_classifier_trace_test( + ResNet18Quantizable.from_pretrained(), + diff_tol=0.007, + rtol=0.02, + atol=0.2, + is_quantized=True, + ) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/resnet50/README.md b/qai_hub_models/models/resnet50/README.md new file mode 100644 index 00000000..66d0a0e8 --- /dev/null +++ b/qai_hub_models/models/resnet50/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [ResNet50: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/resnet50) + +ResNet50 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of ResNet50 found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/resnet50). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.resnet50.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.resnet50.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of ResNet50 can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). 
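Both commands above ultimately construct the network through `ResNet50.from_pretrained()` (see `model.py` further down in this diff). As a rough sketch of using the class directly in PyTorch, assuming the classifier accepts the image tensor reported by `get_input_spec()` and returns class scores:

```python
import torch

# __init__.py re-exports ResNet50 as `Model`.
from qai_hub_models.models.resnet50 import Model

model = Model.from_pretrained()  # loads the IMAGENET1K_V1 torchvision weights
model.eval()

# get_input_spec() describes the expected input, e.g. {"image_tensor": ((1, 3, 224, 224), "float32")}.
shape = model.get_input_spec()["image_tensor"][0]

with torch.no_grad():
    scores = model(torch.rand(shape))  # assumption: forward() takes the image tensor directly

print("Predicted Imagenet class index:", int(scores.argmax(dim=-1)))
```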
+ + +## References +* [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py) diff --git a/qai_hub_models/models/resnet50/__init__.py b/qai_hub_models/models/resnet50/__init__.py new file mode 100644 index 00000000..5fd393ca --- /dev/null +++ b/qai_hub_models/models/resnet50/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import ResNet50 as Model # noqa: F401 diff --git a/qai_hub_models/models/resnet50/demo.py b/qai_hub_models/models/resnet50/demo.py new file mode 100644 index 00000000..81d4a995 --- /dev/null +++ b/qai_hub_models/models/resnet50/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.resnet50.model import ResNet50 + + +def main(is_test: bool = False): + imagenet_demo(ResNet50, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnet50/export.py b/qai_hub_models/models/resnet50/export.py new file mode 100644 index 00000000..c72917fc --- /dev/null +++ b/qai_hub_models/models/resnet50/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.resnet50 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. 
Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "resnet50" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "resnet50", + "ResNet50", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." 
+ ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnet50/info.yaml b/qai_hub_models/models/resnet50/info.yaml new file mode 100644 index 00000000..5f726eb5 --- /dev/null +++ b/qai_hub_models/models/resnet50/info.yaml @@ -0,0 +1,39 @@ +name: ResNet50 +# id must match with the model dir name in qai_hub_models +id: resnet50 +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +use_case: Image Classification +description: ResNet50 is a machine learning model that can classify images from the + Imagenet dataset. It can also be used as a backbone in building more complex models + for specific use cases. +tags: + - backbone +research_paper: https://arxiv.org/abs/1512.03385 +research_paper_title: Deep Residual Learning for Image Recognition +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 25.5M + Model size: 97.4 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/resnet50/model.py b/qai_hub_models/models/resnet50/model.py new file mode 100644 index 00000000..eaf573f2 --- /dev/null +++ b/qai_hub_models/models/resnet50/model.py @@ -0,0 +1,17 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class ResNet50(ImagenetClassifier): + model_builder = tv_models.resnet50 + DEFAULT_WEIGHTS = DEFAULT_WEIGHTS diff --git a/qai_hub_models/models/resnet50/perf.yaml b/qai_hub_models/models/resnet50/perf.yaml new file mode 100644 index 00000000..f070af48 --- /dev/null +++ b/qai_hub_models/models/resnet50/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: ResNet50 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 1904.0 + throughput: 525.2100840336135 + estimated_peak_memory_range: + min: 20480 + max: 2314168 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 77 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 77 + job_id: j1p8em6zp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 1768.0 + throughput: 565.6108597285067 + estimated_peak_memory_range: + min: 634880 + max: 186280024 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 125 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 125 + job_id: jogk2qoyg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:07:34.762219Z' diff --git a/qai_hub_models/models/resnet50/test.py b/qai_hub_models/models/resnet50/test.py new file mode 100644 index 00000000..ca60e960 --- /dev/null +++ b/qai_hub_models/models/resnet50/test.py @@ -0,0 +1,30 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.resnet50.demo import main as demo_main +from qai_hub_models.models.resnet50.model import MODEL_ID, ResNet50 + + +def test_task(): + run_imagenet_classifier_test( + ResNet50.from_pretrained(), + MODEL_ID, + probability_threshold=0.45, + diff_tol=0.005, + atol=0.02, + rtol=0.2, + ) + + +def test_trace(): + run_imagenet_classifier_trace_test(ResNet50.from_pretrained()) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/resnext101/README.md b/qai_hub_models/models/resnext101/README.md new file mode 100644 index 00000000..f8dd7a49 --- /dev/null +++ b/qai_hub_models/models/resnext101/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [ResNeXt101: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/resnext101) + +ResNeXt101 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of ResNeXt101 found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/resnext101). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.resnext101.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.resnext101.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of ResNeXt101 can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). + + +## References +* [Aggregated Residual Transformations for Deep Neural Networks](https://arxiv.org/abs/1611.05431) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py) diff --git a/qai_hub_models/models/resnext101/__init__.py b/qai_hub_models/models/resnext101/__init__.py new file mode 100644 index 00000000..dcb3f5df --- /dev/null +++ b/qai_hub_models/models/resnext101/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import ResNeXt101 as Model # noqa: F401 diff --git a/qai_hub_models/models/resnext101/demo.py b/qai_hub_models/models/resnext101/demo.py new file mode 100644 index 00000000..ffb7bc4d --- /dev/null +++ b/qai_hub_models/models/resnext101/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.resnext101.model import ResNeXt101 + + +def main(is_test: bool = False): + imagenet_demo(ResNeXt101, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnext101/export.py b/qai_hub_models/models/resnext101/export.py new file mode 100644 index 00000000..b6a88bfe --- /dev/null +++ b/qai_hub_models/models/resnext101/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.resnext101 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. 
+ skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "resnext101" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "resnext101", + "ResNeXt101", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnext101/info.yaml b/qai_hub_models/models/resnext101/info.yaml new file mode 100644 index 00000000..e2662e67 --- /dev/null +++ b/qai_hub_models/models/resnext101/info.yaml @@ -0,0 +1,39 @@ +name: ResNeXt101 +# id must match with the model dir name in qai_hub_models +id: resnext101 +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +use_case: Image Classification +description: ResNeXt101 is a machine learning model that can classify images from + the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. +tags: + - backbone +research_paper: https://arxiv.org/abs/1611.05431 +research_paper_title: Aggregated Residual Transformations for Deep Neural Networks +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 88.7M + Model size: 338 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/resnext101/model.py b/qai_hub_models/models/resnext101/model.py new file mode 100644 index 00000000..108a5f1a --- /dev/null +++ b/qai_hub_models/models/resnext101/model.py @@ -0,0 +1,17 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class ResNeXt101(ImagenetClassifier): + model_builder = tv_models.resnext101_32x8d + DEFAULT_WEIGHTS = DEFAULT_WEIGHTS diff --git a/qai_hub_models/models/resnext101/perf.yaml b/qai_hub_models/models/resnext101/perf.yaml new file mode 100644 index 00000000..05f03000 --- /dev/null +++ b/qai_hub_models/models/resnext101/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: ResNeXt101 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 6434.0 + throughput: 155.4243083618278 + estimated_peak_memory_range: + min: 28672 + max: 2709368 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 145 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 145 + job_id: j1pvlr475 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 6146.0 + throughput: 162.70745200130165 + estimated_peak_memory_range: + min: 16384 + max: 38657672 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 244 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 244 + job_id: j7gjr217p + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:21:26.759411Z' diff --git a/qai_hub_models/models/resnext101/test.py b/qai_hub_models/models/resnext101/test.py new file mode 100644 index 00000000..257a15b6 --- /dev/null +++ b/qai_hub_models/models/resnext101/test.py @@ -0,0 +1,25 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.resnext101.demo import main as demo_main +from qai_hub_models.models.resnext101.model import MODEL_ID, ResNeXt101 + + +def test_task(): + run_imagenet_classifier_test( + ResNeXt101.from_pretrained(), MODEL_ID, atol=0.02, rtol=0.02, diff_tol=0.005 + ) + + +def test_trace(): + run_imagenet_classifier_trace_test(ResNeXt101.from_pretrained()) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/resnext101_quantized/README.md b/qai_hub_models/models/resnext101_quantized/README.md new file mode 100644 index 00000000..51d5dd6f --- /dev/null +++ b/qai_hub_models/models/resnext101_quantized/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [ResNeXt101Quantized: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/resnext101_quantized) + +ResNeXt101 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of ResNeXt101Quantized found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/resnext101_quantized). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.resnext101_quantized.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.resnext101_quantized.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of ResNeXt101Quantized can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). 
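The quantized model classes added in this diff keep the same `from_pretrained()` entry point but wrap an AIMET `QuantizationSimModel` (see `model.py` below), so the AIMET toolkit must be installed. A short sketch of the loading options documented in that file; the local encodings path is purely hypothetical:

```python
# __init__.py re-exports ResNeXt101Quantizable as `Model`; importing it requires AIMET.
from qai_hub_models.models.resnext101_quantized import Model

# Default: fetch pre-computed AIMET encodings (calibrated on imagenette) from the asset store.
quantized = Model.from_pretrained()

# No encodings, e.g. when you intend to compute them yourself.
uncalibrated = Model.from_pretrained(aimet_encodings=None)

# Or load encodings from a local file (hypothetical path, for illustration only).
local = Model.from_pretrained(aimet_encodings="my_resnext101_encodings.json")
```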
+ + +## References +* [Aggregated Residual Transformations for Deep Neural Networks](https://arxiv.org/abs/1611.05431) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py) diff --git a/qai_hub_models/models/resnext101_quantized/__init__.py b/qai_hub_models/models/resnext101_quantized/__init__.py new file mode 100644 index 00000000..07ea41cd --- /dev/null +++ b/qai_hub_models/models/resnext101_quantized/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import ResNeXt101Quantizable as Model # noqa: F401 diff --git a/qai_hub_models/models/resnext101_quantized/demo.py b/qai_hub_models/models/resnext101_quantized/demo.py new file mode 100644 index 00000000..51d4cde1 --- /dev/null +++ b/qai_hub_models/models/resnext101_quantized/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.resnext101_quantized.model import ResNeXt101Quantizable + + +def main(is_test: bool = False): + imagenet_demo(ResNeXt101Quantizable, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnext101_quantized/export.py b/qai_hub_models/models/resnext101_quantized/export.py new file mode 100644 index 00000000..cb8ccba0 --- /dev/null +++ b/qai_hub_models/models/resnext101_quantized/export.py @@ -0,0 +1,199 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.resnext101_quantized import Model +from qai_hub_models.utils.args import ( + TargetRuntime, + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) +from qai_hub_models.utils.qnn_helpers import get_qnn_inputs + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "resnext101_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "resnext101_quantized", + "ResNeXt101Quantized", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = model.convert_to_hub_source_model( + target_runtime, output_path, input_spec + ) + if target_runtime == TargetRuntime.TFLITE: + quant_calibration_data = None + else: + quant_calibration_data = model.get_calibration_data(target_runtime, input_spec) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + calibration_data=quant_calibration_data, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + hub_inputs = sample_inputs + if target_runtime == TargetRuntime.QNN: + hub_inputs = get_qnn_inputs(compile_job, sample_inputs) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnext101_quantized/info.yaml b/qai_hub_models/models/resnext101_quantized/info.yaml new file mode 100644 index 00000000..8d169468 --- /dev/null +++ b/qai_hub_models/models/resnext101_quantized/info.yaml @@ -0,0 +1,41 @@ +name: ResNeXt101Quantized +# id must match with the model dir name in qai_hub_models +id: resnext101_quantized +status: public +headline: Imagenet classifier and general purpose backbone. 
+domain: Computer Vision +use_case: Image Classification +description: ResNeXt101 is a machine learning model that can classify images from + the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. +tags: + - backbone + - quantized +research_paper: https://arxiv.org/abs/1611.05431 +research_paper_title: Aggregated Residual Transformations for Deep Neural Networks +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 88.7M + Model size: 87.3 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/resnext101_quantized/model.py b/qai_hub_models/models/resnext101_quantized/model.py new file mode 100644 index 00000000..6a3bfa56 --- /dev/null +++ b/qai_hub_models/models/resnext101_quantized/model.py @@ -0,0 +1,76 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +# isort: off +# This verifies aimet is installed, and this must be included first. +from qai_hub_models.utils.quantization_aimet import ( + AIMETQuantizableMixin, +) + +# isort: on + +import torch +from aimet_torch.cross_layer_equalization import equalize_model +from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim + +from qai_hub_models.models.resnext101.model import ResNeXt101 +from qai_hub_models.utils.aimet.config_loader import get_per_channel_aimet_config +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 3 +DEFAULT_ENCODINGS = "resnext101_quantized_encodings.json" + + +class ResNeXt101Quantizable(AIMETQuantizableMixin, ResNeXt101): + """ResNeXt101 with post train quantization support. + + Supports only 8 bit weights and activations, and only loads pre-quantized checkpoints. + Support for quantizing using your own weights & data will come at a later date.""" + + def __init__( + self, + sim_model: QuantizationSimModel, + ) -> None: + ResNeXt101.__init__(self, sim_model.model) + AIMETQuantizableMixin.__init__( + self, sim_model, needs_onnx_direct_aimet_export=False + ) + + @classmethod + def from_pretrained( + cls, + aimet_encodings: str | None = "DEFAULT", + ) -> "ResNeXt101Quantizable": + """ + Parameters: + aimet_encodings: + if "DEFAULT": Loads the model with aimet encodings calibrated on imagenette. + elif None: Doesn't load any encodings. Used when computing encodings. + else: Interprets as a filepath and loads the encodings stored there. 
+ """ + model = ResNeXt101.from_pretrained() + input_shape = model.get_input_spec()["image_tensor"][0] + + equalize_model(model, input_shape) + sim = QuantizationSimModel( + model.net, + quant_scheme="tf_enhanced", + default_param_bw=8, + default_output_bw=8, + config_file=get_per_channel_aimet_config(), + dummy_input=torch.rand(input_shape), + ) + + if aimet_encodings: + if aimet_encodings == "DEFAULT": + aimet_encodings = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_ENCODINGS + ).fetch() + load_encodings_to_sim(sim, aimet_encodings) + + sim.model.eval() + return cls(sim) diff --git a/qai_hub_models/models/resnext101_quantized/perf.yaml b/qai_hub_models/models/resnext101_quantized/perf.yaml new file mode 100644 index 00000000..0d14a5d4 --- /dev/null +++ b/qai_hub_models/models/resnext101_quantized/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: ResNeXt101Quantized + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 76378.0 + throughput: 13.092775406530677 + estimated_peak_memory_range: + min: 143360 + max: 3223784 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 149 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 149 + job_id: jmg9zy8qp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:24:55.190881Z' diff --git a/qai_hub_models/models/resnext101_quantized/test.py b/qai_hub_models/models/resnext101_quantized/test.py new file mode 100644 index 00000000..8beed1b8 --- /dev/null +++ b/qai_hub_models/models/resnext101_quantized/test.py @@ -0,0 +1,41 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.resnext101_quantized.demo import main as demo_main +from qai_hub_models.models.resnext101_quantized.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + ResNeXt101Quantizable, +) + + +def test_task(): + run_imagenet_classifier_test( + ResNeXt101Quantizable.from_pretrained(), + MODEL_ID, + probability_threshold=0.46, + diff_tol=0.005, + rtol=0.02, + atol=0.2, + asset_version=MODEL_ASSET_VERSION, + ) + + +def test_trace(): + run_imagenet_classifier_trace_test( + ResNeXt101Quantizable.from_pretrained(), + is_quantized=True, + diff_tol=0.007, + rtol=0.02, + atol=0.2, + ) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/resnext50/README.md b/qai_hub_models/models/resnext50/README.md new file mode 100644 index 00000000..60c0361d --- /dev/null +++ b/qai_hub_models/models/resnext50/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [ResNeXt50: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/resnext50) + +ResNeXt50 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of ResNeXt50 found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/resnext50). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.resnext50.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.resnext50.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of ResNeXt50 can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). 
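+
+## Python usage (illustrative)
+
+A minimal sketch of using the model directly from Python, assuming only what is
+shown in this repository (`Model`/`ResNeXt50.from_pretrained` and the 224x224
+input resolution listed in `info.yaml`); the forward-call convention of the
+shared `ImagenetClassifier` wrapper is assumed to take a single image tensor:
+
+```python
+import torch
+
+from qai_hub_models.models.resnext50 import Model
+
+# Load the torchvision-backed classifier with its default IMAGENET1K_V2 weights
+model = Model.from_pretrained()
+model.eval()
+
+# Dummy NCHW input at the documented 224x224 resolution
+image = torch.rand(1, 3, 224, 224)
+with torch.no_grad():
+    scores = model(image)
+print(scores.shape)  # expected to be (1, 1000) Imagenet class scores
+```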
+ + +## References +* [Aggregated Residual Transformations for Deep Neural Networks](https://arxiv.org/abs/1611.05431) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py) diff --git a/qai_hub_models/models/resnext50/__init__.py b/qai_hub_models/models/resnext50/__init__.py new file mode 100644 index 00000000..36ec9696 --- /dev/null +++ b/qai_hub_models/models/resnext50/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import ResNeXt50 as Model # noqa: F401 diff --git a/qai_hub_models/models/resnext50/demo.py b/qai_hub_models/models/resnext50/demo.py new file mode 100644 index 00000000..46e8761a --- /dev/null +++ b/qai_hub_models/models/resnext50/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.resnext50.model import ResNeXt50 + + +def main(is_test: bool = False): + imagenet_demo(ResNeXt50, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnext50/export.py b/qai_hub_models/models/resnext50/export.py new file mode 100644 index 00000000..633926fd --- /dev/null +++ b/qai_hub_models/models/resnext50/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.resnext50 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. 
Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "resnext50" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "resnext50", + "ResNeXt50", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." 
+ ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/resnext50/info.yaml b/qai_hub_models/models/resnext50/info.yaml new file mode 100644 index 00000000..a2e0e3c4 --- /dev/null +++ b/qai_hub_models/models/resnext50/info.yaml @@ -0,0 +1,39 @@ +name: ResNeXt50 +# id must match with the model dir name in qai_hub_models +id: resnext50 +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: ResNeXt50 is a machine learning model that can classify images from the + Imagenet dataset. It can also be used as a backbone in building more complex models + for specific use cases. +use_case: Image Classification +tags: + - backbone +research_paper: https://arxiv.org/abs/1611.05431 +research_paper_title: Aggregated Residual Transformations for Deep Neural Networks +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 25.0M + Model size: 95.4 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/resnext50/model.py b/qai_hub_models/models/resnext50/model.py new file mode 100644 index 00000000..94a5239f --- /dev/null +++ b/qai_hub_models/models/resnext50/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V2" + + +class ResNeXt50(ImagenetClassifier): + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ImagenetClassifier: + net = tv_models.resnext50_32x4d(weights=weights) + return cls(net) diff --git a/qai_hub_models/models/resnext50/perf.yaml b/qai_hub_models/models/resnext50/perf.yaml new file mode 100644 index 00000000..7801c578 --- /dev/null +++ b/qai_hub_models/models/resnext50/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: ResNeXt50 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 2118.0 + throughput: 472.14353163361665 + estimated_peak_memory_range: + min: 16384 + max: 2188056 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 77 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 77 + job_id: jep2r94xg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 2068.0 + throughput: 483.55899419729207 + estimated_peak_memory_range: + min: 16384 + max: 67185584 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 125 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 125 + job_id: jqpyojqr5 + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:07:32.076107Z' diff --git a/qai_hub_models/models/resnext50/test.py b/qai_hub_models/models/resnext50/test.py new file mode 100644 index 00000000..923fbdeb --- /dev/null +++ b/qai_hub_models/models/resnext50/test.py @@ -0,0 +1,23 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.resnext50.demo import main as demo_main +from qai_hub_models.models.resnext50.model import MODEL_ID, ResNeXt50 + + +def test_task(): + run_imagenet_classifier_test(ResNeXt50.from_pretrained(), MODEL_ID) + + +def test_trace(): + run_imagenet_classifier_trace_test(ResNeXt50.from_pretrained()) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/sam/README.md b/qai_hub_models/models/sam/README.md new file mode 100644 index 00000000..3c30d212 --- /dev/null +++ b/qai_hub_models/models/sam/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [Segment-Anything-Model: High-quality segmentation mask generation around any object in an image with simple input prompt](https://aihub.qualcomm.com/models/sam) + +Transformer based encoder-decoder where prompts specify what to segment in an image thereby allowing segmentation without the need for additional training. The image encoder generates embeddings and the lightweight decoder operates on the embeddings for point and mask based image segmentation. + +This is based on the implementation of Segment-Anything-Model found +[here](https://github.com/facebookresearch/segment-anything). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/sam). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[sam]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.sam.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.sam.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of Segment-Anything-Model can be found + [here](https://github.com/facebookresearch/segment-anything/blob/main/LICENSE). 
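+
+## Python usage (illustrative)
+
+A minimal sketch of point-based segmentation with the `SAMApp` class defined in
+`app.py`; it mirrors `demo.py`, and the image path and the second point are
+placeholder values:
+
+```python
+import numpy as np
+
+from qai_hub_models.models.sam import App, Model
+from qai_hub_models.utils.asset_loaders import load_image
+
+# Load the SAM wrapper (vit_l checkpoint by default) and wrap it in the app
+app = App(Model.from_pretrained())
+
+# Run the encoder once to cache image embeddings for this image
+image = np.asarray(load_image("path/to/image.jpg"))
+app.prepare(image, single_mask_mode=True)
+
+# Two foreground points (label 1 = include this point in the mask)
+masks, scores, low_res_masks = app.generate_mask_from_points(
+    point_coords=[[500, 375], [600, 400]],
+    point_labels=[1, 1],
+)
+```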
+
+
+## References
+* [Segment Anything](https://arxiv.org/abs/2304.02643)
+* [Source Model Implementation](https://github.com/facebookresearch/segment-anything)
diff --git a/qai_hub_models/models/sam/__init__.py b/qai_hub_models/models/sam/__init__.py
new file mode 100644
index 00000000..bd135ea7
--- /dev/null
+++ b/qai_hub_models/models/sam/__init__.py
@@ -0,0 +1,7 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from .app import SAMApp as App  # noqa: F401
+from .model import MODEL_ID  # noqa: F401
+from .model import SAMQAIHMWrapper as Model  # noqa: F401
diff --git a/qai_hub_models/models/sam/app.py b/qai_hub_models/models/sam/app.py
new file mode 100644
index 00000000..74fd44b1
--- /dev/null
+++ b/qai_hub_models/models/sam/app.py
@@ -0,0 +1,133 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from __future__ import annotations
+
+from typing import Tuple, no_type_check
+
+import numpy as np
+import torch
+
+from qai_hub_models.models.sam.model import SAMQAIHMWrapper
+
+
+class SAMApp:
+    """
+    This class consists of light-weight "app code" that is required to perform end-to-end inference with the Segment-Anything Model.
+
+    The app uses 2 models:
+        * encoder (Given an input image, emits image embeddings to be used by the decoder)
+        * decoder (Lightweight decoder, modified to accept and work with a fixed image size)
+
+    For a given image input, the app will:
+        * Prepare: Runs the encoder on the given image and caches the resulting embeddings
+        * Generate masks: Uses the cached embeddings to generate masks for the given points
+    """
+
+    @no_type_check
+    def __init__(self, model: SAMQAIHMWrapper):
+        self.orig_img_size = None
+        self.image_embeddings = None
+        self.sam_qaihm_wrapper = model
+        self.sam_encoder = self.sam_qaihm_wrapper.get_sam_encoder()
+        self.sam_decoder = None
+
+    def prepare(self, input_image: np.ndarray, single_mask_mode=True):
+        """
+        Prepares the app for segmentation of the given input image
+            - Pre-processes the input image
+            - Initializes the decoder with the input image size
+
+        Parameters:
+            input_image: np.ndarray
+                Input RGB image loaded as numpy array.
+            single_mask_mode: bool
+                Set decoder to return a single mask for the given points.
+        """
+        if self.sam_encoder is None:
+            self.sam_encoder = self.sam_qaihm_wrapper.get_sam_encoder()
+
+        preprocessed_image = self.sam_encoder.preprocess_input_image(input_image)
+        self.image_embeddings = self.sam_encoder(preprocessed_image)
+
+        # Initialize decoder
+        self.orig_img_size = input_image.shape[:2]
+        self.sam_decoder = self.sam_qaihm_wrapper.get_sam_decoder(
+            self.orig_img_size, single_mask_mode
+        )
+
+    def reset(self):
+        """Reset app state"""
+        self.image_embeddings = None
+        self.orig_img_size = None
+        self.sam_decoder = None
+
+    def preprocess_point_coordinates(
+        self, input_coords: np.ndarray, image_shape: Tuple[int, int]
+    ):
+        """Preprocesses point coordinates to work with the decoder"""
+        if self.sam_encoder is None:
+            raise RuntimeError("Encoder is not initialized. 
Please run `app.prepare`.")
+        return torch.Tensor(
+            self.sam_encoder.transforms.apply_coords(input_coords, image_shape)
+        )
+
+    def predict(self, *args, **kwargs):
+        # See generate_mask_from_points.
+        return self.generate_mask_from_points(*args, **kwargs)
+
+    def generate_mask_from_points(
+        self,
+        point_coords: torch.Tensor,
+        point_labels: torch.Tensor,
+    ) -> torch.Tensor:
+        """
+        Generate masks from the given points
+
+        Parameters:
+            point_coords: torch.Tensor of shape [k, 2]
+                Point coordinates from the input image for segmentation
+            point_labels: torch.Tensor of shape [k]
+                Point labels to select/de-select a given point for segmentation
+                e.g. the corresponding value is 1 if the point is to be included, otherwise 0
+        Returns:
+            upscaled_masks: torch.Tensor of shape [1, k, *orig_img_size]
+            scores: torch.Tensor of shape [1, k]
+            masks: torch.Tensor of shape [1, k, 256, 256]
+                Use these low-resolution masks to further slice and upscale for resolutions the decoder is not initialized to.
+
+        Where,
+            k = number of points
+        """
+        if self.sam_decoder is None:
+            raise RuntimeError(
+                "Please call `prepare` before calling `generate_mask_from_points`."
+            )
+
+        # Prepare inputs for decoder
+        # Preprocess point coordinates for decoder
+        point_coords = self.preprocess_point_coordinates(
+            np.expand_dims(np.array(point_coords), 0), self.orig_img_size
+        )
+        point_labels = torch.Tensor(point_labels).unsqueeze(0)
+        mask_input = torch.zeros(self.sam_decoder.get_input_spec()["mask_input"][0])
+        has_mask_input = torch.zeros((1,))
+
+        upscaled_masks, scores, masks = self.sam_decoder(
+            self.image_embeddings,
+            point_coords,
+            point_labels,
+            mask_input,
+            has_mask_input,
+        )
+
+        # Reduce noise from generated masks
+        upscaled_masks = self.postprocess_mask(upscaled_masks)
+        masks = self.postprocess_mask(masks)
+
+        return upscaled_masks, scores, masks
+
+    def postprocess_mask(self, generated_mask: torch.Tensor):
+        """Drop mask values below the model threshold to minimize noise"""
+        return generated_mask > self.sam_qaihm_wrapper.get_sam().mask_threshold
diff --git a/qai_hub_models/models/sam/demo.py b/qai_hub_models/models/sam/demo.py
new file mode 100644
index 00000000..44bf5086
--- /dev/null
+++ b/qai_hub_models/models/sam/demo.py
@@ -0,0 +1,96 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+import argparse
+
+import numpy as np
+
+from qai_hub_models.models.sam.app import SAMApp
+from qai_hub_models.models.sam.model import (
+    DEFAULT_MODEL_TYPE,
+    MODEL_ASSET_VERSION,
+    MODEL_ID,
+    SMALL_MODEL_TYPE,
+    SAMQAIHMWrapper,
+)
+from qai_hub_models.models.sam.utils import show_image
+from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image
+
+IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store(
+    MODEL_ID, MODEL_ASSET_VERSION, "truck.jpg"
+)
+
+
+# Run the SAM end-to-end model on a given image.
+# The demo will output the image with the segmentation mask applied for the input points
+def main(is_test: bool = False):
+    # Demo parameters
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--image",
+        type=str,
+        default=IMAGE_ADDRESS,
+        help="image file path or URL",
+    )
+    parser.add_argument(
+        "--model-type",
+        type=str,
+        default=DEFAULT_MODEL_TYPE,
+        help=f"SAM model type to load. 
Tested with model type `{DEFAULT_MODEL_TYPE}`.", + ) + parser.add_argument( + "--point-coordinates", + type=str, + default="500,375;", + help="Comma separated x and y coordinate. Multiple coordinate separated by `;`." + " e.g. `x1,y1;x2,y2`. Default: `500,375;`", + ) + parser.add_argument( + "--single-mask-mode", + type=bool, + default=True, + help="If True, returns single mask. For multiple points multiple masks could lead to better results.", + ) + args = parser.parse_args(["--model-type", SMALL_MODEL_TYPE] if is_test else None) + + coordinates = list(filter(None, args.point_coordinates.split(";"))) + + # Load Application + app = SAMApp(SAMQAIHMWrapper.from_pretrained(model_type=args.model_type)) + + # Load Image + image = load_image(args.image) + image_data = np.asarray(image) + + # Prepare SAM for decoder for given input image: + # i.e. run SAM encoder to generate and cache image embeddings + app.prepare(image_data, single_mask_mode=args.single_mask_mode) + + # Point segmentation using decoder + print("\n** Performing point segmentation **\n") + + # Input points + input_coords = [] + input_labels = [] + + for coord in coordinates: + coord_split = coord.split(",") + if len(coord_split) != 2: + raise RuntimeError( + f"Expecting comma separated x and y coordinate. Provided {coord_split}." + ) + + input_coords.append([int(coord_split[0]), int(coord_split[1])]) + # Set label to `1` to include current point for segmentation + input_labels.append(1) + + # Generate masks with given input points + generated_mask, *_ = app.generate_mask_from_points(input_coords, input_labels) + + if not is_test: + show_image(image, generated_mask) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/sam/export.py b/qai_hub_models/models/sam/export.py new file mode 100644 index 00000000..01c81a28 --- /dev/null +++ b/qai_hub_models/models/sam/export.py @@ -0,0 +1,229 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Mapping, Optional, Tuple + +import qai_hub as hub +import torch +from torch.utils.mobile_optimizer import MobileOptimizerType, optimize_for_mobile + +from qai_hub_models.models.sam import Model +from qai_hub_models.utils.args import ( + export_parser, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, +) + +ALL_COMPONENTS = ["SAMDecoder", "SAMEncoder"] +DEFAULT_COMPONENTS = ["SAMDecoder"] + + +def export_model( + device: str = "Samsung Galaxy S23", + components: Optional[List[str]] = None, + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Mapping[ + str, Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] +] | List[str]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + components: List of sub-components of the model that will be exported. + Each component is compiled and profiled separately. + Defaults to ALL_COMPONENTS if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` + + Returns: + A Mapping from component_name to a 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). 
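+
+    Example (illustrative; requires Qualcomm AI Hub access):
+        # Compile, profile and run only the decoder on the default hosted device
+        export_model(components=["SAMDecoder"], skip_downloading=True)
+        # Export both components for a specific device
+        export_model(device="Samsung Galaxy S23", components=ALL_COMPONENTS)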
+ """ + model_name = "sam" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + component_arg = components + components = components or DEFAULT_COMPONENTS + for component in components: + if component not in ALL_COMPONENTS: + raise ValueError(f"Invalid component {component}.") + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "sam", + "Segment-Anything-Model", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + component_arg, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + components_dict = {} + if "SAMDecoder" in components: + components_dict["SAMDecoder"] = model.get_sam_decoder() + if "SAMEncoder" in components: + components_dict["SAMEncoder"] = model.get_sam_encoder() + + compile_jobs = {} + for component_name, component in components_dict.items(): + # Trace the model + input_spec = component.get_input_spec() + source_model = torch.jit.trace(component, make_torch_inputs(input_spec)) + + source_model = optimize_for_mobile( + source_model, + optimization_blocklist={ + MobileOptimizerType.HOIST_CONV_PACKED_PARAMS, + MobileOptimizerType.INSERT_FOLD_PREPACK_OPS, + MobileOptimizerType.CONV_BN_FUSION, + }, + ) + + # 2. Compile the models to an on-device asset + model_compile_options = component.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image --force_channel_last_output output_0", + ) + print(f"Optimizing model {component_name} to run on-device.") + compile_jobs[component_name] = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=f"{component_name}", + options=model_compile_options, + ) + + # 3. Profile the model assets on real devices + profile_jobs = {} + if not skip_profiling: + for component_name in components: + print(f"Profiling model {component_name} on a hosted device.") + profile_jobs[component_name] = hub.submit_profile_job( + model=compile_jobs[component_name].get_target_model(), + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_jobs = {} + if not skip_inferencing: + for component_name in components: + print( + f"Running inference for {component_name} on a hosted device with example inputs." + ) + sample_inputs = components_dict[component_name].sample_inputs() + inference_jobs[component_name] = hub.submit_inference_job( + model=compile_jobs[component_name].get_target_model(), + inputs=sample_inputs, + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 5. Download the model assets to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + for component_name, compile_job in compile_jobs.items(): + target_model = compile_job.get_target_model() + target_model.download( + str(output_path / f"{model_name}_{component_name}.tflite") + ) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + for component_name in components: + profile_job = profile_jobs[component_name] + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + for component_name in components: + inference_job = inference_jobs[component_name] + sample_inputs = components_dict[component_name].sample_inputs() + torch_out = torch_inference(components_dict[component_name], sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + return { + component_name: ( + compile_jobs[component_name], + profile_jobs.get(component_name, None), + inference_jobs.get(component_name, None), + ) + for component_name in components + } + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser( + model_cls=Model, components=ALL_COMPONENTS, supports_qnn=False + ) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/sam/info.yaml b/qai_hub_models/models/sam/info.yaml new file mode 100644 index 00000000..59b6b5c5 --- /dev/null +++ b/qai_hub_models/models/sam/info.yaml @@ -0,0 +1,35 @@ +name: Segment-Anything-Model +# id must match with the model dir name in qai_hub_models +id: sam +status: public +headline: High-quality segmentation mask generation around any object in an image + with simple input prompt. +domain: Computer Vision +use_case: Semantic Segmentation +description: Transformer based encoder-decoder where prompts specify what to segment + in an image thereby allowing segmentation without the need for additional training. + The image encoder generates embeddings and the lightweight decoder operates on the + embeddings for point and mask based image segmentation. +tags: + - foundation +research_paper: https://arxiv.org/abs/2304.02643 +research_paper_title: Segment Anything +license: https://github.com/facebookresearch/segment-anything/blob/main/LICENSE +source_repo: https://github.com/facebookresearch/segment-anything +technical_details: + Model checkpoint: vit_l + Input resolution: 720p (720x1280) + Number of parameters (SAMDecoder): 5.11M + Model size (SAMDecoder): 19.6 MB +applicable_scenarios: + - Factory Automation + - Robotic Navigation + - Camera +form_factors: + - Phone + - Tablet +related_models: [] +has_static_banner: yes +has_animated_banner: yes +license_type: apache-2.0 +dataset: [] diff --git a/qai_hub_models/models/sam/model.py b/qai_hub_models/models/sam/model.py new file mode 100644 index 00000000..1e9561c8 --- /dev/null +++ b/qai_hub_models/models/sam/model.py @@ -0,0 +1,291 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os +import sys +import tempfile +from typing import Callable, Tuple + +import numpy as np +import torch + +from qai_hub_models.utils.asset_loaders import ( + CachedWebModelAsset, + load_path, + maybe_clone_git_repo, +) +from qai_hub_models.utils.base_model import BaseModel, CollectionModel +from qai_hub_models.utils.input_spec import InputSpec + +# This is a fork of https://github.com/facebookresearch/segment-anything +# with changes to make the SAM decoder traceable +SAM_SOURCE_REPO = "https://github.com/dmckinnon/segment-anything" +SAM_SOURCE_REPO_COMMIT = "0bc06e062ca883c2524bfa79061807c535eb0d51" +MODEL_ID = __name__.split(".")[-2] +DEFAULT_MODEL_TYPE = "vit_l" +SMALL_MODEL_TYPE = "vit_b" +MODEL_REGISTERY = { + "vit_b": "sam_vit_b_01ec64.pth", # 91M params + "vit_l": "sam_vit_l_0b3195.pth", # 308M params + "vit_h": "sam_vit_h_4b8939.pth", # 636M params +} +MODEL_ASSET_VERSION = 1 + + +class SAMQAIHMWrapper(CollectionModel): + """ + QAIHM version of segment-anything (https://github.com/dmckinnon/segment-anything) + + QAIHM fork modifies following from parent segment-anything repo: + 1. window_partition in encoder works on rank-5 tensor instead of rank-6 tensor + 2. SamOnnxModel accepts `orig_img_size` to use static upsample instead of dynamic upsample + """ + + def __init__( + self, + sam: torch.nn.Module, + sam_encoder: Callable, + SamOnnxModel, + ResizeLongestSide, + SamPredictor, + ): + self.sam = sam + self.sam_encoder = sam_encoder + self.SamOnnxModel = SamOnnxModel + self.ResizeLongestSide = ResizeLongestSide + self.SamPredictor = SamPredictor + + def get_sam(self) -> torch.nn.Module: + return self.sam + + def get_sam_encoder(self) -> Callable: + return self.sam_encoder + + # Create a new decoder + def get_sam_decoder( + self, orig_img_size: Tuple[int, int] = (720, 1280), single_mask_mode=True + ) -> Callable: + self.sam_decoder = SegmentAnythingONNXDecoder( + self, + single_mask_mode=single_mask_mode, + orig_img_size=orig_img_size, + ) + return self.sam_decoder + + @classmethod + def from_pretrained(cls, model_type: str = DEFAULT_MODEL_TYPE) -> SAMQAIHMWrapper: + ( + sam_model_registry, + SamOnnxModel, + ResizeLongestSide, + SamPredictor, + ) = _patch_sam_with_qaihm_modules() + sam = load_sam_model(sam_model_registry, model_type) + sam_encoder = SegmentAnythingEncoder(sam, ResizeLongestSide) + return cls(sam, sam_encoder, SamOnnxModel, ResizeLongestSide, SamPredictor) + + def __call__(self, image: torch.Tensor) -> torch.Tensor: + raise NotImplementedError("Cannot call SAMQAIHMWrapper directly") + + +class SegmentAnythingEncoder(BaseModel): + """Exportable SAM encoder""" + + def __init__( + self, + sam: torch.nn.Module, + ResizeLongestSide: Callable, + ) -> None: + super().__init__() + self.sam = sam + self.transforms = ResizeLongestSide(self.sam.image_encoder.img_size) + + def forward(self, image: torch.Tensor) -> torch.Tensor: + """ + Run SAM Image encoder and returns image embeddings + + Parameters: + image: Pixel values pre-procewindow_partitionssed for encoder consumption. + Range: float[0, 255] normalized via preprocess_input_image + 3-channel Color Space: RGB + + Returns: + image_embeddings + """ + return self.sam.image_encoder(image) + + def get_input_spec( + self, + height: int = 720, + width: int = 1280, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. 
+ # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. + + preprocessed_image = self.preprocess_input_image( + np.ones((height, width, 3), dtype=np.uint8) + ) + return {"image": (preprocessed_image.shape, "float32")} + + def preprocess_input_image(self, input_image: np.ndarray): + """Transform input image to work with SAM encoder""" + transformed_image = torch.as_tensor( + self.transforms.apply_image(input_image) + ).type(torch.float32) + transformed_image = transformed_image.permute(2, 0, 1).contiguous()[ + None, :, :, : + ] + + self.input_size = transformed_image.shape[-2:] + self.original_size = input_image.shape[:2] + return self.sam.preprocess(transformed_image) + + @classmethod + def from_pretrained(cls): + return SAMQAIHMWrapper.from_pretrained().get_sam_encoder() + + +class SegmentAnythingONNXDecoder(BaseModel): + """Exportable SAM decoder""" + + def __init__( + self, + sam_qaihm_wrapper: SAMQAIHMWrapper, + orig_img_size: Tuple[int, int] = (720, 1280), + single_mask_mode: bool = True, + ) -> None: + super().__init__() + self.sam = sam_qaihm_wrapper.get_sam() + self.sam_decoder = sam_qaihm_wrapper.SamOnnxModel( + self.sam, orig_img_size=orig_img_size, return_single_mask=single_mask_mode + ) + self.transforms = sam_qaihm_wrapper.ResizeLongestSide( + self.sam.image_encoder.img_size + ) + + def forward( + self, + image_embeddings: torch.Tensor, + point_coords: torch.Tensor, + point_labels: torch.Tensor, + mask_input: torch.Tensor, + has_mask_input: torch.Tensor, + ) -> torch.Tensor: + """ + Run SAM lightweight decoder and return generated mask for given points + + Parameters: + image_embeddings: torch.Tensor of shape [1, emb_dim, emb_size, emb_size] + Image embeddings generated by Encoder + point_coords: torch.Tensor of shape [1, k, 2] + Point coordinates from input image for segmentation + point_labels: torch.Tensor of shape [1, k] + Point Labels to select/de-select given point for segmentation + e.g. Corresponding value is 1 if this point is to be included, otherwise 0 + mask_input: torch.Tensor of shape [1, 1, 4 * image_emd_size, 4 * image_emb_size] + Input mask to consider for segmentation. If using point based segmentation, set this to torch.zeros() + has_mask_input: torch.Tensor of shape [1] + If has value [1] then mask_input is used, otherwise no. + If using point based segmentation, can set this to [0] + + Returns: + upscaled_masks: torch.Tensor of shape [1, k, ] + score: torch.Tensor of shape [1, k] + masks: torch.Tensor of shape [1, k, 256, 256] + Use this low resolution masks to further slice and upscale for resolutions that Decoder is not intialized to. + + Where, + k = number of points + """ + return self.sam_decoder( + image_embeddings, point_coords, point_labels, mask_input, has_mask_input + ) + + def get_input_spec( + self, + num_of_points=1, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. 
+ embed_dim = self.sam.prompt_encoder.embed_dim + embed_size = self.sam.prompt_encoder.image_embedding_size + mask_input_size = [4 * x for x in embed_size] + + input_spec = { + "image_embeddings": ((1, embed_dim, *embed_size), "float32"), + "point_coords": ((1, num_of_points, 2), "float32"), + "point_labels": ((1, num_of_points), "float32"), + "mask_input": ((1, 1, *mask_input_size), "float32"), + "has_mask_input": ((1,), "float32"), + } + return input_spec + + @classmethod + def from_pretrained(cls): + return SAMQAIHMWrapper.from_pretrained().get_sam_decoder() + + +def _get_weights_url(model_type: str = DEFAULT_MODEL_TYPE): + """Convert from names of weights files to the url for the weights file""" + if model_type not in MODEL_REGISTERY.keys(): + raise RuntimeError(f"Weights not found for model type `{model_type}`.") + + return CachedWebModelAsset( + f"https://dl.fbaipublicfiles.com/segment_anything/{MODEL_REGISTERY[model_type]}", + MODEL_ID, + MODEL_ASSET_VERSION, + f"{MODEL_REGISTERY[model_type]}", + ) + + +def load_sam_model( + sam_model_registry, model_type: str = DEFAULT_MODEL_TYPE +) -> torch.nn.Module: + """Loads SAM model of given model type""" + weights_url = _get_weights_url(model_type) + with tempfile.TemporaryDirectory() as tmpdir: + weights_path = load_path(weights_url, tmpdir) + sam = sam_model_registry[model_type](weights_path) + sam.eval() + return sam + + +def _patch_sam_with_qaihm_modules(): + """ + Patches segment-anything with modifications + + Returns: + sam_model_registry: semgment_anything.sam_model_registry + dictionary of str (model_type) to callback to build respective model + SamOnnxModel: torch.nn.Module + light-weight decoder with fix image size + ResizeLongestSide: segment_anything.utils.transforms.ResizeLongestSide + Resizing utility updated to work with input image size + SamPredictor: segment_anything.SamPredictor + Python class wrapper to call image encoder - decoder + """ + sam_repo_path = maybe_clone_git_repo( + SAM_SOURCE_REPO, SAM_SOURCE_REPO_COMMIT, MODEL_ID, MODEL_ASSET_VERSION + ) + cwd = os.getcwd() + try: + # Patch path for this load only + sys.path.insert(0, sam_repo_path) + + # import required modules and utilities + from segment_anything import SamPredictor, sam_model_registry + from segment_anything.utils.onnx import SamOnnxModel + from segment_anything.utils.transforms import ResizeLongestSide + + return sam_model_registry, SamOnnxModel, ResizeLongestSide, SamPredictor + finally: + # Reset global state + os.chdir(cwd) + sys.path.remove(sam_repo_path) diff --git a/qai_hub_models/models/sam/perf.yaml b/qai_hub_models/models/sam/perf.yaml new file mode 100644 index 00000000..7d2b32ee --- /dev/null +++ b/qai_hub_models/models/sam/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: SAMDecoder + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 16696.0 + throughput: 59.89458552946814 + estimated_peak_memory_range: + min: 71995392 + max: 131856168 + primary_compute_unit: GPU 
+ precision: fp16 + layer_info: + layers_on_npu: 0 + layers_on_gpu: 356 + layers_on_cpu: 8 + total_layers: 364 + job_id: j1pvlewr5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-22T17:43:03.980523Z' diff --git a/qai_hub_models/models/sam/requirements.txt b/qai_hub_models/models/sam/requirements.txt new file mode 100644 index 00000000..116e68bc --- /dev/null +++ b/qai_hub_models/models/sam/requirements.txt @@ -0,0 +1,4 @@ +matplotlib +opencv_python +pycocotools +requests diff --git a/qai_hub_models/models/sam/test.py b/qai_hub_models/models/sam/test.py new file mode 100644 index 00000000..640a4286 --- /dev/null +++ b/qai_hub_models/models/sam/test.py @@ -0,0 +1,80 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np +import pytest +import torch + +from qai_hub_models.models.sam import App +from qai_hub_models.models.sam.demo import IMAGE_ADDRESS +from qai_hub_models.models.sam.demo import main as demo_main +from qai_hub_models.models.sam.model import SMALL_MODEL_TYPE, SAMQAIHMWrapper +from qai_hub_models.utils.asset_loaders import load_image +from qai_hub_models.utils.testing import skip_clone_repo_check_fixture # noqa: F401 + + +@pytest.fixture(scope="module") +def input_image_data() -> np.ndarray: + return np.asarray(load_image(IMAGE_ADDRESS)) + + +def test_e2e_numerical( + input_image_data: np.ndarray, + monkeypatch, + skip_clone_repo_check_fixture, +): + """Verify our driver produces the correct segmentation as source PyTorch model""" + monkeypatch.setattr("builtins.input", lambda: "y") + + sam_wrapper = SAMQAIHMWrapper.from_pretrained(SMALL_MODEL_TYPE) + sam_model = sam_wrapper.get_sam() + sam_predictor = sam_wrapper.SamPredictor(sam_model) + sam_decoder = sam_wrapper.SamOnnxModel( + sam_model, orig_img_size=input_image_data.shape[:2], return_single_mask=True + ) + + sam_predictor.set_image(input_image_data) + # QAIHM SAMApp for segmentation + sam_app = App(sam_wrapper) + # Prepare image for segmentation + sam_app.prepare(input_image_data) + + # Ensure image embeddings match with source model + np.allclose( + sam_predictor.features.detach().numpy(), + sam_app.image_embeddings.detach().numpy(), + ) + + # + # Verify Decoder output is correct + # + + # Create input for decoder + embed_size = sam_predictor.model.prompt_encoder.image_embedding_size + mask_input_size = [4 * x for x in embed_size] + decoder_inputs = { + "image_embeddings": sam_predictor.features.detach(), + "point_coords": torch.randint(low=0, high=500, size=(1, 2), dtype=torch.float), + "point_labels": torch.randint(low=0, high=4, size=(1,), dtype=torch.float), + "mask_input": torch.zeros(1, 1, *mask_input_size, dtype=torch.float), + "has_mask_input": torch.tensor([1], dtype=torch.float), + } + + # Perform inference for decoder models + obs_decoder_output = sam_app.generate_mask_from_points( + decoder_inputs["point_coords"], + 
decoder_inputs["point_labels"], + ) + + decoder_inputs["point_coords"] = decoder_inputs["point_coords"].unsqueeze(0) + decoder_inputs["point_labels"] = decoder_inputs["point_labels"].unsqueeze(0) + exp_decoder_output = sam_decoder(*decoder_inputs.values()) + + # Ensure segmentation upscaled mask, scores and low-res masks match with source model + for exp, obs in zip(exp_decoder_output, obs_decoder_output): + np.allclose(exp.detach().numpy(), obs.detach().numpy()) + + +def test_demo(skip_clone_repo_check_fixture): + demo_main(is_test=True) diff --git a/qai_hub_models/models/sam/utils.py b/qai_hub_models/models/sam/utils.py new file mode 100644 index 00000000..60c63a4d --- /dev/null +++ b/qai_hub_models/models/sam/utils.py @@ -0,0 +1,25 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import matplotlib.pyplot as plt +import numpy as np + + +## Helper routines +def show_image(image, masks=None): + """Show input image with mask applied""" + plt.figure(figsize=(10, 10)) + plt.imshow(image) + if masks is not None: + _show_mask(masks, plt.gca()) + plt.axis("off") + plt.show() + + +def _show_mask(mask, ax): + """Helper routine to add mask over existing plot""" + color = np.array([30 / 255, 144 / 255, 255 / 255, 0.6]) + h, w = mask.shape[-2:] + mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) + ax.imshow(mask_image) diff --git a/qai_hub_models/models/sesr_m5/README.md b/qai_hub_models/models/sesr_m5/README.md new file mode 100644 index 00000000..2cad4d05 --- /dev/null +++ b/qai_hub_models/models/sesr_m5/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [SESR-M5: Upscale images in real time](https://aihub.qualcomm.com/models/sesr_m5) + +SESR M5 performs efficient on-device upscaling of images. + +This is based on the implementation of SESR-M5 found +[here](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/sesr). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/sesr_m5). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.sesr_m5.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.sesr_m5.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. 
+- The license for the original implementation of SESR-M5 can be found
+  [here](https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf).
+
+
+## References
+* [Collapsible Linear Blocks for Super-Efficient Super Resolution](https://arxiv.org/abs/2103.09404)
+* [Source Model Implementation](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/sesr)
diff --git a/qai_hub_models/models/sesr_m5/__init__.py b/qai_hub_models/models/sesr_m5/__init__.py
new file mode 100644
index 00000000..93d6fcaa
--- /dev/null
+++ b/qai_hub_models/models/sesr_m5/__init__.py
@@ -0,0 +1,10 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.super_resolution.app import (  # noqa: F401
+    SuperResolutionApp as App,
+)
+
+from .model import MODEL_ID  # noqa: F401
+from .model import SESR_M5 as Model  # noqa: F401
diff --git a/qai_hub_models/models/sesr_m5/demo.py b/qai_hub_models/models/sesr_m5/demo.py
new file mode 100644
index 00000000..d8b0c0f9
--- /dev/null
+++ b/qai_hub_models/models/sesr_m5/demo.py
@@ -0,0 +1,25 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.super_resolution.demo import super_resolution_demo
+from qai_hub_models.models.sesr_m5.model import MODEL_ASSET_VERSION, MODEL_ID, SESR_M5
+from qai_hub_models.utils.asset_loaders import CachedWebModelAsset
+
+IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store(
+    MODEL_ID, MODEL_ASSET_VERSION, "sesr_m5_demo.jpg"
+)
+
+
+# Run SESR-M5 end-to-end on a sample image.
+# The demo will display an upscaled image.
+def main(is_test: bool = False):
+    super_resolution_demo(
+        model_cls=SESR_M5,
+        default_image=IMAGE_ADDRESS,
+        is_test=is_test,
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/qai_hub_models/models/sesr_m5/export.py b/qai_hub_models/models/sesr_m5/export.py
new file mode 100644
index 00000000..76a2ce93
--- /dev/null
+++ b/qai_hub_models/models/sesr_m5/export.py
@@ -0,0 +1,194 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY.
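The `__init__.py` above re-exports `SuperResolutionApp` as `App` and `SESR_M5` as `Model`, so the demo's flow can also be driven from plain Python rather than the CLI. A minimal sketch: the local file names are placeholders, and the assumption that `upscale_image` returns a list whose first element is a PIL image is taken from this model's test.

```python
from PIL import Image

from qai_hub_models.models.sesr_m5 import App, Model

model = Model.from_pretrained()            # float SESR-M5 checkpoint (4x upscaling)
app = App(model=model)                     # SuperResolutionApp wrapper around the model
image = Image.open("low_res_input.jpg")    # placeholder path to a local low-resolution image
upscaled = app.upscale_image(image)[0]     # first element is the upscaled PIL image
upscaled.save("sesr_m5_upscaled.png")      # placeholder output path
```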
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.sesr_m5 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "sesr_m5" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "sesr_m5", + "SESR-M5", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/sesr_m5/info.yaml b/qai_hub_models/models/sesr_m5/info.yaml new file mode 100644 index 00000000..081f7b0b --- /dev/null +++ b/qai_hub_models/models/sesr_m5/info.yaml @@ -0,0 +1,32 @@ +name: SESR-M5 +# id must match with the model dir name in qai_hub_models +id: sesr_m5 +status: public +headline: Upscale images in real time. +domain: Computer Vision +use_case: Super Resolution +description: SESR M5 performs efficient on-device upscaling of images. 
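The `--force_channel_last_input image` / `--force_channel_last_output output_0` compile options and the `transpose_channel_first_to_last` / `transpose_channel_last_to_first` helpers in the export script above exist because PyTorch tensors are channel-first (NCHW) while the compiled on-device asset works with channel-last (NHWC) buffers. A plain-NumPy illustration of that layout change, not the helpers' actual implementation:

```python
import numpy as np

nchw = np.random.rand(1, 3, 128, 128).astype(np.float32)  # matches the "image" input spec
nhwc = nchw.transpose(0, 2, 3, 1)                          # channel-first -> channel-last
assert nhwc.shape == (1, 128, 128, 3)

restored = nhwc.transpose(0, 3, 1, 2)                      # channel-last -> channel-first
assert np.array_equal(nchw, restored)                      # the layout round trip is lossless
```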
+tags: [] +research_paper: https://arxiv.org/abs/2103.09404 +research_paper_title: Collapsible Linear Blocks for Super-Efficient Super Resolution +license: https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf +source_repo: https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/sesr +technical_details: + Model checkpoint: sesr_m5_4x_checkpoint_float32 + Input resolution: 128x128 + Number of parameters: 343K + Model size: 1.32 MB +applicable_scenarios: + - Virtual Real Estate Tours + - Gaming + - ARVR +form_factors: + - Phone + - Tablet +related_models: + - xlsr + - esrgan +has_static_banner: yes +has_animated_banner: yes +license_type: other +dataset: [] diff --git a/qai_hub_models/models/sesr_m5/model.py b/qai_hub_models/models/sesr_m5/model.py new file mode 100644 index 00000000..2b0b6ec8 --- /dev/null +++ b/qai_hub_models/models/sesr_m5/model.py @@ -0,0 +1,86 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator +from qai_hub_models.evaluators.superres_evaluator import SuperResolutionOutputEvaluator +from qai_hub_models.models._shared.sesr.common import _load_sesr_source_model +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 3 +# Weights and config stored in S3 are sourced from +# https://github.com/quic/aimet-model-zoo/blob/develop/aimet_zoo_torch/sesr/model/model_cards/sesr_m5_2x_w8a8.json +# https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/sesr_m5_2x_checkpoint_float32.pth.tar +SESR_WEIGHTS = "sesr_m5_4x_checkpoint_float32.pth.tar" +SCALING_FACTOR = 4 +NUM_CHANNELS = 16 +NUM_LBLOCKS = 5 + + +class SESR_M5(BaseModel): + """Exportable SESR M5 super resolution model, end-to-end.""" + + def __init__( + self, + sesr_model: torch.nn.Module, + ) -> None: + super().__init__() + self.model = sesr_model + + @classmethod + def from_pretrained(cls) -> SESR_M5: + model = _load_sesr_source_model( + MODEL_ID, + MODEL_ASSET_VERSION, + SCALING_FACTOR, + NUM_CHANNELS, + NUM_LBLOCKS, + ) + dst = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, SESR_WEIGHTS + ).fetch() + checkpoint = torch.load(dst, map_location=torch.device("cpu")) + model.load_state_dict(checkpoint["state_dict"]) + model.eval() + + return cls(model) + + def get_evaluator(self) -> BaseEvaluator: + return SuperResolutionOutputEvaluator() + + def forward(self, image: torch.Tensor) -> torch.Tensor: + """ + Run SESR M5 on `image`, and produce an upscaled image + + Parameters: + image: Pixel values pre-processed for model consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + image: Pixel values + Range: float[0, 1] + 3-channel Color Space: RGB + """ + + return self.model(image) + + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 128, + width: int = 128, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. 
+ # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. + return {"image": ((batch_size, num_channels, height, width), "float32")} diff --git a/qai_hub_models/models/sesr_m5/perf.yaml b/qai_hub_models/models/sesr_m5/perf.yaml new file mode 100644 index 00000000..95e47b7b --- /dev/null +++ b/qai_hub_models/models/sesr_m5/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: SESR-M5 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 2214.0 + throughput: 451.6711833785005 + estimated_peak_memory_range: + min: 49152 + max: 8233656 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 22 + layers_on_gpu: 0 + layers_on_cpu: 3 + total_layers: 25 + job_id: jz5wl394p + job_status: Passed + torchscript_onnx_qnn: + inference_time: 2149.0 + throughput: 465.33271288971616 + estimated_peak_memory_range: + min: 212992 + max: 77434640 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 32 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 32 + job_id: jmg9zy4mp + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:36:38.760826Z' diff --git a/qai_hub_models/models/sesr_m5/test.py b/qai_hub_models/models/sesr_m5/test.py new file mode 100644 index 00000000..8412b7dc --- /dev/null +++ b/qai_hub_models/models/sesr_m5/test.py @@ -0,0 +1,38 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
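In `perf.yaml`, `inference_time` is reported in microseconds and `throughput` is its reciprocal expressed in inferences per second; a quick consistency check against the SESR-M5 numbers above:

```python
# Values copied from the SESR-M5 perf.yaml above (TFLite and QNN rows, Samsung Galaxy S23 Ultra).
for inference_time_us, reported_throughput in [
    (2214.0, 451.6711833785005),
    (2149.0, 465.33271288971616),
]:
    derived = 1_000_000 / inference_time_us  # microseconds per inference -> inferences per second
    assert abs(derived - reported_throughput) < 1e-3
```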
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models._shared.super_resolution.app import SuperResolutionApp +from qai_hub_models.models.sesr_m5.demo import IMAGE_ADDRESS +from qai_hub_models.models.sesr_m5.demo import main as demo_main +from qai_hub_models.models.sesr_m5.model import MODEL_ASSET_VERSION, MODEL_ID, SESR_M5 +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import assert_most_same, skip_clone_repo_check + +OUTPUT_IMAGE_LOCAL_PATH = "sesr_m5_demo_output.png" +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, OUTPUT_IMAGE_LOCAL_PATH +) + + +@skip_clone_repo_check +def test_task(): + image = load_image(IMAGE_ADDRESS) + model = SESR_M5.from_pretrained() + app = SuperResolutionApp(model=model) + output_img = app.upscale_image(image)[0] + + output_img.save("/local/mnt/workspace/sesr_m5_output.png") + + expected_output_image = load_image(OUTPUT_IMAGE_ADDRESS) + assert_most_same( + np.asarray(expected_output_image, dtype=np.float32), + np.array(output_img).astype(np.float32), + diff_tol=0.01, + ) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/sesr_m5_quantized/README.md b/qai_hub_models/models/sesr_m5_quantized/README.md new file mode 100644 index 00000000..7b5358b3 --- /dev/null +++ b/qai_hub_models/models/sesr_m5_quantized/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [SESR-M5-Quantized: Upscale images in real time](https://aihub.qualcomm.com/models/sesr_m5_quantized) + +SESR M5 performs efficient on-device upscaling of images. + +This is based on the implementation of SESR-M5-Quantized found +[here](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/sesr). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/sesr_m5_quantized). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.sesr_m5_quantized.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.sesr_m5_quantized.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of SESR-M5-Quantized can be found + [here](https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf). 
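The export command in the README above can also be invoked programmatically; `export_model` accepts the same options the CLI documents under `--help`. A rough sketch, assuming Qualcomm® AI Hub access is configured (without it the script takes the `export_without_hub_access` path instead of returning job objects):

```python
from qai_hub_models.models.sesr_m5_quantized.export import export_model

# Compile for a hosted Samsung Galaxy S23 and download the .tflite asset to
# ./build/sesr_m5_quantized/, skipping the slower profiling and inference steps.
compile_job, profile_job, inference_job = export_model(
    device="Samsung Galaxy S23",
    skip_profiling=True,
    skip_inferencing=True,
)
print(compile_job)  # profile_job and inference_job are None because those steps were skipped
```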
+ + +## References +* [Collapsible Linear Blocks for Super-Efficient Super Resolution](https://arxiv.org/abs/2103.09404) +* [Source Model Implementation](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/sesr) diff --git a/qai_hub_models/models/sesr_m5_quantized/__init__.py b/qai_hub_models/models/sesr_m5_quantized/__init__.py new file mode 100644 index 00000000..ef829a53 --- /dev/null +++ b/qai_hub_models/models/sesr_m5_quantized/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 + SuperResolutionApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import SESR_M5Quantizable as Model # noqa: F401 diff --git a/qai_hub_models/models/sesr_m5_quantized/demo.py b/qai_hub_models/models/sesr_m5_quantized/demo.py new file mode 100644 index 00000000..cb08ed1f --- /dev/null +++ b/qai_hub_models/models/sesr_m5_quantized/demo.py @@ -0,0 +1,29 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.demo import super_resolution_demo +from qai_hub_models.models.sesr_m5_quantized.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + SESR_M5Quantizable, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset +from qai_hub_models.utils.base_model import TargetRuntime + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "sesr_m5_quantized_demo.jpg" +) + + +def main(is_test: bool = False): + super_resolution_demo( + SESR_M5Quantizable, + default_image=IMAGE_ADDRESS, + is_test=is_test, + available_target_runtimes=[TargetRuntime.TFLITE], + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/sesr_m5_quantized/export.py b/qai_hub_models/models/sesr_m5_quantized/export.py new file mode 100644 index 00000000..f3cda7c2 --- /dev/null +++ b/qai_hub_models/models/sesr_m5_quantized/export.py @@ -0,0 +1,196 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
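Besides the Qualcomm® AI Hub compile flow in the auto-generated export script below, the AIMET-backed wrapper can also emit an ONNX model bundled with its quantization encodings, as exercised by this model's `test_aimet_export`. A sketch of that path; it assumes the AIMET dependencies noted in `model.py` are installed, and the output directory is a placeholder that should already exist, mirroring the temporary directory the test uses:

```python
import os

from qai_hub_models.models.sesr_m5_quantized import Model

model = Model.from_pretrained()  # wraps an AIMET QuantizationSimModel with pre-computed encodings
output_zip = model.convert_to_onnx_and_aimet_encodings(
    "build/sesr_m5_quantized",   # placeholder output directory
    model.get_input_spec(),
)
# The archive is expected to contain <ClassName>.aimet/<ClassName>.onnx plus the
# matching .encodings file, as asserted by test_aimet_export.
assert os.path.exists(output_zip)
```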
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.sesr_m5_quantized import Model +from qai_hub_models.utils.args import ( + TargetRuntime, + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) +from qai_hub_models.utils.qnn_helpers import get_qnn_inputs + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "sesr_m5_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "sesr_m5_quantized", + "SESR-M5-Quantized", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = model.convert_to_hub_source_model( + target_runtime, output_path, input_spec + ) + if target_runtime == TargetRuntime.TFLITE: + quant_calibration_data = None + else: + quant_calibration_data = model.get_calibration_data(target_runtime, input_spec) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + calibration_data=quant_calibration_data, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + hub_inputs = sample_inputs + if target_runtime == TargetRuntime.QNN: + hub_inputs = get_qnn_inputs(compile_job, sample_inputs) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/sesr_m5_quantized/info.yaml b/qai_hub_models/models/sesr_m5_quantized/info.yaml new file mode 100644 index 00000000..b4ac2e2e --- /dev/null +++ b/qai_hub_models/models/sesr_m5_quantized/info.yaml @@ -0,0 +1,30 @@ +name: SESR-M5-Quantized +# id must match with the model dir name in qai_hub_models +id: sesr_m5_quantized +status: public +headline: Upscale images in real time. +domain: Computer Vision +use_case: Super Resolution +description: SESR M5 performs efficient on-device upscaling of images. 
+tags: [quantized] +research_paper: https://arxiv.org/abs/2103.09404 +research_paper_title: Collapsible Linear Blocks for Super-Efficient Super Resolution +license: https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf +source_repo: https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/sesr +technical_details: + Model checkpoint: sesr_m5_4x_checkpoint_int8 + Input resolution: 128x128 + Number of parameters: 32.3K + Model size: 45.9 KB +applicable_scenarios: + - Virtual Real Estate Tours + - Gaming + - ARVR +form_factors: + - Phone + - Tablet +related_models: [xlsr, xlsr_quantized, quicksrnetlarge] +has_static_banner: yes +has_animated_banner: yes +license_type: other +dataset: [] diff --git a/qai_hub_models/models/sesr_m5_quantized/model.py b/qai_hub_models/models/sesr_m5_quantized/model.py new file mode 100644 index 00000000..c3cf1b57 --- /dev/null +++ b/qai_hub_models/models/sesr_m5_quantized/model.py @@ -0,0 +1,99 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch +from aimet_torch.cross_layer_equalization import equalize_model +from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim + +from qai_hub_models.models._shared.sesr.common import _load_sesr_source_model +from qai_hub_models.models.sesr_m5.model import ( + NUM_CHANNELS, + NUM_LBLOCKS, + SCALING_FACTOR, + SESR_M5, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +# This verifies aimet is installed, and this must be included first. +from qai_hub_models.utils.quantization_aimet import ( # isort: skip + AIMETQuantizableMixin, +) + + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 2 + +# Weights and config stored in S3 are sourced from +# https://github.com/quic/aimet-model-zoo/blob/develop/aimet_zoo_torch/sesr/model/model_cards/sesr_m5_4x_w8a8.json: +# https://github.com/quic/aimet-model-zoo/releases/download/phase_2_january_artifacts/sesr_m5_4x_checkpoint_int8.pth +# and +# https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.js +# Encodings were generated with AIMET QuantSim library +QUANTIZED_WEIGHTS = "sesr_m5_4x_checkpoint_int8.pth" +AIMET_ENCODINGS = "sesr_m5_quantized_encodings.json" +AIMET_CONFIG = "default_config_per_channel.json" + + +class SESR_M5Quantizable(AIMETQuantizableMixin, SESR_M5): + """QuickSRNetLarge with post train quantization support. + + Supports only 8 bit weights and activations, and only loads pre-quantized checkpoints. 
+ Support for quantizing using your own weights & data will come at a later date.""" + + def __init__( + self, + sesr_model: QuantizationSimModel, + ) -> None: + SESR_M5.__init__(self, sesr_model.model) + AIMETQuantizableMixin.__init__( + self, sesr_model, needs_onnx_direct_aimet_export=False + ) + + @classmethod + def from_pretrained( + cls, + aimet_encodings: str | None = "DEFAULT", + ) -> SESR_M5Quantizable: + # Load Model + sesr = _load_sesr_source_model( + MODEL_ID, MODEL_ASSET_VERSION, SCALING_FACTOR, NUM_CHANNELS, NUM_LBLOCKS + ) + input_shape = SESR_M5.get_input_spec()["image"][0] + equalize_model(sesr, input_shape) + + # Download weights and quantization parameters + weights = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, QUANTIZED_WEIGHTS + ).fetch() + aimet_config = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, AIMET_CONFIG + ).fetch() + + # Load the model weights and quantization parameters + state_dict = torch.load(weights, map_location=torch.device("cpu"))["state_dict"] + # Here we collapse before loading the quantized weights. + # The model is collapsed pre-quantization - see + # https://github.com/quic/aimet-model-zoo/blob/d09d2b0404d10f71a7640a87e9d5e5257b028802/aimet_zoo_torch/common/super_resolution/models.py#L110 + sesr.collapse() + sesr.load_state_dict(state_dict) + sim = QuantizationSimModel( + sesr, + quant_scheme="tf_enhanced", + default_param_bw=8, + default_output_bw=8, + config_file=aimet_config, + dummy_input=torch.rand(input_shape), + ) + if aimet_encodings: + if aimet_encodings == "DEFAULT": + aimet_encodings = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, AIMET_ENCODINGS + ).fetch() + load_encodings_to_sim(sim, aimet_encodings) + + sim.model.eval() + + return cls(sim) diff --git a/qai_hub_models/models/sesr_m5_quantized/perf.yaml b/qai_hub_models/models/sesr_m5_quantized/perf.yaml new file mode 100644 index 00000000..14a85ccd --- /dev/null +++ b/qai_hub_models/models/sesr_m5_quantized/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: SESR-M5-Quantized + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 1743.0 + throughput: 573.7234652897304 + estimated_peak_memory_range: + min: 24576 + max: 2845656 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 13 + layers_on_gpu: 0 + layers_on_cpu: 3 + total_layers: 16 + job_id: jz5wl31jp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:34:35.502394Z' diff --git 
a/qai_hub_models/models/sesr_m5_quantized/test.py b/qai_hub_models/models/sesr_m5_quantized/test.py new file mode 100644 index 00000000..46c55138 --- /dev/null +++ b/qai_hub_models/models/sesr_m5_quantized/test.py @@ -0,0 +1,87 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import os +import tempfile +import zipfile + +import numpy as np +import torch + +from qai_hub_models.models._shared.super_resolution.app import SuperResolutionApp +from qai_hub_models.models.sesr_m5_quantized.demo import IMAGE_ADDRESS +from qai_hub_models.models.sesr_m5_quantized.demo import main as demo_main +from qai_hub_models.models.sesr_m5_quantized.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + SESR_M5Quantizable, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import assert_most_close, skip_clone_repo_check + +OUTPUT_IMAGE_LOCAL_PATH = "sesr_m5_quantized_demo_output.png" +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, OUTPUT_IMAGE_LOCAL_PATH +) + + +@skip_clone_repo_check +def test_task(): + # AIMET Quantization Simulator introduces randomness. Eliminate that for this test. + torch.manual_seed(0) + image = load_image(IMAGE_ADDRESS) + model = SESR_M5Quantizable.from_pretrained() + app = SuperResolutionApp(model=model) + app_output_image = app.predict(image)[0] + + output_image = load_image(OUTPUT_IMAGE_ADDRESS) + assert_most_close( + np.asarray(app_output_image, dtype=np.float32) / 255, + np.asarray(output_image, dtype=np.float32) / 255, + diff_tol=0.005, + rtol=0.02, + atol=0.2, + ) + + +@skip_clone_repo_check +def test_trace(): + image = load_image(IMAGE_ADDRESS) + output_image = load_image(OUTPUT_IMAGE_ADDRESS) + app = SuperResolutionApp( + SESR_M5Quantizable.from_pretrained().convert_to_quantized_torchscript() + ) + app_output_image = app.predict(image)[0] + + assert_most_close( + np.asarray(app_output_image, dtype=np.float32) / 255, + np.asarray(output_image, dtype=np.float32) / 255, + diff_tol=0.005, + rtol=0.02, + atol=0.2, + ) + + +@skip_clone_repo_check +def test_aimet_export(): + model = SESR_M5Quantizable.from_pretrained() + name = model.__class__.__name__ + with tempfile.TemporaryDirectory() as tmpdir: + output_zip = model.convert_to_onnx_and_aimet_encodings( + tmpdir, + model.get_input_spec(), + ) + assert os.path.exists(output_zip) + with zipfile.ZipFile(output_zip, "r") as zip: + assert f"{name}.aimet/" in zip.namelist() + assert f"{name}.aimet/{name}.encodings" in zip.namelist() + assert f"{name}.aimet/{name}.onnx" in zip.namelist() + assert len(zip.filelist) == 3 + + # No test of torchscipt and aimet encodings due to #8954 + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/shufflenet_v2/README.md b/qai_hub_models/models/shufflenet_v2/README.md new file mode 100644 index 00000000..41addb61 --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [Shufflenet-v2: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/shufflenet_v2) + +ShufflenetV2 is a machine learning model that can classify images from the Imagenet 
dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of Shufflenet-v2 found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/shufflenet_v2). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.shufflenet_v2.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.shufflenet_v2.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of Shufflenet-v2 can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). + + +## References +* [ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design](https://arxiv.org/abs/1807.11164) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py) diff --git a/qai_hub_models/models/shufflenet_v2/__init__.py b/qai_hub_models/models/shufflenet_v2/__init__.py new file mode 100644 index 00000000..19450f19 --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import ShufflenetV2 as Model # noqa: F401 diff --git a/qai_hub_models/models/shufflenet_v2/demo.py b/qai_hub_models/models/shufflenet_v2/demo.py new file mode 100644 index 00000000..e1c488de --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.shufflenet_v2.model import ShufflenetV2 + + +def main(is_test: bool = False): + imagenet_demo(ShufflenetV2, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/shufflenet_v2/export.py b/qai_hub_models/models/shufflenet_v2/export.py new file mode 100644 index 00000000..1aef879e --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.shufflenet_v2 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. 
+ **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "shufflenet_v2" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "shufflenet_v2", + "Shufflenet-v2", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/shufflenet_v2/info.yaml b/qai_hub_models/models/shufflenet_v2/info.yaml new file mode 100644 index 00000000..b33359f4 --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2/info.yaml @@ -0,0 +1,40 @@ +name: Shufflenet-v2 +# id must match with the model dir name in qai_hub_models +id: shufflenet_v2 +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: ShufflenetV2 is a machine learning model that can classify images from + the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. +use_case: Image Classification +tags: [] +research_paper: https://arxiv.org/abs/1807.11164 +research_paper_title: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture + Design' +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 1.36M + Model size: 5.25 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/shufflenet_v2/model.py b/qai_hub_models/models/shufflenet_v2/model.py new file mode 100644 index 00000000..9a3b8e77 --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class ShufflenetV2(ImagenetClassifier): + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ImagenetClassifier: + net = tv_models.shufflenet_v2_x0_5(weights=weights) + return cls(net) diff --git a/qai_hub_models/models/shufflenet_v2/perf.yaml b/qai_hub_models/models/shufflenet_v2/perf.yaml new file mode 100644 index 00000000..8c57e692 --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: Shufflenet-v2 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 921.0 + throughput: 1085.7763300760043 + estimated_peak_memory_range: + min: 16384 + max: 2322736 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 202 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 202 + job_id: j1gly27e5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 321.0 + throughput: 3115.264797507788 + estimated_peak_memory_range: + min: 622592 + max: 4181728 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 157 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 157 + job_id: jw568zvvg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:27:51.522582Z' diff --git a/qai_hub_models/models/shufflenet_v2/test.py b/qai_hub_models/models/shufflenet_v2/test.py new file mode 100644 index 00000000..1198a8ad --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2/test.py @@ -0,0 +1,23 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.shufflenet_v2.demo import main as demo_main +from qai_hub_models.models.shufflenet_v2.model import MODEL_ID, ShufflenetV2 + + +def test_task(): + run_imagenet_classifier_test(ShufflenetV2.from_pretrained(), MODEL_ID) + + +def test_trace(): + run_imagenet_classifier_trace_test(ShufflenetV2.from_pretrained()) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/shufflenet_v2_quantized/README.md b/qai_hub_models/models/shufflenet_v2_quantized/README.md new file mode 100644 index 00000000..0eeda872 --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2_quantized/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [Shufflenet-v2Quantized: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/shufflenet_v2_quantized) + +ShufflenetV2 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of Shufflenet-v2Quantized found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/shufflenet_v2_quantized). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.shufflenet_v2_quantized.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.shufflenet_v2_quantized.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of Shufflenet-v2Quantized can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). 
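For reference, the float wrapper in `shufflenet_v2/model.py` above is a thin layer over torchvision's `shufflenet_v2_x0_5`. A minimal sketch of a raw forward pass: the (1, 3, 224, 224) shape follows the 224x224 input resolution listed in `info.yaml`, while the single-tensor forward signature and 1000-way output are assumptions based on the shared `ImagenetClassifier` base class and the ImageNet-1k dataset entry.

```python
import torch

from qai_hub_models.models.shufflenet_v2 import Model

model = Model.from_pretrained()            # torchvision shufflenet_v2_x0_5 with IMAGENET1K_V1 weights
model.eval()

image_tensor = torch.rand(1, 3, 224, 224)  # resolution from info.yaml; real inputs need ImageNet preprocessing
with torch.no_grad():
    scores = model(image_tensor)
print(scores.shape)                        # expected: torch.Size([1, 1000]) for an ImageNet-1k head
```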
+ + +## References +* [ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design](https://arxiv.org/abs/1807.11164) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py) diff --git a/qai_hub_models/models/shufflenet_v2_quantized/__init__.py b/qai_hub_models/models/shufflenet_v2_quantized/__init__.py new file mode 100644 index 00000000..0e5511ab --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2_quantized/__init__.py @@ -0,0 +1,11 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) +from qai_hub_models.models.shufflenet_v2_quantized.model import MODEL_ID # noqa: F401 +from qai_hub_models.models.shufflenet_v2_quantized.model import ( # noqa: F401 + ShufflenetV2Quantizable as Model, +) diff --git a/qai_hub_models/models/shufflenet_v2_quantized/demo.py b/qai_hub_models/models/shufflenet_v2_quantized/demo.py new file mode 100644 index 00000000..bf864ee1 --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2_quantized/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.shufflenet_v2_quantized.model import ShufflenetV2Quantizable + + +def main(is_test: bool = False): + imagenet_demo(ShufflenetV2Quantizable, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/shufflenet_v2_quantized/export.py b/qai_hub_models/models/shufflenet_v2_quantized/export.py new file mode 100644 index 00000000..4e3c03fe --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2_quantized/export.py @@ -0,0 +1,199 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.shufflenet_v2_quantized import Model +from qai_hub_models.utils.args import ( + TargetRuntime, + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) +from qai_hub_models.utils.qnn_helpers import get_qnn_inputs + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "shufflenet_v2_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "shufflenet_v2_quantized", + "Shufflenet-v2Quantized", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = model.convert_to_hub_source_model( + target_runtime, output_path, input_spec + ) + if target_runtime == TargetRuntime.TFLITE: + quant_calibration_data = None + else: + quant_calibration_data = model.get_calibration_data(target_runtime, input_spec) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + calibration_data=quant_calibration_data, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + hub_inputs = sample_inputs + if target_runtime == TargetRuntime.QNN: + hub_inputs = get_qnn_inputs(compile_job, sample_inputs) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/shufflenet_v2_quantized/info.yaml b/qai_hub_models/models/shufflenet_v2_quantized/info.yaml new file mode 100644 index 00000000..2f7e3b8a --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2_quantized/info.yaml @@ -0,0 +1,41 @@ +name: Shufflenet-v2Quantized +# id must match with the model dir name in qai_hub_models +id: shufflenet_v2_quantized +status: public +headline: Imagenet classifier and general purpose backbone. 
+domain: Computer Vision +description: ShufflenetV2 is a machine learning model that can classify images from + the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. +use_case: Image Classification +tags: + - quantized +research_paper: https://arxiv.org/abs/1807.11164 +research_paper_title: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture + Design' +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 1.37M + Model size: 4.42 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/shufflenet_v2_quantized/model.py b/qai_hub_models/models/shufflenet_v2_quantized/model.py new file mode 100644 index 00000000..5d2c4f06 --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2_quantized/model.py @@ -0,0 +1,76 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +# isort: off +# This verifies aimet is installed, and this must be included first. +from qai_hub_models.utils.quantization_aimet import ( + AIMETQuantizableMixin, +) + +# isort: on + +import torch +from aimet_torch.cross_layer_equalization import equalize_model +from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim + +from qai_hub_models.models.shufflenet_v2.model import ShufflenetV2 +from qai_hub_models.utils.aimet.config_loader import get_per_channel_aimet_config +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +DEFAULT_ENCODINGS = "shufflenet_v2_quantized_encodings.json" + + +class ShufflenetV2Quantizable(AIMETQuantizableMixin, ShufflenetV2): + """ShufflenetV2 with post train quantization support. + + Supports only 8 bit weights and activations, and only loads pre-quantized checkpoints. + Support for quantizing using your own weights & data will come at a later date.""" + + def __init__( + self, + sim_model: QuantizationSimModel, + ) -> None: + ShufflenetV2.__init__(self, sim_model.model) + AIMETQuantizableMixin.__init__( + self, sim_model, needs_onnx_direct_aimet_export=True + ) + + @classmethod + def from_pretrained( + cls, + aimet_encodings: str | None = "DEFAULT", + ) -> "ShufflenetV2Quantizable": + """ + Parameters: + aimet_encodings: + if "DEFAULT": Loads the model with aimet encodings calibrated on imagenette. + elif None: Doesn't load any encodings. Used when computing encodings. + else: Interprets as a filepath and loads the encodings stored there. 
+ """ + model = ShufflenetV2.from_pretrained() + input_shape = model.get_input_spec()["image_tensor"][0] + + equalize_model(model, input_shape) + sim = QuantizationSimModel( + model.net, + quant_scheme="tf_enhanced", + default_param_bw=8, + default_output_bw=8, + config_file=get_per_channel_aimet_config(), + dummy_input=torch.rand(input_shape), + ) + + if aimet_encodings: + if aimet_encodings == "DEFAULT": + aimet_encodings = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_ENCODINGS + ).fetch() + load_encodings_to_sim(sim, aimet_encodings) + + sim.model.eval() + return cls(sim) diff --git a/qai_hub_models/models/shufflenet_v2_quantized/perf.yaml b/qai_hub_models/models/shufflenet_v2_quantized/perf.yaml new file mode 100644 index 00000000..877fce75 --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2_quantized/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: Shufflenet-v2Quantized + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 30460.0 + throughput: 32.829940906106366 + estimated_peak_memory_range: + min: 294912 + max: 4752264 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 221 + layers_on_gpu: 0 + layers_on_cpu: 17 + total_layers: 238 + job_id: jnp1nw8kg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 355.0 + throughput: 2816.9014084507044 + estimated_peak_memory_range: + min: 0 + max: 3208840 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 122 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 122 + job_id: jvgddqvkg + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:21:57.529965Z' diff --git a/qai_hub_models/models/shufflenet_v2_quantized/test.py b/qai_hub_models/models/shufflenet_v2_quantized/test.py new file mode 100644 index 00000000..339d2f16 --- /dev/null +++ b/qai_hub_models/models/shufflenet_v2_quantized/test.py @@ -0,0 +1,40 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.shufflenet_v2_quantized.demo import main as demo_main +from qai_hub_models.models.shufflenet_v2_quantized.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + ShufflenetV2Quantizable, +) + + +def test_task(): + run_imagenet_classifier_test( + ShufflenetV2Quantizable.from_pretrained(), + MODEL_ID, + asset_version=MODEL_ASSET_VERSION, + diff_tol=0.005, + rtol=0.02, + atol=0.2, + ) + + +def test_trace(): + run_imagenet_classifier_trace_test( + ShufflenetV2Quantizable.from_pretrained(), + diff_tol=0.01, + rtol=0.02, + atol=0.2, + is_quantized=True, + ) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/sinet/README.md b/qai_hub_models/models/sinet/README.md new file mode 100644 index 00000000..660348f9 --- /dev/null +++ b/qai_hub_models/models/sinet/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [SINet: Lightweight portrait segmentation for background removal](https://aihub.qualcomm.com/models/sinet) + +SINet is a machine learning model that is designed to segment people from close-up portrait images in real time. + +This is based on the implementation of SINet found +[here](https://github.com/clovaai/ext_portrait_segmentation). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/sinet). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.sinet.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.sinet.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of SINet can be found + [here](https://github.com/clovaai/ext_portrait_segmentation/blob/master/LICENSE). 
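
## Python usage sketch

The segmentation app can also be used directly from Python. The sketch below mirrors the demo script in this folder; `portrait.png` is a placeholder path rather than a shipped asset, and the app expects a 224x224 RGB portrait as described in `app.py`.

```python
# Minimal background-segmentation sketch. "portrait.png" is a placeholder,
# not an asset shipped with the repository.
from PIL import Image

from qai_hub_models.models.sinet import App, Model

model = Model.from_pretrained()      # fetches SINet.pth from the source repo
app = App(model)                     # SINetApp: normalize -> segment -> tint

portrait = Image.open("portrait.png").convert("RGB").resize((224, 224))
segmented = app.predict(portrait)    # PIL image with person/background tinted
segmented.save("sinet_demo_output.png")
```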
+ + +## References +* [SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and Information Blocking Decoder](https://arxiv.org/abs/1911.09099) +* [Source Model Implementation](https://github.com/clovaai/ext_portrait_segmentation) diff --git a/qai_hub_models/models/sinet/__init__.py b/qai_hub_models/models/sinet/__init__.py new file mode 100644 index 00000000..6c3a4d7d --- /dev/null +++ b/qai_hub_models/models/sinet/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .app import SINetApp as App # noqa: F401 +from .model import MODEL_ID # noqa: F401 +from .model import SINet as Model # noqa: F401 diff --git a/qai_hub_models/models/sinet/app.py b/qai_hub_models/models/sinet/app.py new file mode 100644 index 00000000..00a29a2f --- /dev/null +++ b/qai_hub_models/models/sinet/app.py @@ -0,0 +1,108 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from collections import OrderedDict +from typing import Callable, Tuple + +import numpy as np +import PIL +import torch +from PIL.Image import Image + + +def preprocess_image(image: Image) -> torch.Tensor: + """ + Preprocesses images to be run through SINet + as prescribed here: + https://github.com/clovaai/ext_portrait_segmentation/blob/9bc1bada1cb7bd17a3a80a2964980f4b4befef5b/etc/Visualize_webCam.py#L100C1-L109C53 + + Parameters: + image: Input image to be run through the classifier model. + + Returns: + img_tensor: torch tensor 1x3xHxW to be directly passed to the model. + """ + # These mean and std values were computed using the prescribed training data + # and process in https://github.com/clovaai/ext_portrait_segmentation/blob/9bc1bada1cb7bd17a3a80a2964980f4b4befef5b/data/loadData.py#L44 + mean = [113.05697, 120.847824, 133.786] + std = [65.05263, 65.393776, 67.238205] + img_array = np.array(image) + img = img_array.astype(np.float32) + img -= np.array(mean).reshape(1, 1, 3) + img /= np.array(std).reshape(1, 1, 3) + + img /= 255 + img = img.transpose((2, 0, 1)) + img_tensor = torch.from_numpy(img) + img_tensor = torch.unsqueeze(img_tensor, 0) # add a batch dimension + + return img_tensor + + +class SINetApp: + """ + This class consists of light-weight "app code" that is required to + perform end to end inference with SINet. + + For a given image input, the app will: + * Pre-process the image (normalize) + * Run image segmentation + * Convert the raw output into probabilities using softmax + """ + + def __init__(self, model: Callable[[torch.Tensor], OrderedDict]): + self.model = model + + def predict( + self, image: Image, raw_output: bool = False, show_face: bool = True + ) -> Image | Tuple[np.ndarray, np.ndarray]: + """ + From the provided image or tensor, segment the image + + Parameters: + image: A PIL Image in RGB format of size 224x224. 
+ raw_output: if True, output returned is the raw class predictions per pixel + show_face: if True, image output returned is the background + + Returns: + If raw_output is true, returns: + masks: np.ndarray + a tuple of arrays 1x2xHxW of mask predictions per pixel as 0 or 1 + + Otherwise, returns: + segmented_images: List[PIL.Image] + Image of face segmented out or background segmented out + """ + + input_tensor = preprocess_image(image) + with torch.no_grad(): + output = self.model(input_tensor) + + face_map = (output[0].data.cpu() > 0).numpy()[0] + bg_map = output[0].max(0)[1].byte().data.cpu().numpy() + + if raw_output: + return face_map, bg_map + + idx_fg = face_map == 1 + idx_bg = bg_map == 1 + + img_orig = np.array(image.getdata()).reshape(image.size[0], image.size[1], 3) + + # Display foreground blue-tinted, background red-tinted + seg_img = 0 * img_orig + seg_img[:, :, 0] = ( + img_orig[:, :, 0] * idx_fg * 0.9 + img_orig[:, :, 0] * idx_bg * 0.1 + ) + seg_img[:, :, 1] = ( + img_orig[:, :, 1] * idx_fg * 0.4 + img_orig[:, :, 0] * idx_bg * 0.6 + ) + seg_img[:, :, 2] = ( + img_orig[:, :, 2] * idx_fg * 0.4 + img_orig[:, :, 0] * idx_bg * 0.6 + ) + out_image = PIL.Image.fromarray(seg_img.astype(np.uint8)) + + return out_image diff --git a/qai_hub_models/models/sinet/demo.py b/qai_hub_models/models/sinet/demo.py new file mode 100644 index 00000000..51972075 --- /dev/null +++ b/qai_hub_models/models/sinet/demo.py @@ -0,0 +1,46 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models.sinet.app import SINetApp +from qai_hub_models.models.sinet.model import MODEL_ASSET_VERSION, MODEL_ID, SINet +from qai_hub_models.utils.args import ( + demo_model_from_cli_args, + get_model_cli_parser, + get_on_device_demo_parser, + validate_on_device_demo_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.display import display_or_save_image + +INPUT_IMAGE_LOCAL_PATH = "sinet_demo.png" +INPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, INPUT_IMAGE_LOCAL_PATH +) + + +def main(is_test: bool = False): + # Demo parameters + parser = get_model_cli_parser(SINet) + parser = get_on_device_demo_parser(parser, add_output_dir=True) + parser.add_argument( + "--image", + type=str, + default=INPUT_IMAGE_ADDRESS, + help="image file path or URL.", + ) + args = parser.parse_args([] if is_test else None) + model = demo_model_from_cli_args(SINet, args) + validate_on_device_demo_args(args, SINet.get_model_id()) + + # load image and model + image = load_image(args.image) + input_image = image.convert("RGB") + app = SINetApp(model) + output = app.predict(input_image, False, False) + if not is_test: + display_or_save_image(output, args.output_dir, "sinet_demo_output.png") + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/sinet/export.py b/qai_hub_models/models/sinet/export.py new file mode 100644 index 00000000..4b0fc545 --- /dev/null +++ b/qai_hub_models/models/sinet/export.py @@ -0,0 +1,197 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.sinet import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). 
+ """ + model_name = "sinet" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "sinet", + "SINet", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/sinet/info.yaml b/qai_hub_models/models/sinet/info.yaml new file mode 100644 index 00000000..94bcbfc6 --- /dev/null +++ b/qai_hub_models/models/sinet/info.yaml @@ -0,0 +1,35 @@ +name: SINet +# id must match with the model dir name in qai_hub_models +id: sinet +status: public +headline: Lightweight portrait segmentation for background removal. +domain: Computer Vision +use_case: Semantic Segmentation +description: SINet is a machine learning model that is designed to segment people + from close-up portrait images in real time. +tags: [] +research_paper: https://arxiv.org/abs/1911.09099 +research_paper_title: 'SINet: Extreme Lightweight Portrait Segmentation Networks with + Spatial Squeeze Modules and Information Blocking Decoder' +license: https://github.com/clovaai/ext_portrait_segmentation/blob/master/LICENSE +source_repo: https://github.com/clovaai/ext_portrait_segmentation +technical_details: + Model checkpoint: SINet.pth + Input resolution: 224x224 + Number of parameters: 91.9K + Model size: 430 KB +applicable_scenarios: + - Background replacement + - Face removal +related_models: + - fcn_resnet50 + - unet_segmentation + - mediapipe_selfie +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: no +license_type: other +dataset: [] diff --git a/qai_hub_models/models/sinet/model.py b/qai_hub_models/models/sinet/model.py new file mode 100644 index 00000000..ea82116f --- /dev/null +++ b/qai_hub_models/models/sinet/model.py @@ -0,0 +1,129 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os + +import torch + +from qai_hub_models.utils.asset_loaders import ( + CachedWebModelAsset, + SourceAsRoot, + load_torch, +) +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +SINET_SOURCE_REPOSITORY = "https://github.com/clovaai/ext_portrait_segmentation" +SINET_SOURCE_REPO_COMMIT = "9bc1bada1cb7bd17a3a80a2964980f4b4befef5b" +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +DEFAULT_WEIGHTS = "SINet.pth" +NUM_CLASSES = 2 + + +class SINet(BaseModel): + """Exportable SINet portrait segmentation application, end-to-end.""" + + def __init__( + self, + sinet_model: torch.nn.Module, + ) -> None: + super().__init__() + self.model = sinet_model + + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> SINet: + sinet_model = _load_sinet_source_model_from_weights(weights) + + return cls(sinet_model.eval()) + + def forward(self, image: torch.Tensor) -> torch.Tensor: + """ + Run SINet on `image`, and produce a tensor of classes for segmentation + + Parameters: + image: Pixel values pre-processed for model consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + tensor: 1x2xHxW tensor of class logits per pixel + """ + return self.model(image) + + def get_input_spec( + self, + batch_size: int = 1, + num_channels: int = 3, + height: int = 224, + width: int = 224, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. + return {"image": ((batch_size, num_channels, height, width), "float32")} + + +def _get_weightsfile_from_name(weights_name: str = DEFAULT_WEIGHTS): + """Convert from names of weights files to the url for the weights file""" + if weights_name == DEFAULT_WEIGHTS: + return CachedWebModelAsset( + "https://github.com/clovaai/ext_portrait_segmentation/raw/master/result/SINet/SINet.pth", + MODEL_ID, + MODEL_ASSET_VERSION, + "SINet.pth", + ) + else: + raise NotImplementedError(f"Cannot get weights file from name {weights_name}") + + +def _load_sinet_source_model_from_weights( + weights_name_or_path: str, +) -> torch.nn.Module: + with SourceAsRoot( + SINET_SOURCE_REPOSITORY, SINET_SOURCE_REPO_COMMIT, MODEL_ID, MODEL_ASSET_VERSION + ): + if os.path.exists(os.path.expanduser(weights_name_or_path)): + weights_path = os.path.expanduser(weights_name_or_path) + else: + if not os.path.exists(weights_name_or_path): + # Load SINet model from the source repository using the given weights. 
+ weights_path = _get_weightsfile_from_name(weights_name_or_path) + else: + weights_path = None + weights = load_torch(weights_path or weights_name_or_path) + + # Perform a find and replace for .data.size() in SINet's shuffle implementation + # as tracing treats this as a constant, but does not treat .shape as a constant + with open("models/SINet.py", "r") as file: + file_content = file.read() + new_content = file_content.replace(".data.size()", ".shape") + with open("models/SINet.py", "w") as file: + file.write(new_content) + + # import the model arch + from models.SINet import SINet + + # This config is copied from the main function in Sinet.py: + # https://github.com/clovaai/ext_portrait_segmentation/blob/9bc1bada1cb7bd17a3a80a2964980f4b4befef5b/models/SINet.py#L557 + config = [ + [[3, 1], [5, 1]], + [[3, 1], [3, 1]], + [[3, 1], [5, 1]], + [[3, 1], [3, 1]], + [[5, 1], [3, 2]], + [[5, 2], [3, 4]], + [[3, 1], [3, 1]], + [[5, 1], [5, 1]], + [[3, 2], [3, 4]], + [[3, 1], [5, 2]], + ] + + sinet_model = SINet(classes=2, p=2, q=8, config=config, chnn=1) + sinet_model.load_state_dict(weights, strict=True) + + return sinet_model diff --git a/qai_hub_models/models/sinet/perf.yaml b/qai_hub_models/models/sinet/perf.yaml new file mode 100644 index 00000000..936c588b --- /dev/null +++ b/qai_hub_models/models/sinet/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: SINet + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 1817.0 + throughput: 550.357732526142 + estimated_peak_memory_range: + min: 434176 + max: 2872792 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 240 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 240 + job_id: jegnzmkmg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 1192.0 + throughput: 838.9261744966443 + estimated_peak_memory_range: + min: 622592 + max: 51366312 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 187 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 187 + job_id: joprl2wep + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:11:37.141843Z' diff --git a/qai_hub_models/models/sinet/test.py b/qai_hub_models/models/sinet/test.py new file mode 100644 index 00000000..365aad84 --- /dev/null +++ b/qai_hub_models/models/sinet/test.py @@ -0,0 +1,36 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.models.sinet.app import SINetApp +from qai_hub_models.models.sinet.demo import INPUT_IMAGE_ADDRESS +from qai_hub_models.models.sinet.demo import main as demo_main +from qai_hub_models.models.sinet.model import MODEL_ASSET_VERSION, MODEL_ID, SINet +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import skip_clone_repo_check + +OUTPUT_IMAGE_LOCAL_PATH = "sinet_demo_output.png" +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, OUTPUT_IMAGE_LOCAL_PATH +) + + +@skip_clone_repo_check +def test_task(): + image = load_image(INPUT_IMAGE_ADDRESS) + output_image = load_image(OUTPUT_IMAGE_ADDRESS) + app = SINetApp(SINet.from_pretrained()) + app_output_image = app.predict(image, False) + + np.testing.assert_allclose( + np.asarray(app_output_image, dtype=np.float32) / 255, + np.asarray(output_image, dtype=np.float32) / 255, + rtol=0.02, + atol=0.2, + ) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/squeezenet1_1/README.md b/qai_hub_models/models/squeezenet1_1/README.md new file mode 100644 index 00000000..1c424ec0 --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [SqueezeNet-1_1: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/squeezenet1_1) + +SqueezeNet is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of SqueezeNet-1_1 found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/squeezenet.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/squeezenet1_1). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.squeezenet1_1.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.squeezenet1_1.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of SqueezeNet-1_1 can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). 
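
## Python export sketch

The export flow can also be driven programmatically instead of through the CLI. This is a sketch based on the auto-generated `export.py` in this folder: it requires configured Qualcomm® AI Hub access (without it, the script falls back to an offline path), and the device string shown is simply the script's default.

```python
# Programmatic export sketch (requires configured Qualcomm AI Hub credentials).
# Arguments mirror the auto-generated export.py; "Samsung Galaxy S23" is its
# default device, and any device returned by hub.get_devices() should work.
from qai_hub_models.models.squeezenet1_1.export import export_model

jobs = export_model(
    device="Samsung Galaxy S23",
    skip_inferencing=True,       # compile + profile only
    output_dir="build/squeezenet1_1",
)
print(jobs)  # (CompileJob, ProfileJob, None) when inferencing is skipped
```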
+ + +## References +* [SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size](https://arxiv.org/abs/1602.07360) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/squeezenet.py) diff --git a/qai_hub_models/models/squeezenet1_1/__init__.py b/qai_hub_models/models/squeezenet1_1/__init__.py new file mode 100644 index 00000000..b05a5f75 --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import SqueezeNet as Model # noqa: F401 diff --git a/qai_hub_models/models/squeezenet1_1/demo.py b/qai_hub_models/models/squeezenet1_1/demo.py new file mode 100644 index 00000000..75640bd4 --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.squeezenet1_1.model import SqueezeNet + + +def main(is_test: bool = False): + imagenet_demo(SqueezeNet, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/squeezenet1_1/export.py b/qai_hub_models/models/squeezenet1_1/export.py new file mode 100644 index 00000000..920ba9e6 --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.squeezenet1_1 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. 
Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "squeezenet1_1" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "squeezenet1_1", + "SqueezeNet-1_1", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." 
+ ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/squeezenet1_1/info.yaml b/qai_hub_models/models/squeezenet1_1/info.yaml new file mode 100644 index 00000000..3763db61 --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1/info.yaml @@ -0,0 +1,40 @@ +name: SqueezeNet-1_1 +# id must match with the model dir name in qai_hub_models +id: squeezenet1_1 +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: SqueezeNet is a machine learning model that can classify images from + the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. +use_case: Image Classification +tags: + - backbone +research_paper: https://arxiv.org/abs/1602.07360 +research_paper_title: 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters + and <0.5MB model size' +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/squeezenet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 1.24M + Model size: 4.73 MB +applicable_scenarios: + - Gaming + - Robotics +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: no +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/squeezenet1_1/model.py b/qai_hub_models/models/squeezenet1_1/model.py new file mode 100644 index 00000000..6a08155f --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = "squeezenet1_1" +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class SqueezeNet(ImagenetClassifier): + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ImagenetClassifier: + net = tv_models.squeezenet1_1(weights=weights) + return cls(net) diff --git a/qai_hub_models/models/squeezenet1_1/perf.yaml b/qai_hub_models/models/squeezenet1_1/perf.yaml new file mode 100644 index 00000000..6fe797b1 --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: SqueezeNet-1_1 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 212.0 + throughput: 4716.981132075472 + estimated_peak_memory_range: + min: 20480 + max: 1439360 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 39 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 39 + job_id: j1pvlre75 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 280.0 + throughput: 3571.4285714285716 + estimated_peak_memory_range: + min: 20480 + max: 12471928 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 69 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 69 + job_id: j7gjr2o7p + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:17:05.340427Z' diff --git a/qai_hub_models/models/squeezenet1_1/test.py b/qai_hub_models/models/squeezenet1_1/test.py new file mode 100644 index 00000000..0b6f2e19 --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1/test.py @@ -0,0 +1,23 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( + run_imagenet_classifier_test, + run_imagenet_classifier_trace_test, +) +from qai_hub_models.models.squeezenet1_1.demo import main as demo_main +from qai_hub_models.models.squeezenet1_1.model import MODEL_ID, SqueezeNet + + +def test_task(): + run_imagenet_classifier_test(SqueezeNet.from_pretrained(), MODEL_ID) + + +def test_trace(): + run_imagenet_classifier_trace_test(SqueezeNet.from_pretrained()) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/squeezenet1_1_quantized/README.md b/qai_hub_models/models/squeezenet1_1_quantized/README.md new file mode 100644 index 00000000..4829dd66 --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1_quantized/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [SqueezeNet-1_1Quantized: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/squeezenet1_1_quantized) + +SqueezeNet is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of SqueezeNet-1_1Quantized found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/squeezenet.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/squeezenet1_1_quantized). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.squeezenet1_1_quantized.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.squeezenet1_1_quantized.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of SqueezeNet-1_1Quantized can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). 
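
## Checking Qualcomm® AI Hub access

Because the export script submits jobs to Qualcomm® AI Hub, it can be useful to check for access up front. The sketch below uses the same helper that the auto-generated `export.py` imports; the skip flags shown are just one possible combination, not a prescribed workflow.

```python
# Pre-flight sketch: only submit Hub jobs when credentials are configured.
# Uses the same helper imported by the auto-generated export.py in this folder.
from qai_hub_models.utils.qai_hub_helpers import can_access_qualcomm_ai_hub

if can_access_qualcomm_ai_hub():
    from qai_hub_models.models.squeezenet1_1_quantized.export import export_model

    export_model(skip_profiling=True, skip_inferencing=True)  # compile + download only
else:
    print("Qualcomm AI Hub is not configured; sign up via the link above first.")
```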
+ + +## References +* [SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size](https://arxiv.org/abs/1602.07360) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/squeezenet.py) diff --git a/qai_hub_models/models/squeezenet1_1_quantized/__init__.py b/qai_hub_models/models/squeezenet1_1_quantized/__init__.py new file mode 100644 index 00000000..d02b38bd --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1_quantized/__init__.py @@ -0,0 +1,11 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) +from qai_hub_models.models.squeezenet1_1_quantized.model import MODEL_ID # noqa: F401 +from qai_hub_models.models.squeezenet1_1_quantized.model import ( # noqa: F401 + SqueezeNetQuantizable as Model, +) diff --git a/qai_hub_models/models/squeezenet1_1_quantized/demo.py b/qai_hub_models/models/squeezenet1_1_quantized/demo.py new file mode 100644 index 00000000..fdd8fc5d --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1_quantized/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.squeezenet1_1_quantized.model import SqueezeNetQuantizable + + +def main(is_test: bool = False): + imagenet_demo(SqueezeNetQuantizable, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/squeezenet1_1_quantized/export.py b/qai_hub_models/models/squeezenet1_1_quantized/export.py new file mode 100644 index 00000000..552d6632 --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1_quantized/export.py @@ -0,0 +1,199 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.squeezenet1_1_quantized import Model +from qai_hub_models.utils.args import ( + TargetRuntime, + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) +from qai_hub_models.utils.qnn_helpers import get_qnn_inputs + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "squeezenet1_1_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "squeezenet1_1_quantized", + "SqueezeNet-1_1Quantized", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = model.convert_to_hub_source_model( + target_runtime, output_path, input_spec + ) + if target_runtime == TargetRuntime.TFLITE: + quant_calibration_data = None + else: + quant_calibration_data = model.get_calibration_data(target_runtime, input_spec) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + calibration_data=quant_calibration_data, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + hub_inputs = sample_inputs + if target_runtime == TargetRuntime.QNN: + hub_inputs = get_qnn_inputs(compile_job, sample_inputs) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/squeezenet1_1_quantized/info.yaml b/qai_hub_models/models/squeezenet1_1_quantized/info.yaml new file mode 100644 index 00000000..6284192b --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1_quantized/info.yaml @@ -0,0 +1,41 @@ +name: SqueezeNet-1_1Quantized +# id must match with the model dir name in qai_hub_models +id: squeezenet1_1_quantized +status: public +headline: Imagenet classifier and general purpose backbone. 
+domain: Computer Vision +description: SqueezeNet is a machine learning model that can classify images from + the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. +use_case: Image Classification +tags: + - backbone + - quantized +research_paper: https://arxiv.org/abs/1602.07360 +research_paper_title: 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters + and <0.5MB model size' +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/squeezenet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 1.24M + Model size: 1.30 MB +applicable_scenarios: + - Gaming + - Robotics +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: no +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/squeezenet1_1_quantized/model.py b/qai_hub_models/models/squeezenet1_1_quantized/model.py new file mode 100644 index 00000000..554e4a15 --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1_quantized/model.py @@ -0,0 +1,76 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +# isort: off +# This verifies aimet is installed, and this must be included first. +from qai_hub_models.utils.quantization_aimet import ( + AIMETQuantizableMixin, +) + +# isort: on + +import torch +from aimet_torch.cross_layer_equalization import equalize_model +from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim + +from qai_hub_models.models.squeezenet1_1.model import SqueezeNet +from qai_hub_models.utils.aimet.config_loader import get_per_channel_aimet_config +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +DEFAULT_ENCODINGS = "squeezenet1_1_quantized_encodings.json" + + +class SqueezeNetQuantizable(AIMETQuantizableMixin, SqueezeNet): + """SqueezeNet with post train quantization support. + + Supports only 8 bit weights and activations, and only loads pre-quantized checkpoints. + Support for quantizing using your own weights & data will come at a later date.""" + + def __init__( + self, + sim_model: QuantizationSimModel, + ) -> None: + SqueezeNet.__init__(self, sim_model.model) + AIMETQuantizableMixin.__init__( + self, sim_model, needs_onnx_direct_aimet_export=True + ) + + @classmethod + def from_pretrained( + cls, + aimet_encodings: str | None = "DEFAULT", + ) -> "SqueezeNetQuantizable": + """ + Parameters: + aimet_encodings: + if "DEFAULT": Loads the model with aimet encodings calibrated on imagenette. + elif None: Doesn't load any encodings. Used when computing encodings. + else: Interprets as a filepath and loads the encodings stored there. 
+ """ + model = SqueezeNet.from_pretrained() + input_shape = model.get_input_spec()["image_tensor"][0] + + equalize_model(model, input_shape) + sim = QuantizationSimModel( + model.net, + quant_scheme="tf_enhanced", + default_param_bw=8, + default_output_bw=8, + config_file=get_per_channel_aimet_config(), + dummy_input=torch.rand(input_shape), + ) + + if aimet_encodings: + if aimet_encodings == "DEFAULT": + aimet_encodings = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_ENCODINGS + ).fetch() + load_encodings_to_sim(sim, aimet_encodings) + + sim.model.eval() + return cls(sim) diff --git a/qai_hub_models/models/squeezenet1_1_quantized/perf.yaml b/qai_hub_models/models/squeezenet1_1_quantized/perf.yaml new file mode 100644 index 00000000..00894c2f --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1_quantized/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: SqueezeNet-1_1Quantized + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 215.0 + throughput: 4651.162790697675 + estimated_peak_memory_range: + min: 20480 + max: 1657648 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 43 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 43 + job_id: jegnzmovg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 227.0 + throughput: 4405.286343612334 + estimated_peak_memory_range: + min: 622592 + max: 62441592 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 45 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 45 + job_id: joprl2ovp + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:29:43.800896Z' diff --git a/qai_hub_models/models/squeezenet1_1_quantized/test.py b/qai_hub_models/models/squeezenet1_1_quantized/test.py new file mode 100644 index 00000000..bf4f2ec9 --- /dev/null +++ b/qai_hub_models/models/squeezenet1_1_quantized/test.py @@ -0,0 +1,40 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.imagenet_classifier.test_utils import (
+    run_imagenet_classifier_test,
+    run_imagenet_classifier_trace_test,
+)
+from qai_hub_models.models.squeezenet1_1_quantized.demo import main as demo_main
+from qai_hub_models.models.squeezenet1_1_quantized.model import (
+    MODEL_ASSET_VERSION,
+    MODEL_ID,
+    SqueezeNetQuantizable,
+)
+
+
+def test_task():
+    run_imagenet_classifier_test(
+        SqueezeNetQuantizable.from_pretrained(),
+        MODEL_ID,
+        asset_version=MODEL_ASSET_VERSION,
+        diff_tol=0.005,
+        rtol=0.02,
+        atol=0.2,
+    )
+
+
+def test_trace():
+    run_imagenet_classifier_trace_test(
+        SqueezeNetQuantizable.from_pretrained(),
+        diff_tol=0.01,
+        rtol=0.02,
+        atol=0.2,
+        is_quantized=True,
+    )
+
+
+def test_demo():
+    # Verify demo does not crash
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/stable_diffusion_quantized/README.md b/qai_hub_models/models/stable_diffusion_quantized/README.md
new file mode 100644
index 00000000..4bd0f303
--- /dev/null
+++ b/qai_hub_models/models/stable_diffusion_quantized/README.md
@@ -0,0 +1,55 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [Stable-Diffusion: State-of-the-art generative AI model used to generate detailed images conditioned on text descriptions](https://aihub.qualcomm.com/models/stable_diffusion_quantized)
+
+Generates high resolution images from text prompts using a latent diffusion model. This model uses CLIP ViT-L/14 as text encoder, U-Net based latent denoising, and VAE based decoder to generate the final image.
+
+This is based on the implementation of Stable-Diffusion found
+[here](https://github.com/CompVis/stable-diffusion/tree/main). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/stable_diffusion_quantized).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+Install the package via pip:
+```bash
+pip install "qai_hub_models[stable_diffusion_quantized]"
+```
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.stable_diffusion_quantized.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.stable_diffusion_quantized.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub ([sign up](https://aihub.qualcomm.com/) for access).
+A minimal Python sketch of the same export flow is included below.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of Stable-Diffusion can be found
+  [here](https://github.com/CompVis/stable-diffusion/blob/main/LICENSE).
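+
+For reference, here is a minimal Python sketch of the same export flow driven
+programmatically. It is illustrative only; the keyword arguments follow
+`export_model` in [export.py](export.py).
+
+```python
+from qai_hub_models.models.stable_diffusion_quantized.export import export_model
+
+# Export only the text encoder, the smallest of the three components, as a
+# quick check that Qualcomm AI Hub access is set up before exporting the
+# full text_encoder / unet / vae_decoder pipeline.
+jobs = export_model(
+    components=["text_encoder"],
+    skip_inferencing=True,
+    skip_summary=True,
+)
+```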
+ + +## References +* [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) +* [Source Model Implementation](https://github.com/CompVis/stable-diffusion/tree/main) diff --git a/qai_hub_models/models/stable_diffusion_quantized/__init__.py b/qai_hub_models/models/stable_diffusion_quantized/__init__.py new file mode 100644 index 00000000..7cc325fb --- /dev/null +++ b/qai_hub_models/models/stable_diffusion_quantized/__init__.py @@ -0,0 +1,12 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models.stable_diffusion_quantized.model import ( # noqa: F401 + MODEL_ID, +) +from qai_hub_models.models.stable_diffusion_quantized.model import ( # noqa: F401 + StableDiffusionQuantized as Model, +) + +from .app import StableDiffusionApp as App # noqa: F401 diff --git a/qai_hub_models/models/stable_diffusion_quantized/app.py b/qai_hub_models/models/stable_diffusion_quantized/app.py new file mode 100644 index 00000000..48d33849 --- /dev/null +++ b/qai_hub_models/models/stable_diffusion_quantized/app.py @@ -0,0 +1,205 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from typing import Any, Callable, Tuple + +import torch +from diffusers.models.embeddings import get_timestep_embedding + +OUT_H, OUT_W = 512, 512 + + +class StableDiffusionApp: + """ + StableDiffusionApp represents the application code needed to string + together the various neural networks that make up the Stable Diffusion + algorithm. This code is written in Python and uses PyTorch and is meant to + serve as a reference implementation for application in other languages and + for other platforms. + + Please run the app via `demo.py`. + + References + ---------- + * https://arxiv.org/abs/2112.10752 + * https://github.com/apple/ml-stable-diffusion + """ + + def __init__( + self, + text_encoder: Callable[..., Tuple[torch.Tensor, ...]], + vae_decoder: Callable[..., Tuple[torch.Tensor, ...]], + unet: Callable[..., Tuple[torch.Tensor, ...]], + tokenizer: Any, + scheduler: Any, + time_embedding: Any, + ): + """ + Initializes StableDiffusionApp with required neural networks for end-to-end pipeline. + + Parameters + ---------- + text_encoder: + Encoder input text + vae_decoder: + Decoder to decode latent space into output image + unet: + Denoises image in latent space + tokenizer: + Tokenizer for input text. + Output of Tokenizer is fed to text_encoder. + One can experiments with different tokenizers available based on Clip-ViT. + scheduler: + Solver for diffusion steps. + Updates latent space during each iteration. + time_embedding: + Projects time-step into embedding used during denoising in latent space. 
+ """ + + self.text_encoder = text_encoder + self.vae_decoder = vae_decoder + self.unet = unet + self.tokenizer = tokenizer + self.scheduler = scheduler + self.time_embedding = time_embedding + + def get_time_embedding(self, timestep): + timestep = torch.tensor([timestep]) + t_emb = get_timestep_embedding(timestep, 320, True, 0) + emb = self.time_embedding(t_emb) + + return emb + + def _encode_text_prompt(self, prompt: str) -> torch.Tensor: + """ + Takes a text prompt and returns a tensor with its text embedding. + + Parameters + ---------- + prompt: The text prompt to encode. + """ + # Tokenize input prompt + text_input = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + return_tensors="pt", + ) + + # Tokenize empty prompt + max_length = text_input.input_ids.shape[-1] + uncond_input = self.tokenizer( + [""], + padding="max_length", + max_length=max_length, + return_tensors="pt", + ) + + # Embed using the text encoder neural network + # Encode input and empty prompt in one go + print(f"\nExtracting embeddings (inference on TextEncoder)\n{'-' * 50}") + embeddings = self.text_encoder( + [ + text_input.input_ids.type(torch.int32), + uncond_input.input_ids.type(torch.int32), + ] + ) + cond_embeddings, uncond_embeddings = torch.split(embeddings, 1, 0) + return cond_embeddings, uncond_embeddings + + def predict(self, *args, **kwargs): + # See generate_image. + return self.generate_image(*args, **kwargs) + + def generate_image( + self, + prompt: str, + num_steps: int = 50, + seed: int = 0, + guidance_scale: float = 7.5, + ) -> torch.Tensor: + """ + Generate an image using the PyTorch reference neural networks. This + code can be used as a reference for how to glue together the neural + networks in an application. Note that this code relies on a tokenizer + and scheduler from the HuggingFace's diffusers library, so those would + have to be ported to the application as well. + + Parameters + ---------- + prompt: + The text prompt to generate an image from. + num_steps: + The number of steps to run the diffusion process for. Higher value + may lead to better image quality. + seed: + The seed to use for the random number generator. + guidance_scale: + Classifier-free guidance is a method that allows us to control how + strongly the image generation is guided by the prompt. This is done + by always processing two samples at once: an unconditional (using a + text embedding of an empty prompt) and a conditional (using a text + embedding of the provided prompt). Given the noise prediction of + both of these, we linearly interpolate between them based on the + guidance_scale. A guidance scale of 0 is the same as using an empty + prompt. A guidance scale of 1 turns off classifier-free guidance + and is computationally less expensive since it only processes one + sample at a time. Intuitively you may think the rest of guidance + scales are between 0 and 1, but it is common to use a scale greater + than 1 as a method of amplifying the prompt's influence on the + image, pushing it further away from the unconditional sample. + + Returns + ------- + torch.Tensor + The generated image in RGB scaled in [0, 1] with tensor shape (H, + W, 3). The height and the width may depend on the underlying Stable + Diffusion version, but is typically 512x512. 
+ """ + + # Encode text prompt + cond_embeddings, uncond_embeddings = self._encode_text_prompt(prompt) + self.scheduler.set_timesteps(num_steps) + self.scheduler.config.prediction_type = "epsilon" + + # Channel last input + latents_shape = (1, 4, OUT_H // 8, OUT_W // 8) + + generator = torch.manual_seed(seed) + latents = torch.randn(latents_shape, generator=generator) + + latents = latents * self.scheduler.init_noise_sigma + + # Helper method to go back and forth from channel-first to channel-last + def _make_channel_last_torch(input_tensor): + return torch.permute(input_tensor, [0, 2, 3, 1]) + + def _make_channel_first_torch(input_tensor): + return torch.permute(torch.Tensor(input_tensor), [0, 3, 1, 2]) + + for i, t in enumerate(self.scheduler.timesteps): + print(f"\nStep: {i + 1}\n{'-' * 10}") + time_emb = self.get_time_embedding(t) + latent_model_input = self.scheduler.scale_model_input(latents, t) + latent_model_input = _make_channel_last_torch(latent_model_input) + + print(f"\nDenoising image in latent space (inference on UNet)\n{'-' * 50}") + # Denoise image in latent space + noise = self.unet( + [latent_model_input, latent_model_input], + [time_emb, time_emb], + [cond_embeddings, uncond_embeddings], + ) + + noise_cond, noise_uncond = torch.split(noise, 1, 0) + noise_pred = noise_uncond + guidance_scale * (noise_cond - noise_uncond) + + noise_pred = _make_channel_first_torch(noise_pred) + latents = self.scheduler.step(noise_pred, t, latents).prev_sample + + print(f"\nDecoding generated image (inference on VAEDecoder)\n{'-' * 50}") + # Decode generated image from latent space + latents_vae = _make_channel_last_torch(latents) + image = self.vae_decoder(latents_vae) + return image diff --git a/qai_hub_models/models/stable_diffusion_quantized/demo.py b/qai_hub_models/models/stable_diffusion_quantized/demo.py new file mode 100644 index 00000000..de1dacbb --- /dev/null +++ b/qai_hub_models/models/stable_diffusion_quantized/demo.py @@ -0,0 +1,149 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import argparse + +import numpy as np +import qai_hub as hub +from diffusers import DPMSolverMultistepScheduler, UNet2DConditionModel +from PIL import Image +from transformers import CLIPTokenizer + +from qai_hub_models.models.stable_diffusion_quantized.app import StableDiffusionApp +from qai_hub_models.models.stable_diffusion_quantized.model import ( + ClipVITTextEncoder, + Unet, + VAEDecoder, +) +from qai_hub_models.utils.args import add_output_dir_arg +from qai_hub_models.utils.base_model import BasePrecompiledModel +from qai_hub_models.utils.display import display_or_save_image +from qai_hub_models.utils.inference import HubModel +from qai_hub_models.utils.qai_hub_helpers import can_access_qualcomm_ai_hub + +DEFAULT_DEMO_PROMPT = "spectacular view of northern lights from Alaska" +DEFAULT_DEVICE_NAME = "Samsung Galaxy S23 Ultra" + + +def _get_hub_model(input_model: BasePrecompiledModel, device_name=DEFAULT_DEVICE_NAME): + if not can_access_qualcomm_ai_hub(): + raise RuntimeError( + "Stable-diffusion on-device demo requires access to QAI-Hub.\n" + "Please visit https://aihub.qualcomm.com/ and sign-up." 
+ ) + # Upload model + uploaded_model = hub.upload_model(input_model.get_target_model_path()) + inputs = list(input_model.get_input_spec().keys()) + return HubModel(uploaded_model, inputs, hub.Device(name=device_name)) + + +# Run Stable Diffuison end-to-end on a given prompt. The demo will output an +# AI-generated image based on the description in the prompt. +def main(is_test: bool = False): + parser = argparse.ArgumentParser() + parser.add_argument( + "--prompt", + default=DEFAULT_DEMO_PROMPT, + help="Prompt to generate image from.", + ) + parser.add_argument( + "--num-steps", + default=2, + type=int, + help="The number of diffusion iteration steps (higher means better quality).", + ) + parser.add_argument( + "--seed", + default=0, + type=int, + help="Random seed.", + ) + add_output_dir_arg(parser) + parser.add_argument( + "--guidance-scale", + type=float, + default=7.5, + help="Strength of guidance (higher means more influence from prompt).", + ) + parser.add_argument( + "--device-name", + type=str, + default=DEFAULT_DEVICE_NAME, + help="Device to run stable-diffusion demo on.", + ) + args = parser.parse_args([] if is_test else None) + + if not is_test: + print(f"\n{'-' * 100}") + print( + f"** Performing image generation on-device({args.device_name}) with Stable Diffusion **" + ) + print() + print("Prompt:", args.prompt) + print("Number of steps:", args.num_steps) + print("Guidance scale:", args.guidance_scale) + print("Seed:", args.seed) + print() + print( + "Note: This reference demo uses significant amounts of memory and may take 5-10 minutes to run per step." + ) + print(f"{'-' * 100}\n") + + print(f"Downloading model assets\n{'-' * 35}") + # Load target models + text_encoder = ClipVITTextEncoder.from_precompiled() + unet = Unet.from_precompiled() + vae_decoder = VAEDecoder.from_precompiled() + + # Create three HubModel instances to prepare for on-device inference. + # This is similar to initializing PyTorch model to call forward method later. + # Instead of forward, we later submit inference_jobs on QAI-Hub for + # on-device evaluation. + print(f"Uploading model assets on QAI-Hub\n{'-' * 35}") + text_encoder = _get_hub_model(text_encoder, args.device_name) + unet = _get_hub_model(unet, args.device_name) + vae_decoder = _get_hub_model(vae_decoder, args.device_name) + + # Create tokenizer, scheduler and time_embedding required + # for stable-diffusion pipeline. 
+ tokenizer = CLIPTokenizer.from_pretrained( + "stabilityai/stable-diffusion-2-1-base", subfolder="tokenizer", revision="main" + ) + + scheduler = DPMSolverMultistepScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + num_train_timesteps=1000, + ) + + time_embedding = UNet2DConditionModel.from_pretrained( + "runwayml/stable-diffusion-v1-5", subfolder="unet" + ).time_embedding + # Load Application + app = StableDiffusionApp( + text_encoder=text_encoder, + vae_decoder=vae_decoder, + unet=unet, + tokenizer=tokenizer, + scheduler=scheduler, + time_embedding=time_embedding, + ) + + # Generate image + image = app.generate_image( + args.prompt, + num_steps=args.num_steps, + seed=args.seed, + guidance_scale=args.guidance_scale, + ) + + pil_img = Image.fromarray(np.round(image.numpy() * 255).astype(np.uint8)[0]) + + if not is_test: + display_or_save_image(pil_img, args.output_dir) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/stable_diffusion_quantized/export.py b/qai_hub_models/models/stable_diffusion_quantized/export.py new file mode 100644 index 00000000..532f23b2 --- /dev/null +++ b/qai_hub_models/models/stable_diffusion_quantized/export.py @@ -0,0 +1,170 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import warnings +from pathlib import Path +from typing import List, Mapping, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.stable_diffusion_quantized import Model +from qai_hub_models.utils.args import TargetRuntime, export_parser +from qai_hub_models.utils.printing import print_profile_metrics_from_job +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, +) + +ALL_COMPONENTS = ["text_encoder", "unet", "vae_decoder"] +DEFAULT_COMPONENTS = ["text_encoder", "vae_decoder", "unet"] + + +def export_model( + device: str = "Samsung Galaxy S23", + components: Optional[List[str]] = None, + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + profile_options: str = "", + **additional_model_kwargs, +) -> Mapping[str, Tuple[Optional[hub.ProfileJob], Optional[hub.InferenceJob]]] | List[ + str +]: + """ + This function accomplishes 5 main tasks: + + 1. Initialize model. + 2. Upload model assets to hub. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Summarizes the results from profiling. + + Each of the last three steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + components: List of sub-components of the model that will be exported. + Each component is compiled and profiled separately. + Defaults to ALL_COMPONENTS if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_summary: If set, skips waiting for and summarizing results + from profiling. + output_dir: Directory to store generated assets (e.g. 
compiled model). + Defaults to `/build/`. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_precompiled` + + Returns: + A Mapping from component_name to a 2-tuple of: + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "stable_diffusion_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + component_arg = components + components = components or DEFAULT_COMPONENTS + for component in components: + if component not in ALL_COMPONENTS: + raise ValueError(f"Invalid component {component}.") + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "stable_diffusion_quantized", + "Stable-Diffusion", + device, + skip_profiling, + skip_inferencing, + False, + skip_summary, + output_path, + TargetRuntime.QNN, + "", + profile_options, + component_arg, + ) + + # 1. Initialize model + print("Initializing model class") + model = Model.from_precompiled() + components_dict = {} + if "text_encoder" in components: + components_dict["text_encoder"] = model.text_encoder + if "unet" in components: + components_dict["unet"] = model.unet + if "vae_decoder" in components: + components_dict["vae_decoder"] = model.vae_decoder + + # 2. Upload model assets to hub + print("Uploading model assets on hub") + uploaded_models = {} + for component_name in components: + uploaded_models[component_name] = hub.upload_model( + components_dict[component_name].get_target_model_path() + ) + + # 3. Profile the model assets on real devices + profile_jobs = {} + if not skip_profiling: + for component_name in components: + print(f"Profiling model {component_name} on a hosted device.") + profile_jobs[component_name] = hub.submit_profile_job( + model=uploaded_models[component_name], + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_jobs = {} + if not skip_inferencing: + for component_name in components: + print( + f"Running inference for {component_name} on a hosted device with example inputs." + ) + sample_inputs = components_dict[component_name].sample_inputs() + inference_jobs[component_name] = hub.submit_inference_job( + model=uploaded_models[component_name], + inputs=sample_inputs, + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 5. 
Summarize the results from profiling + if not skip_summary and not skip_profiling: + for component_name in components: + profile_job = profile_jobs[component_name] + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + return { + component_name: ( + profile_jobs.get(component_name, None), + inference_jobs.get(component_name, None), + ) + for component_name in components + } + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser( + model_cls=Model, components=ALL_COMPONENTS, exporting_compiled_model=True + ) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/stable_diffusion_quantized/info.yaml b/qai_hub_models/models/stable_diffusion_quantized/info.yaml new file mode 100644 index 00000000..86efe1b8 --- /dev/null +++ b/qai_hub_models/models/stable_diffusion_quantized/info.yaml @@ -0,0 +1,37 @@ +name: Stable-Diffusion +id: stable_diffusion_quantized +status: public +headline: State-of-the-art generative AI model used to generate detailed images conditioned + on text descriptions. +domain: Generative AI +description: Generates high resolution images from text prompts using a latent diffusion + model. This model uses CLIP ViT-L/14 as text encoder, U-Net based latent denoising, + and VAE based decoder to generate the final image. +use_case: Image Generation +tags: + - generative-ai + - quantized +research_paper: https://arxiv.org/abs/2112.10752 +research_paper_title: High-Resolution Image Synthesis with Latent Diffusion Models +license: https://github.com/CompVis/stable-diffusion/blob/main/LICENSE +source_repo: https://github.com/CompVis/stable-diffusion/tree/main +technical_details: + Input: Text prompt to generate image + QNN-SDK: '2.19' + Text Encoder Number of parameters: 340M + UNet Number of parameters: 865M + VAE Decoder Number of parameters: 83M + Model size: 1GB +applicable_scenarios: + - Image Generation + - Image Editing + - Content Creation +related_models: + - controlnet_quantized +form_factors: + - Phone + - Tablet +has_static_banner: yes +has_animated_banner: yes +license_type: creativeml-openrail-m +dataset: [] diff --git a/qai_hub_models/models/stable_diffusion_quantized/model.py b/qai_hub_models/models/stable_diffusion_quantized/model.py new file mode 100644 index 00000000..54d0144a --- /dev/null +++ b/qai_hub_models/models/stable_diffusion_quantized/model.py @@ -0,0 +1,122 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os + +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset +from qai_hub_models.utils.base_model import BasePrecompiledModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +QNN_SDK_PREFIX = "QNN219" +TEXT_ENCODER = os.path.join(QNN_SDK_PREFIX, "text_encoder.serialized.bin") +UNET_DIFFUSER = os.path.join(QNN_SDK_PREFIX, "unet.serialized.bin") +VAE_DECODER = os.path.join(QNN_SDK_PREFIX, "vae_decoder.serialized.bin") + + +class StableDiffusionQuantized: + """ + Stable Diffusion wrapper class consists of + - Text Encoder + - UNet based diffuser + - VAE decoder + + All three models are pre-trained, quantized (int8 weight, uint16 activations) + and compiled into serialized binary for Qualcomm Snapdragon Gen2+. + """ + + def __init__(self, text_encoder, unet, vae_decoder) -> None: + self.text_encoder = text_encoder + self.unet = unet + self.vae_decoder = vae_decoder + + @classmethod + def from_precompiled(cls) -> "StableDiffusionQuantized": + return StableDiffusionQuantized( + text_encoder=ClipVITTextEncoder.from_precompiled(), + unet=Unet.from_precompiled(), + vae_decoder=VAEDecoder.from_precompiled(), + ) + + +class ClipVITTextEncoder(BasePrecompiledModel): + """ + CLIP-ViT based Text Encoder. + + Pre-trained, quantized (int8 weight, uint16 activations) + and compiled into serialized binary for Qualcomm Snapdragon Gen2+. + """ + + def __init__(self, target_model_path) -> None: + self.target_model_path = target_model_path + + @classmethod + def from_precompiled(cls) -> "ClipVITTextEncoder": + text_encoder_path = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, TEXT_ENCODER + ).fetch() + return ClipVITTextEncoder(text_encoder_path) + + def get_target_model_path(self) -> str: + return self.target_model_path + + def get_input_spec(self) -> InputSpec: + return {"input_1": ((1, 77), "int32")} + + +class Unet(BasePrecompiledModel): + """ + UNet model to denoise image in latent space. + + Pre-trained, quantized (int8 weight, uint16 activations) + and compiled into serialized binary for Qualcomm Snapdragon Gen2+. + """ + + def __init__(self, target_model_path) -> None: + self.target_model_path = target_model_path + + @classmethod + def from_precompiled(cls) -> "Unet": + model_path = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, UNET_DIFFUSER + ).fetch() + return Unet(model_path) + + def get_target_model_path(self) -> str: + return self.target_model_path + + def get_input_spec(self) -> InputSpec: + return { + "input_1": ((1, 64, 64, 4), "float32"), + "input_2": ((1, 1280), "float32"), + "input_3": ((1, 77, 768), "float32"), + } + + +class VAEDecoder(BasePrecompiledModel): + """ + Decodes image from latent into output generated image. + + Pre-trained, quantized (int8 weight, uint16 activations) + and compiled into serialized binary for Qualcomm Snapdragon Gen2+. 
+ """ + + def __init__(self, target_model_path) -> None: + self.target_model_path = target_model_path + + @classmethod + def from_precompiled(cls) -> "VAEDecoder": + model_path = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, VAE_DECODER + ).fetch() + return VAEDecoder(model_path) + + def get_target_model_path(self) -> str: + return self.target_model_path + + def get_input_spec(self) -> InputSpec: + return {"input_1": ((1, 64, 64, 4), "float32")} diff --git a/qai_hub_models/models/stable_diffusion_quantized/perf.yaml b/qai_hub_models/models/stable_diffusion_quantized/perf.yaml new file mode 100644 index 00000000..36014f84 --- /dev/null +++ b/qai_hub_models/models/stable_diffusion_quantized/perf.yaml @@ -0,0 +1,102 @@ +models: +- name: Text-Encoder-Quantized + performance_metrics: + - reference_device_info: + name: Samsung Galaxy S23 + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-14T05:29:28.928297Z' + torchscript_onnx_qnn: + inference_time: 11362 + throughput: 88.01 + estimated_peak_memory_range: + min: 53248 + max: 44039432 + layer_info: + layers_on_npu: 570 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 570 + precision: uint16 + primary_compute_unit: NPU + job_id: jo5m87owp + job_status: Passed +- name: VAE-Decoder-Quantized + performance_metrics: + - reference_device_info: + name: Samsung Galaxy S23 + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-14T05:29:28.928297Z' + torchscript_onnx_qnn: + inference_time: 393878 + throughput: 2.53 + estimated_peak_memory_range: + min: 225280 + max: 11689680 + layer_info: + layers_on_npu: 409 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 409 + precision: uint16 + primary_compute_unit: NPU + job_id: joprwro95 + job_status: Passed +- name: UNet-Quantized + performance_metrics: + - reference_device_info: + name: Samsung Galaxy S23 + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-14T05:29:28.928297Z' + torchscript_onnx_qnn: + inference_time: 256698 + throughput: 3.89 + estimated_peak_memory_range: + min: 143360 + max: 12844792 + layer_info: + layers_on_npu: 5421 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 5421 + precision: uint16 + primary_compute_unit: NPU + job_id: jegnk4org + job_status: Passed +aggregated: + supported_devices: + - Samsung Galaxy S23 Ultra + supported_oses: + - Android + supported_chipsets: + - Snapdragon® 8 Gen 2 + performance_metrics: + - reference_device_info: + name: Samsung Galaxy S23 + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-14T05:29:28.928297Z' + torchscript_onnx_qnn: + inference_time: 661938 + throughput: 1.51 + estimated_peak_memory_range: + min: 225280 + max: 44039432 + precision: uint16 + primary_compute_unit: NPU + job_id: "" + job_status: Passed diff --git a/qai_hub_models/models/stable_diffusion_quantized/requirements.txt b/qai_hub_models/models/stable_diffusion_quantized/requirements.txt new file mode 100644 index 00000000..e21d8196 --- /dev/null +++ b/qai_hub_models/models/stable_diffusion_quantized/requirements.txt @@ -0,0 +1,2 @@ +transformers==4.31.0 +diffusers[torch]==0.21.4 diff --git a/qai_hub_models/models/stable_diffusion_quantized/test.py b/qai_hub_models/models/stable_diffusion_quantized/test.py new file mode 100644 index 00000000..b1c0b2b0 
--- /dev/null
+++ b/qai_hub_models/models/stable_diffusion_quantized/test.py
@@ -0,0 +1,39 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+import tempfile
+
+import pytest
+
+from qai_hub_models.models.stable_diffusion_quantized.demo import main as demo_main
+from qai_hub_models.models.stable_diffusion_quantized.export import export_model
+
+
+@pytest.mark.skip("#105 move slow_cloud and slow tests to nightly.")
+@pytest.mark.slow_cloud
+def test_export():
+    with tempfile.TemporaryDirectory() as tmpdir:
+        exported_jobs = export_model(
+            # Testing text_encoder as it's smallest model in
+            # Stable-Diffusion pipeline
+            components=["text_encoder"],
+            skip_inferencing=True,
+            skip_downloading=True,
+            skip_summary=True,
+            output_dir=tmpdir,
+        )
+
+        # NOTE: Not waiting for job to finish
+        # as it will slow CI down.
+        # Rather, we should create waiting test and move to nightly.
+        for jobs in exported_jobs.values():
+            profile_job, inference_job = jobs[0], jobs[1]
+            assert profile_job is not None
+            assert inference_job is None
+
+
+@pytest.mark.skip("#105 move slow_cloud and slow tests to nightly.")
+@pytest.mark.slow_cloud
+def test_demo():
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/stylegan2/README.md b/qai_hub_models/models/stylegan2/README.md
new file mode 100644
index 00000000..25bdb7e1
--- /dev/null
+++ b/qai_hub_models/models/stylegan2/README.md
@@ -0,0 +1,55 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [StyleGAN2: Generate realistic, randomized images of real classes](https://aihub.qualcomm.com/models/stylegan2)
+
+StyleGAN2 is a machine learning model that generates realistic images from random input state vectors.
+
+This is based on the implementation of StyleGAN2 found
+[here](https://github.com/NVlabs/stylegan3). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/stylegan2).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+Install the package via pip:
+```bash
+pip install "qai_hub_models[stylegan2]"
+```
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.stylegan2.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.stylegan2.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub ([sign up](https://aihub.qualcomm.com/) for access).
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of StyleGAN2 can be found + [here](https://github.com/NVlabs/stylegan3/blob/main/LICENSE.txt). + + +## References +* [Analyzing and Improving the Image Quality of StyleGAN](http://arxiv.org/abs/1912.04958) +* [Source Model Implementation](https://github.com/NVlabs/stylegan3) diff --git a/qai_hub_models/models/stylegan2/__init__.py b/qai_hub_models/models/stylegan2/__init__.py new file mode 100644 index 00000000..27a48c54 --- /dev/null +++ b/qai_hub_models/models/stylegan2/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .app import StyleGAN2App as App # noqa: F401 +from .model import MODEL_ID # noqa: F401 +from .model import StyleGAN2 as Model # noqa: F401 diff --git a/qai_hub_models/models/stylegan2/app.py b/qai_hub_models/models/stylegan2/app.py new file mode 100644 index 00000000..0f34df19 --- /dev/null +++ b/qai_hub_models/models/stylegan2/app.py @@ -0,0 +1,112 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable, List + +import numpy as np +import torch +from PIL import Image + +from qai_hub_models.models.stylegan2.model import StyleGAN2 + + +class StyleGAN2App: + def __init__( + self, + model: Callable[[torch.Tensor, torch.Tensor | None], torch.Tensor] + | Callable[[torch.Tensor], torch.Tensor], + output_dims: int = 512, + num_classes: int = 0, + ): + self.model = model + self.output_dims = output_dims + self.num_classes = num_classes + + def generate_random_vec(self, batch_size=1, seed=None) -> torch.Tensor: + if isinstance(self.model, StyleGAN2): + input_spec = self.model.get_input_spec(batch_size) + return torch.from_numpy( + self.model.sample_inputs(input_spec, seed=seed)["image_noise"][0] + ) + return torch.from_numpy( + np.random.RandomState(seed).randn(batch_size, self.output_dims) + ) + + def predict(self, *args, **kwargs): + # See generate_images. + return self.generate_images(*args, **kwargs) + + def generate_images( + self, + image_noise: torch.Tensor | None = None, + class_idx: torch.Tensor | None = None, + raw_output: bool = False, + ) -> torch.Tensor | List[Image.Image]: + """ + Generate an image. + + Inputs: + image_noise: torch.Tensor | None + Random state vector from which images should be generated. + Shape: [N, self.output_dims] + + class_idx: int | torch.tensor | None + Class index[es] to generate. If the model was not trained on more than 1 + class, this is unused. + + If an integer, generate all batches with the class index defined by the integer. + + If a tensor, provide tensor of either shape: + [N, self.num_classes]. + If a value of class_idx[b, n] is 1, that class will be generated. + A maximum of 1 class can be set to 1 per batch. + [N] + Each element is a class index. + Generate one batch for each provided class index. + + raw_output: + If true, returns a tensor of N generated RGB images. It has shape [N, 3, self.output_dims, self.output_dims]. + Otherwise, returns List[PIL.Image] + + Returns: + See raw_output parameter description. 
+ """ + with torch.no_grad(): + if image_noise is None: + image_noise = self.generate_random_vec( + batch_size=class_idx.shape[0] if class_idx is not None else 1 + ) + + if self.num_classes != 0: + if isinstance(class_idx, int): + class_idx = torch.Tensor([class_idx] * image_noise.shape[0]) + + if isinstance(class_idx, torch.Tensor) and len(class_idx.shape) == 1: + # Convert from [N] class index to one-hot [N, # of classes] + assert class_idx.dtype == torch.int + model_classes = torch.nn.functional.one_hot( + class_idx, self.num_classes + ) + else: + model_classes = class_idx + + image_tensor = self.model(image_noise, model_classes) + else: + image_tensor = self.model(image_noise) + + image_tensor = ( + (image_tensor.permute(0, 2, 3, 1) * 127.5 + 128) + .clamp(0, 255) + .to(torch.uint8) + ) + + if raw_output: + return image_tensor + + image_list = [] + for image_tensor in image_tensor: + image_list.append(Image.fromarray(image_tensor.numpy(), "RGB")) + return image_list diff --git a/qai_hub_models/models/stylegan2/demo.py b/qai_hub_models/models/stylegan2/demo.py new file mode 100644 index 00000000..62560946 --- /dev/null +++ b/qai_hub_models/models/stylegan2/demo.py @@ -0,0 +1,76 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import torch + +from qai_hub_models.models.stylegan2.app import StyleGAN2App +from qai_hub_models.models.stylegan2.model import StyleGAN2 +from qai_hub_models.utils.args import ( + add_output_dir_arg, + get_model_cli_parser, + model_from_cli_args, +) +from qai_hub_models.utils.display import display_or_save_image + + +def main(is_test: bool = False): + parser = get_model_cli_parser(StyleGAN2) + parser.add_argument( + "--seed", + type=int, + default=None, + help="Random seed to use for image generation.", + ) + parser.add_argument( + "--num-images", + type=int, + default=1, + help="Number of images to generate (all computed in 1 inference call).", + ) + add_output_dir_arg(parser) + parser.add_argument( + "--classes", + type=int, + nargs="*", + default=None, + help="Class[es] to use for image generation (if applicable).", + ) + args = parser.parse_args([] if is_test else None) + + # Create model and app + model = model_from_cli_args(StyleGAN2, args) + assert isinstance(model, StyleGAN2) + app = StyleGAN2App(model, model.output_size, model.num_classes) + + # Verify model input args + if model.num_classes == 0 and args.classes: + raise ValueError( + "Classes cannot be provided for models trained without classes." + ) + if args.classes and len(args.classes) > 1 and len(args.classes) != args.num_images: + raise ValueError( + "You may provide 1 class for all images, or one class per image." 
+ ) + if not args.classes and model.num_classes: + args.classes = [0] # Default to class 0 + + # Get desired batch size + batch_size = len(args.classes) if args.classes else args.num_images + + # Generate input and run inference + z = app.generate_random_vec(batch_size=batch_size, seed=args.seed) + images = app.generate_images( + z, + class_idx=torch.Tensor(args.classes).type(torch.int) if args.classes else None, + ) + + # Display images + assert isinstance(images, list) + if not is_test: + for (i, image) in enumerate(images): + display_or_save_image(image, args.output_dir, f"image_{i}.png") + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/stylegan2/export.py b/qai_hub_models/models/stylegan2/export.py new file mode 100644 index 00000000..e08ad817 --- /dev/null +++ b/qai_hub_models/models/stylegan2/export.py @@ -0,0 +1,186 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.stylegan2 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. 
+ compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "stylegan2" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "stylegan2", + "StyleGAN2", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_output output_0" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options + " --compute_unit gpu", + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=sample_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options + " --compute_unit gpu", + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/stylegan2/info.yaml b/qai_hub_models/models/stylegan2/info.yaml new file mode 100644 index 00000000..4e624753 --- /dev/null +++ b/qai_hub_models/models/stylegan2/info.yaml @@ -0,0 +1,32 @@ +name: StyleGAN2 +# id must match with the model dir name in qai_hub_models +id: stylegan2 +status: public +headline: Generate realistic, randomized images of real classes. +domain: Computer Vision +description: StyleGAN2 is a machine learning model that generates realistic images + from random input state vectors. +use_case: Image Generation +tags: + - real-time + - generative-ai +research_paper: http://arxiv.org/abs/1912.04958 +research_paper_title: Analyzing and Improving the Image Quality of StyleGAN +license: https://github.com/NVlabs/stylegan3/blob/main/LICENSE.txt +source_repo: https://github.com/NVlabs/stylegan3 +technical_details: + Model checkpoint: StyleGAN2 (afhqcat dataset) + Input resolution: 1x512 + Number of parameters: 94.6M + Model size: 361 MB +applicable_scenarios: [] +related_models: [] +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: other +dataset: [] diff --git a/qai_hub_models/models/stylegan2/model.py b/qai_hub_models/models/stylegan2/model.py new file mode 100644 index 00000000..13d59c56 --- /dev/null +++ b/qai_hub_models/models/stylegan2/model.py @@ -0,0 +1,209 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Any, Callable, Dict, List + +import numpy as np +import torch + +from qai_hub_models.utils.asset_loaders import SourceAsRoot +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +STYLEGAN2_SOURCE_REPOSITORY = "https://github.com/NVlabs/stylegan3" +STYLEGAN2_SOURCE_REPO_COMMIT = "c233a919a6faee6e36a316ddd4eddababad1adf9" +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +DEFAULT_WEIGHTS = ( + "https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/afhqcat.pkl" +) + + +class StyleGAN2(BaseModel): + """Exportable StyleGAN2 image generator.""" + + def __init__( + self, + generator: torch.nn.Module, + noise_mode="const", + ) -> None: + """ + Create a StyleGAN2 model + + Parameters: + generator: + Generator object loaded from the StyleGAN repositoru. 
+ noise_mode: + Controls noise model introduces into the input. + Options: 'const', 'random', 'none' + """ + super().__init__() + self.generator = generator + self.output_size: int = self.generator.z_dim # type: ignore + self.num_classes: int = self.generator.c_dim # type: ignore + self.noise_mode = noise_mode + assert noise_mode in ["const", "random", "none"] + + @staticmethod + def from_pretrained(model_url_or_path: str = DEFAULT_WEIGHTS): + """Load StyleGAN2 from a pickled styleGAN2 file.""" + return StyleGAN2(_load_stylegan2_source_model_from_weights(model_url_or_path)) + + def forward(self, image_noise: torch.Tensor, classes: torch.Tensor | None = None): + """ + Generate an image. + + Parameters: + image_noise: torch.Tensor | None + Random state vector from which images should be generated. + Shape: [ N, self.output_size ] + + classes: torch.tensor + Tensor of shape [N, self.num_classes]. + If a value of class_idx[b, n] is 1, that class will be generated. + A maximum of 1 class can be set to 1 per batch. + + Returns: + A tensor of N generated RGB images. It has shape [N, self.output_size, self.output_size, 3]. + """ + if classes is None: + classes = torch.zeros((image_noise.shape[0], self.num_classes)) + if self.num_classes != 0: + classes[:, 0] = 1 # Select first class as default + + return self.generator( + image_noise, + classes, + truncation_psi=1, + noise_mode=self.noise_mode, + force_fp32=True, + ) + + def get_input_spec(self, batch_size: int = 1) -> InputSpec: + """ + Returns the input specification (name -> (shape, type). This can be + used to submit a profiling job on Qualcomm AI Hub. + """ + inputs = {"image_noise": ((batch_size, self.output_size), "float32")} + if self.num_classes != 0: + inputs["classes"] = ((batch_size, self.num_classes), "float32") + return inputs # type: ignore + + def sample_inputs( + self, input_spec: InputSpec | None = None, seed=None + ) -> Dict[str, List[np.ndarray]]: + if not input_spec: + input_spec = self.get_input_spec() + + inputs = { + "image_noise": [ + np.random.RandomState(seed) + .randn(*input_spec["image_noise"][0]) + .astype(np.float32) + ] + } + if "classes" in input_spec: + classes = np.zeros(input_spec["classes"][0]).astype(np.float32) + if input_spec["classes"][0][1] != 0: + classes[:, 0] = 1 # Select first class as default + inputs["classes"] = [classes] + + return inputs + + +def _get_qaihm_upfirdn2d_ref(misc: Any, conv2d_gradfix: Callable, upfirdn2d: Any): + """ + Get patched upfirdn2d function implementation that is export compatible. + This replaces an implementation provided by the stylegan3 repository. + Params are imports from the stylegan3 repository (see _load_stylegan2_source_model_from_weights). + """ + + @misc.profiled_function + def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1): + """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.""" + # Validate arguments. + assert isinstance(x, torch.Tensor) and x.ndim == 4 + if f is None: + f = torch.ones([1, 1], dtype=torch.float32, device=x.device) + assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] + assert f.dtype == torch.float32 and not f.requires_grad + batch_size, num_channels, in_height, in_width = x.shape + upx, upy = upfirdn2d._parse_scaling(up) + downx, downy = upfirdn2d._parse_scaling(down) + padx0, padx1, pady0, pady1 = upfirdn2d._parse_padding(padding) + + # Upsample by inserting zeros. + + # ===== Local change start ===== + # Avoid rank 6. 
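+        # The upstream stylegan3 implementation reshapes to a rank-6 tensor
+        # ([N, C, H, 1, W, 1]) before inserting zeros for upsampling, which the
+        # export path used here cannot handle. Instead, batch and channel are
+        # folded into a single leading axis, the zero-insertion pad is applied
+        # to the resulting rank-5 tensor, and the shape is restored to
+        # [N, C, H*upy, W*upx] immediately afterwards.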
+ # x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1]) + x = x.reshape([batch_size * num_channels, in_height, 1, in_width, 1]) + # ===== Local change end ===== + + x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1]) + x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx]) + + # Pad or crop. + x = torch.nn.functional.pad( + x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)] + ) + x = x[ + :, + :, + max(-pady0, 0) : x.shape[2] - max(-pady1, 0), + max(-padx0, 0) : x.shape[3] - max(-padx1, 0), + ] + + # Setup filter. + f = f * (gain ** (f.ndim / 2)) + f = f.to(x.dtype) + if not flip_filter: + f = f.flip(list(range(f.ndim))) + + # Convolve with the filter. + f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim) + if f.ndim == 4: + x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels) + else: + x = conv2d_gradfix.conv2d( + input=x, weight=f.unsqueeze(2), groups=num_channels + ) + x = conv2d_gradfix.conv2d( + input=x, weight=f.unsqueeze(3), groups=num_channels + ) + + # Downsample by throwing away pixels. + x = x[:, :, ::downy, ::downx] + return x + + return _upfirdn2d_ref + + +def _load_stylegan2_source_model_from_weights( + model_url_or_path: str, +) -> torch.nn.Module: + # Load StyleGAN model from the source repository using the given weights. + with SourceAsRoot( + STYLEGAN2_SOURCE_REPOSITORY, + STYLEGAN2_SOURCE_REPO_COMMIT, + MODEL_ID, + MODEL_ASSET_VERSION, + ): + # Patch rank 6 tensor that can't be exported + from torch_utils import misc + from torch_utils.ops import conv2d_gradfix, upfirdn2d + + upfirdn2d._upfirdn2d_ref = _get_qaihm_upfirdn2d_ref( + misc, conv2d_gradfix, upfirdn2d + ) + + # Load model + import dnnlib + import legacy + + with dnnlib.util.open_url(model_url_or_path) as f: + # Get generator + return legacy.load_network_pkl(f)["G_ema"] diff --git a/qai_hub_models/models/stylegan2/perf.yaml b/qai_hub_models/models/stylegan2/perf.yaml new file mode 100644 index 00000000..c762bf13 --- /dev/null +++ b/qai_hub_models/models/stylegan2/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: StyleGAN2 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 1280066.0 + throughput: 0.7812097188738706 + estimated_peak_memory_range: + min: 1790029824 + max: 2607953504 + primary_compute_unit: CPU + precision: fp32 + layer_info: + layers_on_npu: 0 + layers_on_gpu: 89 + layers_on_cpu: 462 + total_layers: 551 + job_id: jz57elvqp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: 
'2024-02-21T16:31:56.125164Z' diff --git a/qai_hub_models/models/stylegan2/requirements.txt b/qai_hub_models/models/stylegan2/requirements.txt new file mode 100644 index 00000000..b8261cd7 --- /dev/null +++ b/qai_hub_models/models/stylegan2/requirements.txt @@ -0,0 +1 @@ +click>=8.0 diff --git a/qai_hub_models/models/stylegan2/test.py b/qai_hub_models/models/stylegan2/test.py new file mode 100644 index 00000000..df1f75c6 --- /dev/null +++ b/qai_hub_models/models/stylegan2/test.py @@ -0,0 +1,70 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np +import torch + +from qai_hub_models.models.stylegan2.app import StyleGAN2App +from qai_hub_models.models.stylegan2.demo import main as demo_main +from qai_hub_models.models.stylegan2.model import ( + DEFAULT_WEIGHTS, + MODEL_ASSET_VERSION, + MODEL_ID, + StyleGAN2, + _load_stylegan2_source_model_from_weights, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import assert_most_close, skip_clone_repo_check + +SAMPLE_GENERATOR_RANDOM_SEED = 1000 +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "output_sample_image.png" +) + + +@skip_clone_repo_check +def test_task(): + source_model = _load_stylegan2_source_model_from_weights(DEFAULT_WEIGHTS) + qaihm_model = StyleGAN2.from_pretrained(DEFAULT_WEIGHTS) + z = StyleGAN2App(qaihm_model).generate_random_vec(seed=SAMPLE_GENERATOR_RANDOM_SEED) + + with torch.no_grad(): + assert_most_close( + source_model(z, [[]], noise_mode="const", force_fp32=True), + qaihm_model(z), + 0.005, + ) + + +@skip_clone_repo_check +def test_stylegan2_app(): + app = StyleGAN2App(StyleGAN2.from_pretrained()) + + # App generates expected image + z = app.generate_random_vec(seed=SAMPLE_GENERATOR_RANDOM_SEED) + expected = np.asarray(load_image(OUTPUT_IMAGE_ADDRESS).convert("RGB")) + output = np.asarray(app.generate_images(z, raw_output=True)) + assert_most_close(output, expected, 0.005) + + # App can generate multiple images + output_images = app.generate_images(class_idx=torch.Tensor([1, 2]).type(torch.int)) + assert len(output_images) == 2 + + +@skip_clone_repo_check +def test_stylegan2_trace(): + app = StyleGAN2App(StyleGAN2.from_pretrained().convert_to_torchscript()) + + # App generates expected image + z = app.generate_random_vec(seed=SAMPLE_GENERATOR_RANDOM_SEED) + expected = np.asarray(load_image(OUTPUT_IMAGE_ADDRESS).convert("RGB")) + output = np.asarray(app.generate_images(z, raw_output=True))[0] + + assert_most_close(output, expected, 0.005) + + +@skip_clone_repo_check +def test_stylegan2_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/swin_base/README.md b/qai_hub_models/models/swin_base/README.md new file mode 100644 index 00000000..6d48fce0 --- /dev/null +++ b/qai_hub_models/models/swin_base/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [Swin-Base: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/swin_base) + +SwinBase is a machine learning model that can classify images from the Imagenet dataset. 
It can also be used as a backbone in building more complex models for specific use cases.
+
+This is based on the implementation of Swin-Base found
+[here](https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/swin_base).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.swin_base.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.swin_base.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of Swin-Base can be found
+  [here](https://github.com/pytorch/vision/blob/main/LICENSE).
+
+
+## References
+* [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030)
+* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py)
diff --git a/qai_hub_models/models/swin_base/__init__.py b/qai_hub_models/models/swin_base/__init__.py
new file mode 100644
index 00000000..349bff17
--- /dev/null
+++ b/qai_hub_models/models/swin_base/__init__.py
@@ -0,0 +1,10 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.imagenet_classifier.app import (  # noqa: F401
+    ImagenetClassifierApp as App,
+)
+
+from .model import MODEL_ID  # noqa: F401
+from .model import SwinBase as Model  # noqa: F401
diff --git a/qai_hub_models/models/swin_base/demo.py b/qai_hub_models/models/swin_base/demo.py
new file mode 100644
index 00000000..bf03b593
--- /dev/null
+++ b/qai_hub_models/models/swin_base/demo.py
@@ -0,0 +1,14 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.swin_base.model import SwinBase + + +def main(is_test: bool = False): + imagenet_demo(SwinBase, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/swin_base/export.py b/qai_hub_models/models/swin_base/export.py new file mode 100644 index 00000000..58416857 --- /dev/null +++ b/qai_hub_models/models/swin_base/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.swin_base import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. 
+ **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "swin_base" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "swin_base", + "Swin-Base", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options + " --compute_unit gpu", + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options + " --compute_unit gpu", + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/swin_base/info.yaml b/qai_hub_models/models/swin_base/info.yaml new file mode 100644 index 00000000..15d83d1c --- /dev/null +++ b/qai_hub_models/models/swin_base/info.yaml @@ -0,0 +1,44 @@ +name: Swin-Base +# id must match with the model dir name in qai_hub_models +id: swin_base +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: SwinBase is a machine learning model that can classify images from the + Imagenet dataset. It can also be used as a backbone in building more complex models + for specific use cases. +use_case: Image Classification +tags: + - backbone +research_paper: https://arxiv.org/abs/2103.14030 +research_paper_title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted + Windows' +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: + https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 88.8M + Model size: 339 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - swin_tiny + - swin_small + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/swin_base/model.py b/qai_hub_models/models/swin_base/model.py new file mode 100644 index 00000000..b4ba8831 --- /dev/null +++ b/qai_hub_models/models/swin_base/model.py @@ -0,0 +1,32 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch +import torchvision.models as tv_models +from torchvision.models.swin_transformer import PatchMerging, ShiftedWindowAttention + +from qai_hub_models.models._shared.common import replace_module_recursively +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier +from qai_hub_models.models._shared.swin.swin_transformer import ( + AutoSplitLinear, + ShiftedWindowAttentionInf, +) + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class SwinBase(ImagenetClassifier): + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ImagenetClassifier: + net = tv_models.swin_b(weights=weights) + replace_module_recursively( + net, ShiftedWindowAttention, ShiftedWindowAttentionInf + ) + replace_module_recursively( + net, torch.nn.Linear, AutoSplitLinear, parent_module=PatchMerging + ) + return cls(net.eval()) diff --git a/qai_hub_models/models/swin_base/perf.yaml b/qai_hub_models/models/swin_base/perf.yaml new file mode 100644 index 00000000..c27cc0d3 --- /dev/null +++ b/qai_hub_models/models/swin_base/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: Swin-Base + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 76852.0 + throughput: 13.012023109353041 + estimated_peak_memory_range: + min: 12288 + max: 367871696 + primary_compute_unit: GPU + precision: fp16 + layer_info: + layers_on_npu: 0 + layers_on_gpu: 2006 + layers_on_cpu: 0 + total_layers: 2006 + job_id: jw568zrvg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:09:41.513292Z' diff --git a/qai_hub_models/models/swin_base/test.py b/qai_hub_models/models/swin_base/test.py new file mode 100644 index 00000000..3f302ff1 --- /dev/null +++ b/qai_hub_models/models/swin_base/test.py @@ -0,0 +1,39 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+import numpy as np
+import torchvision.models as tv_models
+
+from qai_hub_models.models._shared.imagenet_classifier.test_utils import (  # noqa: F401
+    imagenet_sample_torch,
+    run_imagenet_classifier_test,
+)
+from qai_hub_models.models.swin_base.demo import main as demo_main
+from qai_hub_models.models.swin_base.model import MODEL_ID, SwinBase
+
+
+def test_numerical(imagenet_sample_torch):
+    # Ensure that the optimized SwinBase matches the original one numerically
+    x = imagenet_sample_torch
+    model_opt = SwinBase.from_pretrained().eval()
+    model_orig = tv_models.swin_b(weights="IMAGENET1K_V1").eval()
+    np.testing.assert_allclose(
+        model_opt(x).detach().numpy(),
+        model_orig(x).detach().numpy(),
+        atol=1e-5,
+        rtol=1e-3,
+    )
+
+
+def test_task():
+    run_imagenet_classifier_test(
+        SwinBase.from_pretrained(),
+        MODEL_ID,
+        probability_threshold=0.53,
+        asset_version=1,
+    )
+
+
+def test_demo():
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/swin_small/README.md b/qai_hub_models/models/swin_small/README.md
new file mode 100644
index 00000000..de3c2ec5
--- /dev/null
+++ b/qai_hub_models/models/swin_small/README.md
@@ -0,0 +1,50 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [Swin-Small: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/swin_small)
+
+SwinSmall is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases.
+
+This is based on the implementation of Swin-Small found
+[here](https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/swin_small).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.swin_small.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.swin_small.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of Swin-Small can be found
+  [here](https://github.com/pytorch/vision/blob/main/LICENSE).
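+
+## Python usage (sketch)
+
+Beyond the CLI demo above, the model class can also be used directly from
+Python. The snippet below is a minimal sketch: the random tensor is only a
+stand-in for a preprocessed 224x224 RGB image, and the actual pre/post
+processing lives in [demo.py](demo.py) and the shared Imagenet classifier app.
+
+```python
+import torch
+
+from qai_hub_models.models.swin_small import Model
+
+# Load the export-friendly Swin-Small Imagenet classifier (IMAGENET1K_V1 weights).
+model = Model.from_pretrained()
+
+# Stand-in for a preprocessed NCHW image batch at the published 224x224 resolution.
+x = torch.rand(1, 3, 224, 224)
+
+with torch.no_grad():
+    scores = model(x)
+
+# One score per Imagenet class.
+print(scores.shape)
+```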
+ + +## References +* [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py) diff --git a/qai_hub_models/models/swin_small/__init__.py b/qai_hub_models/models/swin_small/__init__.py new file mode 100644 index 00000000..7e8edef6 --- /dev/null +++ b/qai_hub_models/models/swin_small/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import SwinSmall as Model # noqa: F401 diff --git a/qai_hub_models/models/swin_small/demo.py b/qai_hub_models/models/swin_small/demo.py new file mode 100644 index 00000000..dd78cca1 --- /dev/null +++ b/qai_hub_models/models/swin_small/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.swin_small.model import SwinSmall + + +def main(is_test: bool = False): + imagenet_demo(SwinSmall, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/swin_small/export.py b/qai_hub_models/models/swin_small/export.py new file mode 100644 index 00000000..dabc8746 --- /dev/null +++ b/qai_hub_models/models/swin_small/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.swin_small import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. 
Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "swin_small" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "swin_small", + "Swin-Small", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options + " --compute_unit gpu", + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." 
+ ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options + " --compute_unit gpu", + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/swin_small/info.yaml b/qai_hub_models/models/swin_small/info.yaml new file mode 100644 index 00000000..b783fb0c --- /dev/null +++ b/qai_hub_models/models/swin_small/info.yaml @@ -0,0 +1,43 @@ +name: Swin-Small +# id must match with the model dir name in qai_hub_models +id: swin_small +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: SwinSmall is a machine learning model that can classify images from the + Imagenet dataset. It can also be used as a backbone in building more complex models + for specific use cases. +use_case: Image Classification +tags: + - backbone +research_paper: https://arxiv.org/abs/2103.14030 +research_paper_title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted + Windows' +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: + https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 50.4M + Model size: 193 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - swin_tiny + - swin_base + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/swin_small/model.py b/qai_hub_models/models/swin_small/model.py new file mode 100644 index 00000000..081d731f --- /dev/null +++ b/qai_hub_models/models/swin_small/model.py @@ -0,0 +1,32 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch +import torchvision.models as tv_models +from torchvision.models.swin_transformer import PatchMerging, ShiftedWindowAttention + +from qai_hub_models.models._shared.common import replace_module_recursively +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier +from qai_hub_models.models._shared.swin.swin_transformer import ( + AutoSplitLinear, + ShiftedWindowAttentionInf, +) + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class SwinSmall(ImagenetClassifier): + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ImagenetClassifier: + net = tv_models.swin_s(weights=weights) + replace_module_recursively( + net, ShiftedWindowAttention, ShiftedWindowAttentionInf + ) + replace_module_recursively( + net, torch.nn.Linear, AutoSplitLinear, parent_module=PatchMerging + ) + return cls(net.eval()) diff --git a/qai_hub_models/models/swin_small/perf.yaml b/qai_hub_models/models/swin_small/perf.yaml new file mode 100644 index 00000000..d481ee20 --- /dev/null +++ b/qai_hub_models/models/swin_small/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: Swin-Small + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 52492.0 + throughput: 19.05052198430237 + estimated_peak_memory_range: + min: 12288 + max: 222000632 + primary_compute_unit: GPU + precision: fp16 + layer_info: + layers_on_npu: 0 + layers_on_gpu: 1965 + layers_on_cpu: 0 + total_layers: 1965 + job_id: jlpe7wl05 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:30:42.368348Z' diff --git a/qai_hub_models/models/swin_small/test.py b/qai_hub_models/models/swin_small/test.py new file mode 100644 index 00000000..3ce1e0ea --- /dev/null +++ b/qai_hub_models/models/swin_small/test.py @@ -0,0 +1,39 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+import numpy as np
+import torchvision.models as tv_models
+
+from qai_hub_models.models._shared.imagenet_classifier.test_utils import (  # noqa: F401
+    imagenet_sample_torch,
+    run_imagenet_classifier_test,
+)
+from qai_hub_models.models.swin_small.demo import main as demo_main
+from qai_hub_models.models.swin_small.model import MODEL_ID, SwinSmall
+
+
+def test_numerical(imagenet_sample_torch):
+    # Ensure that the optimized SwinSmall matches the original one numerically
+    x = imagenet_sample_torch
+    model_opt = SwinSmall.from_pretrained().eval()
+    model_orig = tv_models.swin_s(weights="IMAGENET1K_V1").eval()
+    np.testing.assert_allclose(
+        model_opt(x).detach().numpy(),
+        model_orig(x).detach().numpy(),
+        atol=1e-5,
+        rtol=1e-3,
+    )
+
+
+def test_task():
+    run_imagenet_classifier_test(
+        SwinSmall.from_pretrained(),
+        MODEL_ID,
+        probability_threshold=0.53,
+        asset_version=1,
+    )
+
+
+def test_demo():
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/swin_tiny/README.md b/qai_hub_models/models/swin_tiny/README.md
new file mode 100644
index 00000000..407b8fd2
--- /dev/null
+++ b/qai_hub_models/models/swin_tiny/README.md
@@ -0,0 +1,50 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [Swin-Tiny: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/swin_tiny)
+
+SwinTiny is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases.
+
+This is based on the implementation of Swin-Tiny found
+[here](https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/swin_tiny).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.swin_tiny.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.swin_tiny.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of Swin-Tiny can be found
+  [here](https://github.com/pytorch/vision/blob/main/LICENSE).
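+
+## Python usage (sketch)
+
+The optimized model can also be compared directly from Python against the stock
+torchvision Swin-T it is derived from. The snippet below is a minimal sketch
+that mirrors the numerical checks used for the Swin variants in this repository;
+the random tensor is only a stand-in for a preprocessed 224x224 image.
+
+```python
+import torch
+import torchvision.models as tv_models
+
+from qai_hub_models.models.swin_tiny import Model
+
+x = torch.rand(1, 3, 224, 224)
+
+optimized = Model.from_pretrained()  # export-friendly wrapper with swapped attention modules
+reference = tv_models.swin_t(weights="IMAGENET1K_V1").eval()  # stock torchvision model
+
+with torch.no_grad():
+    # The module replacements are intended to be inference-equivalent,
+    # so the two outputs should agree closely.
+    print(torch.allclose(optimized(x), reference(x), atol=1e-4))
+```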
+ + +## References +* [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py) diff --git a/qai_hub_models/models/swin_tiny/__init__.py b/qai_hub_models/models/swin_tiny/__init__.py new file mode 100644 index 00000000..d063ef8c --- /dev/null +++ b/qai_hub_models/models/swin_tiny/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import SwinTiny as Model # noqa: F401 diff --git a/qai_hub_models/models/swin_tiny/demo.py b/qai_hub_models/models/swin_tiny/demo.py new file mode 100644 index 00000000..cd8aac95 --- /dev/null +++ b/qai_hub_models/models/swin_tiny/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.swin_tiny.model import SwinTiny + + +def main(is_test: bool = False): + imagenet_demo(SwinTiny, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/swin_tiny/export.py b/qai_hub_models/models/swin_tiny/export.py new file mode 100644 index 00000000..bc2550e3 --- /dev/null +++ b/qai_hub_models/models/swin_tiny/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.swin_tiny import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. 
Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "swin_tiny" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "swin_tiny", + "Swin-Tiny", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options + " --compute_unit gpu", + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." 
+ ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options + " --compute_unit gpu", + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/swin_tiny/info.yaml b/qai_hub_models/models/swin_tiny/info.yaml new file mode 100644 index 00000000..ee0345a8 --- /dev/null +++ b/qai_hub_models/models/swin_tiny/info.yaml @@ -0,0 +1,43 @@ +name: Swin-Tiny +# id must match with the model dir name in qai_hub_models +id: swin_tiny +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: SwinTiny is a machine learning model that can classify images from the + Imagenet dataset. It can also be used as a backbone in building more complex models + for specific use cases. +use_case: Image Classification +tags: + - backbone +research_paper: https://arxiv.org/abs/2103.14030 +research_paper_title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted + Windows' +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: + https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 28.8M + Model size: 110 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - swin_small + - swin_base + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/swin_tiny/model.py b/qai_hub_models/models/swin_tiny/model.py new file mode 100644 index 00000000..2c32e595 --- /dev/null +++ b/qai_hub_models/models/swin_tiny/model.py @@ -0,0 +1,32 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch +import torchvision.models as tv_models +from torchvision.models.swin_transformer import PatchMerging, ShiftedWindowAttention + +from qai_hub_models.models._shared.common import replace_module_recursively +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier +from qai_hub_models.models._shared.swin.swin_transformer import ( + AutoSplitLinear, + ShiftedWindowAttentionInf, +) + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class SwinTiny(ImagenetClassifier): + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ImagenetClassifier: + net = tv_models.swin_t(weights=weights) + replace_module_recursively( + net, ShiftedWindowAttention, ShiftedWindowAttentionInf + ) + replace_module_recursively( + net, torch.nn.Linear, AutoSplitLinear, parent_module=PatchMerging + ) + return cls(net.eval()) diff --git a/qai_hub_models/models/swin_tiny/perf.yaml b/qai_hub_models/models/swin_tiny/perf.yaml new file mode 100644 index 00000000..7603ecf3 --- /dev/null +++ b/qai_hub_models/models/swin_tiny/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: Swin-Tiny + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 29469.0 + throughput: 33.93396450507313 + estimated_peak_memory_range: + min: 0 + max: 193113472 + primary_compute_unit: GPU + precision: fp16 + layer_info: + layers_on_npu: 0 + layers_on_gpu: 1059 + layers_on_cpu: 0 + total_layers: 1059 + job_id: jqpyojx45 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:18:27.047126Z' diff --git a/qai_hub_models/models/swin_tiny/test.py b/qai_hub_models/models/swin_tiny/test.py new file mode 100644 index 00000000..574e7c1e --- /dev/null +++ b/qai_hub_models/models/swin_tiny/test.py @@ -0,0 +1,42 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+import numpy as np
+import torchvision.models as tv_models
+
+from qai_hub_models.models._shared.imagenet_classifier.test_utils import (  # noqa: F401
+    imagenet_sample_torch,
+    run_imagenet_classifier_test,
+    run_imagenet_classifier_trace_test,
+)
+from qai_hub_models.models.swin_tiny.demo import main as demo_main
+from qai_hub_models.models.swin_tiny.model import MODEL_ID, SwinTiny
+
+
+def test_numerical_consistency(imagenet_sample_torch):
+    # Ensure that the optimized SwinTiny matches the original one numerically
+    x = imagenet_sample_torch
+    model_opt = SwinTiny.from_pretrained().eval()
+    model_orig = tv_models.swin_t(weights="IMAGENET1K_V1").eval()
+    np.testing.assert_allclose(
+        model_opt(x).detach().numpy(),
+        model_orig(x).detach().numpy(),
+        atol=1e-5,
+        rtol=1e-3,
+    )
+
+
+def test_task():
+    run_imagenet_classifier_test(
+        SwinTiny.from_pretrained(), MODEL_ID, probability_threshold=0.53
+    )
+
+
+def test_trace():
+    run_imagenet_classifier_trace_test(SwinTiny.from_pretrained())
+
+
+def test_demo():
+    # Verify demo does not crash
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/trocr/README.md b/qai_hub_models/models/trocr/README.md
new file mode 100644
index 00000000..8c87e6a4
--- /dev/null
+++ b/qai_hub_models/models/trocr/README.md
@@ -0,0 +1,55 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [TrOCR: Transformer based model for state-of-the-art optical character recognition (OCR) on both printed and handwritten text](https://aihub.qualcomm.com/models/trocr)
+
+End-to-end text recognition approach with pre-trained image transformer and text transformer models for both image understanding and wordpiece-level text generation.
+
+This is based on the implementation of TrOCR found
+[here](https://huggingface.co/microsoft/trocr-small-stage1). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/trocr).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+Install the package via pip:
+```bash
+pip install "qai_hub_models[trocr]"
+```
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.trocr.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. A short Python API sketch is also included further below. Please refer
+to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.trocr.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub; [sign up](https://aihub.qualcomm.com/)
+to request access.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of TrOCR can be found
+  [here](https://github.com/microsoft/unilm/blob/master/LICENSE).
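+
+## Python API sketch
+
+The CLI demo above wraps a small Python app (`app.py` in this directory). The snippet
+below is a minimal sketch of driving it directly; it assumes `qai_hub_models[trocr]`
+is installed and that the pretrained weights and the bundled sample image can be
+downloaded.
+
+```python
+from qai_hub_models.models.trocr import App, Model  # TrOCRApp / TrOCR
+from qai_hub_models.models.trocr.demo import DEFAULT_SAMPLE_IMAGE
+from qai_hub_models.utils.asset_loaders import load_image
+
+# Wrap the encoder, decoder and tokenizer/processor in the app helper.
+app = App(Model.from_pretrained())
+
+# Any PIL image containing a single line of text works; this uses the demo asset.
+image = load_image(DEFAULT_SAMPLE_IMAGE)
+
+# Returns one decoded string per batch entry.
+print(app.predict_text_from_image(image)[0])
+```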
+ + +## References +* [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) +* [Source Model Implementation](https://huggingface.co/microsoft/trocr-small-stage1) diff --git a/qai_hub_models/models/trocr/__init__.py b/qai_hub_models/models/trocr/__init__.py new file mode 100644 index 00000000..e74d4060 --- /dev/null +++ b/qai_hub_models/models/trocr/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .app import TrOCRApp as App # noqa: F401 +from .model import MODEL_ID # noqa: F401 +from .model import TrOCR as Model # noqa: F401 diff --git a/qai_hub_models/models/trocr/app.py b/qai_hub_models/models/trocr/app.py new file mode 100644 index 00000000..4aa88b74 --- /dev/null +++ b/qai_hub_models/models/trocr/app.py @@ -0,0 +1,244 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Generator, List + +import torch +from PIL.Image import Image + +from qai_hub_models.models.trocr.model import KVCache, TrOCR + + +class TrOCRApp: + """ + This class consists of light-weight "app code" that is required to perform end to end inference with TrOCR. + + The app uses 2 models: + * encoder (modified to return cross attention key-value) + * decoder + + For a given image input, the app will: + * use the io_processor to pre-process the image (reshape & normalize) + * call the encoder once + * run the decoder in a loop until the "end of sentence" token is predicted, + or the max sequence length (defined by the source model config) is reached + * map the output tokens to a string via `io_processor`. + """ + + def __init__(self, model: TrOCR): + self.encoder = model.encoder + self.decoder = model.decoder + self.io_processor = model.io_processor + + self.pad_token_id = model.pad_token_id + self.eos_token_id = model.eos_token_id + self.start_token_id = model.start_token_id + self.max_seq_len = model.max_seq_len + + def preprocess_image(self, image: Image) -> torch.Tensor: + """Convert a raw image (resize, normalize) into a pyTorch tensor that can be used as input to TrOCR inference. + This also converts the image to RGB, which is the expected input channel layout for TrOCR. + + For more information on preprocessing, see https://huggingface.co/docs/transformers/preprocessing.""" + assert ( + self.io_processor is not None + ), "TrOCR processor most be provided to use type Image as an input." + return self.io_processor(image.convert("RGB"), return_tensors="pt").pixel_values + + def predict(self, *args, **kwargs): + # See predict_text_from_image. + return self.predict_text_from_image(*args, **kwargs) + + def predict_text_from_image( + self, pixel_values_or_image: torch.Tensor | Image, raw_output: bool = False + ) -> torch.Tensor | List[str]: + """ + From the provided image or tensor, predict the line of text contained within. + + Parameters: + pixel_values_or_image: torch.Tensor + Input PIL image (before pre-processing) or pyTorch tensor (after image pre-processing). + raw_output: bool + If false, return a list of predicted strings (one for each batch). 
Otherwise, return a tensor of predicted token IDs. + + Returns: + The output word / token sequence (representative of the text contained in the input image). + + The prediction will be a list of strings (one string per batch) if self.io_processor != None and raw_output=False. + Otherwise, a `torch.Tensor` of shape [batch_size, predicted_sequence_length] is returned. It contains predicted token IDs. + """ + gen = self.stream_predicted_text_from_image(pixel_values_or_image, raw_output) + _ = last = next(gen) + for last in gen: + pass + return last + + def stream_predicted_text_from_image( + self, pixel_values_or_image: torch.Tensor | Image, raw_output: bool = False + ) -> Generator[torch.Tensor | List[str], None, None]: + """ + From the provided image or tensor, predict the line of text contained within. + The returned generator will produce a single output per decoder iteration. + + The generator allows the client to "stream" output from the decoder + (eg. get the prediction one word at as time as they're predicted, instead of waiting for the entire output sequence to be predicted) + + Parameters: + pixel_values_or_image: torch.Tensor + Input PIL image (before pre-processing) or pyTorch tensor (after image pre-processing). + raw_output: bool + If false, return a list of predicted strings (one for each batch). Otherwise, return a tensor of predicted token IDs. + + Returns: + A python generator for the output word / token sequence (representative of the text contained in the input image). + The generator will produce one output for every decoder iteration. + + The prediction will be a list of strings (one string per batch) if self.io_processor != None and raw_output=False. + Otherwise, a `torch.Tensor` of shape [batch_size, predicted_sequence_length] is returned. It contains predicted token IDs. + """ + if isinstance(pixel_values_or_image, Image): + pixel_values = self.preprocess_image(pixel_values_or_image) + else: + pixel_values = pixel_values_or_image + + batch_size = pixel_values.shape[0] + eos_token_id_tensor = torch.tensor([self.eos_token_id], dtype=torch.int32) + + # Run encoder + kv_cache_cross_attn = self.encoder(pixel_values) + + # Initial KV Cache + initial_attn_cache = get_empty_attn_cache( + batch_size, + self.decoder.num_decoder_layers, + self.decoder.decoder_attention_heads, + self.decoder.embeddings_per_head, + ) + initial_kv_cache = combine_kv_caches(kv_cache_cross_attn, initial_attn_cache) + kv_cache = initial_kv_cache + + # Prepare decoder input IDs. Shape: [batch_size, 1] + initial_input_ids = ( + torch.ones((batch_size, 1), dtype=torch.int32) * self.start_token_id + ) + input_ids = initial_input_ids + + # Prepare decoder output IDs. Shape: [batch_size, seq_len] + output_ids = input_ids + + # Keep track of which sequences are already finished. Shape: [batch_size] + unfinished_sequences = torch.ones(batch_size, dtype=torch.int32) + + while unfinished_sequences.max() != 0 and ( + self.max_seq_len is None or output_ids.shape[-1] < self.max_seq_len + ): + # Get next tokens. Shape: [batch_size] + outputs = self.decoder(input_ids, *kv_cache) + next_tokens = outputs[0] + kv_cache_attn = outputs[1:] + + # Finished sentences should have padding token appended instead of the prediction. 
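+            # (unfinished_sequences is a 0/1 mask per batch element: keep the
+            # predicted token where it is 1, substitute pad_token_id where it is 0.)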
+ next_tokens = next_tokens * unfinished_sequences + self.pad_token_id * ( + 1 - unfinished_sequences + ) + + input_ids = torch.unsqueeze(next_tokens, -1) + output_ids = torch.cat([output_ids, input_ids], dim=-1) + yield self.io_processor.batch_decode( + output_ids, skip_special_tokens=True + ) if self.io_processor and not raw_output else output_ids + + # if eos_token was found in one sentence, set sentence to finished + if eos_token_id_tensor is not None: + unfinished_sequences = unfinished_sequences.mul( + torch.unsqueeze(next_tokens, -1) + .ne(eos_token_id_tensor.unsqueeze(1)) + .prod(dim=0) + .type(torch.int32) + ) + + # Re-construct kv cache with new sequence. + kv_cache = combine_kv_caches(kv_cache_cross_attn, kv_cache_attn) + + +def combine_kv_caches( + kv_cache_cross_attn: KVCache, + kv_cache_attn: KVCache, +) -> KVCache: + """ + Generates full KV Cache from cross attention KV cache and attention KV cache. + + Parameters: + kv_cache_cross_attn: Tuple[kv_cache_cross_attn_0_key, kv_cache_cross_attn_0_val, cv_cache_cross_attn_1_key, ...] + Cross attn KV cache generated by CrossAttnKVGenerator. + len(tuple) == 2 * number of source model decoder layers. + + kv_cache_attn: Tuple[kv_cache_attn_0_key, kv_cache_attn_0_val cv_cache_attn_1_key, ...] + Attn generated by the decoder, or None (generate empty cache) if the decoder has not run yet. + len(tuple) == 2 * number of source model decoder layers. + + Returns: + kv_cache: Tuple[kv_cache_attn_0_key, kv_cache_attn_0_val, + kv_cache_cross_attn_0_key, kv_cache_cross_attn_0_val, + kv_cache_attn_1_key, ...] + Combined KV Cache. + len(tuple) == 4 * number of source model decoder layers. + """ + # Construct remaining kv cache with a new empty sequence. + kv_cache = [torch.Tensor()] * len(kv_cache_cross_attn) * 2 + + # Combine KV Cache. + for i in range(0, len(kv_cache_cross_attn) // 2): + kv_cache[4 * i] = kv_cache_attn[2 * i] + kv_cache[4 * i + 1] = kv_cache_attn[2 * i + 1] + kv_cache[4 * i + 2] = kv_cache_cross_attn[2 * i] + kv_cache[4 * i + 3] = kv_cache_cross_attn[2 * i + 1] + + return (*kv_cache,) + + +def get_empty_attn_cache( + batch_size: int, + num_decoder_layers: int, + decoder_attention_heads: int, + embeddings_per_head: int, +) -> KVCache: + """ + Generates empty cross attn KV Cache for use in the first iteration of the decoder. + + Parameters: + batch_size: Batch size. + num_decoder_layers: NUmber of decoder layers in the decoder. + decoder_attention_heads: Number of attention heads in the decoder. + embeddings_per_head: The count of the embeddings in each decoder attention head. + + Returns: + kv_cache: Tuple[kv_cache_attn_0_key, kv_cache_attn_0_val, kv_cache_attn_1_key, ...] + len(tuple) == 2 * number of source model decoder layers. + """ + kv_cache = [] + for i in range(0, num_decoder_layers): + kv_cache.append( + torch.zeros( + ( + batch_size, + decoder_attention_heads, + 0, + embeddings_per_head, + ) + ) + ) + kv_cache.append( + torch.zeros( + ( + batch_size, + decoder_attention_heads, + 0, + embeddings_per_head, + ) + ) + ) + return (*kv_cache,) diff --git a/qai_hub_models/models/trocr/demo.py b/qai_hub_models/models/trocr/demo.py new file mode 100644 index 00000000..eb1ad417 --- /dev/null +++ b/qai_hub_models/models/trocr/demo.py @@ -0,0 +1,57 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import time + +from qai_hub_models.models.trocr.app import TrOCRApp +from qai_hub_models.models.trocr.model import ( + HUGGINGFACE_TROCR_MODEL, + MODEL_ASSET_VERSION, + MODEL_ID, + TrOCR, +) +from qai_hub_models.utils.args import get_model_cli_parser, model_from_cli_args +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image + +HUGGINGFACE_TROCR_MODEL = "microsoft/trocr-small-stage1" +DEFAULT_SAMPLE_IMAGE = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "sample_text.jpg" +) + + +# Run TrOCR end-to-end on a sample line of handwriting. +# The demo will output the text contained within the source image. +# Text will be printed to terminal as it is generated with each decoder loop. +def main(is_test: bool = False): + # Demo parameters + parser = get_model_cli_parser(TrOCR) + parser.add_argument( + "--image", + type=str, + default=DEFAULT_SAMPLE_IMAGE, + help="image file path or URL", + ) + args = parser.parse_args([] if is_test else None) + + # Load Application + app = TrOCRApp(model_from_cli_args(TrOCR, args)) + + # Load Image + image = load_image(args.image) + + # Stream output from model + print("\n** Predicted Text **\n") + + for output in app.stream_predicted_text_from_image(image): + if is_test: + continue + print(output[0], end="\r") + # Sleep to accentuate the "streaming" affect in terminal output. + time.sleep(0.1) + + print("\n") + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/trocr/export.py b/qai_hub_models/models/trocr/export.py new file mode 100644 index 00000000..8f74baee --- /dev/null +++ b/qai_hub_models/models/trocr/export.py @@ -0,0 +1,221 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Mapping, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.trocr import Model +from qai_hub_models.utils.args import ( + export_parser, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + +ALL_COMPONENTS = ["TrOCREncoder", "TrOCRDecoder"] + + +def export_model( + device: str = "Samsung Galaxy S23", + components: Optional[List[str]] = None, + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Mapping[ + str, Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] +] | List[str]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. 
Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + components: List of sub-components of the model that will be exported. + Each component is compiled and profiled separately. + Defaults to ALL_COMPONENTS if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` + + Returns: + A Mapping from component_name to a 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "trocr" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + component_arg = components + components = components or ALL_COMPONENTS + for component in components: + if component not in ALL_COMPONENTS: + raise ValueError(f"Invalid component {component}.") + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "trocr", + "TrOCR", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + component_arg, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + components_dict = {} + if "TrOCREncoder" in components: + components_dict["TrOCREncoder"] = model.encoder + if "TrOCRDecoder" in components: + components_dict["TrOCRDecoder"] = model.decoder + + compile_jobs = {} + for component_name, component in components_dict.items(): + # Trace the model + input_spec = component.get_input_spec() + source_model = torch.jit.trace(component, make_torch_inputs(input_spec)) + + # 2. Compile the models to an on-device asset + model_compile_options = component.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input pixel_values" + ) + print(f"Optimizing model {component_name} to run on-device.") + compile_jobs[component_name] = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=f"{component_name}", + options=model_compile_options, + ) + + # 3. 
Profile the model assets on real devices + profile_jobs = {} + if not skip_profiling: + for component_name in components: + print(f"Profiling model {component_name} on a hosted device.") + profile_jobs[component_name] = hub.submit_profile_job( + model=compile_jobs[component_name].get_target_model(), + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_jobs = {} + if not skip_inferencing: + for component_name in components: + print( + f"Running inference for {component_name} on a hosted device with example inputs." + ) + sample_inputs = components_dict[component_name].sample_inputs() + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "pixel_values", sample_inputs, target_runtime + ) + inference_jobs[component_name] = hub.submit_inference_job( + model=compile_jobs[component_name].get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 5. Download the model assets to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + for component_name, compile_job in compile_jobs.items(): + target_model = compile_job.get_target_model() + target_model.download( + str(output_path / f"{model_name}_{component_name}.tflite") + ) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + for component_name in components: + profile_job = profile_jobs[component_name] + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + for component_name in components: + inference_job = inference_jobs[component_name] + sample_inputs = components_dict[component_name].sample_inputs() + torch_out = torch_inference(components_dict[component_name], sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + return { + component_name: ( + compile_jobs[component_name], + profile_jobs.get(component_name, None), + inference_jobs.get(component_name, None), + ) + for component_name in components + } + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser( + model_cls=Model, components=ALL_COMPONENTS, supports_qnn=False + ) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/trocr/info.yaml b/qai_hub_models/models/trocr/info.yaml new file mode 100644 index 00000000..e0755c8f --- /dev/null +++ b/qai_hub_models/models/trocr/info.yaml @@ -0,0 +1,36 @@ +name: TrOCR +# id must match with the model dir name in qai_hub_models +id: trocr +status: public +headline: Transformer based model for state-of-the-art optical character recognition + (OCR) on both printed and handwritten text. +domain: Multimodal +description: End-to-end text recognition approach with pre-trained image transformer + and text transformer models for both image understanding and wordpiece-level text + generation. 
+use_case: Image To Text +tags: [] +research_paper: https://arxiv.org/abs/2109.10282 +research_paper_title: 'TrOCR: Transformer-based Optical Character Recognition with + Pre-trained Models' +license: https://github.com/microsoft/unilm/blob/master/LICENSE +source_repo: https://huggingface.co/microsoft/trocr-small-stage1 +technical_details: + Model checkpoint: trocr-small-stage1 + Input resolution: 320x320 + Number of parameters (TrOCREncoder): 23.0M + Model size (TrOCREncoder): 87.8 MB + Number of parameters (TrOCRDecoder): 38.3M + Model size (TrOCRDecoder): 146 MB +applicable_scenarios: + - Publishing + - Healthcare + - Document Management +form_factors: + - Phone + - Tablet +related_models: [] +has_static_banner: yes +has_animated_banner: yes +license_type: mit +dataset: [] diff --git a/qai_hub_models/models/trocr/model.py b/qai_hub_models/models/trocr/model.py new file mode 100644 index 00000000..6fc61625 --- /dev/null +++ b/qai_hub_models/models/trocr/model.py @@ -0,0 +1,257 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import copy +from typing import Callable, List, Tuple + +import numpy as np +import torch +from transformers import TrOCRProcessor, VisionEncoderDecoderModel +from transformers.models.trocr.modeling_trocr import ( + PreTrainedModel, + TrOCRAttention, + TrOCRForCausalLM, +) + +from qai_hub_models.utils.base_model import BaseModel, CollectionModel +from qai_hub_models.utils.input_spec import InputSpec + +HUGGINGFACE_TROCR_MODEL = "microsoft/trocr-small-stage1" +MODEL_ID = __name__.split(".")[-2] +TROCR_BATCH_SIZE = 1 +TROCR_EXPORT_SEQ_LEN = 1 # -1 TODO(#5428): Dynamic sequence length support. This limits the input size to a seq len of 1. +MODEL_ASSET_VERSION = 1 + +""" +Traceable modules used by TrOCRApp +""" +KVCache = Tuple[torch.Tensor, ...] 
# Export friendly + + +class TrOCR(CollectionModel): + def __init__( + self, + encoder: Callable[[torch.Tensor], KVCache], + decoder: Callable[..., Tuple[torch.Tensor, ...]], + io_processor: TrOCRProcessor, + pad_token_id: int, + eos_token_id: int, + start_token_id: int, + max_seq_len: int, + ): + self.encoder = encoder + self.decoder = decoder + self.io_processor = io_processor + self.pad_token_id = pad_token_id + self.eos_token_id = eos_token_id + self.start_token_id = start_token_id + self.max_seq_len = max_seq_len + + @classmethod + def from_pretrained(cls, hf_trocr_model: str = HUGGINGFACE_TROCR_MODEL) -> TrOCR: + # Load Huggingface source + source_model = VisionEncoderDecoderModel.from_pretrained( + hf_trocr_model, return_dict=False + ) + io_processor = TrOCRProcessor.from_pretrained(hf_trocr_model) + return TrOCR.from_source_model(source_model, io_processor) # type: ignore + + @staticmethod + def from_source_model( + source_model: VisionEncoderDecoderModel, io_processor: TrOCRProcessor + ) -> TrOCR: + encoder = TrOCREncoder(source_model.encoder, source_model.decoder) # type: ignore + decoder = TrOCRDecoder(source_model.decoder) # type: ignore + return TrOCR( + encoder, + decoder, + io_processor, + source_model.generation_config.pad_token_id, # type: ignore + source_model.generation_config.eos_token_id, # type: ignore + (source_model.generation_config.decoder_start_token_id or source_model.generation_config.bos_token_id), # type: ignore + source_model.generation_config.max_length, # type: ignore + ) + + +class TrOCREncoder(BaseModel): + """Vision encoder that returns the decoder's cross attn cache state.""" + + def __init__(self, encoder: PreTrainedModel, decoder: TrOCRForCausalLM): + super().__init__() + self.encoder = encoder + self.decoder = decoder + self.cross_attn_kv_shape: Callable = decoder.model.decoder.layers[0].encoder_attn._shape # type: ignore + + def forward( + self, + pixel_values: torch.FloatTensor, + ) -> KVCache: + """ + Run the encoder on `pixel_values`, and produce a cross attention key/value cache that can be used as decoder input. + + Parameters: + pixel_values: Pixel values pre-processed for encoder consumption. + + Returns: + cross_attn_kv_cache: Tuple[kv_cache_cross_attn_0_key, kv_cache_cross_attn_0_val, kv_cache_cross_attn_1_key, ...] + KV Cache for cross attention layers. + len(tuple) == 2 * number of source model decoder layers. + """ + encoder_hidden_states = self.encoder(pixel_values, return_dict=False)[0] + kv_cache = [] + batch_size = encoder_hidden_states.shape[0] + for layer in self.decoder.model.decoder.layers: + layer_attn: TrOCRAttention = layer.encoder_attn # type: ignore + key_states = self.cross_attn_kv_shape( + layer_attn.k_proj(encoder_hidden_states), -1, batch_size + ) + value_states = self.cross_attn_kv_shape( + layer_attn.v_proj(encoder_hidden_states), -1, batch_size + ) + kv_cache.append(key_states) + kv_cache.append(value_states) + + return (*kv_cache,) # convert list to tuple for export + + def get_input_spec(self) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declared + # the model input specification upon submitting a profile job. + return {"pixel_values": ((TROCR_BATCH_SIZE, 3, 384, 384), "float32")} + + @classmethod + def from_pretrained(cls): + return TrOCR.from_pretrained().encoder + + +class TrOCRDecoder(BaseModel): + """ + Wraps Vision decoder in an export-friendly interface. 
+ + Inputs: (input_ids, KV Cache (unrolled in order generated by combine_kv_caches in app.py)) + Outputs: (output_ids, Updated Attention KV Cache) + """ + + def __init__(self, decoder: TrOCRForCausalLM): + super().__init__() + self.decoder = copy.deepcopy(decoder) + # Delete unused layers that exist only to generate initial KV cache. + self.num_decoder_layers = len(self.decoder.model.decoder.layers) + for layer in self.decoder.model.decoder.layers: + layer_attn: TrOCRAttention = layer.encoder_attn # type: ignore + layer_attn.k_proj = None # type: ignore + layer_attn.v_proj = None # type: ignore + self.max_position_embeddings: int = self.decoder.config.max_position_embeddings # type: ignore + self.decoder_attention_heads: int = decoder.config.decoder_attention_heads + self.embeddings_per_head: int = ( + decoder.config.d_model // decoder.config.decoder_attention_heads + ) + + def forward( + self, input_ids: torch.IntTensor, *kv_cache_args, **kv_cache_kwargs + ) -> Tuple[torch.Tensor, ...]: + """ + Generate the next token in the predicted output text sequence. + + Parameters: + input_ids : torch.IntTensor + Next token ID in each batch sequence (always shape (batch_size, 1)) + + kv_cache: Tuple[kv_cache_attn_0_key, kv_cache_attn_0_val, + kv_cache_cross_attn_0_key, kv_cache_cross_attn_0_val, + kv_cache_attn_1_key, ...] + Combined KV Cache generated by combine_kv_caches in app.py. + len(tuple) == 4 * number of source model decoder layers. + + Returns: + outputs : Tuple[ + predicted_ids + Next predicted token. + kv_cache_attn_0_key, kv_cache_attn_0_val, kv_cache_attn_1_key, ... + Updated KV cache for attention layers. Count == 2 * number of source model decoder layers. + ] + """ + # encoder_hidden_states is not used by the network when kv_cache is set. + # + # Unfortunately the underlying huggingface code does not allow us to + # get rid of the input entirely, because the decoder layer implementation uses its existance + # to determine if it should include cross-attention layers. + # + # Therefore, we set the hidden state to shape [1] in this case to minimize footprint. + # It will go away when traced. + encoder_hidden_states = torch.from_numpy(np.array([1])) + + # Convert KV Cache from export friendly format to decoder format + kv_cache: List[Tuple[torch.Tensor, ...]] = [] + curr_tuple: List[torch.Tensor] = [] + for arg in kv_cache_args or kv_cache_kwargs.values(): + curr_tuple.append(arg) + if len(curr_tuple) == 4: + kv_cache.append((*curr_tuple,)) + curr_tuple = [] + kv_cache = (*kv_cache,) # type: ignore + + # Run decoder + outputs = self.decoder( + input_ids=input_ids, + encoder_hidden_states=encoder_hidden_states, + return_dict=False, + use_cache=True, + past_key_values=kv_cache, + ) + + # KV Cache conversion to export-friendly format (tuple of tensors) + # Don't output cross attn KV cache because it does not change. + out_kv_cache: List[torch.Tensor] = [] + for layer_cache in outputs[1]: + out_kv_cache = out_kv_cache + list(layer_cache)[:2] + + # Argmax Logits, Sequence-Only (Attn) KV Cache + return ( + torch.argmax(torch.squeeze(outputs[0], dim=1), dim=-1), + *out_kv_cache, + ) + + def get_input_spec(self) -> InputSpec: + """ + Returns the input specification (name -> (shape, type). This can be + used to submit profiling job on Qualcomm AI Hub. 
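+
+        For the decoder this is `input_ids` plus, for each decoder layer i, the
+        interleaved cache entries kv_{i}_attn_key, kv_{i}_attn_val,
+        kv_{i}_cross_attn_key and kv_{i}_cross_attn_val (built in the body below).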
+ """ + input_ids_spec = ((1, 1), "int32") + + attn_cache_spec = ( + ( + TROCR_BATCH_SIZE, + self.decoder_attention_heads, + TROCR_EXPORT_SEQ_LEN, + self.embeddings_per_head, + ), + "float32", + ) + + cross_attn_cache_spec = ( + ( + TROCR_BATCH_SIZE, + self.decoder_attention_heads, + 578, # TODO: Can we get this programatically? + self.embeddings_per_head, + ), + "float32", + ) + + decoder_input_specs: InputSpec = {"input_ids": input_ids_spec} + for i in range(0, self.num_decoder_layers): + decoder_input_specs[f"kv_{i}_attn_key"] = attn_cache_spec + decoder_input_specs[f"kv_{i}_attn_val"] = attn_cache_spec + decoder_input_specs[f"kv_{i}_cross_attn_key"] = cross_attn_cache_spec + decoder_input_specs[f"kv_{i}_cross_attn_val"] = cross_attn_cache_spec + + return decoder_input_specs + + @classmethod + def from_pretrained(cls): + return TrOCR.from_pretrained().decoder diff --git a/qai_hub_models/models/trocr/perf.yaml b/qai_hub_models/models/trocr/perf.yaml new file mode 100644 index 00000000..b9cea027 --- /dev/null +++ b/qai_hub_models/models/trocr/perf.yaml @@ -0,0 +1,107 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: TrOCREncoder + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 244369.0 + throughput: 4.092172084020477 + estimated_peak_memory_range: + min: 7294976 + max: 10455296 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 627 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 627 + job_id: j2p0m26eg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:31:39.426796Z' +- name: TrOCRDecoder + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 2820.0 + throughput: 354.6099290780142 + estimated_peak_memory_range: + min: 20480 + max: 2212720 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 394 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 394 + job_id: j1p8em18p + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:34:45.126605Z' diff --git a/qai_hub_models/models/trocr/requirements.txt b/qai_hub_models/models/trocr/requirements.txt new file mode 100644 index 
00000000..3a308074 --- /dev/null +++ b/qai_hub_models/models/trocr/requirements.txt @@ -0,0 +1,2 @@ +transformers==4.33.2 +sentencepiece diff --git a/qai_hub_models/models/trocr/test.py b/qai_hub_models/models/trocr/test.py new file mode 100644 index 00000000..9b47bce3 --- /dev/null +++ b/qai_hub_models/models/trocr/test.py @@ -0,0 +1,64 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np +import pytest +import torch +from transformers import TrOCRProcessor, VisionEncoderDecoderModel + +from qai_hub_models.models.trocr.app import TrOCRApp +from qai_hub_models.models.trocr.demo import DEFAULT_SAMPLE_IMAGE +from qai_hub_models.models.trocr.demo import main as demo_main +from qai_hub_models.models.trocr.model import HUGGINGFACE_TROCR_MODEL, TrOCR +from qai_hub_models.utils.asset_loaders import load_image + +IMAGE_TEXT = 'industrial " Mr. Brown commented icity., letus have a' + + +@pytest.fixture(scope="module") +def source_huggingface_model() -> VisionEncoderDecoderModel: + return VisionEncoderDecoderModel.from_pretrained( + HUGGINGFACE_TROCR_MODEL, return_dict=False + ) # type: ignore + + +@pytest.fixture(scope="module") +def trocr_app(source_huggingface_model: VisionEncoderDecoderModel) -> TrOCRApp: + # Load Huggingface source + source_model = source_huggingface_model + io_processor = TrOCRProcessor.from_pretrained(HUGGINGFACE_TROCR_MODEL) + + # Load Application + return TrOCRApp(TrOCR.from_source_model(source_model, io_processor)) + + +@pytest.fixture(scope="module") +def processed_sample_image(trocr_app: TrOCRApp) -> torch.Tensor: + """Huggingface-provided image preprocessing and token decoding.""" + return trocr_app.preprocess_image(load_image(DEFAULT_SAMPLE_IMAGE)) + + +def test_predict_text_from_image( + trocr_app: TrOCRApp, processed_sample_image: torch.Tensor +): + """Verify our driver produces the correct sentences from a given image input.""" + assert trocr_app.predict_text_from_image(processed_sample_image)[0] == IMAGE_TEXT + + +def test_task( + source_huggingface_model: VisionEncoderDecoderModel, + trocr_app: TrOCRApp, + processed_sample_image: torch.Tensor, +): + """Verify that raw (numeric) outputs of both networks are the same.""" + source_out = source_huggingface_model.generate(processed_sample_image).numpy() + qaihm_out = trocr_app.predict_text_from_image( + processed_sample_image, raw_output=True + ) + + assert np.allclose(source_out, qaihm_out) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/unet_segmentation/README.md b/qai_hub_models/models/unet_segmentation/README.md new file mode 100644 index 00000000..7e1b7c39 --- /dev/null +++ b/qai_hub_models/models/unet_segmentation/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [Unet-Segmentation: Real-time segmentation optimized for mobile and edge](https://aihub.qualcomm.com/models/unet_segmentation) + +UNet is a machine learning model that produces a segmentation mask for an image. The most basic use case will label each pixel in the image as being in the foreground or the background. More advanced usage will assign a class label to each pixel. 
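+
+In code, the lightweight app wrapper added later in this diff (`app.py`) reduces this
+to a few lines. The snippet below is a minimal, illustrative sketch: the file name
+`example.jpg` is a placeholder, it assumes the pretrained checkpoint can be
+downloaded, and unlike the bundled demo (which pads/resizes inputs to the model's
+default 640x1280 resolution) it feeds the image through as-is.
+
+```python
+from PIL import Image
+
+from qai_hub_models.models.unet_segmentation import Model  # UNet
+from qai_hub_models.models.unet_segmentation.app import UNetSegmentationApp
+
+app = UNetSegmentationApp(Model.from_pretrained())
+
+# Boolean foreground/background mask with the same spatial size as the input image.
+mask = app.predict(Image.open("example.jpg"))
+print(mask.shape, mask.dtype)
+```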
+
+This is based on the implementation of Unet-Segmentation found
+[here](https://github.com/milesial/Pytorch-UNet). This repository contains scripts for optimized on-device
+export suitable to run on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/unet_segmentation).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once the `qai_hub_models` package is installed (`pip install qai_hub_models`), run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.unet_segmentation.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model including pre/post processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for more usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.unet_segmentation.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to Qualcomm® AI Hub; [sign up](https://aihub.qualcomm.com/)
+to request access.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of Unet-Segmentation can be found
+  [here](https://github.com/milesial/Pytorch-UNet/blob/master/LICENSE).
+
+
+## References
+* [U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597)
+* [Source Model Implementation](https://github.com/milesial/Pytorch-UNet)
diff --git a/qai_hub_models/models/unet_segmentation/__init__.py b/qai_hub_models/models/unet_segmentation/__init__.py
new file mode 100644
index 00000000..222f6aff
--- /dev/null
+++ b/qai_hub_models/models/unet_segmentation/__init__.py
@@ -0,0 +1,6 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from .model import MODEL_ID  # noqa: F401
+from .model import UNet as Model  # noqa: F401
diff --git a/qai_hub_models/models/unet_segmentation/app.py b/qai_hub_models/models/unet_segmentation/app.py
new file mode 100644
index 00000000..41683171
--- /dev/null
+++ b/qai_hub_models/models/unet_segmentation/app.py
@@ -0,0 +1,42 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from typing import Callable
+
+import numpy as np
+import torch
+from PIL.Image import Image
+
+from qai_hub_models.utils.image_processing import preprocess_PIL_image
+
+
+class UNetSegmentationApp:
+    """
+    This class consists of light-weight "app code" that is required to
+    perform end to end inference with UNet.
+
+    For a given image input, the app will:
+        * Pre-process the image (resize and normalize)
+        * Run UNet Inference
+        * Convert the raw output into a segmented image.
+    """
+
+    def __init__(self, model: Callable[[torch.Tensor], torch.Tensor]):
+        self.model = model
+
+    def predict(self, image: Image) -> np.ndarray:
+        """
+        From the provided image, generate the segmented mask.
+ + Parameters: + image: A PIL Image in RGB format. + + Returns: + mask: Segmented mask as numpy array. + """ + + img = preprocess_PIL_image(image) + with torch.no_grad(): + out = self.model(img) + mask = out.argmax(dim=1) + return mask[0].bool().numpy() diff --git a/qai_hub_models/models/unet_segmentation/demo.py b/qai_hub_models/models/unet_segmentation/demo.py new file mode 100644 index 00000000..b1714696 --- /dev/null +++ b/qai_hub_models/models/unet_segmentation/demo.py @@ -0,0 +1,76 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from typing import Callable + +import torch +from PIL.Image import fromarray + +from qai_hub_models.models.unet_segmentation.app import UNetSegmentationApp +from qai_hub_models.models.unet_segmentation.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + UNet, +) +from qai_hub_models.utils.args import ( + demo_model_from_cli_args, + get_model_cli_parser, + get_on_device_demo_parser, + validate_on_device_demo_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, PathType, load_image +from qai_hub_models.utils.display import display_or_save_image +from qai_hub_models.utils.image_processing import pil_resize_pad + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "unet_test_image.jpg" +) + + +# Run unet segmentation app end-to-end on a sample image. +# The demo will display the predicted mask in a window. +def unet_demo( + model: Callable[..., Callable[[torch.Tensor, torch.Tensor], torch.Tensor]], + default_image: PathType, + is_test: bool = False, +): + # Demo parameters + parser = get_model_cli_parser(UNet) + parser = get_on_device_demo_parser(parser, add_output_dir=True) + parser.add_argument( + "--image", + type=str, + default=None, + help="File path or URL to an input image to use for the demo.", + ) + args = parser.parse_args([] if is_test else None) + validate_on_device_demo_args(args, model.get_model_id()) + + # Load image & model + model = demo_model_from_cli_args(UNet, args) + print("Model loaded from pre-trained weights.") + (_, _, height, width) = UNet.get_input_spec()["image"][0] + orig_image = load_image( + args.image or default_image, verbose=True, desc="sample input image" + ) + image, _, _ = pil_resize_pad(orig_image, (height, width)) + + # Run app + app = UNetSegmentationApp(model) + mask = fromarray(app.predict(image)) + if not is_test: + display_or_save_image(image, args.output_dir, "input_image.png", "input image") + display_or_save_image(mask, args.output_dir, "mask.png", "mask") + + +def main(is_test: bool = False): + unet_demo( + UNet, + IMAGE_ADDRESS, + is_test, + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/unet_segmentation/export.py b/qai_hub_models/models/unet_segmentation/export.py new file mode 100644 index 00000000..d566a699 --- /dev/null +++ b/qai_hub_models/models/unet_segmentation/export.py @@ -0,0 +1,197 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.unet_segmentation import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "unet_segmentation" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "unet_segmentation", + "Unet-Segmentation", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/unet_segmentation/info.yaml b/qai_hub_models/models/unet_segmentation/info.yaml new file mode 100644 index 00000000..b835940a --- /dev/null +++ b/qai_hub_models/models/unet_segmentation/info.yaml @@ -0,0 +1,38 @@ +name: Unet-Segmentation +# id must match with the model dir name in qai_hub_models +id: unet_segmentation +status: public +headline: Real-time segmentation optimized for mobile and edge. +domain: Computer Vision +description: UNet is a machine learning model that produces a segmentation mask for + an image. 
The most basic use case will label each pixel in the image as being in + the foreground or the background. More advanced usage will assign a class label + to each pixel. +use_case: Semantic Segmentation +tags: + - backbone + - real-time +research_paper: https://arxiv.org/abs/1505.04597 +research_paper_title: 'U-Net: Convolutional Networks for Biomedical Image Segmentation' +license: https://github.com/milesial/Pytorch-UNet/blob/master/LICENSE +source_repo: https://github.com/milesial/Pytorch-UNet +technical_details: + Model checkpoint: unet_carvana_scale1.0_epoch2 + Input resolution: 224x224 + Number of parameters: 31.0M + Model size: 118 MB +applicable_scenarios: + - Autonomous Vehicles + - Medical Imaging + - Factory Quality Control +related_models: + - fcn_resnet50 +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: no +license_type: gpl-3.0 +dataset: [] diff --git a/qai_hub_models/models/unet_segmentation/model.py b/qai_hub_models/models/unet_segmentation/model.py new file mode 100644 index 00000000..32290667 --- /dev/null +++ b/qai_hub_models/models/unet_segmentation/model.py @@ -0,0 +1,74 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Optional + +import torch + +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_torch +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_REPO = "milesial/Pytorch-UNet" +MODEL_TYPE = "unet_carvana" +MODEL_ASSET_VERSION = 1 +DEFAULT_WEIGHTS = CachedWebModelAsset( + "https://github.com/milesial/Pytorch-UNet/releases/download/v3.0/unet_carvana_scale1.0_epoch2.pth", + MODEL_ID, + MODEL_ASSET_VERSION, + "unet_carvana_scale1.0_epoch2.pth", +) + + +class UNet(BaseModel): + def __init__(self, net: torch.nn.Module) -> None: + super().__init__() + self.net = net + + @classmethod + def from_pretrained(cls, ckpt_url: Optional[str] = DEFAULT_WEIGHTS): + net = torch.hub.load( + MODEL_REPO, MODEL_TYPE, pretrained=False, scale=1.0, trust_repo=True + ) + if ckpt_url is not None: + state_dict = load_torch(ckpt_url) + net.load_state_dict(state_dict) + return cls(net.eval()) + + def forward(self, image: torch.Tensor): + """ + Run UNet on `image`, and produce a segmentation mask over the image. + + Parameters: + image: A [1, 3, H, W] image. + The smaller of H, W should be >= 16, the larger should be >=32 + Pixel values pre-processed for encoder consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + mask: Shape [1, n_classes, H, W] where H, W are the same as the input image. + n_classes is 2 for the default model. + + Each channel represents the raw logit predictions for a given class. + Taking the softmax over all channels for a given pixel gives the + probability distribution over classes for that pixel. + """ + return self.net(image) + + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 640, + width: int = 1280, + ) -> InputSpec: + """ + Returns the input specification (name -> (shape, type). This can be + used to submit profiling job on Qualcomm AI Hub. 
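The forward docstring above describes a [1, n_classes, H, W] logit output. A minimal sketch, using the default 640x1280 input spec above and a random image purely for illustration (and assuming only the `UNet` wrapper defined in this module), of reducing those logits to a per-pixel label map:

```python
import torch

from qai_hub_models.models.unet_segmentation.model import UNet

# Minimal sketch: run the wrapper on a random image and convert class
# logits into a per-pixel label map. 640x1280 matches the default input
# spec; any size within the documented limits should work the same way.
model = UNet.from_pretrained()
image = torch.rand(1, 3, 640, 1280)
with torch.no_grad():
    logits = model(image)              # [1, n_classes, 640, 1280]
probs = torch.softmax(logits, dim=1)   # per-pixel class probabilities
mask = probs.argmax(dim=1)             # [1, 640, 1280] integer class labels
```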
+ """ + return {"image": ((batch_size, num_channels, height, width), "float32")} diff --git a/qai_hub_models/models/unet_segmentation/perf.yaml b/qai_hub_models/models/unet_segmentation/perf.yaml new file mode 100644 index 00000000..42a0fc85 --- /dev/null +++ b/qai_hub_models/models/unet_segmentation/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: Unet-Segmentation + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 7708.0 + throughput: 129.73533990659055 + estimated_peak_memory_range: + min: 442368 + max: 29540072 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 31 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 31 + job_id: j7gjr207p + job_status: Passed + torchscript_onnx_qnn: + inference_time: 7735.0 + throughput: 129.2824822236587 + estimated_peak_memory_range: + min: 421888 + max: 282981312 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 52 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 52 + job_id: jlpe7wr75 + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:43:41.073611Z' diff --git a/qai_hub_models/models/unet_segmentation/test.py b/qai_hub_models/models/unet_segmentation/test.py new file mode 100644 index 00000000..e9e68735 --- /dev/null +++ b/qai_hub_models/models/unet_segmentation/test.py @@ -0,0 +1,36 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np +from PIL.Image import fromarray + +from qai_hub_models.models.unet_segmentation.app import UNetSegmentationApp +from qai_hub_models.models.unet_segmentation.demo import IMAGE_ADDRESS +from qai_hub_models.models.unet_segmentation.demo import main as demo_main +from qai_hub_models.models.unet_segmentation.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + UNet, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image + +OUTPUT_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "test_output.png" +) + + +def test_task(): + net = UNet.from_pretrained() + + img = load_image(IMAGE_ADDRESS) + mask = UNetSegmentationApp(net).predict(img) + + # Convert raw mask of 0s and 1s into a PIL Image + img = fromarray(mask) + expected_out = load_image(OUTPUT_ADDRESS) + np.testing.assert_allclose(np.array(img), np.array(expected_out)) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/vit/README.md b/qai_hub_models/models/vit/README.md new file mode 100644 index 00000000..f1c29bfc --- /dev/null +++ b/qai_hub_models/models/vit/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [VIT: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/vit) + +VIT is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of VIT found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/vit). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.vit.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.vit.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of VIT can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). 
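In addition to the CLI entry points above, the wrapper class can be driven directly from Python. A minimal sketch, assuming the wrapper behaves as a standard `torch.nn.Module` over the 224x224 input resolution listed for this model (the random input is purely illustrative):

```python
import torch

from qai_hub_models.models.vit import Model  # VIT wrapper, re-exported as Model

# Load pretrained ImageNet weights and classify a dummy 224x224 RGB image.
model = Model.from_pretrained()
model.eval()
image = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    logits = model(image)
print("Predicted ImageNet class index:", int(logits.argmax(dim=-1)))
```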
+ + +## References +* [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py) diff --git a/qai_hub_models/models/vit/__init__.py b/qai_hub_models/models/vit/__init__.py new file mode 100644 index 00000000..80e3a48a --- /dev/null +++ b/qai_hub_models/models/vit/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import VIT as Model # noqa: F401 diff --git a/qai_hub_models/models/vit/demo.py b/qai_hub_models/models/vit/demo.py new file mode 100644 index 00000000..53e6806c --- /dev/null +++ b/qai_hub_models/models/vit/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.vit.model import VIT + + +def main(is_test: bool = False): + imagenet_demo(VIT, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/vit/export.py b/qai_hub_models/models/vit/export.py new file mode 100644 index 00000000..5ddfbd36 --- /dev/null +++ b/qai_hub_models/models/vit/export.py @@ -0,0 +1,191 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.vit import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. 
Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "vit" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "vit", + "VIT", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace( + model, make_torch_inputs(input_spec), check_trace=False + ) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." 
+ ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/vit/info.yaml b/qai_hub_models/models/vit/info.yaml new file mode 100644 index 00000000..ec8bbafc --- /dev/null +++ b/qai_hub_models/models/vit/info.yaml @@ -0,0 +1,42 @@ +name: VIT +# id must match with the model dir name in qai_hub_models +id: vit +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: VIT is a machine learning model that can classify images from the Imagenet + dataset. It can also be used as a backbone in building more complex models for specific + use cases. +use_case: Image Classification +tags: + - backbone +research_paper: https://arxiv.org/abs/2010.11929 +research_paper_title: 'An Image is Worth 16x16 Words: Transformers for Image Recognition + at Scale' +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: + https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 86.6M + Model size: 330 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/vit/model.py b/qai_hub_models/models/vit/model.py new file mode 100644 index 00000000..b25fba95 --- /dev/null +++ b/qai_hub_models/models/vit/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class VIT(ImagenetClassifier): + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ImagenetClassifier: + net = tv_models.vit_b_16(weights=weights) + return cls(net) diff --git a/qai_hub_models/models/vit/perf.yaml b/qai_hub_models/models/vit/perf.yaml new file mode 100644 index 00000000..3eebfd79 --- /dev/null +++ b/qai_hub_models/models/vit/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: VIT + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 135762.0 + throughput: 7.365831381388017 + estimated_peak_memory_range: + min: 147456 + max: 3331880 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 557 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 557 + job_id: j1gly2ll5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:21:41.057280Z' diff --git a/qai_hub_models/models/vit/test.py b/qai_hub_models/models/vit/test.py new file mode 100644 index 00000000..b9f00a10 --- /dev/null +++ b/qai_hub_models/models/vit/test.py @@ -0,0 +1,23 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.test_utils import ( # run_imagenet_classifier_trace_test, + run_imagenet_classifier_test, +) +from qai_hub_models.models.vit.demo import main as demo_main +from qai_hub_models.models.vit.model import MODEL_ID, VIT + + +def test_task(): + run_imagenet_classifier_test(VIT.from_pretrained(), MODEL_ID) + + +# TODO: Fix this export test. 
+# def test_trace(): +# run_imagenet_classifier_trace_test(VIT.from_pretrained()) + + +def test_demo(): + # Verify demo does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/models/whisper_asr/README.md b/qai_hub_models/models/whisper_asr/README.md new file mode 100644 index 00000000..593bd344 --- /dev/null +++ b/qai_hub_models/models/whisper_asr/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [Whisper-Base: Automatic speech recognition (ASR) model for multilingual transcription as well as translation](https://aihub.qualcomm.com/models/whisper_asr) + +State-of-art model encoder-decoder transformer. The encoder takes an audio chunk (around 30 second) converted to a log-Mel spectrogram. The decoder predicts the corresponding text caption intermixed with special tokens that can be used to direct the single model to perform various speech tasks. + +This is based on the implementation of Whisper-Base found +[here](https://github.com/openai/whisper/tree/main). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/whisper_asr). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[whisper_asr]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.whisper_asr.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.whisper_asr.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of Whisper-Base can be found + [here](https://github.com/openai/whisper/blob/main/LICENSE). + + +## References +* [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) +* [Source Model Implementation](https://github.com/openai/whisper/tree/main) diff --git a/qai_hub_models/models/whisper_asr/__init__.py b/qai_hub_models/models/whisper_asr/__init__.py new file mode 100644 index 00000000..3f49ff9d --- /dev/null +++ b/qai_hub_models/models/whisper_asr/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .app import WhisperApp as App # noqa: F401 +from .model import MODEL_ID # noqa: F401 +from .model import Whisper as Model # noqa: F401 diff --git a/qai_hub_models/models/whisper_asr/app.py b/qai_hub_models/models/whisper_asr/app.py new file mode 100644 index 00000000..dcdccd68 --- /dev/null +++ b/qai_hub_models/models/whisper_asr/app.py @@ -0,0 +1,345 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import List, Tuple + +import numpy as np +import torch +import whisper # type: ignore +from scipy import special as scipy_special # type: ignore + +from qai_hub_models.models.whisper_asr.model import Whisper +from qai_hub_models.utils.model_adapters import TorchNumpyAdapter + +# hard-coded audio hyperparameters +SAMPLE_RATE = 16000 +N_FFT = 400 +N_MELS = 80 +HOP_LENGTH = 160 +CHUNK_LENGTH = 30 +N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000 samples in a 30-second chunk + + +class WhisperApp: + """ + WhisperApp runs the Whisper encoder and decoder to transcribe audio + represented as a mel spectrogram. It supports all model variants of + OpenAI Whisper. + """ + + def __init__(self, whisper: Whisper): + decoder = whisper.decoder + encoder = whisper.encoder + self.num_decoder_blocks = whisper.num_decoder_blocks + self.attention_dim = whisper.attention_dim + + # Wrap the torch Modules so they take np.ndarray inputs and return np.ndarray outputs + if isinstance(encoder, torch.nn.Module): + self.encoder = TorchNumpyAdapter(encoder) + else: + self.encoder = encoder + if isinstance(decoder, torch.nn.Module): + self.decoder = TorchNumpyAdapter(decoder) + else: + self.decoder = decoder + + def predict(self, *args, **kwargs): + # See transcribe. + return self.transcribe(*args, **kwargs) + + def transcribe(self, mel_input: np.ndarray) -> str: + """ + Transcribe audio to text. + + Parameters: + + - mel_input: of shape (1, 80, 3000). Mel spectrogram of 30s audio.
+ + Returns: + + - transcribed texts + """ + cross_attn_cache = self.encoder(mel_input) + # Start decoding + # coreml only takes float tensors + x = np.array([[TOKEN_SOT]]) + decoded_tokens = [TOKEN_SOT] + cache_tensor = np.array([], dtype=np.float32).reshape( + (1, 0, self.attention_dim) + ) + self_attn_cache = [cache_tensor] * 2 * self.num_decoder_blocks + + sample_len = 224 # max # of tokens to sample + sum_logprobs = 0 + for i in range(sample_len): + decoder_out = self.decoder(x, *cross_attn_cache, *self_attn_cache) + # logit has shape (1, decoded_len, 51864) + logits = decoder_out[0] + self_attn_cache = decoder_out[1:] # type: ignore + # logit has shape (51864,) + logits = logits[0, -1] # consider only the last token + + # Filters + # SuppressBlank + if i == 0: + logits[[TOKEN_EOT, TOKEN_BLANK]] = -np.inf + # SuppressTokens + logits[NON_SPEECH_TOKENS] = -np.inf + + logits, logprobs = apply_timestamp_rules(logits, decoded_tokens) + + if i == 0: + # detect no_speech + no_speech_prob = np.exp(logprobs[TOKEN_NO_SPEECH]) + if no_speech_prob > NO_SPEECH_THR: + break + + # temperature = 0 + next_token = np.argmax(logits) + if next_token == TOKEN_EOT: + break + + sum_logprobs += logprobs[next_token] + x = np.array([[next_token]]) + decoded_tokens.append(int(next_token)) + + tokenizer = whisper.decoding.get_tokenizer( + multilingual=False, language="en", task="transcribe" + ) + + text = tokenizer.decode(decoded_tokens[1:]) # remove TOKEN_SOT + return text.strip() + + +# Whisper constants +TOKEN_SOT = 50257 # Start of transcript +TOKEN_EOT = 50256 # end of transcript +TOKEN_BLANK = 220 # " " +TOKEN_NO_TIMESTAMP = 50362 +TOKEN_TIMESTAMP_BEGIN = 50363 +TOKEN_NO_SPEECH = 50361 + +# Above this prob we deem there's no speech in the audio +NO_SPEECH_THR = 0.6 + +# https://github.com/openai/whisper/blob/v20230314/whisper/decoding.py#L600 +NON_SPEECH_TOKENS = [ + 1, + 2, + 7, + 8, + 9, + 10, + 14, + 25, + 26, + 27, + 28, + 29, + 31, + 58, + 59, + 60, + 61, + 62, + 63, + 90, + 91, + 92, + 93, + 357, + 366, + 438, + 532, + 685, + 705, + 796, + 930, + 1058, + 1220, + 1267, + 1279, + 1303, + 1343, + 1377, + 1391, + 1635, + 1782, + 1875, + 2162, + 2361, + 2488, + 3467, + 4008, + 4211, + 4600, + 4808, + 5299, + 5855, + 6329, + 7203, + 9609, + 9959, + 10563, + 10786, + 11420, + 11709, + 11907, + 13163, + 13697, + 13700, + 14808, + 15306, + 16410, + 16791, + 17992, + 19203, + 19510, + 20724, + 22305, + 22935, + 27007, + 30109, + 30420, + 33409, + 34949, + 40283, + 40493, + 40549, + 47282, + 49146, + 50257, + 50357, + 50358, + 50359, + 50360, + 50361, +] + +SAMPLE_BEGIN = 1 # first token is TOKEN_SOT + +# https://github.com/openai/whisper/blob/v20230314/whisper/decoding.py#L545 +precision = 0.02 # in second +max_initial_timestamp = 1.0 # in second +max_initial_timestamp_index = int(max_initial_timestamp / precision) + + +def apply_timestamp_rules( + logits: np.ndarray, tokens: List[int] +) -> Tuple[np.ndarray, float]: + """ + When predicting timestamps, there are a few post processing rules / + heuristics to ensure well-formed timestamps. 
See in-line comments for details + + Args: + - logits: of shape (51864,) + + Returns: + + - modified logits + - log probability of modified logits (log(softmax(logits))) + """ + # Require producing timestamp + logits[TOKEN_NO_TIMESTAMP] = -np.inf + + # timestamps have to appear in pairs, except directly before EOT + seq = tokens[SAMPLE_BEGIN:] + last_was_timestamp = len(seq) >= 1 and seq[-1] >= TOKEN_TIMESTAMP_BEGIN + penultimate_was_timestamp = len(seq) < 2 or seq[-2] >= TOKEN_TIMESTAMP_BEGIN + if last_was_timestamp: + if penultimate_was_timestamp: # has to be non-timestamp + logits[TOKEN_TIMESTAMP_BEGIN:] = -np.inf + else: # cannot be normal text tokens + logits[:TOKEN_EOT] = -np.inf + + timestamps = [t for t in tokens if t >= TOKEN_TIMESTAMP_BEGIN] + if len(timestamps) > 0: + # timestamps shouldn't decrease; forbid timestamp tokens smaller than the last + # also force each segment to have a nonzero length, to prevent infinite looping + if last_was_timestamp and not penultimate_was_timestamp: + timestamp_last = timestamps[-1] + else: + timestamp_last = timestamps[-1] + 1 + logits[TOKEN_TIMESTAMP_BEGIN:timestamp_last] = -np.inf + + if len(tokens) == SAMPLE_BEGIN: + # suppress generating non-timestamp tokens at the beginning + logits[:TOKEN_TIMESTAMP_BEGIN] = -np.inf + + # apply the `max_initial_timestamp` option + last_allowed = TOKEN_TIMESTAMP_BEGIN + max_initial_timestamp_index + logits[(last_allowed + 1) :] = -np.inf + + # if sum of probability over timestamps is above any other token, sample timestamp + logprobs = scipy_special.log_softmax(logits) + timestamp_logprob = scipy_special.logsumexp(logprobs[TOKEN_TIMESTAMP_BEGIN:]) + max_text_token_logprob = logprobs[:TOKEN_TIMESTAMP_BEGIN].max() + if timestamp_logprob > max_text_token_logprob: + # Mask out all but timestamp tokens + logits[:TOKEN_TIMESTAMP_BEGIN] = -np.inf + + return logits, logprobs + + +def load_audio(mel_filter: np.ndarray, audio_path: str) -> np.ndarray: + """ + Load audio to a mel spectrogram. + """ + with np.load(audio_path) as f: + audio_np = f["audio"] + # Pad 30-seconds of silence to the input audio, for slicing + input_feature = log_mel_spectrogram(mel_filter, audio_np, pad_to_length=N_SAMPLES) + # input_feature has fixed shape [1, 80, 3000]. 80 is + # spectrogram feature dim, 3000 is due to Whisper only takes + # 30 seconds input represented as 10ms spectrogram segments + assert input_feature.shape == (1, 80, 3000) + return input_feature + + +def load_mel_filter(mel_filter_path: str) -> np.ndarray: + with np.load(mel_filter_path) as f: + return f["mel_80"] + + +# Adopted from https://github.com/openai/whisper/blob/main/whisper/audio.py +def log_mel_spectrogram( + mel_filter: np.ndarray, + audio_np: np.ndarray, + pad_to_length: int, +) -> np.ndarray: + """ + Compute the log-Mel spectrogram of + + Parameters + ---------- + audio_np: np.ndarray, shape = (*) + + pad_to_length: int + Add zero samples to the right till this length. No op if + len(audio) >= pad_to_length + + Returns + ------- + np.ndarray, shape = (1, 80, n_frames) + A Tensor that contains the Mel spectrogram. 
n_frames = 3000 for whisper + """ + audio = torch.from_numpy(audio_np) + assert isinstance(audio, torch.Tensor) + + if pad_to_length is not None: + padding = pad_to_length - len(audio) + if padding > 0: + audio = torch.nn.functional.pad(audio, (0, padding)) + window = torch.hann_window(N_FFT) + stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True) + magnitudes = stft[..., :-1].abs() ** 2 + + mel_spec = torch.from_numpy(mel_filter) @ magnitudes + + log_spec = torch.clamp(mel_spec, min=1e-10).log10() + log_spec = torch.maximum(log_spec, log_spec.max() - 8.0) + log_spec = (log_spec + 4.0) / 4.0 + return log_spec.unsqueeze(0).detach().float().numpy() diff --git a/qai_hub_models/models/whisper_asr/demo.py b/qai_hub_models/models/whisper_asr/demo.py new file mode 100644 index 00000000..9bb729c6 --- /dev/null +++ b/qai_hub_models/models/whisper_asr/demo.py @@ -0,0 +1,42 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models.whisper_asr.app import ( + WhisperApp, + load_audio, + load_mel_filter, +) +from qai_hub_models.models.whisper_asr.model import ( + MEL_FILTER_PATH, + MODEL_ASSET_VERSION, + MODEL_ID, + Whisper, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +TEST_AUDIO_PATH = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "audio/jfk.npz" +) + + +def main(): + # For other model sizes, see https://github.com/openai/whisper/blob/main/whisper/__init__.py#L17 + app = WhisperApp(Whisper.from_pretrained()) + TEST_AUDIO_PATH.fetch() + MEL_FILTER_PATH.fetch() + + # Load audio into mel spectrogram + mel_filter_path = MEL_FILTER_PATH.path() + mel_filter = load_mel_filter(mel_filter_path) + + audio_path = TEST_AUDIO_PATH.path() + mel_input = load_audio(mel_filter, audio_path) + + # Perform transcription + transcription = app.transcribe(mel_input) + print("Transcription:", transcription) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/whisper_asr/export.py b/qai_hub_models/models/whisper_asr/export.py new file mode 100644 index 00000000..122ea877 --- /dev/null +++ b/qai_hub_models/models/whisper_asr/export.py @@ -0,0 +1,221 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Mapping, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.whisper_asr import Model +from qai_hub_models.utils.args import ( + export_parser, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + +ALL_COMPONENTS = ["WhisperEncoder", "WhisperDecoder"] + + +def export_model( + device: str = "Samsung Galaxy S23", + components: Optional[List[str]] = None, + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Mapping[ + str, Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] +] | List[str]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + components: List of sub-components of the model that will be exported. + Each component is compiled and profiled separately. + Defaults to ALL_COMPONENTS if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` + + Returns: + A Mapping from component_name to a 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). 
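Because the encoder and decoder are exported as separate components, callers can restrict a run to a subset of `ALL_COMPONENTS`. A sketch of invoking this function from Python, with parameter names taken from the signature above; it assumes Qualcomm AI Hub access is configured (otherwise a list of messages is returned instead of job objects):

```python
from qai_hub_models.models.whisper_asr.export import export_model

# Compile only the encoder and skip on-device profiling/inference so the
# call returns as soon as the compiled asset is available for download.
jobs = export_model(
    device="Samsung Galaxy S23",
    components=["WhisperEncoder"],
    skip_profiling=True,
    skip_inferencing=True,
)
compile_job, profile_job, inference_job = jobs["WhisperEncoder"]
print("Compile job:", compile_job)  # profile_job and inference_job are None here
```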
+ """ + model_name = "whisper_asr" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + component_arg = components + components = components or ALL_COMPONENTS + for component in components: + if component not in ALL_COMPONENTS: + raise ValueError(f"Invalid component {component}.") + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "whisper_asr", + "Whisper-Base", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + component_arg, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + components_dict = {} + if "WhisperEncoder" in components: + components_dict["WhisperEncoder"] = model.encoder + if "WhisperDecoder" in components: + components_dict["WhisperDecoder"] = model.decoder + + compile_jobs = {} + for component_name, component in components_dict.items(): + # Trace the model + input_spec = component.get_input_spec() + source_model = torch.jit.trace(component, make_torch_inputs(input_spec)) + + # 2. Compile the models to an on-device asset + model_compile_options = component.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input audio" + ) + print(f"Optimizing model {component_name} to run on-device.") + compile_jobs[component_name] = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=f"{component_name}", + options=model_compile_options, + ) + + # 3. Profile the model assets on real devices + profile_jobs = {} + if not skip_profiling: + for component_name in components: + print(f"Profiling model {component_name} on a hosted device.") + profile_jobs[component_name] = hub.submit_profile_job( + model=compile_jobs[component_name].get_target_model(), + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_jobs = {} + if not skip_inferencing: + for component_name in components: + print( + f"Running inference for {component_name} on a hosted device with example inputs." + ) + sample_inputs = components_dict[component_name].sample_inputs() + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "audio", sample_inputs, target_runtime + ) + inference_jobs[component_name] = hub.submit_inference_job( + model=compile_jobs[component_name].get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=f"{component_name}", + options=profile_options, + ) + + # 5. Download the model assets to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + for component_name, compile_job in compile_jobs.items(): + target_model = compile_job.get_target_model() + target_model.download( + str(output_path / f"{model_name}_{component_name}.tflite") + ) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + for component_name in components: + profile_job = profile_jobs[component_name] + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + for component_name in components: + inference_job = inference_jobs[component_name] + sample_inputs = components_dict[component_name].sample_inputs() + torch_out = torch_inference(components_dict[component_name], sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + return { + component_name: ( + compile_jobs[component_name], + profile_jobs.get(component_name, None), + inference_jobs.get(component_name, None), + ) + for component_name in components + } + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser( + model_cls=Model, components=ALL_COMPONENTS, supports_qnn=False + ) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/whisper_asr/info.yaml b/qai_hub_models/models/whisper_asr/info.yaml new file mode 100644 index 00000000..cf001a33 --- /dev/null +++ b/qai_hub_models/models/whisper_asr/info.yaml @@ -0,0 +1,38 @@ +name: Whisper-Base +# id must match with the model dir name in qai_hub_models +id: whisper_asr +status: public +headline: Automatic speech recognition (ASR) model for multilingual transcription + as well as translation. +domain: Audio +description: State-of-art model encoder-decoder transformer. The encoder takes an + audio chunk (around 30 second) converted to a log-Mel spectrogram. The decoder + predicts the corresponding text caption intermixed with special tokens that can + be used to direct the single model to perform various speech tasks. +use_case: Speech Recognition +tags: + - foundation +research_paper: https://cdn.openai.com/papers/whisper.pdf +research_paper_title: Robust Speech Recognition via Large-Scale Weak Supervision +license: https://github.com/openai/whisper/blob/main/LICENSE +source_repo: https://github.com/openai/whisper/tree/main +technical_details: + Model checkpoint: Tiny En + Input resolution: 80x3000 + Number of parameters (WhisperEncoder): 9.39M + Model size (WhisperEncoder): 35.9 MB + Number of parameters (WhisperDecoder): 28.2M + Model size (WhisperDecoder): 108 MB +applicable_scenarios: + - Smart Home + - Accessibility +related_models: + - huggingface_wavlm_base_plus +form_factors: + - Phone + - Tablet + - IoT +has_static_banner: yes +has_animated_banner: yes +license_type: mit +dataset: [] diff --git a/qai_hub_models/models/whisper_asr/model.py b/qai_hub_models/models/whisper_asr/model.py new file mode 100644 index 00000000..6ed6a7be --- /dev/null +++ b/qai_hub_models/models/whisper_asr/model.py @@ -0,0 +1,347 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Any, Callable, Dict, List, Optional, Tuple + +import torch +import whisper # type: ignore + +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset +from qai_hub_models.utils.base_model import BaseModel, CollectionModel +from qai_hub_models.utils.input_spec import InputSpec + +MAX_DECODE_LEN = 448 + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +MEL_FILTER_PATH = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "openai_assets/mel_filters.npz" +) + + +class Whisper(CollectionModel): + def __init__( + self, + encoder: Callable[[torch.Tensor], List[torch.Tensor]], + decoder: Callable[..., Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]], + num_decoder_blocks: int, + attention_dim: int, + ): + self.encoder = encoder + self.decoder = decoder + self.num_decoder_blocks = num_decoder_blocks + self.attention_dim = attention_dim + + @classmethod + def from_pretrained(cls, model: str = "tiny.en"): + # For other model sizes, see https://github.com/openai/whisper/blob/main/whisper/__init__.py#L17 + return cls.from_source_model(whisper.load_model(model)) + + @classmethod + def from_source_model(cls, whisper_model: Any): + encoder = WhisperEncoderInf(whisper_model) + decoder = WhisperDecoderInf(whisper_model.decoder) + num_decoder_blocks = len(decoder.blocks) + attention_dim = decoder.attention_dim + return cls(encoder, decoder, num_decoder_blocks, attention_dim) # type: ignore + + +class WhisperEncoderInf(BaseModel): + """ + WhisperEncoder optimized for export and inference. + + It takes audio input (mel) and directly produce cross attention + kv-cache. + """ + + def __init__(self, model: whisper.model.Whisper): + super().__init__() + self.model = model + + def forward(self, audio: torch.Tensor) -> List[torch.Tensor]: + # Return 2 * self.num_blocks tensors (k, v for each block) + encoder_out = self.model.encoder(audio) + res = [] + for residual_block in self.model.decoder.blocks: + res.append(residual_block.cross_attn.key(encoder_out)) + res.append(residual_block.cross_attn.value(encoder_out)) + return res + + def get_input_spec(self) -> InputSpec: + """ + Returns the input specification (name -> (shape, type). This can be + used to submit profiling job on Qualcomm AI Hub. + """ + return dict(audio=((1, 80, 3000), "float32")) + + @classmethod + def from_pretrained(cls): + return Whisper.from_pretrained().encoder + + +class WhisperDecoderInf(BaseModel): + """ + whisper.model.TextDecoder optimized for export and inference: + + Wraps `whisper.model.TextDecoder` to facilitate export: + + 1. kv cache inputs are individual tensors instead of a list of tensors + 2. 
kv cache inputs are required, not optional + """ + + def __init__(self, model: whisper.model.TextDecoder): + super().__init__() + assert isinstance(model, whisper.model.TextDecoder) + + # Wraps `ResidualAttentionBlock` in + # `ResidualAttentionBlockWrapper` + self.blocks = torch.nn.ModuleList( + [ResidualAttentionBlockWrapper(b) for b in model.blocks] + ) + for m in ["token_embedding", "ln"]: + self.add_module(m, getattr(model, m)) + for p in ["positional_embedding"]: + self.register_parameter(p, getattr(model, p)) + + @property + def attention_dim(self): + return self.blocks[0].attn_ln.weight.shape[0] + + def forward(self, x: torch.Tensor, *kv_cache_args, **kv_cache_kwargs): + """ + Args: + + - x: torch.LongTensor, shape = (batch_size, <= n_ctx) + the text tokens + + - kv_cache_args: Tuple of length 4 * num_decoder_blocks. Elements are: + + b{i}_cross_attn_k: [1, 1500, attn_dim] + b{i}_cross_attn_v: [1, 1500, attn_dim] + + for i = 0, ..., num_blocks + + followed by + + b{i}_self_attn_k: [1, decoded_len, attn_dim] + b{i}_self_attn_v: [1, decoded_len, attn_dim] + + for i = 0, ..., num_blocks + + Returns: + + - logits: of shape [1, 1, 51864] + - b0_self_attn_k, b0_self_attn_v, b1_self_attn_k, ...: Updated self attn cache. + 2*num_decoder_blocks + """ + if not kv_cache_args: + kv_cache_args = list(kv_cache_kwargs.values()) + assert isinstance(self.token_embedding, torch.nn.Module) # for mypy + assert isinstance(self.ln, torch.nn.Module) # for mypy + assert isinstance(self.positional_embedding, torch.nn.Parameter) # for mypy + # Set up kv_cache + kv_cache = {} # torch.nn.Module -> torch.Tensor + num_blocks = len(self.blocks) + for i, block in enumerate(self.blocks): + kv_cache.update( + { + block.attn.key: kv_cache_args[2 * num_blocks + i * 2], + block.attn.value: kv_cache_args[2 * num_blocks + i * 2 + 1], + block.cross_attn.key: kv_cache_args[i * 2], + block.cross_attn.value: kv_cache_args[i * 2 + 1], + } + ) + offset = next(iter(kv_cache.values())).shape[1] if kv_cache else 0 + x = ( + self.token_embedding(x) + + self.positional_embedding[offset : offset + x.shape[-1]] + ) + + # x shape: (1, 1, 384) + kv_cache_new = [] + for block in self.blocks: + x, k_cache, v_cache = block(x, kv_cache=kv_cache) + kv_cache_new.append(k_cache.float()) + kv_cache_new.append(v_cache.float()) + + x = self.ln(x) + logits = ( + x + @ torch.transpose( + self.token_embedding.weight.to(x.dtype), 0, 1 # type: ignore + ) + ).float() + + # shape: [1, 1, 51864] + return (logits,) + tuple(kv_cache_new) + + def get_input_spec(self) -> InputSpec: + """ + Returns the input specification (name -> (shape, type). This can be + used to submit profiling job on Qualcomm AI Hub. + """ + specs = dict(x=((1, 1), "int32")) + for i in range(len(self.blocks)): + specs[f"b{i}_cross_attn_k"] = ((1, 1500, self.attention_dim), "float32") + specs[f"b{i}_cross_attn_v"] = ((1, 1500, self.attention_dim), "float32") + + # Use mean length for profiling + mean_decode_len = MAX_DECODE_LEN // 2 + + for i in range(len(self.blocks)): + specs[f"b{i}_self_attn_k"] = ( + (1, mean_decode_len, self.attention_dim), + "float32", + ) + specs[f"b{i}_self_attn_v"] = ( + (1, mean_decode_len, self.attention_dim), + "float32", + ) + + return specs + + @classmethod + def from_pretrained(cls): + return Whisper.from_pretrained().decoder + + +class MHAWrapper(torch.nn.Module): + """ + Wrapper around whisper.model.MultiHeadAttention to leverage kv cache for + efficient inference. 
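The argument ordering documented in `WhisperDecoderInf.forward` (all cross-attention k/v first, then all self-attention k/v) is easiest to see in a small driver. A sketch, assuming the `tiny.en` checkpoint (4 decoder blocks, attention dim 384, vocabulary size 51864, matching the constants used elsewhere in this diff):

```python
import torch
import whisper

from qai_hub_models.models.whisper_asr.model import (
    WhisperDecoderInf,
    WhisperEncoderInf,
)

model = whisper.load_model("tiny.en")
encoder = WhisperEncoderInf(model)
decoder = WhisperDecoderInf(model.decoder)

mel = torch.rand(1, 80, 3000)  # random stand-in for a real mel spectrogram
with torch.no_grad():
    # 2 tensors (k, v) per block -> 8 cross-attention cache entries.
    cross_attn_cache = encoder(mel)
    # Self-attention caches start empty and grow by one step per decode.
    empty = torch.zeros(1, 0, 384)
    self_attn_cache = [empty] * (2 * 4)

    tokens = torch.tensor([[50257]])  # start-of-transcript token
    logits, *self_attn_cache = decoder(tokens, *cross_attn_cache, *self_attn_cache)
print(logits.shape)  # expected: torch.Size([1, 1, 51864])
```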
The original whisper.model.MultiHeadAttention doesn't + returns the updated kv cache but relies on pytorch hook which + cannot be exported for on-device inference. This wrapper fixes that. + + If attn_type == "self_attention", the kv cache is updated before they are returned. + + If attn_type == "cross_attention", the kv cache is returned without any update. + + Note that unlike whisper.model.MultiHeadAttention, this wrapper is + optimized for inference so it doesn't take mask as an input. + """ + + def __init__(self, model: whisper.model.MultiHeadAttention, attn_type: str): + """ + attn_type: one of {"self_attention", "cross_attention"} + """ + super().__init__() + assert isinstance(model, whisper.model.MultiHeadAttention) + self.attn_type = attn_type + self.n_head = model.n_head + for m in ["query", "key", "value", "out"]: + self.add_module(m, getattr(model, m)) + + def forward( + self, + x: torch.Tensor, + kv_cache: Dict[torch.nn.Module, torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Args: + + - x: shape [1, 1, attention_dim]. Input feature. + + - kv_cache: 4 * num_decoder_blocks entries representing self attention + and cross attention from all attention blocks. Each entry of shape + [1, decoded_len, attention_dim]. We'd only use cache relevant to this + particular attention layer and ignore other entries in the dict. + + Returns: + + - x_out: attention output + + - updated k, v cache: of shape [1, decoded_len+1, attention_dim] + """ + assert isinstance(self.query, torch.nn.Module) # for mypy + assert isinstance(self.key, torch.nn.Module) # for mypy + assert isinstance(self.value, torch.nn.Module) # for mypy + assert isinstance(self.out, torch.nn.Module) # for mypy + q = self.query(x) + + if self.attn_type == "self_attention": + k_cache = kv_cache[self.key] + v_cache = kv_cache[self.value] + k = self.key(x) + v = self.value(x) + k = torch.cat([k_cache, k], dim=1) + v = torch.cat([v_cache, v], dim=1) + else: # cross_attention + k, v = kv_cache[self.key], kv_cache[self.value] + + wv = qkv_attention(q, k, v, self.n_head) + # Return updated kv cache + return self.out(wv), k.detach(), v.detach() + + +def qkv_attention( + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + n_head: int, + mask: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Adapted from whisper.model.MultiHeadAttention.qkv_attention + """ + n_batch, n_ctx, n_state = q.shape + scale = (n_state // n_head) ** -0.25 + q = q.view(*q.shape[:2], n_head, -1).permute(0, 2, 1, 3) * scale + k = k.view(*k.shape[:2], n_head, -1).permute(0, 2, 3, 1) * scale + v = v.view(*v.shape[:2], n_head, -1).permute(0, 2, 1, 3) + + qk = q @ k + if mask is not None: + qk = qk + mask[:n_ctx, :n_ctx] + qk = qk.float() + + w = torch.nn.functional.softmax(qk, dim=-1).to(q.dtype) + return (w @ v).permute(0, 2, 1, 3).flatten(start_dim=2) + + +class ResidualAttentionBlockWrapper(torch.nn.Module): + """ + Wrapper around whisper.model.ResidualAttentionBlock to leverage kv cache + for efficient inference. The original whisper.model.ResidiualAttentionBlock + doesn't returns the updated kv cache but relies on pytorch hook which + cannot be exported for on-device inference. This wrapper fixes that. 
+ """ + + def __init__(self, model: whisper.model.ResidualAttentionBlock): + super().__init__() + assert isinstance(model, whisper.model.ResidualAttentionBlock) + # Wraps `MultiheadAttention` to `MultiheadAttentionWrapper` + self.attn = MHAWrapper(model.attn, "self_attention") + self.cross_attn = MHAWrapper(model.cross_attn, "cross_attention") + for m in ["attn_ln", "cross_attn_ln", "mlp", "mlp_ln"]: + self.add_module(m, getattr(model, m)) + + def forward( + self, + x: torch.Tensor, + kv_cache: Dict[torch.nn.Module, torch.Tensor], + ): + """ + Args: Same as MHAWrapper + Returns: Same as MHAWrapper + """ + # Get updated self attention kv cache + assert isinstance(self.attn, torch.nn.Module) # for mypy + assert isinstance(self.attn_ln, torch.nn.Module) # for mypy + assert isinstance(self.cross_attn_ln, torch.nn.Module) # for mypy + assert isinstance(self.cross_attn, torch.nn.Module) # for mypy + assert isinstance(self.mlp, torch.nn.Module) # for mypy + assert isinstance(self.mlp_ln, torch.nn.Module) # for mypy + x_attn, k_cache, v_cache = self.attn(self.attn_ln(x), kv_cache=kv_cache) + x = x + x_attn + if self.cross_attn: + # Ignore cross attn kv cache which is constant (pre-computed in + # `WhisperCrossAttnKVCacheTorch`) + x_cross_attn, _, _ = self.cross_attn( + self.cross_attn_ln(x), kv_cache=kv_cache + ) + x = x + x_cross_attn + x = x + self.mlp(self.mlp_ln(x)) + return x, k_cache, v_cache diff --git a/qai_hub_models/models/whisper_asr/perf.yaml b/qai_hub_models/models/whisper_asr/perf.yaml new file mode 100644 index 00000000..f8e81783 --- /dev/null +++ b/qai_hub_models/models/whisper_asr/perf.yaml @@ -0,0 +1,107 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: WhisperEncoder + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 68918.0 + throughput: 14.50999738820047 + estimated_peak_memory_range: + min: 18612224 + max: 67240168 + primary_compute_unit: GPU + precision: fp16 + layer_info: + layers_on_npu: 0 + layers_on_gpu: 216 + layers_on_cpu: 0 + total_layers: 216 + job_id: j1p3z16z5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:31:20.996693Z' +- name: WhisperDecoder + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 7924.0 + throughput: 126.19888944977284 + estimated_peak_memory_range: + min: 3014656 + max: 5380072 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 293 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 293 + job_id: jwgoln8dg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + 
+      estimated_peak_memory_range:
+        min: 0
+        max: 0
+      primary_compute_unit: 'null'
+      precision: 'null'
+      layer_info:
+        layers_on_npu: 0
+        layers_on_gpu: 0
+        layers_on_cpu: 0
+        total_layers: 0
+      job_id: ''
+      job_status: Skipped
+    reference_device_info:
+      name: Samsung Galaxy S23 Ultra
+      os: '13'
+      form_factor: Phone
+      os_name: Android
+      manufacturer: Samsung
+      chipset: Snapdragon® 8 Gen 2
+      timestamp: '2024-02-21T16:35:36.297844Z'
diff --git a/qai_hub_models/models/whisper_asr/requirements.txt b/qai_hub_models/models/whisper_asr/requirements.txt
new file mode 100644
index 00000000..75b1cf12
--- /dev/null
+++ b/qai_hub_models/models/whisper_asr/requirements.txt
@@ -0,0 +1,2 @@
+openai-whisper==20230314
+scipy
diff --git a/qai_hub_models/models/whisper_asr/test.py b/qai_hub_models/models/whisper_asr/test.py
new file mode 100644
index 00000000..7b021e37
--- /dev/null
+++ b/qai_hub_models/models/whisper_asr/test.py
@@ -0,0 +1,83 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+import numpy as np
+import pytest
+import torch
+import whisper
+
+from qai_hub_models.models.whisper_asr.app import (
+    WhisperApp,
+    load_audio,
+    load_mel_filter,
+)
+from qai_hub_models.models.whisper_asr.demo import TEST_AUDIO_PATH
+from qai_hub_models.models.whisper_asr.demo import main as demo_main
+from qai_hub_models.models.whisper_asr.model import (
+    MEL_FILTER_PATH,
+    Whisper,
+    WhisperDecoderInf,
+    WhisperEncoderInf,
+)
+
+
+@pytest.fixture(scope="session")
+def mel_input() -> np.ndarray:
+    mel_filter_path = MEL_FILTER_PATH.fetch()
+    mel_filter = load_mel_filter(mel_filter_path)
+    audio_path = TEST_AUDIO_PATH.fetch()
+    return load_audio(mel_filter, audio_path)
+
+
+def test_numerics(mel_input):
+    """
+    Test that the wrapper classes predict logits (without post-processing) that
+    match the original model's.
+ """ + # OpenAI + with torch.no_grad(): + mel_input = torch.from_numpy(mel_input) + model = whisper.load_model("tiny.en") + audio_features = model.encoder(mel_input) + + tokens = torch.LongTensor([[50257]]) + logits_orig = model.decoder(tokens, audio_features).detach().numpy() + + # QAIHM + encoder = WhisperEncoderInf(model) + decoder = WhisperDecoderInf(model.decoder) + + cross_attn_cache = encoder(mel_input) + cache_tensor = np.array([], dtype=np.float32).reshape((1, 0, 384)) + self_attn_cache = [torch.from_numpy(cache_tensor)] * 2 * 4 + + decoder_out = decoder(tokens, *cross_attn_cache, *self_attn_cache) + logits = decoder_out[0].detach().numpy() + + np.testing.assert_allclose(logits_orig, logits) + + +def test_transcribe(mel_input): + """ + Test that pytorch wrappers produces end to end transcription results that + matches with the original model + """ + # Run inference with OpenAI whisper + with torch.no_grad(): + model = whisper.load_model("tiny.en") + options = whisper.DecodingOptions( + language="en", without_timestamps=False, fp16=False + ) + results = model.decode(torch.from_numpy(mel_input).float(), options) + text_orig = results[0].text + + app = WhisperApp(Whisper.from_source_model(model)) + + # Perform transcription + transcription = app.transcribe(mel_input) + assert transcription == text_orig + + +def test_demo(): + demo_main() diff --git a/qai_hub_models/models/wideresnet50/README.md b/qai_hub_models/models/wideresnet50/README.md new file mode 100644 index 00000000..a723b252 --- /dev/null +++ b/qai_hub_models/models/wideresnet50/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [WideResNet50: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/wideresnet50) + +WideResNet50 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases. + +This is based on the implementation of WideResNet50 found +[here](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/wideresnet50). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.wideresnet50.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.wideresnet50.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. 
+- The license for the original implementation of WideResNet50 can be found + [here](https://github.com/pytorch/vision/blob/main/LICENSE). + + +## References +* [Wide Residual Networks](https://arxiv.org/abs/1605.07146) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py) diff --git a/qai_hub_models/models/wideresnet50/__init__.py b/qai_hub_models/models/wideresnet50/__init__.py new file mode 100644 index 00000000..fc06006f --- /dev/null +++ b/qai_hub_models/models/wideresnet50/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import WideResNet50 as Model # noqa: F401 diff --git a/qai_hub_models/models/wideresnet50/demo.py b/qai_hub_models/models/wideresnet50/demo.py new file mode 100644 index 00000000..e0fc917e --- /dev/null +++ b/qai_hub_models/models/wideresnet50/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.wideresnet50.model import WideResNet50 + + +def main(is_test: bool = False): + imagenet_demo(WideResNet50, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/wideresnet50/export.py b/qai_hub_models/models/wideresnet50/export.py new file mode 100644 index 00000000..a43ffe10 --- /dev/null +++ b/qai_hub_models/models/wideresnet50/export.py @@ -0,0 +1,189 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
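+#
+# Illustrative usage sketch (not part of the auto-generated module): export_model()
+# below can also be driven from Python instead of the CLI. The device name and
+# option values here are assumptions based on the defaults documented in this file,
+# so the call is shown commented out:
+#
+#   from qai_hub_models.models.wideresnet50.export import export_model
+#
+#   compile_job, profile_job, inference_job = export_model(
+#       device="Samsung Galaxy S23",   # any device listed by hub.get_devices()
+#       skip_inferencing=True,         # compile + profile only
+#       output_dir="build/wideresnet50",
+#   )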
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.wideresnet50 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "wideresnet50" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "wideresnet50", + "WideResNet50", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/wideresnet50/info.yaml b/qai_hub_models/models/wideresnet50/info.yaml new file mode 100644 index 00000000..3e10e96e --- /dev/null +++ b/qai_hub_models/models/wideresnet50/info.yaml @@ -0,0 +1,40 @@ +name: WideResNet50 +# id must match with the model dir name in qai_hub_models +id: wideresnet50 +status: public +headline: Imagenet classifier and general purpose backbone. +domain: Computer Vision +description: WideResNet50 is a machine learning model that can classify images from + the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. 
+use_case: Image Classification +tags: + - backbone +research_paper: https://arxiv.org/abs/1605.07146 +research_paper_title: Wide Residual Networks +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 68.8M + Model size: 263 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: no +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/wideresnet50/model.py b/qai_hub_models/models/wideresnet50/model.py new file mode 100644 index 00000000..f8d7d130 --- /dev/null +++ b/qai_hub_models/models/wideresnet50/model.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torchvision.models as tv_models + +from qai_hub_models.models._shared.imagenet_classifier.model import ImagenetClassifier + +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "IMAGENET1K_V1" + + +class WideResNet50(ImagenetClassifier): + @classmethod + def from_pretrained(cls, weights: str = DEFAULT_WEIGHTS) -> ImagenetClassifier: + net = tv_models.wide_resnet50_2(weights=weights) + return cls(net) diff --git a/qai_hub_models/models/wideresnet50/perf.yaml b/qai_hub_models/models/wideresnet50/perf.yaml new file mode 100644 index 00000000..9c9625ba --- /dev/null +++ b/qai_hub_models/models/wideresnet50/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: WideResNet50 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 4393.0 + throughput: 227.6348736626451 + estimated_peak_memory_range: + min: 24576 + max: 1816072 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 77 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 77 + job_id: jz57el9rp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 4605.0 + throughput: 217.15526601520088 + estimated_peak_memory_range: + min: 0 + max: 313348064 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 125 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 125 + job_id: jqp4yd3lp + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:11:06.129828Z' diff --git a/qai_hub_models/models/wideresnet50/test.py b/qai_hub_models/models/wideresnet50/test.py new file mode 100644 index 00000000..09976a8e --- 
/dev/null
+++ b/qai_hub_models/models/wideresnet50/test.py
@@ -0,0 +1,23 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.imagenet_classifier.test_utils import (
+    run_imagenet_classifier_test,
+    run_imagenet_classifier_trace_test,
+)
+from qai_hub_models.models.wideresnet50.demo import main as demo_main
+from qai_hub_models.models.wideresnet50.model import MODEL_ID, WideResNet50
+
+
+def test_task():
+    run_imagenet_classifier_test(WideResNet50.from_pretrained(), MODEL_ID)
+
+
+def test_trace():
+    run_imagenet_classifier_trace_test(WideResNet50.from_pretrained())
+
+
+def test_demo():
+    # Verify demo does not crash
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/wideresnet50_quantized/README.md b/qai_hub_models/models/wideresnet50_quantized/README.md
new file mode 100644
index 00000000..a5ac6c61
--- /dev/null
+++ b/qai_hub_models/models/wideresnet50_quantized/README.md
@@ -0,0 +1,50 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [WideResNet50-Quantized: Imagenet classifier and general purpose backbone](https://aihub.qualcomm.com/models/wideresnet50_quantized)
+
+WideResNet50 is a machine learning model that can classify images from the Imagenet dataset. It can also be used as a backbone in building more complex models for specific use cases.
+
+This is based on the implementation of WideResNet50-Quantized found
+[here](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py). This repository contains scripts for optimized on-device
+export suitable for running on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/wideresnet50_quantized).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.wideresnet50_quantized.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model, including pre/post-processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for further usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.wideresnet50_quantized.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to the deployment instructions for Qualcomm® AI Hub.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of WideResNet50-Quantized can be found
+  [here](https://github.com/pytorch/vision/blob/main/LICENSE).
+ + +## References +* [Wide Residual Networks](https://arxiv.org/abs/1605.07146) +* [Source Model Implementation](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py) diff --git a/qai_hub_models/models/wideresnet50_quantized/__init__.py b/qai_hub_models/models/wideresnet50_quantized/__init__.py new file mode 100644 index 00000000..f2d4a232 --- /dev/null +++ b/qai_hub_models/models/wideresnet50_quantized/__init__.py @@ -0,0 +1,11 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.app import ( # noqa: F401 + ImagenetClassifierApp as App, +) +from qai_hub_models.models.wideresnet50_quantized.model import MODEL_ID # noqa: F401 +from qai_hub_models.models.wideresnet50_quantized.model import ( # noqa: F401 + WideResNet50Quantizable as Model, +) diff --git a/qai_hub_models/models/wideresnet50_quantized/demo.py b/qai_hub_models/models/wideresnet50_quantized/demo.py new file mode 100644 index 00000000..c124c6fa --- /dev/null +++ b/qai_hub_models/models/wideresnet50_quantized/demo.py @@ -0,0 +1,14 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.imagenet_classifier.demo import imagenet_demo +from qai_hub_models.models.wideresnet50_quantized.model import WideResNet50Quantizable + + +def main(is_test: bool = False): + imagenet_demo(WideResNet50Quantizable, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/wideresnet50_quantized/export.py b/qai_hub_models/models/wideresnet50_quantized/export.py new file mode 100644 index 00000000..77af46f4 --- /dev/null +++ b/qai_hub_models/models/wideresnet50_quantized/export.py @@ -0,0 +1,199 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
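+#
+# Illustrative usage sketch (not part of the auto-generated module; the option
+# values are assumptions based on the parameters documented below). For this
+# quantized model, targeting a runtime other than TFLITE additionally pulls
+# calibration data via model.get_calibration_data() before the compile job is
+# submitted, so the call is shown commented out:
+#
+#   from qai_hub_models.models.wideresnet50_quantized.export import export_model
+#
+#   export_model(
+#       device="Samsung Galaxy S23",
+#       dst_runtime="QNN",        # "TFLITE" (the default) skips calibration data
+#       skip_inferencing=True,
+#   )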
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.wideresnet50_quantized import Model +from qai_hub_models.utils.args import ( + TargetRuntime, + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) +from qai_hub_models.utils.qnn_helpers import get_qnn_inputs + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "wideresnet50_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "wideresnet50_quantized", + "WideResNet50-Quantized", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = model.convert_to_hub_source_model( + target_runtime, output_path, input_spec + ) + if target_runtime == TargetRuntime.TFLITE: + quant_calibration_data = None + else: + quant_calibration_data = model.get_calibration_data(target_runtime, input_spec) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image_tensor" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + calibration_data=quant_calibration_data, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + hub_inputs = sample_inputs + if target_runtime == TargetRuntime.QNN: + hub_inputs = get_qnn_inputs(compile_job, sample_inputs) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image_tensor", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics(inference_job, inference_result, torch_out) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/wideresnet50_quantized/info.yaml b/qai_hub_models/models/wideresnet50_quantized/info.yaml new file mode 100644 index 00000000..e1b9b755 --- /dev/null +++ b/qai_hub_models/models/wideresnet50_quantized/info.yaml @@ -0,0 +1,41 @@ +name: WideResNet50-Quantized +# id must match with the model dir name in qai_hub_models +id: wideresnet50_quantized +status: public +headline: Imagenet classifier and general purpose backbone. 
+domain: Computer Vision +description: WideResNet50 is a machine learning model that can classify images from + the Imagenet dataset. It can also be used as a backbone in building more complex + models for specific use cases. +use_case: Image Classification +tags: + - backbone + - quantized +research_paper: https://arxiv.org/abs/1605.07146 +research_paper_title: Wide Residual Networks +license: https://github.com/pytorch/vision/blob/main/LICENSE +source_repo: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py +technical_details: + Model checkpoint: Imagenet + Input resolution: 224x224 + Number of parameters: 68.9M + Model size: 66.6 MB +applicable_scenarios: + - Medical Imaging + - Anomaly Detection + - Inventory Management +related_models: + - mobilenet_v2 + - densenet121 + - googlenet +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: no +license_type: bsd-3-clause +dataset: + - imagenet-1k + - imagenet-22k diff --git a/qai_hub_models/models/wideresnet50_quantized/model.py b/qai_hub_models/models/wideresnet50_quantized/model.py new file mode 100644 index 00000000..86bdd679 --- /dev/null +++ b/qai_hub_models/models/wideresnet50_quantized/model.py @@ -0,0 +1,76 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +# isort: off +# This verifies aimet is installed, and this must be included first. +from qai_hub_models.utils.quantization_aimet import ( + AIMETQuantizableMixin, +) + +# isort: on + +import torch +from aimet_torch.cross_layer_equalization import equalize_model +from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim + +from qai_hub_models.models.wideresnet50.model import WideResNet50 +from qai_hub_models.utils.aimet.config_loader import get_per_channel_aimet_config +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 1 +DEFAULT_ENCODINGS = "wideresnet50_quantized_encodings.json" + + +class WideResNet50Quantizable(AIMETQuantizableMixin, WideResNet50): + """WideResNet50 with post train quantization support. + + Supports only 8 bit weights and activations, and only loads pre-quantized checkpoints. + Support for quantizing using your own weights & data will come at a later date.""" + + def __init__( + self, + sim_model: QuantizationSimModel, + ) -> None: + WideResNet50.__init__(self, sim_model.model) + AIMETQuantizableMixin.__init__( + self, sim_model, needs_onnx_direct_aimet_export=True + ) + + @classmethod + def from_pretrained( + cls, + aimet_encodings: str | None = "DEFAULT", + ) -> "WideResNet50Quantizable": + """ + Parameters: + aimet_encodings: + if "DEFAULT": Loads the model with aimet encodings calibrated on imagenette. + elif None: Doesn't load any encodings. Used when computing encodings. + else: Interprets as a filepath and loads the encodings stored there. 
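+
+        Example (illustrative; the path below is hypothetical):
+            WideResNet50Quantizable.from_pretrained(
+                aimet_encodings="/path/to/my_encodings.json"
+            )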
+ """ + model = WideResNet50.from_pretrained() + input_shape = model.get_input_spec()["image_tensor"][0] + + equalize_model(model, input_shape) + sim = QuantizationSimModel( + model.net, + quant_scheme="tf_enhanced", + default_param_bw=8, + default_output_bw=8, + config_file=get_per_channel_aimet_config(), + dummy_input=torch.rand(input_shape), + ) + + if aimet_encodings: + if aimet_encodings == "DEFAULT": + aimet_encodings = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_ENCODINGS + ).fetch() + load_encodings_to_sim(sim, aimet_encodings) + + sim.model.eval() + return cls(sim) diff --git a/qai_hub_models/models/wideresnet50_quantized/perf.yaml b/qai_hub_models/models/wideresnet50_quantized/perf.yaml new file mode 100644 index 00000000..b9121909 --- /dev/null +++ b/qai_hub_models/models/wideresnet50_quantized/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: WideResNet50-Quantized + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 1833.0 + throughput: 545.5537370430987 + estimated_peak_memory_range: + min: 28672 + max: 1710680 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 80 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 80 + job_id: jz5wl34jp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 1756.0 + throughput: 569.4760820045558 + estimated_peak_memory_range: + min: 520192 + max: 152789048 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 78 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 78 + job_id: jmg9zydvp + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:11:48.964511Z' diff --git a/qai_hub_models/models/wideresnet50_quantized/test.py b/qai_hub_models/models/wideresnet50_quantized/test.py new file mode 100644 index 00000000..cb4bac98 --- /dev/null +++ b/qai_hub_models/models/wideresnet50_quantized/test.py @@ -0,0 +1,41 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from qai_hub_models.models._shared.imagenet_classifier.test_utils import (
+    run_imagenet_classifier_test,
+    run_imagenet_classifier_trace_test,
+)
+from qai_hub_models.models.wideresnet50_quantized.demo import main as demo_main
+from qai_hub_models.models.wideresnet50_quantized.model import (
+    MODEL_ASSET_VERSION,
+    MODEL_ID,
+    WideResNet50Quantizable,
+)
+
+
+def test_task():
+    run_imagenet_classifier_test(
+        WideResNet50Quantizable.from_pretrained(),
+        MODEL_ID,
+        probability_threshold=0.4,
+        asset_version=MODEL_ASSET_VERSION,
+        diff_tol=0.005,
+        rtol=0.02,
+        atol=0.2,
+    )
+
+
+def test_trace():
+    run_imagenet_classifier_trace_test(
+        WideResNet50Quantizable.from_pretrained(),
+        diff_tol=0.01,
+        rtol=0.02,
+        atol=0.2,
+        is_quantized=True,
+    )
+
+
+def test_demo():
+    # Verify demo does not crash
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/xlsr/README.md b/qai_hub_models/models/xlsr/README.md
new file mode 100644
index 00000000..742df1d9
--- /dev/null
+++ b/qai_hub_models/models/xlsr/README.md
@@ -0,0 +1,50 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [XLSR: Upscale images in real time](https://aihub.qualcomm.com/models/xlsr)
+
+XLSR is designed for lightweight real-time upscaling of images.
+
+This is based on the implementation of XLSR found
+[here](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/xlsr). This repository contains scripts for optimized on-device
+export suitable for running on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/xlsr).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.xlsr.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model, including pre/post-processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for further usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.xlsr.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to the deployment instructions for Qualcomm® AI Hub.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of XLSR can be found
+  [here](https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf).
+ + +## References +* [Extremely Lightweight Quantization Robust Real-Time Single-Image Super Resolution for Mobile Devices](https://arxiv.org/abs/2105.10288) +* [Source Model Implementation](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/xlsr) diff --git a/qai_hub_models/models/xlsr/__init__.py b/qai_hub_models/models/xlsr/__init__.py new file mode 100644 index 00000000..6aeb527a --- /dev/null +++ b/qai_hub_models/models/xlsr/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 + SuperResolutionApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import XLSR as Model # noqa: F401 diff --git a/qai_hub_models/models/xlsr/demo.py b/qai_hub_models/models/xlsr/demo.py new file mode 100644 index 00000000..9d531a77 --- /dev/null +++ b/qai_hub_models/models/xlsr/demo.py @@ -0,0 +1,19 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.demo import super_resolution_demo +from qai_hub_models.models.xlsr.model import MODEL_ASSET_VERSION, MODEL_ID, XLSR +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "xlsr_demo.jpg" +) + + +def main(is_test: bool = False): + super_resolution_demo(XLSR, IMAGE_ADDRESS, is_test) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/xlsr/export.py b/qai_hub_models/models/xlsr/export.py new file mode 100644 index 00000000..b0a7a751 --- /dev/null +++ b/qai_hub_models/models/xlsr/export.py @@ -0,0 +1,194 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
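+#
+# Illustrative sketch (not part of the auto-generated module): unlike the
+# classifier exports above, this script forces both the input ("image") and the
+# output ("output_0") of the compiled asset into channel-last layout, so
+# host-side tensors must be transposed around on-device inference. Shapes are
+# assumptions taken from get_input_spec (NCHW 1x3x128x128):
+#
+#   import numpy as np
+#   nchw = np.zeros((1, 3, 128, 128), dtype=np.float32)   # host-side layout
+#   nhwc = nchw.transpose(0, 2, 3, 1)                      # layout fed on-device
+#   nchw_again = nhwc.transpose(0, 3, 1, 2)                # undo after inference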
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.xlsr import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "xlsr" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "xlsr", + "XLSR", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/xlsr/info.yaml b/qai_hub_models/models/xlsr/info.yaml new file mode 100644 index 00000000..1751d48f --- /dev/null +++ b/qai_hub_models/models/xlsr/info.yaml @@ -0,0 +1,31 @@ +name: XLSR +# id must match with the model dir name in qai_hub_models +id: xlsr +status: public +headline: Upscale images in real time. +domain: Computer Vision +use_case: Super Resolution +description: XLSR is designed for lightweight real-time upscaling of images. 
+tags: [] +research_paper: https://arxiv.org/abs/2105.10288 +research_paper_title: Extremely Lightweight Quantization Robust Real-Time Single-Image + Super Resolution for Mobile Devices +license: https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf +source_repo: https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/xlsr +technical_details: + Model checkpoint: xlsr_4x_checkpoint_float32 + Input resolution: 128x128 + Number of parameters: 28.0K + Model size: 116 KB +applicable_scenarios: + - Virtual Real Estate Tours + - Gaming + - ARVR +form_factors: + - Phone + - Tablet +related_models: [esrgan, real_esrgan_general_x4v3] +has_static_banner: yes +has_animated_banner: yes +license_type: other +dataset: [] diff --git a/qai_hub_models/models/xlsr/model.py b/qai_hub_models/models/xlsr/model.py new file mode 100644 index 00000000..5f4a2ffd --- /dev/null +++ b/qai_hub_models/models/xlsr/model.py @@ -0,0 +1,93 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator +from qai_hub_models.evaluators.superres_evaluator import SuperResolutionOutputEvaluator +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, SourceAsRoot +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 2 +# Weights and config stored in S3 are sourced from +# https://github.com/quic/aimet-model-zoo/blob/develop/aimet_zoo_torch/xlsr/model/model_cards/xlsr_4x_w8a8.json +# https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/xlsr_4x_checkpoint_float32.pth.tar +XLSR_WEIGHTS = "xlsr_4x_checkpoint_float32.pth.tar" +XLSR_SOURCE_REPOSITORY = "https://github.com/quic/aimet-model-zoo" +XLSR_SOURCE_REPO_COMMIT = "d09d2b0404d10f71a7640a87e9d5e5257b028802" +SCALING_FACTOR = 4 + + +class XLSR(BaseModel): + """Exportable XLSR super resolution model, end-to-end.""" + + def __init__( + self, + xlsr_model: torch.nn.Module, + ) -> None: + super().__init__() + self.model = xlsr_model + + @classmethod + def from_pretrained(cls) -> XLSR: + model = _load_xlsr_source_model() + dst = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, XLSR_WEIGHTS + ).fetch() + checkpoint = torch.load(dst, map_location=torch.device("cpu")) + model.load_state_dict(checkpoint["state_dict"]) + model.eval() + + return cls(model) + + def get_evaluator(self) -> BaseEvaluator: + return SuperResolutionOutputEvaluator() + + def forward(self, image: torch.Tensor) -> torch.Tensor: + """ + Run XLSR on `image`, and produce an upscaled image + + Parameters: + image: Pixel values pre-processed for model consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + image: Pixel values + Range: float[0, 1] + 3-channel Color Space: RGB + """ + return self.model(image) + + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 128, + width: int = 128, + ) -> InputSpec: + # Get the input specification ordered (name -> (shape, type)) pairs for this model. + # + # This can be used with the qai_hub python API to declare + # the model input specification upon submitting a profile job. 
+ return {"image": ((batch_size, num_channels, height, width), "float32")} + + +def _load_xlsr_source_model() -> torch.nn.Module: + # Load XLSR model from the source repository using the given weights. + # Returns .utils.super_resolution.models.XLSRRelease + with SourceAsRoot( + XLSR_SOURCE_REPOSITORY, + XLSR_SOURCE_REPO_COMMIT, + MODEL_ID, + MODEL_ASSET_VERSION, + ): + # necessary import. `modeling.deeplab` comes from the XLSR repo. + from aimet_zoo_torch.common.super_resolution.models import XLSRRelease + + return XLSRRelease(scaling_factor=SCALING_FACTOR) diff --git a/qai_hub_models/models/xlsr/perf.yaml b/qai_hub_models/models/xlsr/perf.yaml new file mode 100644 index 00000000..91c2f707 --- /dev/null +++ b/qai_hub_models/models/xlsr/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: XLSR + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 2523.0 + throughput: 396.3535473642489 + estimated_peak_memory_range: + min: 24576 + max: 1686120 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 13 + layers_on_gpu: 0 + layers_on_cpu: 3 + total_layers: 16 + job_id: jogk2qlyg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 1068.0 + throughput: 936.3295880149813 + estimated_peak_memory_range: + min: 217088 + max: 63076024 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 22 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 22 + job_id: jn5qlr77p + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:37:57.776098Z' diff --git a/qai_hub_models/models/xlsr/test.py b/qai_hub_models/models/xlsr/test.py new file mode 100644 index 00000000..1ce0cdd8 --- /dev/null +++ b/qai_hub_models/models/xlsr/test.py @@ -0,0 +1,38 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+import numpy as np
+
+from qai_hub_models.models._shared.super_resolution.app import SuperResolutionApp
+from qai_hub_models.models.xlsr.demo import IMAGE_ADDRESS
+from qai_hub_models.models.xlsr.demo import main as demo_main
+from qai_hub_models.models.xlsr.model import MODEL_ASSET_VERSION, MODEL_ID, XLSR
+from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image
+from qai_hub_models.utils.testing import skip_clone_repo_check
+
+OUTPUT_IMAGE_LOCAL_PATH = "xlsr_demo_output.png"
+OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store(
+    MODEL_ID, MODEL_ASSET_VERSION, OUTPUT_IMAGE_LOCAL_PATH
+)
+
+
+@skip_clone_repo_check
+def test_task():
+    image = load_image(IMAGE_ADDRESS)
+    output_image = load_image(OUTPUT_IMAGE_ADDRESS)
+    model = XLSR.from_pretrained()
+    app = SuperResolutionApp(model=model)
+    app_output_image = app.upscale_image(image)[0]
+
+    np.testing.assert_allclose(
+        np.asarray(app_output_image, dtype=np.float32) / 255,
+        np.asarray(output_image, dtype=np.float32) / 255,
+        rtol=0.02,
+        atol=0.2,
+    )
+
+
+@skip_clone_repo_check
+def test_demo():
+    demo_main(is_test=True)
diff --git a/qai_hub_models/models/xlsr_quantized/README.md b/qai_hub_models/models/xlsr_quantized/README.md
new file mode 100644
index 00000000..edf7ff13
--- /dev/null
+++ b/qai_hub_models/models/xlsr_quantized/README.md
@@ -0,0 +1,50 @@
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md)
+
+
+# [XLSR-Quantized: Upscale images in real time](https://aihub.qualcomm.com/models/xlsr_quantized)
+
+XLSR is designed for lightweight real-time upscaling of images.
+
+This is based on the implementation of XLSR-Quantized found
+[here](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/xlsr). This repository contains scripts for optimized on-device
+export suitable for running on Qualcomm® devices. More details on model performance
+across various devices can be found [here](https://aihub.qualcomm.com/models/xlsr_quantized).
+
+[Sign up](https://aihub.qualcomm.com/) for early access to run these models on
+a hosted Qualcomm® device.
+
+
+## Example & Usage
+
+
+Once installed, run the following simple CLI demo:
+
+```bash
+python -m qai_hub_models.models.xlsr_quantized.demo
+```
+More details on the CLI tool can be found with the `--help` option. See
+[demo.py](demo.py) for sample usage of the model, including pre/post-processing
+scripts. Please refer to our [general instructions on using
+models](../../#qai-hub-models) for further usage instructions.
+
+## Export for on-device deployment
+
+This repository contains export scripts that produce a model optimized for
+on-device deployment. This can be run as follows:
+
+```bash
+python -m qai_hub_models.models.xlsr_quantized.export
+```
+Additional options are documented with the `--help` option. Note that the above
+script requires access to the deployment instructions for Qualcomm® AI Hub.
+
+## License
+- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE
+  file at the repository root.
+- The license for the original implementation of XLSR-Quantized can be found
+  [here](https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf).
+ + +## References +* [Extremely Lightweight Quantization Robust Real-Time Single-Image Super Resolution for Mobile Devices](https://arxiv.org/abs/2105.10288) +* [Source Model Implementation](https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/xlsr) diff --git a/qai_hub_models/models/xlsr_quantized/__init__.py b/qai_hub_models/models/xlsr_quantized/__init__.py new file mode 100644 index 00000000..5a4cb124 --- /dev/null +++ b/qai_hub_models/models/xlsr_quantized/__init__.py @@ -0,0 +1,10 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.app import ( # noqa: F401 + SuperResolutionApp as App, +) + +from .model import MODEL_ID # noqa: F401 +from .model import XLSRQuantizable as Model # noqa: F401 diff --git a/qai_hub_models/models/xlsr_quantized/demo.py b/qai_hub_models/models/xlsr_quantized/demo.py new file mode 100644 index 00000000..fde391bd --- /dev/null +++ b/qai_hub_models/models/xlsr_quantized/demo.py @@ -0,0 +1,29 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.super_resolution.demo import super_resolution_demo +from qai_hub_models.models.xlsr_quantized.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + XLSRQuantizable, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset +from qai_hub_models.utils.base_model import TargetRuntime + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "xlsr_quantized_demo.jpg" +) + + +def main(is_test: bool = False): + super_resolution_demo( + XLSRQuantizable, + IMAGE_ADDRESS, + is_test, + available_target_runtimes=[TargetRuntime.TFLITE], + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/xlsr_quantized/export.py b/qai_hub_models/models/xlsr_quantized/export.py new file mode 100644 index 00000000..b26fde6b --- /dev/null +++ b/qai_hub_models/models/xlsr_quantized/export.py @@ -0,0 +1,204 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
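+#
+# Usage note (a sketch, not part of the generated contract): `export_model` below
+# can also be invoked programmatically, e.g.
+#
+#     from qai_hub_models.models.xlsr_quantized.export import export_model
+#     export_model(device="Samsung Galaxy S23", dst_runtime="TFLITE", skip_profiling=True)
+#
+# Argument names follow the `export_model` signature defined in this file; the
+# device string is only an example (run `hub.get_devices()` for the full list).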
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub + +from qai_hub_models.models.xlsr_quantized import Model +from qai_hub_models.utils.args import ( + TargetRuntime, + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) +from qai_hub_models.utils.qnn_helpers import get_qnn_inputs + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "xlsr_quantized" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "xlsr_quantized", + "XLSR-Quantized", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = model.convert_to_hub_source_model( + target_runtime, output_path, input_spec + ) + if target_runtime == TargetRuntime.TFLITE: + quant_calibration_data = None + else: + quant_calibration_data = model.get_calibration_data(target_runtime, input_spec) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, + compile_options + + " --force_channel_last_input image" + + " --force_channel_last_output output_0", + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + calibration_data=quant_calibration_data, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + hub_inputs = sample_inputs + if target_runtime == TargetRuntime.QNN: + hub_inputs = get_qnn_inputs(compile_job, sample_inputs) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + # Convert outputs from channel last to channel first + inference_result = transpose_channel_last_to_first( + "output_0", inference_result, target_runtime + ) + print_inference_metrics(inference_job, inference_result, torch_out) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/xlsr_quantized/info.yaml b/qai_hub_models/models/xlsr_quantized/info.yaml new file mode 100644 index 00000000..55059211 --- /dev/null +++ b/qai_hub_models/models/xlsr_quantized/info.yaml @@ -0,0 +1,32 @@ +name: XLSR-Quantized +# id must match with the model dir name in qai_hub_models +id: xlsr_quantized +status: public +headline: Upscale images in real time. +domain: Computer Vision +use_case: Super Resolution +description: XLSR is designed for lightweight real-time upscaling of images. +tags: + - quantized +research_paper: https://arxiv.org/abs/2105.10288 +research_paper_title: Extremely Lightweight Quantization Robust Real-Time Single-Image + Super Resolution for Mobile Devices +license: https://github.com/quic/aimet-model-zoo/blob/develop/LICENSE.pdf +source_repo: https://github.com/quic/aimet-model-zoo/tree/develop/aimet_zoo_torch/xlsr +technical_details: + Model checkpoint: xlsr_4x_checkpoint_w8a8 + Input resolution: 128x128 + Number of parameters: 28.0K + Model size: 47.0 KB +applicable_scenarios: + - Virtual Real Estate Tours + - Gaming + - ARVR +form_factors: + - Phone + - Tablet +related_models: [esrgan, real_esrgan_general_x4v3, xlsr] +has_static_banner: yes +has_animated_banner: yes +license_type: other +dataset: [] diff --git a/qai_hub_models/models/xlsr_quantized/model.py b/qai_hub_models/models/xlsr_quantized/model.py new file mode 100644 index 00000000..d9313440 --- /dev/null +++ b/qai_hub_models/models/xlsr_quantized/model.py @@ -0,0 +1,88 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch +from aimet_torch.quantsim import QuantizationSimModel, load_encodings_to_sim + +from qai_hub_models.models.xlsr.model import XLSR, _load_xlsr_source_model +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +# This verifies aimet is installed, and this must be included first. 
+from qai_hub_models.utils.quantization_aimet import ( # isort: skip + AIMETQuantizableMixin, +) + + +MODEL_ID = __name__.split(".")[-2] +MODEL_ASSET_VERSION = 2 +# Weights and config stored in S3 are sourced from +# https://github.com/quic/aimet-model-zoo/blob/develop/aimet_zoo_torch/xlsr/model/model_cards/xlsr_4x_w8a8.json: +# https://github.com/quic/aimet-model-zoo/releases/download/phase_2_february_artifacts/xlsr_4x_checkpoint_int8.pth +# and +# https://raw.githubusercontent.com/quic/aimet/release-aimet-1.23/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.js +# Encodings were generated with AIMET QuantSim library +XLSR_QUANTIZED_WEIGHTS = "xlsr_4x_checkpoint_int8.pth" +AIMET_ENCODINGS = "aimet_quantization_encodings.json" +AIMET_CONFIG = "default_config_per_channel.json" +SCALING_FACTOR = 4 + + +class XLSRQuantizable(AIMETQuantizableMixin, XLSR): + """XLSR with post training quantization suport + + Supports only 8 bit weights and activations, and only loads pre-quantized checkpoints. + Support for quantizing using your own weights & data will come at a later date.""" + + def __init__( + self, + xlsr_model: QuantizationSimModel, + ) -> None: + XLSR.__init__(self, xlsr_model.model) + AIMETQuantizableMixin.__init__( + self, xlsr_model, needs_onnx_direct_aimet_export=True + ) + + @classmethod + def from_pretrained( + cls, + aimet_encodings: str | None = "DEFAULT", + ) -> XLSRQuantizable: + """ + Parameters: + aimet_encodings: + if "DEFAULT": Loads the model with aimet encodings calibrated on BSD300. + elif None: Doesn't load any encodings. Used when computing encodings. + else: Interprets as a filepath and loads the encodings stored there. + """ + xlsr = _load_xlsr_source_model() + input_shape = XLSR.get_input_spec()["image"][0] + + weights = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, XLSR_QUANTIZED_WEIGHTS + ).fetch() + aimet_config = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, AIMET_CONFIG + ).fetch() + + # Load the model weights and quantization parameters + state_dict = torch.load(weights, map_location=torch.device("cpu"))["state_dict"] + xlsr.load_state_dict(state_dict) + sim = QuantizationSimModel( + xlsr, + quant_scheme="tf_enhanced", + default_param_bw=8, + default_output_bw=8, + config_file=aimet_config, + dummy_input=torch.rand(input_shape), + ) + if aimet_encodings: + if aimet_encodings == "DEFAULT": + aimet_encodings = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, AIMET_ENCODINGS + ).fetch() + load_encodings_to_sim(sim, aimet_encodings) + + return cls(sim) diff --git a/qai_hub_models/models/xlsr_quantized/perf.yaml b/qai_hub_models/models/xlsr_quantized/perf.yaml new file mode 100644 index 00000000..e76f30ab --- /dev/null +++ b/qai_hub_models/models/xlsr_quantized/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: XLSR-Quantized + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 
1298.0 + throughput: 770.4160246533128 + estimated_peak_memory_range: + min: 24576 + max: 1426056 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 16 + layers_on_gpu: 0 + layers_on_cpu: 3 + total_layers: 19 + job_id: jo5m064yg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:31:32.010687Z' diff --git a/qai_hub_models/models/xlsr_quantized/test.py b/qai_hub_models/models/xlsr_quantized/test.py new file mode 100644 index 00000000..7ec905dc --- /dev/null +++ b/qai_hub_models/models/xlsr_quantized/test.py @@ -0,0 +1,45 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np +import torch + +from qai_hub_models.models._shared.super_resolution.app import SuperResolutionApp +from qai_hub_models.models.xlsr_quantized.demo import IMAGE_ADDRESS +from qai_hub_models.models.xlsr_quantized.demo import main as demo_main +from qai_hub_models.models.xlsr_quantized.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + XLSRQuantizable, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.testing import skip_clone_repo_check + +OUTPUT_IMAGE_LOCAL_PATH = "xlsr_quantized_demo_output.png" +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, OUTPUT_IMAGE_LOCAL_PATH +) + + +@skip_clone_repo_check +def test_task(): + # AIMET Quantization Simulator introduces randomness. Eliminate that for this test. + torch.manual_seed(0) + image = load_image(IMAGE_ADDRESS) + output_image = load_image(OUTPUT_IMAGE_ADDRESS) + model = XLSRQuantizable.from_pretrained() + app = SuperResolutionApp(model=model) + app_output_image = app.upscale_image(image)[0] + + np.testing.assert_allclose( + np.asarray(app_output_image, dtype=np.float32) / 255, + np.asarray(output_image, dtype=np.float32) / 255, + rtol=0.02, + atol=0.2, + ) + + +@skip_clone_repo_check +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/yolov6/README.md b/qai_hub_models/models/yolov6/README.md new file mode 100644 index 00000000..fa194543 --- /dev/null +++ b/qai_hub_models/models/yolov6/README.md @@ -0,0 +1,50 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [Yolo-v6: Real-time object detection optimized for mobile and edge](https://aihub.qualcomm.com/models/yolov6) + +YoloV6 is a machine learning model that predicts bounding boxes and classes of objects in an image. + +This is based on the implementation of Yolo-v6 found +[here](https://github.com/meituan/YOLOv6/). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/yolov6). 
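+
+For orientation, the sketch below shows programmatic use of this package, based on
+`demo.py` and `app.py` in this directory; `predict_boxes_from_image` is assumed
+from the shared Yolo detection app and the input path is illustrative.
+
+```python
+from qai_hub_models.models.yolov6 import App, Model
+from qai_hub_models.utils.asset_loaders import load_image
+
+# Load pretrained YoloV6-N weights and wrap the model in the detection app.
+model = Model.from_pretrained()
+app = App(model)
+
+# Image height and width must be multiples of Model.STRIDE_MULTIPLE (32).
+image = load_image("street_640x640.jpg")  # illustrative path
+annotated = app.predict_boxes_from_image(image)[0]
+```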
+ +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.yolov6.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.yolov6.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of Yolo-v6 can be found + [here](https://github.com/meituan/YOLOv6/blob/47625514e7480706a46ff3c0cd0252907ac12f22/LICENSE). + + +## References +* [YOLOv6: A Single-Stage Object Detection Framework for Industrial Applications](https://arxiv.org/abs/2209.02976) +* [Source Model Implementation](https://github.com/meituan/YOLOv6/) diff --git a/qai_hub_models/models/yolov6/__init__.py b/qai_hub_models/models/yolov6/__init__.py new file mode 100644 index 00000000..032460fc --- /dev/null +++ b/qai_hub_models/models/yolov6/__init__.py @@ -0,0 +1,8 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models.yolov6.app import YoloV6DetectionApp as App # noqa: F401 + +from .model import MODEL_ID # noqa: F401 +from .model import YoloV6 as Model # noqa: F401 diff --git a/qai_hub_models/models/yolov6/app.py b/qai_hub_models/models/yolov6/app.py new file mode 100644 index 00000000..e025877e --- /dev/null +++ b/qai_hub_models/models/yolov6/app.py @@ -0,0 +1,26 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch + +from qai_hub_models.models._shared.yolo.app import YoloObjectDetectionApp +from qai_hub_models.models.yolov6.model import YoloV6 + + +class YoloV6DetectionApp(YoloObjectDetectionApp): + def check_image_size(self, pixel_values: torch.Tensor) -> None: + """ + Verify image size is valid model input. + """ + if len(pixel_values.shape) != 4: + raise ValueError("Pixel Values must be rank 4: [batch, channels, x, y]") + if ( + pixel_values.shape[2] % YoloV6.STRIDE_MULTIPLE != 0 + or pixel_values.shape[3] % YoloV6.STRIDE_MULTIPLE != 0 + ): + raise ValueError( + f"Pixel values must have spatial dimensions (H & W) that are multiples of {YoloV6.STRIDE_MULTIPLE}." 
+ ) diff --git a/qai_hub_models/models/yolov6/demo.py b/qai_hub_models/models/yolov6/demo.py new file mode 100644 index 00000000..9f2ba92b --- /dev/null +++ b/qai_hub_models/models/yolov6/demo.py @@ -0,0 +1,29 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.yolo.demo import yolo_detection_demo +from qai_hub_models.models.yolov6.app import YoloV6DetectionApp +from qai_hub_models.models.yolov6.model import MODEL_ASSET_VERSION, MODEL_ID, YoloV6 +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +WEIGHTS_HELP_MSG = ( + "YoloV6 checkpoint name, defined here: https://github.com/meituan/YOLOv6/releases" +) +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "test_images/input_image.jpg" +) + + +def main(is_test: bool = False): + yolo_detection_demo( + YoloV6, + YoloV6DetectionApp, + IMAGE_ADDRESS, + YoloV6.STRIDE_MULTIPLE, + is_test=is_test, + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/yolov6/export.py b/qai_hub_models/models/yolov6/export.py new file mode 100644 index 00000000..22f077b9 --- /dev/null +++ b/qai_hub_models/models/yolov6/export.py @@ -0,0 +1,191 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.yolov6 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. 
+ Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "yolov6" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "yolov6", + "Yolo-v6", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics( + inference_job, inference_result, torch_out, outputs_to_skip=[2] + ) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/yolov6/info.yaml b/qai_hub_models/models/yolov6/info.yaml new file mode 100644 index 00000000..9195123b --- /dev/null +++ b/qai_hub_models/models/yolov6/info.yaml @@ -0,0 +1,38 @@ +name: Yolo-v6 +# id must match with the model dir name in qai_hub_models +id: yolov6 +status: public +headline: Real-time object detection optimized for mobile and edge. +domain: Computer Vision +description: YoloV6 is a machine learning model that predicts bounding boxes and classes + of objects in an image. +use_case: Object Detection +tags: + - real-time +research_paper: https://arxiv.org/abs/2209.02976 +research_paper_title: 'YOLOv6: A Single-Stage Object Detection Framework for Industrial + Applications' +license: + https://github.com/meituan/YOLOv6/blob/47625514e7480706a46ff3c0cd0252907ac12f22/LICENSE +source_repo: https://github.com/meituan/YOLOv6/ +technical_details: + Model checkpoint: YoloV6-N + Input resolution: 640x640 + Number of parameters: 4.68M + Model size: 17.9 MB +applicable_scenarios: + - Factory Automation + - Robotic Navigation + - Camera +related_models: + - yolov7 + - yolov8_det +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: gpl-3.0 +dataset: [] diff --git a/qai_hub_models/models/yolov6/model.py b/qai_hub_models/models/yolov6/model.py new file mode 100644 index 00000000..cf836d44 --- /dev/null +++ b/qai_hub_models/models/yolov6/model.py @@ -0,0 +1,112 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import tempfile +from importlib import reload + +import torch +import torch.nn as nn + +from qai_hub_models.models._shared.yolo.utils import detect_postprocess +from qai_hub_models.utils.asset_loaders import ( + CachedWebModelAsset, + SourceAsRoot, + load_path, +) +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +YOLOV6_SOURCE_REPOSITORY = "https://github.com/meituan/YOLOv6" +YOLOV6_SOURCE_REPO_COMMIT = "55d80c317edd0fb5847e599a1802d394f34a3141" +MODEL_ASSET_VERSION = 1 +MODEL_ID = __name__.split(".")[-2] + +WEIGHTS_PATH = "https://github.com/meituan/YOLOv6/releases/download/0.4.0/" +DEFAULT_WEIGHTS = "yolov6n.pt" + + +class YoloV6(BaseModel): + """Exportable YoloV6 bounding box detector, end-to-end.""" + + def __init__(self, model: nn.Module) -> None: + super().__init__() + self.model = model + + # All image input spatial dimensions should be a multiple of this stride. + STRIDE_MULTIPLE = 32 + + @classmethod + def from_pretrained(cls, ckpt_name: str = DEFAULT_WEIGHTS): + model_url = f"{WEIGHTS_PATH}{ckpt_name}" + asset = CachedWebModelAsset(model_url, MODEL_ID, MODEL_ASSET_VERSION, ckpt_name) + model = _load_yolov6_source_model_from_weights(asset) + return cls(model) + + def forward(self, image: torch.Tensor): + """ + Run YoloV6 on `image`, and produce a predicted set of bounding boxes and associated class probabilities. + + Parameters: + image: Pixel values pre-processed for encoder consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + boxes: Shape [batch, num preds, 4] where 4 == (center_x, center_y, w, h) + class scores multiplied by confidence: Shape [batch, num_preds, # of classes (typically 80)] + """ + predictions = self.model(image) + return detect_postprocess(predictions) + + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 640, + width: int = 640, + ) -> InputSpec: + """ + Returns the input specification (name -> (shape, type). This can be + used to submit profiling job on Qualcomm AI Hub. + """ + return {"image": ((batch_size, num_channels, height, width), "float32")} + + +def _load_yolov6_source_model_from_weights( + ckpt_path: str | CachedWebModelAsset, +) -> torch.nn.Module: + with tempfile.TemporaryDirectory() as tmpdir: + model_path = load_path(ckpt_path, tmpdir) + with SourceAsRoot( + YOLOV6_SOURCE_REPOSITORY, + YOLOV6_SOURCE_REPO_COMMIT, + MODEL_ID, + MODEL_ASSET_VERSION, + ): + # Our models/yolov6 package may already be loaded and cached as + # "yolov6" (reproduce by running python -m yolov6.demo from models + # folder). To make sure it loads the external yolov6 repo, + # explicitly reload first. 
+ import yolov6 + + reload(yolov6) + + from yolov6.layers.common import RepVGGBlock + from yolov6.utils.checkpoint import load_checkpoint + + model = load_checkpoint( + model_path, map_location="cpu", inplace=True, fuse=True + ) + model.export = True + + for layer in model.modules(): + if isinstance(layer, RepVGGBlock): + layer.switch_to_deploy() + elif isinstance(layer, nn.Upsample) and not hasattr( + layer, "recompute_scale_factor" + ): + layer.recompute_scale_factor = None # torch 1.11.0 compatibility + return model diff --git a/qai_hub_models/models/yolov6/perf.yaml b/qai_hub_models/models/yolov6/perf.yaml new file mode 100644 index 00000000..82e39b9b --- /dev/null +++ b/qai_hub_models/models/yolov6/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: Yolo-v6 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 7848.0 + throughput: 127.420998980632 + estimated_peak_memory_range: + min: 32768 + max: 7233136 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 182 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 182 + job_id: jqpyoj4r5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 7283.0 + throughput: 137.3060551970342 + estimated_peak_memory_range: + min: 4931584 + max: 17461520 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 230 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 230 + job_id: j2p0m212g + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:12:26.065342Z' diff --git a/qai_hub_models/models/yolov6/test.py b/qai_hub_models/models/yolov6/test.py new file mode 100644 index 00000000..d3d13d82 --- /dev/null +++ b/qai_hub_models/models/yolov6/test.py @@ -0,0 +1,50 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np +import torch + +from qai_hub_models.models._shared.yolo.utils import detect_postprocess +from qai_hub_models.models.yolov6.demo import IMAGE_ADDRESS +from qai_hub_models.models.yolov6.demo import main as demo_main +from qai_hub_models.models.yolov6.model import ( + DEFAULT_WEIGHTS, + MODEL_ASSET_VERSION, + MODEL_ID, + WEIGHTS_PATH, + YoloV6, + _load_yolov6_source_model_from_weights, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.image_processing import preprocess_PIL_image +from qai_hub_models.utils.testing import skip_clone_repo_check + + +@skip_clone_repo_check +def test_task(): + model_path = f"{WEIGHTS_PATH}{DEFAULT_WEIGHTS}" + asset = CachedWebModelAsset( + model_path, MODEL_ID, MODEL_ASSET_VERSION, DEFAULT_WEIGHTS + ) + + # source model + source_model = _load_yolov6_source_model_from_weights(asset) + + # Qualcomm AI Hub Model + qaihm_model = YoloV6.from_pretrained() + + with torch.no_grad(): + # source model output + processed_sample_image = preprocess_PIL_image(load_image(IMAGE_ADDRESS)) + source_detect_out = source_model(processed_sample_image) + source_out_postprocessed = detect_postprocess(source_detect_out) + + # Qualcomm AI Hub Model output + qaihm_out_postprocessed = qaihm_model(processed_sample_image) + for i in range(0, len(source_out_postprocessed)): + assert np.allclose(source_out_postprocessed[i], qaihm_out_postprocessed[i]) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/yolov7/README.md b/qai_hub_models/models/yolov7/README.md new file mode 100644 index 00000000..bb98d698 --- /dev/null +++ b/qai_hub_models/models/yolov7/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [Yolo-v7: Real-time object detection optimized for mobile and edge](https://aihub.qualcomm.com/models/yolov7) + +YoloV7 is a machine learning model that predicts bounding boxes and classes of objects in an image. + +This is based on the implementation of Yolo-v7 found +[here](https://github.com/WongKinYiu/yolov7/). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/yolov7). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[yolov7]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.yolov7.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.yolov7.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. 
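+
+For orientation, the sketch below mirrors how `test.py` in this package runs the
+model directly on a preprocessed image tensor; the input path is illustrative and
+the image's height and width must be multiples of 32.
+
+```python
+import torch
+
+from qai_hub_models.models.yolov7 import Model
+from qai_hub_models.utils.asset_loaders import load_image
+from qai_hub_models.utils.image_processing import preprocess_PIL_image
+
+# Load the default YoloV7-tiny checkpoint.
+model = Model.from_pretrained("yolov7-tiny.pt")
+
+# Preprocess a PIL image into a [0, 1] float tensor and run detection.
+image_tensor = preprocess_PIL_image(load_image("street_640x640.jpg"))  # illustrative path
+with torch.no_grad():
+    outputs = model(image_tensor)  # bounding boxes and per-class scores (see forward())
+```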
+ +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of Yolo-v7 can be found + [here](https://github.com/WongKinYiu/yolov7/blob/main/LICENSE.md). + + +## References +* [YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors](https://arxiv.org/abs/2207.02696) +* [Source Model Implementation](https://github.com/WongKinYiu/yolov7/) diff --git a/qai_hub_models/models/yolov7/__init__.py b/qai_hub_models/models/yolov7/__init__.py new file mode 100644 index 00000000..1504e099 --- /dev/null +++ b/qai_hub_models/models/yolov7/__init__.py @@ -0,0 +1,8 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models.yolov7.app import YoloV7DetectionApp as App # noqa: F401 + +from .model import MODEL_ID # noqa: F401 +from .model import YoloV7 as Model # noqa: F401 diff --git a/qai_hub_models/models/yolov7/app.py b/qai_hub_models/models/yolov7/app.py new file mode 100644 index 00000000..fe6f1244 --- /dev/null +++ b/qai_hub_models/models/yolov7/app.py @@ -0,0 +1,26 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch + +from qai_hub_models.models._shared.yolo.app import YoloObjectDetectionApp +from qai_hub_models.models.yolov7.model import YoloV7 + + +class YoloV7DetectionApp(YoloObjectDetectionApp): + def check_image_size(self, pixel_values: torch.Tensor) -> None: + """ + Verify image size is valid model input. + """ + if len(pixel_values.shape) != 4: + raise ValueError("Pixel Values must be rank 4: [batch, channels, x, y]") + if ( + pixel_values.shape[2] % YoloV7.STRIDE_MULTIPLE != 0 + or pixel_values.shape[3] % YoloV7.STRIDE_MULTIPLE != 0 + ): + raise ValueError( + f"Pixel values must have spatial dimensions (H & W) that are multiples of {YoloV7.STRIDE_MULTIPLE}." + ) diff --git a/qai_hub_models/models/yolov7/demo.py b/qai_hub_models/models/yolov7/demo.py new file mode 100644 index 00000000..23b01552 --- /dev/null +++ b/qai_hub_models/models/yolov7/demo.py @@ -0,0 +1,26 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.yolo.demo import yolo_detection_demo +from qai_hub_models.models.yolov7.app import YoloV7DetectionApp +from qai_hub_models.models.yolov7.model import MODEL_ASSET_VERSION, MODEL_ID, YoloV7 +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "yolov7_demo_640.jpg" +) + + +def main(is_test: bool = False): + yolo_detection_demo( + YoloV7, + YoloV7DetectionApp, + IMAGE_ADDRESS, + YoloV7.STRIDE_MULTIPLE, + is_test=is_test, + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/yolov7/export.py b/qai_hub_models/models/yolov7/export.py new file mode 100644 index 00000000..45ac5a22 --- /dev/null +++ b/qai_hub_models/models/yolov7/export.py @@ -0,0 +1,191 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.yolov7 import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). 
+ Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "yolov7" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "yolov7", + "Yolo-v7", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace(model, make_torch_inputs(input_spec)) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics( + inference_job, inference_result, torch_out, outputs_to_skip=[2] + ) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/yolov7/info.yaml b/qai_hub_models/models/yolov7/info.yaml new file mode 100644 index 00000000..5cf326e6 --- /dev/null +++ b/qai_hub_models/models/yolov7/info.yaml @@ -0,0 +1,37 @@ +name: Yolo-v7 +# id must match with the model dir name in qai_hub_models +id: yolov7 +status: public +headline: Real-time object detection optimized for mobile and edge. +domain: Computer Vision +description: YoloV7 is a machine learning model that predicts bounding boxes and classes + of objects in an image. +use_case: Object Detection +tags: + - real-time +research_paper: https://arxiv.org/abs/2207.02696 +research_paper_title: 'YOLOv7: Trainable bag-of-freebies sets new state-of-the-art + for real-time object detectors' +license: https://github.com/WongKinYiu/yolov7/blob/main/LICENSE.md +source_repo: https://github.com/WongKinYiu/yolov7/ +technical_details: + Model checkpoint: YoloV7 Tiny + Input resolution: 720p (720x1280) + Number of parameters: 6.39M + Model size: 24.4 MB +applicable_scenarios: + - Factory Automation + - Robotic Navigation + - Camera +related_models: + - yolov6 + - yolov8_det +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: gpl-3.0 +dataset: [] diff --git a/qai_hub_models/models/yolov7/model.py b/qai_hub_models/models/yolov7/model.py new file mode 100644 index 00000000..63e56e72 --- /dev/null +++ b/qai_hub_models/models/yolov7/model.py @@ -0,0 +1,246 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from importlib import reload +from typing import Any, List, Mapping, Optional + +import torch + +from qai_hub_models.models._shared.yolo.utils import ( + detect_postprocess, + yolo_sample_inputs, +) +from qai_hub_models.utils.asset_loaders import SourceAsRoot +from qai_hub_models.utils.base_model import BaseModel, InputsType +from qai_hub_models.utils.input_spec import InputSpec + +YOLOV7_SOURCE_REPOSITORY = "https://github.com/WongKinYiu/yolov7" +YOLOV7_SOURCE_REPO_COMMIT = "84932d70fb9e2932d0a70e4a1f02a1d6dd1dd6ca" +MODEL_ID = __name__.split(".")[-2] +DEFAULT_WEIGHTS = "yolov7-tiny.pt" +MODEL_ASSET_VERSION = 1 + + +class YoloV7(BaseModel): + """Exportable YoloV7 bounding box detector, end-to-end.""" + + def __init__( + self, + yolov7_feature_extractor: torch.nn.Module, + yolov7_detector: torch.nn.Module, + ) -> None: + super().__init__() + self.yolov7_feature_extractor = yolov7_feature_extractor + self.yolov7_detector = yolov7_detector + + # All image input spatial dimensions should be a multiple of this stride. + STRIDE_MULTIPLE = 32 + + @classmethod + def from_pretrained( + cls, + weights_name: Optional[str] = DEFAULT_WEIGHTS, + ): + """Load YoloV7 from a weightfile created by the source YoloV7 repository.""" + # Load PyTorch model from disk + yolov7_model = _load_yolov7_source_model_from_weights(weights_name) + + yolov7_model.profile = False + + # When traced = True, the model will skip the "Detect" step, + # which allows us to override it with an exportable version. + yolov7_model.traced = True + + # Generate replacement detector that can be traced + detector_head_state_dict = yolov7_model.model[-1].state_dict() + detector_head_state_dict["stride"] = yolov7_model.model[-1].stride + detector_head_state_dict["f"] = yolov7_model.model[ + -1 + ].f # Previous (input) node indices in sequential model + detector_head_state_dict["i"] = yolov7_model.model[ + -1 + ].i # Index in sequential model + yolov7_detect = _YoloV7Detector.from_yolov7_state_dict(detector_head_state_dict) + + return cls( + yolov7_model, + yolov7_detect, + ) + + def forward(self, image: torch.Tensor): + """ + Run YoloV7 on `image`, and produce a predicted set of bounding boxes and associated class probabilities. + + Parameters: + image: Pixel values pre-processed for encoder consumption. + Range: float[0, 1] + 3-channel Color Space: BGR + + Returns: + boxes: Shape [batch, num preds, 4] where 4 == (center_x, center_y, w, h) + class scores multiplied by confidence: Shape [batch, num_preds, # of classes (typically 80)] + """ + feature_extraction_output = ( + *self.yolov7_feature_extractor(image), + ) # Convert output list to Tuple, for exportability + prediction = self.yolov7_detector(feature_extraction_output) + return detect_postprocess(prediction) + + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 640, + width: int = 640, + ) -> InputSpec: + """ + Returns the input specification (name -> (shape, type). This can be + used to submit profiling job on Qualcomm AI Hub. 
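+
+        Example (a sketch mirroring export.py in this package; `hub` is `qai_hub`,
+        `traced_model` is a `torch.jit.trace` of this model, and the device name
+        is illustrative):
+
+            input_spec = YoloV7.get_input_spec()
+            compile_job = hub.submit_compile_job(
+                model=traced_model,
+                input_specs=input_spec,
+                device=hub.Device("Samsung Galaxy S23"),
+            )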
+ """ + return {"image": ((batch_size, num_channels, height, width), "float32")} + + def sample_inputs(self, input_spec: InputSpec | None = None) -> InputsType: + if input_spec is not None and input_spec != YoloV7.get_input_spec(): + raise ValueError("Sample input has a fixed size that cannot be changed") + + return yolo_sample_inputs() + + +class _YoloV7Detector(torch.nn.Module): # YoloV7 Detection + """Converts features extracted by YoloV7 to predicted bounding boxes & associated class predictions.""" + + def __init__( + self, + stride: torch.Tensor, + f, + i, + num_anchors: int, + num_layers: int, + m_in_channels: List[int], + m_out_channel, + ): + super(_YoloV7Detector, self).__init__() + self.f = f + self.i = i + self.stride = stride + self.na = num_anchors + self.no = m_out_channel // self.na # number of outputs per anchor + self.nc = self.no - 5 # number of classes + self.nl = num_layers + for i in range(0, self.nl): + self.register_buffer( + f"anchor_grid_{i}", torch.zeros(1, self.na, 1, 1, 2) + ) # nl * [ tensor(shape(1,na,1,1,2)) ] + self.m = torch.nn.ModuleList( + torch.nn.Conv2d(m_in_channel, m_out_channel, 1) + for m_in_channel in m_in_channels + ) # output conv + + @staticmethod + def from_yolov7_state_dict( + state_dict: Mapping[str, Any], + strict: bool = True, + ): + """ + Load this module from a state dict taken from the "Detect" module. + This module is found in the original YoloV7 source repository (models/common.py::Detect). + """ + new_state_dict = {} + + # Convert anchor grid buffer from rank 6 to several rank 5 tensors, for export-friendliness. + anchor_grid = state_dict["anchor_grid"] + nl = len(anchor_grid) + na = anchor_grid.shape[2] + for i in range(0, nl): + new_state_dict[f"anchor_grid_{i}"] = anchor_grid[i] + + # Copy over `m` layers + m_in_channels = [] + m_out_channel = 0 + for i in range(0, nl): + weight = f"m.{i}.weight" + for x in [weight, f"m.{i}.bias"]: + new_state_dict[x] = state_dict[x] + m_in_channels.append(new_state_dict[weight].shape[1]) + m_out_channel = new_state_dict[weight].shape[0] + + out = _YoloV7Detector( + state_dict["stride"], + state_dict["f"], + state_dict["i"], + na, + nl, + m_in_channels, + m_out_channel, + ) + out.load_state_dict(new_state_dict, strict) + return out + + def make_grid_points(self, x, i): + x = x.sigmoid() + bs, _, ny, nx = x.shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x = x.view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + grid = self._make_grid(nx, ny) + y = x + xy = (y[..., 0:2] * 2.0 - 0.5 + grid) * self.stride[i] + wh = (y[..., 2:4] * 2) ** 2 * self.__getattr__(f"anchor_grid_{i}") + + cat = torch.cat((xy, wh, y[..., 4:]), -1) + return cat.view(bs, -1, self.no) + + def forward(self, all_x: tuple[torch.Tensor, ...]): + """ + From the outputs of the feature extraction layers of YoloV7, predict bounding boxes, + classes, and confidence. + + Parameters: + all_x: tuple[torch.Tensor] + Outputs of the feature extraction layers of YoloV7. Typically 3 5D tensors. 
+ + Returns: + pred: [batch_size, # of predictions, 5 + # of classes] + Where the rightmost dim contains [center_x, center_y, w, h, confidence score, n per-class scores] + """ + z = [] # inference output + for i in range(self.nl): + x = all_x[i] + x = self.m[i](x) # conv + points = self.make_grid_points(x, i) + z.append(points) + + return torch.cat(z, 1) + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)], indexing="ij") + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + +def _load_yolov7_source_model_from_weights(weights_name: str) -> torch.nn.Module: + # Load YoloV7 model from the source repository using the given weights. + # Returns .models.yolo.Model + with SourceAsRoot( + YOLOV7_SOURCE_REPOSITORY, + YOLOV7_SOURCE_REPO_COMMIT, + MODEL_ID, + MODEL_ASSET_VERSION, + ): + # Our qai_hub_models/models package may already be loaded and cached + # as "models" (reproduce by running python -m models.yolov7.demo from + # models qai_hub_models folder). To make sure it loads the external + # "models" package, explicitly reload first. + import models + + reload(models) + + # necessary imports. `models` come from the yolov7 repo. + from models.experimental import attempt_load + from models.yolo import Model + + yolov7_model = attempt_load(weights_name, map_location="cpu") # load FP32 model + + assert isinstance(yolov7_model, Model) + return yolov7_model diff --git a/qai_hub_models/models/yolov7/perf.yaml b/qai_hub_models/models/yolov7/perf.yaml new file mode 100644 index 00000000..36dfb9a3 --- /dev/null +++ b/qai_hub_models/models/yolov7/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: Yolo-v7 + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 22349.0 + throughput: 44.74473130788849 + estimated_peak_memory_range: + min: 9764864 + max: 12574848 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 286 + layers_on_gpu: 0 + layers_on_cpu: 21 + total_layers: 307 + job_id: jvgddqzlg + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:10:34.471023Z' diff --git a/qai_hub_models/models/yolov7/requirements.txt b/qai_hub_models/models/yolov7/requirements.txt new file mode 100644 index 00000000..8e95168e --- /dev/null +++ b/qai_hub_models/models/yolov7/requirements.txt @@ -0,0 +1,6 @@ +matplotlib +opencv-python +PyYAML +requests +scipy +seaborn diff --git a/qai_hub_models/models/yolov7/test.py b/qai_hub_models/models/yolov7/test.py new file mode 100644 index 00000000..b2d84594 --- 
/dev/null +++ b/qai_hub_models/models/yolov7/test.py @@ -0,0 +1,56 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np +import torch + +from qai_hub_models.models._shared.yolo.utils import detect_postprocess +from qai_hub_models.models.yolov7.app import YoloV7DetectionApp +from qai_hub_models.models.yolov7.demo import IMAGE_ADDRESS +from qai_hub_models.models.yolov7.demo import main as demo_main +from qai_hub_models.models.yolov7.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + YoloV7, + _load_yolov7_source_model_from_weights, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.image_processing import preprocess_PIL_image +from qai_hub_models.utils.testing import skip_clone_repo_check + +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "yolov7_demo_640_output.png" +) +WEIGHTS = "yolov7-tiny.pt" + + +@skip_clone_repo_check +def test_task(): + """Verify that raw (numeric) outputs of both (QAIHM and non-qaihm) networks are the same.""" + processed_sample_image = preprocess_PIL_image(load_image(IMAGE_ADDRESS)) + source_model = _load_yolov7_source_model_from_weights(WEIGHTS) + qaihm_model = YoloV7.from_pretrained(WEIGHTS) + + with torch.no_grad(): + # original model output + source_model.model[-1].training = False + source_model.model[-1].export = False + source_detect_out = source_model(processed_sample_image)[0] + source_out_postprocessed = detect_postprocess(source_detect_out) + + # Qualcomm AI Hub Model output + qaihm_out_postprocessed = qaihm_model(processed_sample_image) + for i in range(0, len(source_out_postprocessed)): + assert np.allclose(source_out_postprocessed[i], qaihm_out_postprocessed[i]) + + +def test_yolov7_app(): + image = load_image(IMAGE_ADDRESS) + output_image = load_image(OUTPUT_IMAGE_ADDRESS).convert("RGB") + app = YoloV7DetectionApp(YoloV7.from_pretrained(WEIGHTS)) + assert np.allclose(app.predict_boxes_from_image(image)[0], np.asarray(output_image)) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/yolov8_det/README.md b/qai_hub_models/models/yolov8_det/README.md new file mode 100644 index 00000000..d2bb0e5c --- /dev/null +++ b/qai_hub_models/models/yolov8_det/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [Yolo-v8-Detection: Real-time object detection optimized for mobile and edge](https://aihub.qualcomm.com/models/yolov8_det) + +YoloV8 is a machine learning model that predicts bounding boxes and classes of objects in an image. + +This is based on the implementation of Yolo-v8-Detection found +[here](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/yolo/detect). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/yolov8_det). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. 
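
For programmatic use, the model and its detection app can also be driven directly
from Python. The sketch below mirrors this package's demo and test code; the image
path is a placeholder you would replace with your own file:

```python
# Minimal sketch, assuming a local image file "input.jpg".
from PIL import Image

from qai_hub_models.models.yolov8_det.app import YoloV8DetectionApp
from qai_hub_models.models.yolov8_det.model import YoloV8Detector

model = YoloV8Detector.from_pretrained("yolov8n.pt")
app = YoloV8DetectionApp(model)

image = Image.open("input.jpg").convert("RGB")
# Returns the input image(s) with predicted boxes drawn; index 0 is the first image.
annotated = app.predict_boxes_from_image(image)[0]
```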
+ + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[yolov8_det]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.yolov8_det.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.yolov8_det.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of Yolo-v8-Detection can be found + [here](https://github.com/ultralytics/ultralytics/blob/main/LICENSE). + + +## References +* [Real-Time Flying Object Detection with YOLOv8](https://arxiv.org/abs/2305.09972) +* [Source Model Implementation](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/yolo/detect) diff --git a/qai_hub_models/models/yolov8_det/__init__.py b/qai_hub_models/models/yolov8_det/__init__.py new file mode 100644 index 00000000..e82c0b57 --- /dev/null +++ b/qai_hub_models/models/yolov8_det/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .app import YoloV8DetectionApp as App # noqa: F401 +from .model import MODEL_ID # noqa: F401 +from .model import YoloV8Detector as Model # noqa: F401 diff --git a/qai_hub_models/models/yolov8_det/app.py b/qai_hub_models/models/yolov8_det/app.py new file mode 100644 index 00000000..f642e153 --- /dev/null +++ b/qai_hub_models/models/yolov8_det/app.py @@ -0,0 +1,17 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch + +from qai_hub_models.models._shared.yolo.app import YoloObjectDetectionApp + + +class YoloV8DetectionApp(YoloObjectDetectionApp): + def check_image_size(self, pixel_values: torch.Tensor) -> None: + """ + YoloV8 does not check for spatial dim shapes for input image + """ + pass diff --git a/qai_hub_models/models/yolov8_det/demo.py b/qai_hub_models/models/yolov8_det/demo.py new file mode 100644 index 00000000..3e766a25 --- /dev/null +++ b/qai_hub_models/models/yolov8_det/demo.py @@ -0,0 +1,29 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.models._shared.yolo.demo import yolo_detection_demo +from qai_hub_models.models.yolov8_det.app import YoloV8DetectionApp +from qai_hub_models.models.yolov8_det.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + YoloV8Detector, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "test_images/input_image.jpg" +) + + +def main(is_test: bool = False): + yolo_detection_demo( + YoloV8Detector, + YoloV8DetectionApp, + IMAGE_ADDRESS, + is_test=is_test, + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/yolov8_det/export.py b/qai_hub_models/models/yolov8_det/export.py new file mode 100644 index 00000000..f9c24138 --- /dev/null +++ b/qai_hub_models/models/yolov8_det/export.py @@ -0,0 +1,193 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. + + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.yolov8_det import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. 
compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "yolov8_det" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "yolov8_det", + "Yolo-v8-Detection", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace( + model, make_torch_inputs(input_spec), check_trace=False + ) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. 
Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics( + inference_job, inference_result, torch_out, outputs_to_skip=[2] + ) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/yolov8_det/info.yaml b/qai_hub_models/models/yolov8_det/info.yaml new file mode 100644 index 00000000..bff88dc3 --- /dev/null +++ b/qai_hub_models/models/yolov8_det/info.yaml @@ -0,0 +1,37 @@ +name: Yolo-v8-Detection +# id must match with the model dir name in qai_hub_models +id: yolov8_det +status: public +headline: Real-time object detection optimized for mobile and edge. +domain: Computer Vision +use_case: Object Detection +description: YoloV8 is a machine learning model that predicts bounding boxes and classes + of objects in an image. +tags: + - real-time +research_paper: https://arxiv.org/abs/2305.09972 +research_paper_title: Real-Time Flying Object Detection with YOLOv8 +license: https://github.com/ultralytics/ultralytics/blob/main/LICENSE +source_repo: + https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/yolo/detect +technical_details: + Model checkpoint: YoloV8-N + Input resolution: 640x640 + Number of parameters: 3.18M + Model size: 12.2 MB +applicable_scenarios: + - Factory Automation + - Robotic Navigation + - Camera +related_models: + - yolov6 + - yolov7 +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: yes +license_type: agpl-3.0 +dataset: [] diff --git a/qai_hub_models/models/yolov8_det/model.py b/qai_hub_models/models/yolov8_det/model.py new file mode 100644 index 00000000..7d7a1833 --- /dev/null +++ b/qai_hub_models/models/yolov8_det/model.py @@ -0,0 +1,107 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch +import torch.nn as nn +from ultralytics import YOLO as ultralytics_YOLO + +from qai_hub_models.models._shared.yolo.utils import ( + get_most_likely_score, + transform_box_layout_xywh2xyxy, +) +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ASSET_VERSION = 1 +MODEL_ID = __name__.split(".")[-2] + +SUPPORTED_WEIGHTS = [ + "yolov8n.pt", + "yolov8s.pt", + "yolov8m.pt", + "yolov8l.pt", + "yolov8x.pt", +] +DEFAULT_WEIGHTS = "yolov8n.pt" + + +class YoloV8Detector(BaseModel): + """Exportable YoloV8 bounding box detector, end-to-end.""" + + def __init__(self, model: nn.Module) -> None: + super().__init__() + self.model = model + + @classmethod + def from_pretrained(cls, ckpt_name: str = DEFAULT_WEIGHTS): + model = ultralytics_YOLO(ckpt_name).model + model.eval() + return cls(model) + + def forward(self, image: torch.Tensor): + """ + Run YoloV8 on `image`, and produce a predicted set of bounding boxes and associated class probabilities. + + Parameters: + image: Pixel values pre-processed for encoder consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + boxes: Shape [batch, num preds, 4] where 4 == (center_x, center_y, w, h) + class scores multiplied by confidence: Shape [batch, num_preds, # of classes (typically 80)] + """ + predictions, *_ = self.model(image) + boxes, scores, classes = yolov8_detect_postprocess(predictions) + return boxes, scores, classes + + @staticmethod + def get_input_spec( + batch_size: int = 1, + num_channels: int = 3, + height: int = 640, + width: int = 640, + ) -> InputSpec: + """ + Returns the input specification (name -> (shape, type). This can be + used to submit profiling job on Qualcomm AI Hub. + """ + return {"image": ((batch_size, num_channels, height, width), "float32")} + + +def yolov8_detect_postprocess(detector_output: torch.Tensor): + """ + Post processing to break YoloV8 detector output into multiple, consumable tensors (eg. for NMS). + such as bounding boxes, scores and classes. + + Parameters: + detector_output: torch.Tensor + The output of Yolo Detection model + Shape is [batch, k, num_preds] + where, k = # of classes + 4 + k is structured as follows [boxes (4) : # of classes] + and boxes are co-ordinates [x_center, y_center, w, h] + + Returns: + boxes: torch.Tensor + Bounding box locations. Shape is [batch, num preds, 4] where 4 == (x1, y1, x2, y2) + scores: torch.Tensor + class scores multiplied by confidence: Shape is [batch, num_preds] + class_idx: torch.tensor + Shape is [batch, num_preds] where the last dim is the index of the most probable class of the prediction. + """ + # Break output into parts + detector_output = torch.permute(detector_output, [0, 2, 1]) + boxes = detector_output[:, :, :4] + scores = detector_output[:, :, 4:] + + # Convert boxes to (x1, y1, x2, y2) + boxes = transform_box_layout_xywh2xyxy(boxes) + + # Get class ID of most likely score. 
+ scores, class_idx = get_most_likely_score(scores) + + return boxes, scores, class_idx diff --git a/qai_hub_models/models/yolov8_det/perf.yaml b/qai_hub_models/models/yolov8_det/perf.yaml new file mode 100644 index 00000000..bbaddd57 --- /dev/null +++ b/qai_hub_models/models/yolov8_det/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: Yolo-v8-Detection + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 9251.0 + throughput: 108.09642200843152 + estimated_peak_memory_range: + min: 233472 + max: 2649168 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 300 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 300 + job_id: j7gjr2q8p + job_status: Passed + torchscript_onnx_qnn: + inference_time: 7043.0 + throughput: 141.9849495953429 + estimated_peak_memory_range: + min: 4939776 + max: 19565584 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 294 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 294 + job_id: jlpe7wy05 + job_status: Passed + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:08:50.678067Z' diff --git a/qai_hub_models/models/yolov8_det/requirements.txt b/qai_hub_models/models/yolov8_det/requirements.txt new file mode 100644 index 00000000..5d6e5cf5 --- /dev/null +++ b/qai_hub_models/models/yolov8_det/requirements.txt @@ -0,0 +1 @@ +ultralytics==8.0.193 diff --git a/qai_hub_models/models/yolov8_det/test.py b/qai_hub_models/models/yolov8_det/test.py new file mode 100644 index 00000000..13614261 --- /dev/null +++ b/qai_hub_models/models/yolov8_det/test.py @@ -0,0 +1,54 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np +import torch +from ultralytics import YOLO as ultralytics_YOLO + +from qai_hub_models.models.yolov8_det.app import YoloV8DetectionApp +from qai_hub_models.models.yolov8_det.demo import IMAGE_ADDRESS +from qai_hub_models.models.yolov8_det.demo import main as demo_main +from qai_hub_models.models.yolov8_det.model import ( + MODEL_ASSET_VERSION, + MODEL_ID, + YoloV8Detector, + yolov8_detect_postprocess, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.image_processing import preprocess_PIL_image +from qai_hub_models.utils.testing import skip_clone_repo_check + +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "test_images/output_image.png" +) +WEIGHTS = "yolov8n.pt" + + +@skip_clone_repo_check +def test_task(): + """Verify that raw (numeric) outputs of both (QAIHM and non-qaihm) networks are the same.""" + processed_sample_image = preprocess_PIL_image(load_image(IMAGE_ADDRESS)) + source_model = ultralytics_YOLO(WEIGHTS).model + qaihm_model = YoloV8Detector.from_pretrained(WEIGHTS) + + with torch.no_grad(): + # original model output + source_detect_out, *_ = source_model(processed_sample_image) + source_out_postprocessed = yolov8_detect_postprocess(source_detect_out) + + # Qualcomm AI Hub Model output + qaihm_out_postprocessed = qaihm_model(processed_sample_image) + for i in range(0, len(source_out_postprocessed)): + assert np.allclose(source_out_postprocessed[i], qaihm_out_postprocessed[i]) + + +def test_yolov8_det_app(): + image = load_image(IMAGE_ADDRESS) + output_image = load_image(OUTPUT_IMAGE_ADDRESS) + app = YoloV8DetectionApp(YoloV8Detector.from_pretrained(WEIGHTS)) + assert np.allclose(app.predict_boxes_from_image(image)[0], np.asarray(output_image)) + + +def test_demo(): + demo_main(is_test=True) diff --git a/qai_hub_models/models/yolov8_seg/README.md b/qai_hub_models/models/yolov8_seg/README.md new file mode 100644 index 00000000..4ccdd84f --- /dev/null +++ b/qai_hub_models/models/yolov8_seg/README.md @@ -0,0 +1,55 @@ +[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](../../README.md) + + +# [Yolo-v8-Segmentation: Real-time object segmentation optimized for mobile and edge](https://aihub.qualcomm.com/models/yolov8_seg) + +YoloV8 is a machine learning model that predicts bounding boxes, segmentation masks and classes of objects in an image. + +This is based on the implementation of Yolo-v8-Segmentation found +[here](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/yolo/segment). This repository contains scripts for optimized on-device +export suitable to run on Qualcomm® devices. More details on model performance +accross various devices, can be found [here](https://aihub.qualcomm.com/models/yolov8_seg). + +[Sign up](https://aihub.qualcomm.com/) for early access to run these models on +a hosted Qualcomm® device. + + +## Example & Usage + +Install the package via pip: +```bash +pip install "qai_hub_models[yolov8_seg]" +``` + + +Once installed, run the following simple CLI demo: + +```bash +python -m qai_hub_models.models.yolov8_seg.demo +``` +More details on the CLI tool can be found with the `--help` option. See +[demo.py](demo.py) for sample usage of the model including pre/post processing +scripts. 
Please refer to our [general instructions on using +models](../../#qai-hub-models) for more usage instructions. + +## Export for on-device deployment + +This repository contains export scripts that produce a model optimized for +on-device deployment. This can be run as follows: + +```bash +python -m qai_hub_models.models.yolov8_seg.export +``` +Additional options are documented with the `--help` option. Note that the above +script requires access to Deployment instructions for Qualcomm® AI Hub. + +## License +- Code in the Qualcomm® AI Hub Models repository is covered by the LICENSE + file at the repository root. +- The license for the original implementation of Yolo-v8-Segmentation can be found + [here](https://github.com/ultralytics/ultralytics/blob/main/LICENSE). + + +## References +* [Real-Time Flying Object Detection with YOLOv8](https://arxiv.org/abs/2305.09972) +* [Source Model Implementation](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/yolo/segment) diff --git a/qai_hub_models/models/yolov8_seg/__init__.py b/qai_hub_models/models/yolov8_seg/__init__.py new file mode 100644 index 00000000..54ba0b12 --- /dev/null +++ b/qai_hub_models/models/yolov8_seg/__init__.py @@ -0,0 +1,7 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from .app import YoloV8SegmentationApp as App # noqa: F401 +from .model import MODEL_ID # noqa: F401 +from .model import YoloV8Segmentor as Model # noqa: F401 diff --git a/qai_hub_models/models/yolov8_seg/app.py b/qai_hub_models/models/yolov8_seg/app.py new file mode 100644 index 00000000..01fee5b9 --- /dev/null +++ b/qai_hub_models/models/yolov8_seg/app.py @@ -0,0 +1,203 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable, List, Tuple + +import numpy as np +import torch +import torch.nn.functional as F +from PIL import Image +from torchvision.transforms import Resize +from ultralytics.utils.ops import process_mask + +from qai_hub_models.utils.bounding_box_processing import batched_nms +from qai_hub_models.utils.draw import create_color_map +from qai_hub_models.utils.image_processing import app_to_net_image_inputs + + +class YoloV8SegmentationApp: + """ + This class consists of light-weight "app code" that is required to perform end to end inference + with YoloV8 segmentation model. + + For a given image input, the app will: + * pre-process the image (convert to range[0, 1]) + * Run Yolo inference + * By default, + - post-processes output using non-maximum-suppression + - applies predicted mask on input image + """ + + def __init__( + self, + model: Callable[ + [torch.Tensor], + Tuple[ + List[torch.Tensor], + List[torch.Tensor], + List[torch.Tensor], + List[torch.Tensor], + torch.Tensor, + ], + ], + nms_score_threshold: float = 0.45, + nms_iou_threshold: float = 0.7, + input_height: int = 640, + input_width: int = 640, + ): + """ + Initialize a YoloV8SegmentationApp application. + + Parameters: + model: torch.Tensor + YoloV8 segmentation model. + + Inputs: + Tensor of shape (N H W C x float32) with range [0, 1] and BGR channel layout. 
+ + Outputs: + boxes: torch.Tensor + Bounding box locations. Shape is [batch, num preds, 4] where 4 == (x1, y1, x2, y2) + scores: torch.Tensor + Class scores multiplied by confidence: Shape is [batch, num_preds] + masks: torch.Tensor + Predicted masks: Shape is [batch, num_preds, 32] + classes: torch.Tensor + Shape is [batch, num_preds] where the last dim is the index of the most probable class of the prediction. + protos: torch.Tensor + Tensor of shape[batch, 32, mask_h, mask_w] + Multiply masks and protos to generate output masks. + + nms_score_threshold + Score threshold for non maximum suppression. + + nms_iou_threshold + Intersection over Union threshold for non maximum suppression. + """ + self.model = model + self.nms_score_threshold = nms_score_threshold + self.nms_iou_threshold = nms_iou_threshold + self.input_height = input_height + self.input_width = input_width + + def check_image_size(self, pixel_values: torch.Tensor) -> None: + """ + Verify image size is valid model input. + """ + return all([s % 32 == 0 for s in pixel_values.shape[-2:]]) + + def preprocess_input(self, pixel_values: torch.Tensor) -> torch.Tensor: + img_size = (self.input_height, self.input_width) + return Resize(img_size)(pixel_values) + + def predict(self, *args, **kwargs): + # See predict_boxes_from_image. + return self.predict_segmentation_from_image(*args, **kwargs) + + def predict_segmentation_from_image( + self, + pixel_values_or_image: torch.Tensor | np.ndarray | Image | List[Image], + raw_output: bool = False, + ) -> Tuple[ + List[torch.Tensor], List[torch.Tensor], List[torch.Tensor], List[torch.Tensor] + ] | List[Image.Image]: + """ + From the provided image or tensor, predict the bounding boxes & classes of objects detected within. + + Parameters: + pixel_values_or_image: torch.Tensor + PIL image + or + numpy array (N H W C x uint8) or (H W C x uint8) -- both BGR channel layout + or + pyTorch tensor (N C H W x fp32, value range is [0, 1]), BGR channel layout + + raw_output: bool + See "returns" doc section for details. + + Returns: + If raw_output is false or pixel_values_or_image is not a PIL image, returns: + pred_boxes: List[torch.Tensor] + List of predicted boxes for all the batches. + Each pred_box is of shape [num_boxes, 4] + pred_scores: List[torch.Tensor] + List of scores for each predicted box for all the batches. + Each pred_score is of shape [num_boxes] + pred_masks: List[torch.Tensor] + List of predicted masks for all the batches. + Each pred_mask is of shape [num_boxes, 32] + pred_classes: List[torch.Tensor] + List of predicted class for all the batches. 
+ Each pred_class is of shape [num_boxes] + + Otherwise, returns: + image_with_masks: List[PIL.Image] + Input image with predicted masks applied + """ + + # Input Prep + NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs( + pixel_values_or_image + ) + + # Cache input spatial dimension to use for post-processing + input_h, input_w = NCHW_fp32_torch_frames.shape[2:] + NCHW_fp32_torch_frames = self.preprocess_input(NCHW_fp32_torch_frames) + + self.check_image_size(NCHW_fp32_torch_frames) + + # Run prediction + pred_boxes, pred_scores, pred_masks, pred_class_idx, proto = self.model( + NCHW_fp32_torch_frames + ) + + # Non Maximum Suppression on each batch + pred_boxes, pred_scores, pred_class_idx, pred_masks = batched_nms( + self.nms_iou_threshold, + self.nms_score_threshold, + pred_boxes, + pred_scores, + pred_class_idx, + pred_masks, + ) + + # Process mask and upsample to input shape + for batch_idx in range(len(pred_masks)): + pred_masks[batch_idx] = process_mask( + proto[batch_idx], + pred_masks[batch_idx], + pred_boxes[batch_idx], + (self.input_height, self.input_width), + upsample=True, + ).numpy() + + # Resize masks to match with input image shape + pred_masks = F.interpolate( + input=torch.Tensor(pred_masks), + size=(input_h, input_w), + mode="bilinear", + align_corners=False, + ) + + # Return raw output if requested + if raw_output or isinstance(pixel_values_or_image, torch.Tensor): + return (pred_boxes, pred_scores, pred_masks, pred_class_idx) + + # Create color map and convert segmentation mask to RGB image + pred_mask_img = torch.argmax(pred_masks, 1) + + # Overlay the segmentation masks on the image. + color_map = create_color_map(pred_mask_img.max().item() + 1) + out = [] + for i, img_tensor in enumerate(NHWC_int_numpy_frames): + out.append( + Image.blend( + Image.fromarray(img_tensor), + Image.fromarray(color_map[pred_mask_img[i]]), + alpha=0.5, + ) + ) + return out diff --git a/qai_hub_models/models/yolov8_seg/demo.py b/qai_hub_models/models/yolov8_seg/demo.py new file mode 100644 index 00000000..bd194c22 --- /dev/null +++ b/qai_hub_models/models/yolov8_seg/demo.py @@ -0,0 +1,102 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Type + +from qai_hub_models.models.yolov8_seg.app import YoloV8SegmentationApp +from qai_hub_models.models.yolov8_seg.model import ( + DEFAULT_WEIGHTS, + MODEL_ASSET_VERSION, + MODEL_ID, + YoloV8Segmentor, +) +from qai_hub_models.utils.args import ( + demo_model_from_cli_args, + get_model_cli_parser, + get_on_device_demo_parser, + validate_on_device_demo_args, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset, load_image +from qai_hub_models.utils.base_model import BaseModel, TargetRuntime +from qai_hub_models.utils.display import display_or_save_image + +WEIGHTS_HELP_MSG = f"YoloV8-Segment checkpoint name. 
Valid checkpoints can be found in qai_hub_models/{MODEL_ID}/model.py" + +IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "test_images/bus.jpg" +) +OUTPUT_IMAGE_ADDRESS = CachedWebModelAsset.from_asset_store( + MODEL_ID, MODEL_ASSET_VERSION, "test_images/out_bus_with_mask.png" +) + + +def yolov8_seg_demo( + model_type: Type[BaseModel], + default_weights: str, + weights_help_msg: str, + default_image: CachedWebModelAsset, + stride_multiple: int | None = None, + is_test: bool = False, +): + # Demo parameters + parser = get_model_cli_parser(model_type) + parser = get_on_device_demo_parser( + parser, available_target_runtimes=[TargetRuntime.TFLITE], add_output_dir=True + ) + image_help = "image file path or URL." + if stride_multiple: + image_help = f"{image_help} Image spatial dimensions (x and y) must be multiples of {stride_multiple}." + + parser.add_argument( + "--image", + type=str, + help="Test image file path or URL", + ) + parser.add_argument( + "--score-threshold", + type=float, + default=0.45, + help="Score threshold for NonMaximumSuppression", + ) + parser.add_argument( + "--iou-threshold", + type=float, + default=0.7, + help="Intersection over Union (IoU) threshold for NonMaximumSuppression", + ) + args = parser.parse_args([] if is_test else None) + validate_on_device_demo_args(args, model_type.get_model_id()) + + if args.image is None: + image_path = default_image.fetch() + else: + image_path = args.image + + # Load image & model + model = demo_model_from_cli_args(model_type, args, check_trace=False) + app = YoloV8SegmentationApp(model, args.score_threshold, args.iou_threshold) + + print("Model Loaded") + + image = load_image(image_path) + image_annotated = app.predict_segmentation_from_image(image)[0] + + if not is_test: + display_or_save_image(image_annotated, args.output_dir) + + +def main(is_test: bool = False): + yolov8_seg_demo( + YoloV8Segmentor, + DEFAULT_WEIGHTS, + WEIGHTS_HELP_MSG, + IMAGE_ADDRESS, + is_test=is_test, + ) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/yolov8_seg/export.py b/qai_hub_models/models/yolov8_seg/export.py new file mode 100644 index 00000000..e8b4d7ae --- /dev/null +++ b/qai_hub_models/models/yolov8_seg/export.py @@ -0,0 +1,193 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT MANUALLY. 
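# Illustrative usage sketch: besides the `python -m qai_hub_models.models.yolov8_seg.export`
# CLI entry point, export_model() defined below can be invoked from Python. The
# arguments shown mirror its signature; the device name is the documented default.
#
#     from qai_hub_models.models.yolov8_seg.export import export_model
#     export_model(
#         device="Samsung Galaxy S23",
#         dst_runtime="TFLITE",
#         skip_profiling=True,
#         skip_inferencing=True,
#     )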
+ + +from __future__ import annotations + +import os +import warnings +from pathlib import Path +from typing import List, Optional, Tuple + +import qai_hub as hub +import torch + +from qai_hub_models.models.yolov8_seg import Model +from qai_hub_models.utils.args import ( + export_parser, + get_input_spec_kwargs, + get_model_kwargs, + parse_target_runtime, +) +from qai_hub_models.utils.compare import torch_inference +from qai_hub_models.utils.input_spec import make_torch_inputs +from qai_hub_models.utils.printing import ( + print_inference_metrics, + print_on_target_demo_cmd, + print_profile_metrics_from_job, +) +from qai_hub_models.utils.qai_hub_helpers import ( + can_access_qualcomm_ai_hub, + export_without_hub_access, + transpose_channel_first_to_last, +) + + +def export_model( + device: str = "Samsung Galaxy S23", + skip_profiling: bool = False, + skip_inferencing: bool = False, + skip_downloading: bool = False, + skip_summary: bool = False, + output_dir: Optional[str] = None, + dst_runtime: str = "TFLITE", + compile_options: str = "", + profile_options: str = "", + **additional_model_kwargs, +) -> Tuple[hub.CompileJob, Optional[hub.ProfileJob], Optional[hub.InferenceJob]] | List[ + str +]: + """ + This function accomplishes 6 main tasks: + + 1. Instantiates a PyTorch model and converts it to a traced TorchScript format. + 2. Compiles the model to an asset that can be run on device. + 3. Profiles the model performance on real devices. + 4. Inferences the model on sample inputs. + 5. Downloads the model asset to the local directory. + 6. Summarizes the results from profiling and inference. + + Each of the last four steps can be optionally skipped using the input options. + + Parameters: + device: Device for which to export the model. + Full list of available devices can be found by running `hub.get_devices()`. + Defaults to DEFAULT_DEVICE if not specified. + skip_profiling: If set, skips profiling of compiled model on real devices. + skip_inferencing: If set, skips computing on-device outputs from sample data. + skip_downloading: If set, skips downloading of compiled model. + skip_summary: If set, skips waiting for and summarizing results + from profiling and inference. + output_dir: Directory to store generated assets (e.g. compiled model). + Defaults to `/build/`. + dst_runtime: Which on-device runtime to target. Default is TensorFlowLite. + compile_options: Additional options to pass when submitting the compile job. + profile_options: Additional options to pass when submitting the profile job. + **additional_model_kwargs: Additional optional kwargs used to customize + `model_cls.from_pretrained` and `model.get_input_spec` + + Returns: + A 3-tuple of: + * A CompileJob object containing metadata about the compile job submitted to hub. + * A ProfileJob containing metadata about the profile job (None if profiling skipped). + * An InferenceJob containing metadata about the inference job (None if inferencing skipped). + """ + model_name = "yolov8_seg" + output_path = Path(output_dir or Path.cwd() / "build" / model_name) + target_runtime = parse_target_runtime(dst_runtime) + if not can_access_qualcomm_ai_hub(): + return export_without_hub_access( + "yolov8_seg", + "Yolo-v8-Segmentation", + device, + skip_profiling, + skip_inferencing, + skip_downloading, + skip_summary, + output_path, + target_runtime, + compile_options, + profile_options, + ) + + # 1. 
Initialize PyTorch model + model = Model.from_pretrained(**get_model_kwargs(Model, additional_model_kwargs)) + input_spec = model.get_input_spec( + **get_input_spec_kwargs(model, additional_model_kwargs) + ) + + # Trace the model + source_model = torch.jit.trace( + model, make_torch_inputs(input_spec), check_trace=False + ) + + # 2. Compile the model to an on-device asset + model_compile_options = model.get_hub_compile_options( + target_runtime, compile_options + " --force_channel_last_input image" + ) + print(f"Optimizing model {model_name} to run on-device.") + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=hub.Device(device), + name=model_name, + options=model_compile_options, + ) + + # 3. Profile the model asset on real devices + profile_job = None + if not skip_profiling: + print(f"Profiling model {model_name} on a hosted device.") + profile_job = hub.submit_profile_job( + model=compile_job.get_target_model(), + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 4. Run inference on-device with sample inputs + inference_job = None + if not skip_inferencing: + print( + f"Running inference for {model_name} on a hosted device with example inputs." + ) + sample_inputs = model.sample_inputs(input_spec) + # Convert inputs from channel first to channel last + hub_inputs = transpose_channel_first_to_last( + "image", sample_inputs, target_runtime + ) + inference_job = hub.submit_inference_job( + model=compile_job.get_target_model(), + inputs=hub_inputs, + device=hub.Device(device), + name=model_name, + options=profile_options, + ) + + # 5. Download the model asset to a local file + if not skip_downloading: + os.makedirs(output_path, exist_ok=True) + target_model = compile_job.get_target_model() + target_model.download(str(output_path / f"{model_name}.tflite")) + + # 6. Summarize the results from profiling and inference + if not skip_summary and not skip_profiling: + assert profile_job.wait().success + profile_data = profile_job.download_profile() + print_profile_metrics_from_job(profile_job, profile_data) + + if not skip_summary and not skip_inferencing: + torch_out = torch_inference(model, sample_inputs) + assert inference_job.wait().success + inference_result = inference_job.download_output_data() + print_inference_metrics( + inference_job, inference_result, torch_out, outputs_to_skip=[3] + ) + + print_on_target_demo_cmd(compile_job, Path(__file__).parent.resolve(), device) + + return (compile_job, profile_job, inference_job) + + +def main(): + warnings.filterwarnings("ignore") + parser = export_parser(model_cls=Model, supports_qnn=False) + args = parser.parse_args() + export_model(**vars(args)) + + +if __name__ == "__main__": + main() diff --git a/qai_hub_models/models/yolov8_seg/info.yaml b/qai_hub_models/models/yolov8_seg/info.yaml new file mode 100644 index 00000000..7397936c --- /dev/null +++ b/qai_hub_models/models/yolov8_seg/info.yaml @@ -0,0 +1,41 @@ +name: Yolo-v8-Segmentation +# id must match with the model dir name in qai_hub_models +id: yolov8_seg +status: public +headline: Real-time object segmentation optimized for mobile and edge. +domain: Computer Vision +use_case: Semantic Segmentation +description: YoloV8 is a machine learning model that predicts bounding boxes, segmentation + masks and classes of objects in an image. 
+tags: + - real-time +research_paper: https://arxiv.org/abs/2305.09972 +research_paper_title: Real-Time Flying Object Detection with YOLOv8 +license: https://github.com/ultralytics/ultralytics/blob/main/LICENSE +source_repo: + https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/yolo/segment +technical_details: + Model checkpoint: YoloV8N-Seg + Input resolution: 640x640 + Number of parameters: 3.43M + Model size: 13.2 MB +applicable_scenarios: + - Factory Automation + - Robotic Navigation + - Camera +related_models: + - unet_segmentation + - sam + - fastsam_x + - mediapipe_selfie + - yolov8_det + - ddrnet23_slim +form_factors: + - Phone + - Tablet + - IoT + - XR +has_static_banner: yes +has_animated_banner: no +license_type: agpl-3.0 +dataset: [] diff --git a/qai_hub_models/models/yolov8_seg/model.py b/qai_hub_models/models/yolov8_seg/model.py new file mode 100644 index 00000000..ff3ac466 --- /dev/null +++ b/qai_hub_models/models/yolov8_seg/model.py @@ -0,0 +1,126 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import torch +import torch.nn as nn +from ultralytics import YOLO as ultralytics_YOLO + +from qai_hub_models.models._shared.yolo.utils import ( + get_most_likely_score, + transform_box_layout_xywh2xyxy, +) +from qai_hub_models.utils.base_model import BaseModel +from qai_hub_models.utils.input_spec import InputSpec + +MODEL_ASSET_VERSION = 1 +MODEL_ID = __name__.split(".")[-2] + +SUPPORTED_WEIGHTS = [ + "yolov8n-seg.pt", + "yolov8s-seg.pt", + "yolov8m-seg.pt", + "yolov8l-seg.pt", + "yolov8x-seg.pt", +] +DEFAULT_WEIGHTS = "yolov8n-seg.pt" + + +class YoloV8Segmentor(BaseModel): + """Exportable YoloV8 segmentor, end-to-end.""" + + def __init__(self, model: nn.Module) -> None: + super().__init__() + self.model = model + + @classmethod + def from_pretrained(cls, ckpt_name: str = DEFAULT_WEIGHTS): + if ckpt_name not in SUPPORTED_WEIGHTS: + raise ValueError( + f"Unsupported checkpoint name provided {ckpt_name}.\n" + f"Supported checkpoints are {list(SUPPORTED_WEIGHTS)}." + ) + model = ultralytics_YOLO(ckpt_name).model + model.eval() + return cls(model) + + def forward(self, image: torch.Tensor): + """ + Run YoloV8 on `image`, and produce a predicted set of bounding boxes and associated class probabilities. + + Parameters: + image: Pixel values pre-processed for encoder consumption. + Range: float[0, 1] + 3-channel Color Space: RGB + + Returns: + boxes: torch.Tensor + Bounding box locations. Shape is [batch, num preds, 4] where 4 == (x1, y1, x2, y2) + scores: torch.Tensor + Class scores multiplied by confidence: Shape is [batch, num_preds] + masks: torch.Tensor + Predicted masks: Shape is [batch, num_preds, 32] + classes: torch.Tensor + Shape is [batch, num_preds] where the last dim is the index of the most probable class of the prediction. + protos: torch.Tensor + Tensor of shape[batch, 32, mask_h, mask_w] + Multiply masks and protos to generate output masks. 
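        Sketch of how downstream code combines masks with protos, mirroring
        app.py in this package (`model` and `image` are placeholders; in
        practice NMS is applied to the boxes/masks first, and 640x640 is the
        illustrative input resolution):

            from ultralytics.utils.ops import process_mask
            boxes, scores, masks, classes, protos = model(image)
            full_masks = process_mask(
                protos[0], masks[0], boxes[0], (640, 640), upsample=True
            )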
+ """ + predictions = self.model(image) + boxes, scores, masks, classes = yolov8_segment_postprocess(predictions[0]) + return boxes, scores, masks, classes, predictions[1][-1] + + def get_input_spec( + self, + batch_size: int = 1, + num_channels: int = 3, + height: int = 640, + width: int = 640, + ) -> InputSpec: + """ + Returns the input specification (name -> (shape, type). This can be + used to submit profiling job on Qualcomm AI Hub. + """ + return {"image": ((batch_size, num_channels, height, width), "float32")} + + +def yolov8_segment_postprocess(detector_output: torch.Tensor): + """ + Post processing to break YoloV8 detector output into multiple, consumable tensors (eg. for NMS). + such as bounding boxes, scores and classes. + + Parameters: + detector_output: torch.Tensor + The output of Yolo Detection model + Shape is [batch, k, num_preds] + where, k = # of classes + 4 + k is structured as follows [boxes (4) : # of classes] + and boxes are co-ordinates [x_center, y_center, w, h] + + Returns: + boxes: torch.Tensor + Bounding box locations. Shape is [batch, num preds, 4] where 4 == (x1, y1, x2, y2) + scores: torch.Tensor + Class scores multiplied by confidence: Shape is [batch, num_preds] + masks: torch.Tensor + Predicted masks: Shape is [batch, num_preds, 32] + class_idx: torch.Tensor + Shape is [batch, num_preds] where the last dim is the index of the most probable class of the prediction. + """ + # Break output into parts + detector_output = torch.permute(detector_output, [0, 2, 1]) + boxes_idx, num_classes = 4, 80 + masks_dim = detector_output.shape[-1] - boxes_idx - num_classes + boxes = detector_output[:, :, :4] + scores = detector_output[:, :, 4 : boxes_idx + num_classes] + masks = detector_output[:, :, -masks_dim:] + + # Convert boxes to (x1, y1, x2, y2) + boxes = transform_box_layout_xywh2xyxy(boxes) + + # Get class ID of most likely score. 
+ scores, class_idx = get_most_likely_score(scores) + + return boxes, scores, masks, class_idx diff --git a/qai_hub_models/models/yolov8_seg/perf.yaml b/qai_hub_models/models/yolov8_seg/perf.yaml new file mode 100644 index 00000000..6e25b242 --- /dev/null +++ b/qai_hub_models/models/yolov8_seg/perf.yaml @@ -0,0 +1,67 @@ +aggregated: + supported_oses: + - Android + supported_devices: + - Google Pixel 3 + - Google Pixel 3a + - Google Pixel 3a XL + - Google Pixel 4 + - Google Pixel 4a + - Google Pixel 5a 5G + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + - Xiaomi 12 + - Xiaomi 12 Pro + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 +models: +- name: Yolo-v8-Segmentation + performance_metrics: + - torchscript_onnx_tflite: + inference_time: 10686.0 + throughput: 93.58038555118847 + estimated_peak_memory_range: + min: 4616192 + max: 6819472 + primary_compute_unit: NPU + precision: fp16 + layer_info: + layers_on_npu: 337 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 337 + job_id: jz57el6qp + job_status: Passed + torchscript_onnx_qnn: + inference_time: 'null' + throughput: 'null' + estimated_peak_memory_range: + min: 0 + max: 0 + primary_compute_unit: 'null' + precision: 'null' + layer_info: + layers_on_npu: 0 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 0 + job_id: '' + job_status: Skipped + reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-02-21T16:36:07.212007Z' diff --git a/qai_hub_models/models/yolov8_seg/requirements.txt b/qai_hub_models/models/yolov8_seg/requirements.txt new file mode 100644 index 00000000..5d6e5cf5 --- /dev/null +++ b/qai_hub_models/models/yolov8_seg/requirements.txt @@ -0,0 +1 @@ +ultralytics==8.0.193 diff --git a/qai_hub_models/models/yolov8_seg/test.py b/qai_hub_models/models/yolov8_seg/test.py new file mode 100644 index 00000000..f477d402 --- /dev/null +++ b/qai_hub_models/models/yolov8_seg/test.py @@ -0,0 +1,65 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np +import torch +from ultralytics import YOLO as ultralytics_YOLO + +from qai_hub_models.models.yolov8_seg.app import YoloV8SegmentationApp +from qai_hub_models.models.yolov8_seg.demo import IMAGE_ADDRESS, OUTPUT_IMAGE_ADDRESS +from qai_hub_models.models.yolov8_seg.demo import main as demo_main +from qai_hub_models.models.yolov8_seg.model import ( + YoloV8Segmentor, + yolov8_segment_postprocess, +) +from qai_hub_models.utils.asset_loaders import load_image +from qai_hub_models.utils.image_processing import preprocess_PIL_image +from qai_hub_models.utils.testing import assert_most_close + +WEIGHTS = "yolov8n-seg.pt" + + +def test_task(): + """Verify that raw (numeric) outputs of both (QAIHM and non-qaihm) networks are the same.""" + source_model = ultralytics_YOLO(WEIGHTS).model + qaihm_model = YoloV8Segmentor.from_pretrained(WEIGHTS) + qaihm_app = YoloV8SegmentationApp(qaihm_model) + processed_sample_image = preprocess_PIL_image(load_image(IMAGE_ADDRESS)) + processed_sample_image = qaihm_app.preprocess_input(processed_sample_image) + + with torch.no_grad(): + # original model output + source_out = source_model(processed_sample_image) + source_out_postprocessed = yolov8_segment_postprocess(source_out[0]) + source_out = [*source_out_postprocessed, source_out[1][-1]] + + # Qualcomm AI Hub Model output + qaihm_out_postprocessed = qaihm_model(processed_sample_image) + for i in range(0, len(source_out_postprocessed)): + assert np.allclose(source_out_postprocessed[i], qaihm_out_postprocessed[i]) + + +def test_trace(): + net = YoloV8Segmentor.from_pretrained(WEIGHTS) + input_spec = net.get_input_spec() + trace = net.convert_to_torchscript(input_spec, check_trace=False) + + # Collect output via app for traced model + img = load_image(IMAGE_ADDRESS) + app = YoloV8SegmentationApp(trace) + out_imgs = app.predict(img) + + expected_out = load_image(OUTPUT_IMAGE_ADDRESS) + assert_most_close( + np.asarray(out_imgs[0], dtype=np.float32), + np.asarray(expected_out, dtype=np.float32), + 0.005, + rtol=0.02, + atol=1.5, + ) + + +def test_demo(): + # Run demo and verify it does not crash + demo_main(is_test=True) diff --git a/qai_hub_models/requirements-dev.txt b/qai_hub_models/requirements-dev.txt new file mode 100644 index 00000000..00cfe6e1 --- /dev/null +++ b/qai_hub_models/requirements-dev.txt @@ -0,0 +1,19 @@ +boto3 +botocore +coverage==6.5.0 +huggingface-hub==0.20.3 +jinja2==3.0.3 +mypy==0.991 +protobuf==3.20.3 +pytest-cov==4.1.0 +pytest-xdist==3.3.1 +pyyaml==6.0.1 +ruamel-yaml +schema==0.7.5 +scikit-image>=0.21.0 +tensorflow-cpu==2.13.0; sys_platform != 'darwin' +tensorflow-macos==2.13.0; sys_platform == 'darwin' +types-PyYAML +types-pillow +types-requests +keyrings.envvars; python_version >= '3.9' # used only by CI diff --git a/qai_hub_models/requirements.txt b/qai_hub_models/requirements.txt new file mode 100644 index 00000000..6ba28b51 --- /dev/null +++ b/qai_hub_models/requirements.txt @@ -0,0 +1,17 @@ +Pillow==10.0.1 +gdown==4.7.1 +gitpython +huggingface_hub +ipython +numpy==1.23.1 +opencv-python==4.8.1.78 +prettytable +pytest==7.4.2 +pyyaml +qai_hub +requests +requests_toolbelt +schema +torch==1.13.1 +torchvision<=0.14.1 +urllib3<2 diff --git a/qai_hub_models/test/__init__.py b/qai_hub_models/test/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/test/__init__.py @@ -0,0 +1,4 @@ +# 
--------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/test/e2e/__init__.py b/qai_hub_models/test/e2e/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/test/e2e/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/test/e2e/test_aimet_compile.py b/qai_hub_models/test/e2e/test_aimet_compile.py new file mode 100644 index 00000000..0bd090c5 --- /dev/null +++ b/qai_hub_models/test/e2e/test_aimet_compile.py @@ -0,0 +1,43 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np +import pytest +import qai_hub as hub + +from qai_hub_models.models.mobilenet_v2_quantized.model import MobileNetV2Quantizable +from qai_hub_models.utils.base_model import SourceModelFormat, TargetRuntime +from qai_hub_models.utils.inference import compile_zoo_model_to_hub +from qai_hub_models.utils.measurement import get_model_size_mb +from qai_hub_models.utils.testing import skip_clone_repo_check_fixture # noqa: F401 + + +@pytest.mark.parametrize( + "source_model_format,target_runtime,expected_size_mb", + [ + (SourceModelFormat.ONNX, TargetRuntime.TFLITE, 3.4), + (SourceModelFormat.TORCHSCRIPT, TargetRuntime.TFLITE, 3.4), + (SourceModelFormat.ONNX, TargetRuntime.QNN, 3.8), + (SourceModelFormat.TORCHSCRIPT, TargetRuntime.QNN, 3.8), + ], +) +def test_compile_aimet( + source_model_format, target_runtime, expected_size_mb, skip_clone_repo_check_fixture +): + model = MobileNetV2Quantizable.from_pretrained() + + calibration_data = model.get_calibration_data(target_runtime) + + device = hub.Device("Samsung Galaxy S23") + hub_model = compile_zoo_model_to_hub( + model=model, + device=device, + source_model_format=source_model_format, + target_runtime=target_runtime, + calibration_data=calibration_data, + ) + + # Make sure model is quantized + tgt_model_size_mb = get_model_size_mb(hub_model.model) + np.testing.assert_allclose(expected_size_mb, tgt_model_size_mb, rtol=0.1) diff --git a/qai_hub_models/test/test_async_compile_jobs.py b/qai_hub_models/test/test_async_compile_jobs.py new file mode 100644 index 00000000..41374b24 --- /dev/null +++ b/qai_hub_models/test/test_async_compile_jobs.py @@ -0,0 +1,27 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import os + +import qai_hub as hub +import yaml + + +def test_compile_jobs_success(): + """ + When testing compilation in CI, synchronously waiting for each compile_job to + finish is too slow. Instead, job ids are written to a file upon submission, + and success is validated all at once in the end using this test. 
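+
+    The file referenced by COMPILE_JOBS_FILE is read with yaml.safe_load and is
+    expected to be a flat mapping of model name to compile job id, e.g.
+    (illustrative ids only):
+
+        mobilenet_v2: j1glq9xyz
+        yolov8_seg: jn0w2kabc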
+ """ + if os.stat(os.environ["COMPILE_JOBS_FILE"]).st_size == 0: + return + with open(os.environ["COMPILE_JOBS_FILE"], "r") as f: + job_ids = yaml.safe_load(f.read()) + failed_jobs = {} + for name, job_id in job_ids.items(): + result = hub.get_job(job_id).wait() + if not result.success: + failed_jobs[name] = job_id + if failed_jobs: + raise ValueError(f"The following jobs failed to compile: {failed_jobs}") diff --git a/qai_hub_models/test/test_utils/__init__.py b/qai_hub_models/test/test_utils/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/test/test_utils/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/test/test_utils/perf.yaml b/qai_hub_models/test/test_utils/perf.yaml new file mode 100644 index 00000000..c68d3abc --- /dev/null +++ b/qai_hub_models/test/test_utils/perf.yaml @@ -0,0 +1,59 @@ +models: +- name: AOTGAN + performance_metrics: + - reference_device_info: + name: Samsung Galaxy S23 Ultra + os: '13' + form_factor: Phone + os_name: Android + manufacturer: Samsung + chipset: Snapdragon® 8 Gen 2 + timestamp: '2024-01-26T00:36:03.230526Z' + torchscript_onnx_tflite: + inference_time: 171647.0 + throughput: 5.8259101528136235 + estimated_peak_memory_range: + min: 3248128 + max: 6077152 + layer_info: + layers_on_npu: 243 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 243 + precision: fp16 + primary_compute_unit: NPU + job_id: jegnojxm5 + job_status: Passed + torchscript_onnx_qnn: + inference_time: 159404.0 + throughput: 6.273368296905975 + estimated_peak_memory_range: + min: 311296 + max: 41386272 + layer_info: + layers_on_npu: 283 + layers_on_gpu: 0 + layers_on_cpu: 0 + total_layers: 283 + precision: fp16 + primary_compute_unit: NPU + job_id: jo5mojldg + job_status: Passed +aggregated: + supported_devices: + - Samsung Galaxy S21 + - Samsung Galaxy S21 Ultra + - Samsung Galaxy S21+ + - Samsung Galaxy S22 5G + - Samsung Galaxy S22 Ultra 5G + - Samsung Galaxy S22+ 5G + - Samsung Galaxy S23 + - Samsung Galaxy S23 Ultra + - Samsung Galaxy S23+ + - Samsung Galaxy Tab S8 + supported_oses: + - Android + supported_chipsets: + - Snapdragon® 8 Gen 1 + - Snapdragon® 8 Gen 2 + - Snapdragon® 888 diff --git a/qai_hub_models/test/test_utils/test_info_specs.py b/qai_hub_models/test/test_utils/test_info_specs.py new file mode 100644 index 00000000..bfbd07ef --- /dev/null +++ b/qai_hub_models/test/test_utils/test_info_specs.py @@ -0,0 +1,110 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from qai_hub_models.utils.config_loaders import ( + MODEL_DOMAIN, + MODEL_IDS, + MODEL_TAG, + MODEL_USE_CASE, + QAIHMModelInfo, +) + +HF_PIPELINE_TAGS = { + "text-classification", + "token-classification", + "table-question-answering", + "question-answering", + "zero-shot-classification", + "translation", + "summarization", + "conversational", + "feature-extraction", + "text-generation", + "text2text-generation", + "fill-mask", + "sentence-similarity", + "text-to-speech", + "text-to-audio", + "automatic-speech-recognition", + "audio-to-audio", + "audio-classification", + "voice-activity-detection", + "depth-estimation", + "image-classification", + "object-detection", + "image-segmentation", + "text-to-image", + "image-to-text", + "image-to-image", + "image-to-video", + "unconditional-image-generation", + "video-classification", + "reinforcement-learning", + "robotics", + "tabular-classification", + "tabular-regression", + "tabular-to-text", + "table-to-text", + "multiple-choice", + "text-retrieval", + "time-series-forecasting", + "text-to-video", + "visual-question-answering", + "document-question-answering", + "zero-shot-image-classification", + "graph-ml", + "mask-generation", + "zero-shot-object-detection", + "text-to-3d", + "image-to-3d", + "other", +} + + +def test_model_usecase_to_hf_pipeline_tag(): + for use_case in MODEL_USE_CASE: + assert use_case.map_to_hf_pipeline_tag() in HF_PIPELINE_TAGS + + +def test_info_spec(): + # Guard against MODEL_IDS being empty + assert ( + len(MODEL_IDS) > 0 + ), "Something went wrong. This test found no models to validate." + + for model_id in MODEL_IDS: + try: + info_spec = QAIHMModelInfo.from_model(model_id) + except Exception as err: + assert False, f"{model_id} config validation failed: {str(err)}" + + # Verify model ID is the same as folder name + assert ( + info_spec.id == model_id + ), f"{model_id} config ID does not match the model's folder name" + + # Validate spec + valid, reason = info_spec.validate() + assert valid, f"{model_id} config validation failed: {reason}" + + +def test_qaihm_domain(): + # Test " " is handled correctly and vice-versa + assert MODEL_DOMAIN.from_string("Computer Vision") == MODEL_DOMAIN.COMPUTER_VISION + assert MODEL_DOMAIN.COMPUTER_VISION.__str__() == "Computer Vision" + + +def test_qaihm_tags(): + # Test "-" is handled correctly and vice-versa + assert MODEL_TAG.from_string("real-time") == MODEL_TAG.REAL_TIME + assert MODEL_TAG.REAL_TIME.__str__() == "real-time" + + +def test_qaihm_usecases(): + # Test " " is handled correctly and vice-versa + assert ( + MODEL_USE_CASE.from_string("Image Classification") + == MODEL_USE_CASE.IMAGE_CLASSIFICATION + ) + assert MODEL_USE_CASE.IMAGE_CLASSIFICATION.__str__() == "Image Classification" diff --git a/qai_hub_models/test/test_utils/test_perf_summary.py b/qai_hub_models/test/test_utils/test_perf_summary.py new file mode 100644 index 00000000..33780bb6 --- /dev/null +++ b/qai_hub_models/test/test_utils/test_perf_summary.py @@ -0,0 +1,206 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import os + +import ruamel.yaml + +from qai_hub_models.utils.perf_summary import PerformanceSummary + +CHIPSET = "GEN2" +OS = "13" +MODEL_ID = "dummy" + + +def get_basic_speedup_report( + os_name: str = "Android", + onnx_tf_inference_time="null", + onnx_ort_qnn_inference_time=100.0, +): + return { + "models": [ + { + "name": "dummy", + "performance_metrics": [ + { + "reference_device_info": { + "os": OS, + "os_name": os_name, + "chipset": CHIPSET, + }, + "torchscript_onnx_tflite": { + "inference_time": onnx_tf_inference_time, + }, + "torchscript_onnx_qnn": { + "inference_time": 5.0, + }, + "torchscript_qnn": { + "inference_time": 5.0, + }, + }, + ], + }, + ] + } + + +def read_config(config_path): + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + yaml.preserve_yaml_order = True + with open(config_path, "r") as file: + return yaml.load(file) + + +def validate_perf_summary_is_empty(perf_summary): + # No difference captured + for _, val in perf_summary.progressions.items(): + assert len(val) == 0 + for _, val in perf_summary.regressions.items(): + assert len(val) == 0 + # No new reports captured + assert len(perf_summary.new_perf_report) == 0 + # No missing devices found in updated report + assert len(perf_summary.missing_devices) == 0 + + +def test_ios_excluded(): + # Set os_name to iOS to ensure it's not included in summary + prev_perf_metrics = get_basic_speedup_report(os_name="iOS") + new_perf_metrics = get_basic_speedup_report( + os_name="iOS", + onnx_tf_inference_time=10.0, + ) + + perf_summary = PerformanceSummary() + validate_perf_summary_is_empty(perf_summary) + + # Update perf summary + perf_summary.update_summary(MODEL_ID, prev_perf_metrics, new_perf_metrics) + + # Ensure no change in perf summary + validate_perf_summary_is_empty(perf_summary) + + +def test_model_inference_run_toggle(): + # Test model inference fail/pass toggle is captured + prev_perf_metrics = get_basic_speedup_report( + onnx_tf_inference_time="null", onnx_ort_qnn_inference_time=10.0 + ) + new_perf_metrics = get_basic_speedup_report( + onnx_tf_inference_time=10.0, onnx_ort_qnn_inference_time="null" + ) + + perf_summary = PerformanceSummary() + validate_perf_summary_is_empty(perf_summary) + + # Update perf summary + perf_summary.update_summary(MODEL_ID, prev_perf_metrics, new_perf_metrics) + + assert perf_summary.progressions["inf"] == [ + (MODEL_ID, "torchscript_onnx_tflite", "inf", 10.0, "null", CHIPSET, OS) + ] + + +def test_perf_progression_basic(): + prev_perf_metrics = get_basic_speedup_report( + onnx_tf_inference_time=10.0, onnx_ort_qnn_inference_time=5.123 + ) + new_perf_metrics = get_basic_speedup_report( + onnx_tf_inference_time=0.5, onnx_ort_qnn_inference_time=5.123 + ) + + perf_summary = PerformanceSummary() + validate_perf_summary_is_empty(perf_summary) + + # Update perf summary + perf_summary.update_summary(MODEL_ID, prev_perf_metrics, new_perf_metrics) + + expected_inf_bucket = [ + (MODEL_ID, "torchscript_onnx_tflite", 20.0, 0.5, 10.0, CHIPSET, OS), + ] + + assert perf_summary.progressions[10] == expected_inf_bucket + + +def test_perf_regression_basic(): + # Test regression in perf numbers + prev_perf_metrics = get_basic_speedup_report( + onnx_tf_inference_time=10.0, onnx_ort_qnn_inference_time=5.123 + ) + new_perf_metrics = get_basic_speedup_report( + onnx_tf_inference_time=20.0, onnx_ort_qnn_inference_time=5.123 + ) + + perf_summary = PerformanceSummary() + 
validate_perf_summary_is_empty(perf_summary) + + # Update perf summary + perf_summary.update_summary(MODEL_ID, prev_perf_metrics, new_perf_metrics) + + expected_inf_bucket = [ + (MODEL_ID, "torchscript_onnx_tflite", 2, 20.0, 10.0, CHIPSET, OS), + ] + + assert perf_summary.regressions[2] == expected_inf_bucket + + +def test_missing_devices(): + prev_perf_metrics = get_basic_speedup_report( + onnx_tf_inference_time=1.123, onnx_ort_qnn_inference_time=5.123 + ) + new_perf_metrics = get_basic_speedup_report( + onnx_tf_inference_time=0.372, onnx_ort_qnn_inference_time=5.123 + ) + + # Override chipset + new_perf_metrics["models"][0]["performance_metrics"][0]["reference_device_info"][ + "chipset" + ] = "diff-chip-xyz" + + perf_summary = PerformanceSummary() + validate_perf_summary_is_empty(perf_summary) + + # Update perf summary + perf_summary.update_summary(MODEL_ID, prev_perf_metrics, new_perf_metrics) + + assert len(perf_summary.missing_devices) == 1 + assert perf_summary.missing_devices[0] == (MODEL_ID, CHIPSET) + + +def test_empty_report(): + prev_perf_metrics = get_basic_speedup_report() + prev_perf_metrics["models"][0]["performance_metrics"][0][ + "reference_device_info" + ] = {} + new_perf_metrics = prev_perf_metrics + + perf_summary = PerformanceSummary() + validate_perf_summary_is_empty(perf_summary) + + # Update perf summary + perf_summary.update_summary(MODEL_ID, prev_perf_metrics, new_perf_metrics) + + assert len(perf_summary.empty_perf_report) == 1 + assert perf_summary.empty_perf_report[0] == (MODEL_ID,) + + +def test_e2e_aotgan_perf_summary_no_change(): + perf_filename = os.path.join(os.path.dirname(__file__), "perf.yaml") + + # Ensure perf.yaml is present, if moved, please make accordingly changes in the script. + assert os.path.exists(os.path.join(perf_filename)) + + perf_summary = PerformanceSummary() + validate_perf_summary_is_empty(perf_summary) + + existing_model_card = read_config(perf_filename) + perf_summary.update_summary( + "aotgan", + previous_report=existing_model_card, + new_report=existing_model_card, + ) + + # Ensure perf summary is empty + validate_perf_summary_is_empty(perf_summary) diff --git a/qai_hub_models/test/test_utils/test_qai_hub_helpers.py b/qai_hub_models/test/test_utils/test_qai_hub_helpers.py new file mode 100644 index 00000000..8bd1434e --- /dev/null +++ b/qai_hub_models/test/test_utils/test_qai_hub_helpers.py @@ -0,0 +1,82 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import numpy as np + +from qai_hub_models.utils.base_model import TargetRuntime +from qai_hub_models.utils.qai_hub_helpers import ( + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) + + +def test_transpose_qnn_case1(): + array = np.random.random((4, 3, 2)) + inp = dict(a=[array]) + result = transpose_channel_first_to_last("a", inp, TargetRuntime.QNN) + assert inp["a"][0].shape == result["a"][0].shape + assert np.allclose(inp["a"][0], result["a"][0]) + + +def test_transpose_qnn_case2(): + array = np.random.random((4, 3, 2, 5)) + inp = dict(a=[array]) + result = transpose_channel_first_to_last("a", inp, TargetRuntime.QNN) + assert list(result.keys())[0] == "a" + assert result["a"][0].shape == (4, 2, 5, 3) + result = transpose_channel_last_to_first("a", result, TargetRuntime.QNN) + assert inp["a"][0].shape == result["a"][0].shape + assert np.allclose(inp["a"][0], result["a"][0]) + + +def test_transpose_qnn_case3(): + array = np.random.random((4, 3, 2, 5, 6)) + inp = dict(a=[array]) + result = transpose_channel_first_to_last("a", inp, TargetRuntime.QNN) + assert list(result.keys())[0] == "a" + assert result["a"][0].shape == (4, 3, 5, 6, 2) + result = transpose_channel_last_to_first("a", result, TargetRuntime.QNN) + assert inp["a"][0].shape == result["a"][0].shape + assert np.allclose(inp["a"][0], result["a"][0]) + + +def test_transpose_tflite_case1(): + array = np.random.random((4, 3, 2)) + inp = dict(a=[array]) + result = transpose_channel_first_to_last("a", inp, TargetRuntime.TFLITE) + assert list(result.keys())[0] == "a" + assert result["a"][0].shape == (3, 2, 4) + result = transpose_channel_last_to_first("a", result, TargetRuntime.TFLITE) + assert inp["a"][0].shape == result["a"][0].shape + assert np.allclose(inp["a"][0], result["a"][0]) + + +def test_transpose_tflite_case2(): + array = np.random.random((4, 3, 2, 5)) + inp = dict(a=[array]) + result = transpose_channel_first_to_last("a", inp, TargetRuntime.TFLITE) + assert list(result.keys())[0] == "a" + assert result["a"][0].shape == (4, 2, 5, 3) + result = transpose_channel_last_to_first("a", result, TargetRuntime.TFLITE) + assert inp["a"][0].shape == result["a"][0].shape + assert np.allclose(inp["a"][0], result["a"][0]) + + +def test_transpose_tflite_case3(): + array = np.random.random((4, 3, 2, 5, 6)) + inp = dict(a=[array]) + result = transpose_channel_first_to_last("a", inp, TargetRuntime.TFLITE) + assert list(result.keys())[0] == "a" + assert result["a"][0].shape == (4, 3, 5, 6, 2) + result = transpose_channel_last_to_first("a", result, TargetRuntime.TFLITE) + assert inp["a"][0].shape == result["a"][0].shape + assert np.allclose(inp["a"][0], result["a"][0]) + + +def test_transpose_qnn_case4(): + array = np.random.random((4, 3)) + inp = dict(a=[array]) + result = transpose_channel_first_to_last("a", inp, TargetRuntime.TFLITE) + assert inp["a"][0].shape == result["a"][0].shape + assert np.allclose(inp["a"][0], result["a"][0]) diff --git a/qai_hub_models/utils/__init__.py b/qai_hub_models/utils/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/utils/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/utils/aimet/__init__.py b/qai_hub_models/utils/aimet/__init__.py new file mode 100644 index 00000000..21a22b31 --- /dev/null +++ b/qai_hub_models/utils/aimet/__init__.py @@ -0,0 +1,4 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- diff --git a/qai_hub_models/utils/aimet/config_loader.py b/qai_hub_models/utils/aimet/config_loader.py new file mode 100644 index 00000000..c7b63610 --- /dev/null +++ b/qai_hub_models/utils/aimet/config_loader.py @@ -0,0 +1,15 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from pathlib import Path + + +def get_default_aimet_config() -> str: + path = Path(__file__).parent / "default_config.json" + return str(path.resolve()) + + +def get_per_channel_aimet_config() -> str: + path = Path(__file__).parent / "default_config_per_channel.json" + return str(path.resolve()) diff --git a/qai_hub_models/utils/aimet/default_config.json b/qai_hub_models/utils/aimet/default_config.json new file mode 100644 index 00000000..f616005d --- /dev/null +++ b/qai_hub_models/utils/aimet/default_config.json @@ -0,0 +1,71 @@ +{ + "defaults": + { + "ops": + { + "is_output_quantized": "True" + }, + "params": + { + "is_quantized": "True", + "is_symmetric": "True" + }, + "strict_symmetric": "False", + "per_channel_quantization": "False" + }, + + "params": + { + "bias": + { + "is_quantized": "True" + } + }, + + "op_type": + { + "Squeeze": + { + "is_output_quantized": "False" + }, + "Pad": + { + "is_output_quantized": "False" + }, + "Mean": + { + "is_output_quantized": "False" + }, + "Gather": + { + "is_output_quantized": "False" + } + }, + + "supergroups": + [ + { + "op_list": ["Conv", "Relu"] + }, + { + "op_list": ["ConvTranspose", "Relu"] + }, + { + "op_list": ["Conv", "Clip"] + }, + { + "op_list": ["Add", "Relu"] + }, + { + "op_list": ["Gemm", "Relu"] + } + ], + + "model_input": + { + "is_input_quantized": "True" + }, + + "model_output": + {} +} diff --git a/qai_hub_models/utils/aimet/default_config_per_channel.json b/qai_hub_models/utils/aimet/default_config_per_channel.json new file mode 100644 index 00000000..b343a4a3 --- /dev/null +++ b/qai_hub_models/utils/aimet/default_config_per_channel.json @@ -0,0 +1,69 @@ +{ + "defaults": + { + "ops": + { + "is_output_quantized": "True", + "is_symmetric": "True" + }, + "params": + { + "is_quantized": "True", + "is_symmetric": "True" + }, + "strict_symmetric": "False", + "unsigned_symmetric": "False", + "per_channel_quantization": "True" + }, + + "params": + { + "bias": + { + "is_quantized": "True" + } + }, + + "op_type": + { + "Squeeze": + { + "is_output_quantized": "True" + }, + "Pad": + { + "is_output_quantized": "True" + }, + "Mean": + { + "is_output_quantized": "False" + } + }, + + "supergroups": + [ + { + "op_list": ["Conv", "Relu"] + }, + { + "op_list": ["Conv", "Clip"] + }, + { + "op_list": ["Conv", "BatchNormalization", "Relu"] + }, + { + "op_list": ["Add", "Relu"] + }, + { + "op_list": ["Gemm", "Relu"] + } + ], + + "model_input": + { + "is_input_quantized": "True" + }, + 
+ "model_output": + {} +} diff --git a/qai_hub_models/utils/args.py b/qai_hub_models/utils/args.py new file mode 100644 index 00000000..c9f53100 --- /dev/null +++ b/qai_hub_models/utils/args.py @@ -0,0 +1,418 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +""" +Utility Functions for parsing input args for export and other customer facing scripts. +""" +from __future__ import annotations + +import argparse +import inspect +import os +import sys +from importlib import import_module +from pydoc import locate +from typing import Any, List, Mapping, Optional, Type + +import qai_hub as hub + +from qai_hub_models.utils.base_model import ( + BaseModel, + FromPrecompiledTypeVar, + FromPretrainedMixin, + FromPretrainedTypeVar, + InputSpec, + TargetRuntime, +) +from qai_hub_models.utils.inference import HubModel +from qai_hub_models.utils.qai_hub_helpers import _AIHUB_NAME, can_access_qualcomm_ai_hub + +DEFAULT_EXPORT_DEVICE = "Samsung Galaxy S23" + + +def parse_target_runtime(path: TargetRuntime | str) -> TargetRuntime: + return TargetRuntime[path.upper()] if isinstance(path, str) else path + + +def get_parser() -> argparse.ArgumentParser: + return argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + +def add_output_dir_arg(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: + parser.add_argument( + "--output-dir", + "-o", + type=str, + default=None, + help="If specified, saves demo output (e.g. image) to this directory instead of displaying.", + ) + return parser + + +def get_on_device_demo_parser( + parser: argparse.ArgumentParser | None = None, + available_target_runtimes: List[TargetRuntime] = list( + TargetRuntime.__members__.values() + ), + add_output_dir: bool = False, +): + if not parser: + parser = get_parser() + + parser.add_argument( + "--on-device", + action="store_true", + help="If set, will evalute model using a Hub inference job instead of via torch.", + ) + parser.add_argument( + "--hub-model-id", + type=str, + default=None, + help="If running on-device, uses this model Hub model ID.", + ) + parser.add_argument( + "--device", + type=str, + default="Samsung Galaxy S23", + help="If running on-device, use this device.", + ) + if add_output_dir: + add_output_dir_arg(parser) + parser.add_argument( + "--device-os", + type=str, + default="", + help="Optionally specified together with --device", + ) + parser.add_argument( + "--inference-options", + type=str, + default="", + help="If running on-device, use these options when submitting the inference job.", + ) + default_runtime = ( + TargetRuntime.TFLITE + if TargetRuntime.TFLITE in available_target_runtimes + else available_target_runtimes[0] + ) + parser.add_argument( + "--target-runtime", + default=default_runtime.name, + help="The runtime to demo (if --on-device is specified). Default is TFLITE.", + choices=[x.name for x in available_target_runtimes], + ) + + return parser + + +def validate_on_device_demo_args(args: argparse.Namespace, model_name: str): + """ + Validates the the args for the on device demo are valid. + + Intended for use only in CLI scripts. + Prints error to console and exits if an error is found. 
+ """ + if args.on_device and not can_access_qualcomm_ai_hub(): + print( + "On-device demos are not available without Qualcomm® AI Hub access.", + "Please sign up for Qualcomm® AI Hub at https://aihub.qualcomm.com/.", + sep=os.linesep, + ) + sys.exit(1) + + if (args.inference_options or args.hub_model_id) and not args.on_device: + print( + "A Hub model ID and inference options can be provided only if the --on-device flag is provided." + ) + sys.exit(1) + + +def get_model_cli_parser( + cls: Type[FromPretrainedTypeVar], parser: argparse.ArgumentParser | None = None +) -> argparse.ArgumentParser: + """ + Generate the argument parser to create this model from an argparse namespace. + Default behavior is to assume the CLI args have the same names as from_pretrained method args. + """ + if not parser: + parser = get_parser() + + from_pretrained_sig = inspect.signature(cls.from_pretrained) + for name, param in from_pretrained_sig.parameters.items(): + if name == "cls": + continue + # Determining type from param.annotation is non-trivial (it can be a + # strings like "Optional[str]" or "bool | None"). + if param.default is not None: + type_ = type(param.default) + elif param.annotation == "bool": + type_ = bool + else: + type_ = str + parser.add_argument( + f"--{name.replace('_', '-')}", + type=type_, + default=param.default, + help=f"For documentation, see {cls.__name__}::from_pretrained.", + ) + return parser + + +def get_model_kwargs( + model_cls: Type[FromPretrainedTypeVar], args_dict: Mapping[str, Any] +) -> Mapping[str, Any]: + """ + Given a dict with many args, pull out the ones relevant + to constructing the model via `from_pretrained`. + """ + from_pretrained_sig = inspect.signature(model_cls.from_pretrained) + model_kwargs = {} + for name in from_pretrained_sig.parameters: + if name == "cls" or name not in args_dict: + continue + model_kwargs[name] = args_dict.get(name) + return model_kwargs + + +def model_from_cli_args( + model_cls: Type[FromPretrainedTypeVar], cli_args: argparse.Namespace +) -> FromPretrainedTypeVar: + """ + Create this model from an argparse namespace. + Default behavior is to assume the CLI args have the same names as from_pretrained method args. + """ + return model_cls.from_pretrained(**get_model_kwargs(model_cls, vars(cli_args))) + + +def demo_model_from_cli_args( + model_cls: Type[FromPretrainedTypeVar], + cli_args: argparse.Namespace, + check_trace: bool = True, +) -> FromPretrainedTypeVar | HubModel: + """ + Create this model from an argparse namespace. + Default behavior is to assume the CLI args have the same names as from_pretrained method args. + + If the model is a BaseModel and an on-device demo is requested, the BaseModel will be wrapped in a HubModel. 
+ """ + model = model_from_cli_args( + model_cls, cli_args + ) # TODO(9494): This should be replaced by static input spec + is_on_device = "on_device" in cli_args and cli_args.on_device + target_runtime = TargetRuntime[cli_args.target_runtime] + inference_model: FromPretrainedTypeVar | HubModel + if is_on_device and isinstance(model, BaseModel): + device = hub.Device(cli_args.device, cli_args.device_os) + if cli_args.hub_model_id: + model_from_hub = hub.get_model(cli_args.hub_model_id) + inference_model = HubModel( + model_from_hub, + list(model.get_input_spec().keys()), + device, + cli_args.inference_options, + ) + else: + model_cls = model_cls + export_file = f"qai_hub_models.models.{model.get_model_id()}.export" + export_module = import_module(export_file) + compile_job: hub.CompileJob + print(f"Compiling on-device model asset for {model.get_model_id()}.") + print( + f"Running python -m {export_file} --device {device.name} --target-runtime {target_runtime.name}\n" + ) + export_output = export_module.export_model( + device=device.name, + skip_profiling=True, + skip_inferencing=True, + skip_downloading=True, + skip_summary=True, + dst_runtime=target_runtime.name, + ) + + if len(export_output) == 0 or isinstance(export_output[0], str): + # The export returned local file paths, which mean Hub credentials were not found. + raise NotImplementedError( + f"Please sign-up for {_AIHUB_NAME} to continue the demo with on-device inference." + ) + + compile_job, _, _ = export_output + target_model = compile_job.get_target_model() + assert target_model is not None + + input_names = list(model.get_input_spec().keys()) + inference_model = HubModel( + target_model, + input_names, + device, + inference_options=cli_args.inference_options, + ) + print(f"Exported asset: {inference_model.model.name}\n") + else: + inference_model = model + return inference_model + + +def get_input_spec_kwargs( + model: "BaseModel", args_dict: Mapping[str, Any] +) -> Mapping[str, Any]: + """ + Given a dict with many args, pull out the ones relevant + to constructing the model's input_spec. + """ + get_input_spec_args = inspect.signature(model.get_input_spec) + input_spec_kwargs = {} + for name in get_input_spec_args.parameters: + if name == "self" or name not in args_dict: + continue + input_spec_kwargs[name] = args_dict[name] + return input_spec_kwargs + + +def get_model_input_spec_parser( + model_cls: Type[BaseModel], parser: argparse.ArgumentParser | None = None +) -> argparse.ArgumentParser: + """ + Generate the argument parser to get this model's input spec from an argparse namespace. + Default behavior is to assume the CLI args have the same names as get_input_spec method args. + """ + if not parser: + parser = get_parser() + + get_input_spec_sig = inspect.signature(model_cls.get_input_spec) + for name, param in get_input_spec_sig.parameters.items(): + if name == "self": + continue + type_: type | object + if isinstance(param.annotation, type): + type_ = param.annotation + else: + # locate() converts string type to cls type + # Any type can be resolved as long as it's accessible in this scope + type_ = locate(param.annotation) + assert isinstance(type_, type) + parser.add_argument( + f"--{name.replace('_', '-')}", + type=type_, + default=param.default, + help=f"For documentation, see {model_cls.__name__}::get_input_spec.", + ) + return parser + + +def input_spec_from_cli_args( + model: "BaseModel", cli_args: argparse.Namespace +) -> "InputSpec": + """ + Create this model's input spec from an argparse namespace. 
+ Default behavior is to assume the CLI args have the same names as get_input_spec method args. + """ + return model.get_input_spec(**get_input_spec_kwargs(model, vars(cli_args))) + + +def export_parser( + model_cls: Type[FromPretrainedTypeVar] | Type[FromPrecompiledTypeVar], + components: Optional[List[str]] = None, + supports_qnn=True, + exporting_compiled_model=False, +) -> argparse.ArgumentParser: + """ + Arg parser to be used in export scripts. + + Parameters: + model_cls: Class of the model to be exported. Used to add additional + args for model instantiation. + components: Some models have multiple components that need to be + compiled separately. This represents the list of options for the user to + select which components they want to compile. + supports_qnn: + Whether QNN export is supported. + Default=True. + exporting_compiled_model: + True when exporting compiled model. + If set, removing skip_profiling flag from export arguments. + Default = False. + + Returns: + Arg parser object. + """ + parser = get_parser() + parser.add_argument( + "--device", + type=str, + default=DEFAULT_EXPORT_DEVICE, + help="Device for which to export.", + ) + parser.add_argument( + "--skip-profiling", + action="store_true", + help="If set, writes compiled model to local directory without profiling.", + ) + parser.add_argument( + "--skip-inferencing", + action="store_true", + help="If set, skips verifying on-device output vs local cpu.", + ) + if not exporting_compiled_model: + parser.add_argument( + "--skip-downloading", + action="store_true", + help="If set, skips downloading of compiled model.", + ) + parser.add_argument( + "--skip-summary", + action="store_true", + help="If set, skips printing summary of inference and profiling.", + ) + parser.add_argument( + "--output-dir", + type=str, + default=None, + help="Directory to store generated assets (e.g. compiled model). " + "Defaults to `/build/`.", + ) + if not exporting_compiled_model: + # Default runtime for compiled model is fixed for given model + parser.add_argument( + "--dst-runtime", + default="TFLITE", + help="The runtime to export for. Default is TF Lite.", + choices=TargetRuntime._member_names_ + if supports_qnn + else [TargetRuntime.TFLITE.name], + ) + # No compilation for compiled models + parser.add_argument( + "--compile-options", + type=str, + default="", + help="Additional options to pass when submitting the compile job.", + ) + parser.add_argument( + "--profile-options", + type=str, + default="", + help="Additional options to pass when submitting the profile job.", + ) + if components is not None: + parser.add_argument( + "--components", + nargs="+", + type=str, + default=None, + choices=components, + help="Which components of the model to be exported.", + ) + + if issubclass(model_cls, FromPretrainedMixin): + # Skip adding CLI from model for compiled model + # TODO: #9408 Refactor BaseModel, BasePrecompiledModel to fetch + # parameters from compiled model + parser = get_model_cli_parser(model_cls, parser) + + if issubclass(model_cls, BaseModel): + parser = get_model_input_spec_parser(model_cls, parser) + + return parser diff --git a/qai_hub_models/utils/asset_loaders.py b/qai_hub_models/utils/asset_loaders.py new file mode 100644 index 00000000..70eabe0a --- /dev/null +++ b/qai_hub_models/utils/asset_loaders.py @@ -0,0 +1,975 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import fileinput +import json +import os +import shutil +import sys +import tarfile +import tempfile +import threading +import time +from contextlib import contextmanager +from enum import Enum +from functools import partial +from pathlib import Path +from typing import Any, Callable, Dict, List, Optional, Union +from zipfile import ZipFile + +import gdown +import numpy as np +import requests +import torch +import yaml +from git import Repo +from PIL import Image +from schema import And, Schema, SchemaError + +ASSET_BASES_DEFAULT_PATH = os.path.join( + os.path.dirname(os.path.dirname(__file__)), "asset_bases.yaml" +) + +QAIHM_STORE_ROOT = os.environ.get("QAIHM_STORE_ROOT", os.path.expanduser("~")) +LOCAL_STORE_DEFAULT_PATH = os.path.join(QAIHM_STORE_ROOT, ".qaihm") + +SOURCE_AS_ROOT_LOCK = threading.Lock() + +VersionType = Union[str, int] + +# If non-None, always enter this for yes (True)/no (False) prompts +_always_answer = None + + +@contextmanager +def always_answer_prompts(answer): + global _always_answer + old_value = _always_answer + _always_answer = answer + try: + yield + finally: + _always_answer = old_value + + +class QAIHM_WEB_ASSET(Enum): + STATIC_IMG = 0 + ANIMATED_MOV = 1 + + +class ModelZooAssetConfig: + def __init__( + self, + asset_url: str, + web_asset_folder: str, + static_web_banner_filename: str, + animated_web_banner_filename: str, + model_asset_folder: str, + dataset_asset_folder: str, + local_store_path: str, + qaihm_repo: str, + example_use: str, + huggingface_path: str, + repo_url: str, + models_website_url: str, + models_website_relative_path: str, + ) -> None: + self.local_store_path = local_store_path + self.asset_url = asset_url + self.web_asset_folder = web_asset_folder + self.static_web_banner_filename = static_web_banner_filename + self.animated_web_banner_filename = animated_web_banner_filename + self.model_asset_folder = model_asset_folder + self.dataset_asset_folder = dataset_asset_folder + self.qaihm_repo = qaihm_repo + self.example_use = example_use + self.huggingface_path = huggingface_path + self.repo_url = repo_url + self.models_website_url = models_website_url + self.models_website_relative_path = models_website_relative_path + + # Validation + for name in [ + self.asset_url, + self.web_asset_folder, + self.model_asset_folder, + self.static_web_banner_filename, + self.animated_web_banner_filename, + self.local_store_path, + self.qaihm_repo, + self.example_use, + self.huggingface_path, + self.models_website_relative_path, + ]: + assert not name.endswith("/") and not name.endswith("\\") + for name in [ + self.static_web_banner_filename, + self.animated_web_banner_filename, + ]: + assert not name.startswith("/") and not name.startswith("\\") + + for name in [self.repo_url, self.models_website_url]: + assert not name.endswith("/"), "URLs should not end with a slash" + + def get_hugging_face_url(self, model_name: str) -> str: + return f"https://huggingface.co/{self.get_huggingface_path(model_name)}" + + def get_huggingface_path(self, model_name: str) -> str: + return self.huggingface_path.replace("{model_name}", str(model_name)) + + def get_web_asset_url(self, model_id: str, type: QAIHM_WEB_ASSET): + if type == QAIHM_WEB_ASSET.STATIC_IMG: + file = self.static_web_banner_filename + elif type == QAIHM_WEB_ASSET.ANIMATED_MOV: + file = self.animated_web_banner_filename + else: + raise 
NotImplementedError("unsupported web asset type") + return f"{self.asset_url}/{ModelZooAssetConfig._replace_path_keywords(self.web_asset_folder, model_id=model_id)}/{file}" + + def get_local_store_model_path( + self, model_name: str, version: VersionType, filename: str + ) -> str: + model_dir = os.path.join( + self.local_store_path, + self.get_relative_model_asset_path(model_name, version, filename), + ) + return model_dir + + def get_local_store_dataset_path( + self, dataset_name: str, version: VersionType, filename: str + ) -> str: + model_dir = os.path.join( + self.local_store_path, + self.get_relative_dataset_asset_path(dataset_name, version, filename), + ) + return model_dir + + def get_relative_model_asset_path( + self, model_id: str, version: Union[int, str], file_name: str + ): + assert not file_name.startswith("/") and not file_name.startswith("\\") + return f"{ModelZooAssetConfig._replace_path_keywords(self.model_asset_folder, model_id=model_id, version=version)}/{file_name}" + + def get_relative_dataset_asset_path( + self, dataset_id: str, version: Union[int, str], file_name: str + ): + assert not file_name.startswith("/") and not file_name.startswith("\\") + return f"{ModelZooAssetConfig._replace_path_keywords(self.dataset_asset_folder, dataset_id=dataset_id, version=version)}/{file_name}" + + def get_model_asset_url( + self, model_id: str, version: Union[int, str], file_name: str + ): + assert not file_name.startswith("/") and not file_name.startswith("\\") + return f"{self.asset_url}/{self.get_relative_model_asset_path(model_id, version, file_name)}" + + def get_dataset_asset_url( + self, dataset_id: str, version: Union[int, str], file_name: str + ): + assert not file_name.startswith("/") and not file_name.startswith("\\") + return f"{self.asset_url}/{self.get_relative_dataset_asset_path(dataset_id, version, file_name)}" + + def get_qaihm_repo(self, model_id: str, relative=True): + relative_path = f"{ModelZooAssetConfig._replace_path_keywords(self.qaihm_repo, model_id=model_id)}" + if not relative: + return self.repo_url + "/" + relative_path + + return relative_path + + def get_website_url(self, model_id: str, relative=False): + relative_path = f"{ModelZooAssetConfig._replace_path_keywords(self.models_website_relative_path, model_id=model_id)}" + if not relative: + return self.models_website_url + "/" + relative_path + return relative_path + + def get_example_use(self, model_id: str): + return f"{ModelZooAssetConfig._replace_path_keywords(self.example_use, model_id=model_id)}" + + ### + # Helpers + ### + @staticmethod + def _replace_path_keywords( + path: str, + model_id: Optional[str] = None, + dataset_id: Optional[str] = None, + version: Optional[Union[int, str]] = None, + ): + if model_id: + path = path.replace("{model_id}", model_id) + if dataset_id: + path = path.replace("{dataset_id}", dataset_id) + if version: + path = path.replace("{version}", str(version)) + return path + + ### + # Load from CFG + ### + @staticmethod + def from_cfg( + asset_cfg_path: str = ASSET_BASES_DEFAULT_PATH, + local_store_path: str = LOCAL_STORE_DEFAULT_PATH, + verify_env_has_all_variables: bool = False, + ): + # Load CFG and params + asset_cfg = ModelZooAssetConfig.load_asset_cfg( + asset_cfg_path, verify_env_has_all_variables + ) + + return ModelZooAssetConfig( + asset_cfg["store_url"], + asset_cfg["web_asset_folder"], + asset_cfg["static_web_banner_filename"], + asset_cfg["animated_web_banner_filename"], + asset_cfg["model_asset_folder"], + asset_cfg["dataset_asset_folder"], + 
            local_store_path,
+            asset_cfg["qaihm_repo"],
+            asset_cfg["example_use"],
+            asset_cfg["huggingface_path"],
+            asset_cfg["repo_url"],
+            asset_cfg["models_website_url"],
+            asset_cfg["models_website_relative_path"],
+        )
+
+    ASSET_CFG_SCHEMA = Schema(
+        And(
+            {
+                "store_url": str,
+                "web_asset_folder": str,
+                "dataset_asset_folder": str,
+                "static_web_banner_filename": str,
+                "animated_web_banner_filename": str,
+                "model_asset_folder": str,
+                "qaihm_repo": str,
+                "example_use": str,
+                "huggingface_path": str,
+                "repo_url": str,
+                "models_website_url": str,
+                "models_website_relative_path": str,
+            }
+        )
+    )
+
+    @staticmethod
+    def load_asset_cfg(path, verify_env_has_all_variables: bool = False):
+        with open(path) as f:
+            data = yaml.safe_load(f)
+        try:
+            # Validate high-level schema
+            ModelZooAssetConfig.ASSET_CFG_SCHEMA.validate(data)
+        except SchemaError as e:
+            assert 0, f"{e.code} in {path}"
+
+        for key, value in data.items():
+            # Environment variable replacement
+            if isinstance(value, str) and value.startswith("env::"):
+                values = value.split("::")
+                if len(values) == 2:
+                    _, env_var_name = values
+                    default = value
+                elif len(values) == 3:
+                    _, env_var_name, default = values
+                else:
+                    raise NotImplementedError(
+                        "Environment vars should be specified in asset_bases "
+                        "using format env::<env_var_name>::<default_value>"
+                    )
+
+                data[key] = os.environ.get(env_var_name, default)
+                if (
+                    verify_env_has_all_variables
+                    and default == value
+                    and env_var_name not in os.environ
+                ):
+                    raise ValueError(
+                        f"Environment variable '{env_var_name}' was specified in "
+                        f"asset_bases.yaml for key '{key}', but is not defined."
+                    )
+
+        return data
+
+
+ASSET_CONFIG = ModelZooAssetConfig.from_cfg()
+
+
+def _query_yes_no(question, default="yes"):
+    """
+    Ask a yes/no question and return their answer.
+
+    "question" is a string that is presented to the user.
+    "default" is the presumed answer if the user just hits <Enter>.
+    It must be "yes" (the default), "no" or None (meaning
+    an answer is required of the user).
+
+    The "answer" return value is True for "yes" or False for "no".
+
+    Sourced from https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input
+    """
+    global _always_answer
+    if _always_answer is not None:
+        return _always_answer
+
+    valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
+    if default is None:
+        prompt = " [y/n] "
+    elif default == "yes":
+        prompt = " [Y/n] "
+    elif default == "no":
+        prompt = " [y/N] "
+    else:
+        raise ValueError("invalid default answer: '%s'" % default)
+
+    while True:
+        print(question + prompt, end="")
+        choice = input().lower()
+        if default is not None and choice == "":
+            return valid[default]
+        elif choice in valid:
+            return valid[choice]
+        else:
+            print("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n")
+
+
+def maybe_clone_git_repo(
+    git_file_path: str,
+    commit_hash,
+    model_name: str,
+    model_version: VersionType,
+    patches: List[str] = [],
+) -> str:
+    """Clone (or pull) a repository, save it to disk in a standard location,
+    and return the absolute path to the cloned location.
Patches can be applied + by providing a list of paths to diff files.""" + + # http://blah.come/author/name.git -> name, author + repo_name = os.path.basename(git_file_path).split(".")[0] + repo_author = os.path.basename(os.path.dirname(git_file_path)) + local_path = ASSET_CONFIG.get_local_store_model_path( + model_name, model_version, f"{repo_author}_{repo_name}_git" + ) + os.makedirs(os.path.dirname(local_path), exist_ok=True) + + if not os.path.exists(os.path.join(local_path, ".git")): + # Clone repo + should_clone = _query_yes_no( + f"{model_name} requires repository {git_file_path} . Ok to clone?", + ) + if should_clone: + print(f"Cloning {git_file_path} to {local_path}...") + repo = Repo.clone_from(git_file_path, local_path) + repo.git.checkout(commit_hash) + for patch in patches: + repo.git.execute(["git", "apply", patch]) + print("Done") + else: + raise ValueError( + f"Unable to load {model_name} without its required repository." + ) + + return local_path + + +def _load_file( + file: PathType, + loader_func: Callable[[str], Any], + dst_folder_path: tempfile.TemporaryDirectory | str | None = None, +) -> Any: + if isinstance(file, (str, Path)): + file = str(file) + if file.startswith("http"): + if dst_folder_path is None: + dst_folder_path = tempfile.TemporaryDirectory() + if isinstance(dst_folder_path, tempfile.TemporaryDirectory): + dst_folder_path_str = dst_folder_path.name + else: + dst_folder_path_str = dst_folder_path + dst_path = os.path.join(dst_folder_path_str, os.path.basename(file)) + download_file(file, dst_path) + return loader_func(dst_path) + else: + return loader_func(file) + elif isinstance(file, CachedWebAsset): + return loader_func(str(file.fetch())) + else: + raise NotImplementedError() + + +def load_image(image: PathType, verbose=False, desc="image") -> Image.Image: + if verbose: + print(f"Loading {desc} from {image}") + return _load_file(image, Image.open) + + +def load_numpy(file: PathType) -> Any: + return _load_file(file, np.load) + + +def load_torch(pt: PathType) -> Any: + return _load_file(pt, partial(torch.load, map_location="cpu")) + + +def load_json(json_file: PathType) -> Dict: + def _load_json_helper(file_path) -> Any: + with open(file_path, "r") as json_file: + return json.load(json_file) + + return _load_file(json_file, _load_json_helper) + + +def load_path(file: PathType, tmpdir: tempfile.TemporaryDirectory | str) -> str | Path: + """ + Get asset path on disk. + If `file` is a string URL, downloads the file to tmpdir.name. + """ + + def return_path(path): + return path + + return _load_file(file, return_path, tmpdir) + + +@contextmanager +def SourceAsRoot( + source_repo_url: str, + source_repo_commit_hash: str, + source_repo_name: str, + source_repo_version: int | str, + source_repo_patches: List[str] = [], + keep_sys_path: bool = False, +): + """ + Context manager that runs code with: + * the source repository added to the system path, + * cwd set to the source repo's root directory. + + Only one of this class should be active per Python session. + """ + + repository_path = maybe_clone_git_repo( + source_repo_url, + source_repo_commit_hash, + source_repo_name, + source_repo_version, + patches=source_repo_patches, + ) + SOURCE_AS_ROOT_LOCK.acquire() + cwd = os.getcwd() + original_path = list(sys.path) + try: + # Patch path for this load only, since the model source + # code references modules via a global scope. 
+ # Insert with highest priority (see #7666) + sys.path.insert(0, repository_path) + os.chdir(repository_path) + + yield repository_path + finally: + # Be careful editing these lines (failure means partial clean-up) + os.chdir(cwd) + if not keep_sys_path: + sys.path = original_path + SOURCE_AS_ROOT_LOCK.release() + + +def find_replace_in_repo( + repo_path: str, filepaths: Union[str, List[str]], find_str: str, replace_str: str +): + """ + When loading models from external repos, sometimes small modifications + need to be made to the repo code to get it working in the zoo env. + + This does a simple find + replace within a single file. + + Parameters: + repo_path: Local filepath to the repo of interest. + filepath: Filepath within the repo to the file to change. + find_str: The string that needs to be replaced. + replace_str: The string with which to replace all instances of `find_str`. + """ + if isinstance(filepaths, str): + filepaths = [filepaths] + for filepath in filepaths: + with fileinput.FileInput( + Path(repo_path) / filepath, + inplace=True, + backup=".bak", + ) as file: + for line in file: + print(line.replace(find_str, replace_str), end="") + + +class CachedWebAsset: + """ + Helper class for downloading files for storage in the QAIHM asset cache. + """ + + def __init__( + self, + url: str, + local_cache_path: str, + asset_config=ASSET_CONFIG, + model_downloader: Callable[[str, str, int], str] | None = None, + downloader_num_retries=4, + ): + self.url = url + self.local_cache_path = local_cache_path + self.asset_config: ModelZooAssetConfig = asset_config + self._downloader: Callable = model_downloader or download_file + self.downloader_num_retries = downloader_num_retries + + # Append file name to local path if no file name is present + path, ext = os.path.splitext(self.local_cache_path) + if not ext: + file_name = self.url.rsplit("/", 1)[-1] + self.local_cache_path = os.path.join(path, file_name) + + # Set is_extracted if already extracted on disk + file, _ = os.path.splitext(self.local_cache_path) + self.is_extracted = list( + filter(local_cache_path.endswith, [".zip", ".tar", ".tar.gz", ".tgz"]) + ) != [] and os.path.isdir(file) + + def __repr__(self): + return self.url + + @staticmethod + def from_asset_store( + relative_store_file_path: str, num_retries=4, asset_config=ASSET_CONFIG + ): + """ + File from the online qaihm asset store. + + Parameters: + relative_store_file_path: Path relative to `qai_hub_models` cache root to store this asset. + (also relative to the root of the online file store) + + num_retries: Number of retries when downloading thie file. + + asset_config: Asset config to use to save this file. + """ + web_store_path = f"{asset_config.asset_url}/{relative_store_file_path}" + return CachedWebAsset( + web_store_path, + relative_store_file_path, + asset_config, + download_file, + num_retries, + ) + + @staticmethod + def from_google_drive( + gdrive_file_id: str, + relative_store_file_path: str, + num_retries=4, + asset_config=ASSET_CONFIG, + ): + """ + File from google drive. + + Parameters: + gdrive_file_id: Unique identifier of the file in Google Drive. + Typically found in the URL. + + relative_store_file_path: Path relative to `qai_hub_models` cache root to store this asset. + + num_retries: Number of retries when downloading thie file. + + asset_config: Asset config to use to save this file. 
+ """ + return CachedWebAsset( + f"https://drive.google.com/uc?id={gdrive_file_id}", + relative_store_file_path, + asset_config, + download_and_cache_google_drive, + num_retries, + ) + + def path(self, extracted=None) -> Path: + """ + Get the path of this asset on disk. + + By default, for archived (.zip, .tar, .etc) assets, path() will return the extracted path if the asset + has been extracted, and the original archive file's path if it has not been extracted. + + Parameters: + extracted: If true, return the path of the extracted asset on disk. + If false, return the path of the archive path on disk. + """ + if (extracted is None and self.is_extracted) or extracted: + file, _ = os.path.splitext(self.local_cache_path) + else: + file = self.local_cache_path + + return Path(self.asset_config.local_store_path) / file + + def fetch(self, force=False, extract=False) -> Path: + """ + Fetch this file from the web if it does not exist on disk. + + Parameters: + force: If the file exists on disk already, discard it and download it again. + + extract: Extract the asset after downloading it. + """ + path = self.path() + + # Delete existing asset if requested + if path.exists(): + if force: + if os.path.isdir(path): + shutil.rmtree(path) + else: + os.remove(path) + self.is_extracted = False + else: + return path + elif self.is_extracted: + # Someone deleted the extracted path. Fetch it again. + self.is_extracted = False + path = self.path() + + # Create dirs + os.makedirs(os.path.dirname(path), exist_ok=True) + + # Downloader should return path we expect. + p1 = self._downloader(self.url, self.local_cache_path) + assert str(p1) == str(path) + + # Extract asset if requested + if extract: + self.extract(force) + + return self.path() + + def extract(self, force=True) -> Path: + """ + Extract this asset if it is compressed. Updates the path of this asset to the folder to which the zip file was extracted. + """ + if self.is_extracted: + if force: + os.remove(self.path()) + self.is_extracted = False + else: + return self.path() + + _, ext = os.path.splitext(self.local_cache_path) + if ext == ".zip": + # Update local cache path to pont to the extracted zip folder. + extract_zip_file(str(self.path())) + os.remove(self.path()) # Deletes zip file + self.is_extracted = True # Updates path() to return extracted path + elif ext in [".tar", ".gz", ".tgz"]: + with tarfile.open(self.path()) as f: + f.extractall(os.path.dirname(self.path())) + os.remove(self.path()) # Deletes tar file + self.is_extracted = True # Updates path() to return extracted path + else: + raise ValueError(f"Unsupported compressed file type: {ext}") + + return self.path() + + +class CachedWebModelAsset(CachedWebAsset): + """ + Helper class for downloading files for storage in the QAIHM asset cache. + """ + + def __init__( + self, + url: str, + model_id: str, + model_asset_version: int | str, + filename: str, + asset_config=ASSET_CONFIG, + model_downloader: Callable[[str, str, int], str] | None = None, + downloader_num_retries=4, + ): + local_cache_path = asset_config.get_local_store_model_path( + model_id, model_asset_version, filename + ) + super().__init__( + url, + local_cache_path, + asset_config, + model_downloader, + downloader_num_retries, + ) + self.model_id = model_id + self.model_version = model_asset_version + + @staticmethod # type: ignore + def from_asset_store( + model_id: str, + model_asset_version: str | int, + filename: str, + num_retries=4, + asset_config=ASSET_CONFIG, + ): + """ + File from the online qaihm asset store. 
+ + Parameters: + model_id: str + Model ID + + model_asset_version: str | int + Asset version for this model. + + num_retries: int + Number of retries when downloading thie file. + + asset_config: ModelZooAssetConfig + Asset config to use to save this file. + """ + web_store_path = asset_config.get_model_asset_url( + model_id, model_asset_version, filename + ) + return CachedWebModelAsset( + web_store_path, + model_id, + model_asset_version, + filename, + asset_config, + download_file, + num_retries, + ) + + @staticmethod # type: ignore + def from_google_drive( + gdrive_file_id: str, + model_id: str, + model_asset_version: str | int, + filename: str, + num_retries=4, + asset_config=ASSET_CONFIG, + ): + """ + File from google drive. + + Parameters: + gdrive_file_id: Unique identifier of the file in Google Drive. + Typically found in the URL. + + model_id: Model ID + + model_asset_version: Asset version for this model. + + filename: Filename for this asset on disk. + + num_retries: Number of retries when downloading thie file. + + asset_config: Asset config to use to save this file. + """ + return CachedWebModelAsset( + f"https://drive.google.com/uc?id={gdrive_file_id}", + model_id, + model_asset_version, + filename, + asset_config, + download_and_cache_google_drive, + num_retries, + ) + + +class CachedWebDatasetAsset(CachedWebAsset): + """ + Class representing dataset-specific files that needs stored in the local cache once downloaded. + + These files should correspond to a single (or group) of datasets in `qai_hub_models/dataset`. + """ + + def __init__( + self, + url: str, + dataset_id: str, + dataset_version: int | str, + filename: str, + asset_config=ASSET_CONFIG, + model_downloader: Callable[[str, str, int], str] | None = None, + downloader_num_retries=4, + ): + local_cache_path = asset_config.get_local_store_dataset_path( + dataset_id, dataset_version, filename + ) + super().__init__( + url, + local_cache_path, + asset_config, + model_downloader, + downloader_num_retries, + ) + self.dataset_id = dataset_id + self.dataset_version = dataset_version + + @staticmethod # type: ignore + def from_asset_store( + dataset_id: str, + dataset_version: str | int, + filename: str, + num_retries=4, + asset_config=ASSET_CONFIG, + ): + """ + File from the online qaihm asset store. + + Parameters: + model_id: Model ID + + dataset_version: Asset version for this model. + + num_retries: Number of retries when downloading thie file. + + asset_config: Asset config to use to save this file. + """ + web_store_path = asset_config.get_dataset_asset_url( + dataset_id, dataset_version, filename + ) + return CachedWebModelAsset( + web_store_path, + dataset_id, + dataset_version, + filename, + asset_config, + download_file, + num_retries, + ) + + @staticmethod # type: ignore + def from_google_drive( + gdrive_file_id: str, + model_id: str, + model_asset_version: str | int, + filename: str, + num_retries=4, + asset_config=ASSET_CONFIG, + ): + """ + File from google drive. + + Parameters: + gdrive_file_id: Unique identifier of the file in Google Drive. + Typically found in the URL. + + model_id: Model ID + + model_asset_version: Asset version for this model. + + filename: Filename for this asset on disk. + + num_retries: Number of retries when downloading thie file. + + asset_config: Asset config to use to save this file. 
+        """
+        return CachedWebModelAsset(
+            f"https://drive.google.com/uc?id={gdrive_file_id}",
+            model_id,
+            model_asset_version,
+            filename,
+            asset_config,
+            download_and_cache_google_drive,
+            num_retries,
+        )
+
+
+def download_file(web_url: str, dst_path: str, num_retries: int = 4) -> str:
+    """
+    Downloads data from the internet and stores it at `dst_path`.
+    `dst_path` should be relative to the local cache root for qai_hub_models.
+    """
+    if not os.path.exists(dst_path):
+        print(f"Downloading data at {web_url} to {dst_path}... ", end="")
+        file_data = requests.get(web_url)
+        if file_data.status_code != 200:
+            raise ValueError(f"Unable to download file at {web_url}")
+        with open(dst_path, "wb") as dst_file:
+            dst_file.write(file_data.content)
+        print("Done")
+    return dst_path
+
+
+def download_and_cache_google_drive(web_url: str, dst_path: str, num_retries: int = 4):
+    """
+    Download a file from Google Drive to the local directory.
+
+    Parameters:
+        web_url: Google Drive download URL for the file
+            (typically of the form https://drive.google.com/uc?id=<file id>).
+        dst_path: Local path under which the downloaded file will be saved.
+        num_retries: Number of times to retry in case the download fails.
+
+    Returns:
+        Filepath within the local filesystem.
+    """
+    for i in range(num_retries):
+        print(f"Downloading data at {web_url} to {dst_path}... ")
+        try:
+            gdown.download(web_url, dst_path, quiet=False)
+        except Exception:
+            pass
+        if os.path.exists(dst_path):
+            print("Done")
+            return dst_path
+        else:
+            print(f"Failed to download file at {web_url}")
+            if i < num_retries - 1:
+                print("Retrying in 3 seconds.")
+                time.sleep(3)
+    return dst_path
+
+
+def copyfile(src: str, dst: str, num_retries: int = 4):
+    if os.path.isdir(src):
+        shutil.copytree(src, dst)
+    else:
+        shutil.copyfile(src, dst)
+    return dst
+
+
+def extract_zip_file(filepath_str: str) -> Path:
+    """
+    Given a local filepath to a zip file, extract its contents into a folder
+    in the same directory. The directory with the contents will have the same
+    name as the .zip file without the `.zip` extension.
+
+    Parameters:
+        filepath_str: String of the path to the zip file in the local directory.
+    """
+    filepath = Path(filepath_str)
+    with ZipFile(filepath, "r") as zf:
+        out_path = filepath.parent / filepath.stem
+        zf.extractall(path=out_path)
+    return out_path
+
+
+def callback_with_retry(
+    num_retries: int,
+    callback: Callable,
+    *args: Optional[Any],
+    **kwargs: Optional[Any],
+) -> Any:
+    """Allow retries when running provided function."""
+    if num_retries == 0:
+        raise RuntimeError(f"Unable to run function {callback.__name__}")
+    else:
+        try:
+            return callback(*args, **kwargs)
+        except Exception as error:
+            error_msg = (
+                f"Error: {error.message}"  # type: ignore
+                if hasattr(error, "message")
+                else f"Error: {str(error)}"
+            )
+            print(error_msg)
+            if hasattr(error, "status_code"):
+                print(f"Status code: {error.status_code}")  # type: ignore
+            time.sleep(10)
+            return callback_with_retry(num_retries - 1, callback, *args, **kwargs)
+
+
+PathType = Union[str, Path, CachedWebAsset]
diff --git a/qai_hub_models/utils/base_model.py b/qai_hub_models/utils/base_model.py
new file mode 100644
index 00000000..e5a3b38e
--- /dev/null
+++ b/qai_hub_models/utils/base_model.py
@@ -0,0 +1,231 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os +from abc import ABC, ABCMeta, abstractmethod +from enum import Enum +from inspect import getmodule +from typing import Any, Dict, List, Type, TypeVar + +import numpy as np +import torch +from qai_hub.client import SourceModel + +from qai_hub_models.evaluators.base_evaluators import BaseEvaluator +from qai_hub_models.utils.input_spec import InputSpec, make_torch_inputs + +InputsType = Dict[str, List[np.ndarray]] + + +class TargetRuntime(Enum): + TFLITE = 0 + QNN = 1 + + +class SourceModelFormat(Enum): + ONNX = 0 + TORCHSCRIPT = 1 + + +class DocstringInheritorMeta(ABCMeta): + """ + Ensures that all subclasses retain the `forward` function's docstring. + """ + + def __new__(cls, name, bases, dct): + new_class = super().__new__(cls, name, bases, dct) + if hasattr(new_class, "forward"): + parent_method = getattr(bases[0], "forward", None) + if parent_method and new_class.forward.__doc__ is None: # type: ignore + new_class.forward.__doc__ = parent_method.__doc__ # type: ignore + return new_class + + +# Use this for typehints that take in a class and output an instance of the class. +FromPretrainedTypeVar = TypeVar("FromPretrainedTypeVar", bound="FromPretrainedMixin") +FromPrecompiledTypeVar = TypeVar("FromPrecompiledTypeVar", bound="FromPrecompiledMixin") + + +class FromPretrainedMixin(ABC): + @classmethod + @abstractmethod + def from_pretrained( + cls: Type[FromPretrainedTypeVar], *args, **kwargs + ) -> FromPretrainedTypeVar: + """ + Utility function that helps users get up and running with a default + pretrained model. While this function may take arguments, all arguments + should have default values specified, so that all classes can be invoked + with `cls.from_pretrained()` and always have it return something reasonable. + """ + pass + + +class CollectionModel(FromPretrainedMixin): + """ + Model that glues together several BaseModels + """ + + pass + + +class BaseModel( + torch.nn.Module, FromPretrainedMixin, ABC, metaclass=DocstringInheritorMeta +): + @abstractmethod + def get_input_spec(self, *args, **kwargs) -> InputSpec: + """ + Returns a map from `{input_name -> (shape, dtype)}` + specifying the shape and dtype for each input argument. + """ + pass + + @classmethod + def get_model_id(cls) -> str: + """ + Return model ID for this model. + The model ID is the same as the folder name for the model under qai_hub_models/models/... + """ + module = getmodule(cls) + if not module or not module.__file__: + raise ValueError(f"Unable to get model ID for {cls.__name__}") + + # Module path is always .../qai_hub_models/models//model.py + # Extract model ID from that path. + return os.path.basename(os.path.dirname(module.__file__)) + + def get_evaluator(self) -> BaseEvaluator: + """ + Gets default model output evaluator for this model. + """ + raise NotImplementedError("This model does not define a default evaluator.") + + def convert_to_torchscript( + self, input_spec: InputSpec | None = None, check_trace: bool = True + ) -> Any: + """ + Converts the torch module to a torchscript trace, which + is the format expected by qai hub. + + This is a default implementation that may be overriden by a subclass. 
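+
+        Example (illustrative; `MyModel` stands in for any concrete BaseModel subclass):
+            model = MyModel.from_pretrained()
+            traced = model.convert_to_torchscript()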
+        """
+        if not input_spec:
+            input_spec = self.get_input_spec()
+
+        return torch.jit.trace(
+            self, make_torch_inputs(input_spec), check_trace=check_trace
+        )
+
+    def convert_to_hub_source_model(
+        self,
+        target_runtime: TargetRuntime,
+        output_path: str,
+        input_spec: InputSpec | None = None,
+        check_trace: bool = True,
+    ) -> SourceModel:
+        """
+        Convert to an AI Hub source model appropriate for the export method.
+        """
+        # Local import to prevent circular dependency
+        from qai_hub_models.utils.inference import prepare_compile_zoo_model_to_hub
+
+        assert isinstance(self, BaseModel)
+        source_model, _ = prepare_compile_zoo_model_to_hub(
+            self,
+            source_model_format=self.preferred_hub_source_model_format(target_runtime),
+            target_runtime=target_runtime,
+            output_path=output_path,
+            input_spec=input_spec,
+            check_trace=check_trace,
+        )
+        return source_model
+
+    def get_hub_compile_options(
+        self,
+        target_runtime: TargetRuntime,
+        other_compile_options: str = "",
+    ) -> str:
+        """
+        Get the AI Hub compile options string appropriate for the target runtime.
+        """
+        compile_options = ""
+        if target_runtime == TargetRuntime.QNN:
+            compile_options = "--target_runtime qnn_lib_aarch64_android"
+        if other_compile_options != "":
+            return compile_options + " " + other_compile_options
+        return compile_options
+
+    def preferred_hub_source_model_format(
+        self, target_runtime: TargetRuntime
+    ) -> SourceModelFormat:
+        return SourceModelFormat.TORCHSCRIPT
+
+    def sample_inputs(self, input_spec: InputSpec | None = None) -> InputsType:
+        """
+        Returns a set of sample inputs for the model.
+
+        For each input name in the model, a list of numpy arrays is provided.
+        If the returned set is batch N, all input names must contain exactly N numpy arrays.
+
+        This is a default implementation that returns a single random data array
+        for each input name based on the shapes and dtypes in `get_input_spec`.
+
+        A subclass may choose to override this and fetch a batch of real input data
+        from a data source.
+        """
+        if not input_spec:
+            input_spec = self.get_input_spec()
+        inputs_dict = {}
+        inputs_list = make_torch_inputs(input_spec)
+        for i, input_name in enumerate(input_spec.keys()):
+            inputs_dict[input_name] = [inputs_list[i].numpy()]
+        return inputs_dict
+
+
+class FromPrecompiledMixin(ABC):
+    @classmethod
+    @abstractmethod
+    def from_precompiled(
+        cls: Type[FromPrecompiledTypeVar], *args, **kwargs
+    ) -> "FromPrecompiledTypeVar":
+        """
+        Utility function that helps users get up and running with a default
+        precompiled model. While this function may take arguments, all arguments
+        should have default values specified, so that all classes can be invoked
+        with `cls.from_precompiled()` and always have it return something reasonable.
+        """
+        pass
+
+
+class BasePrecompiledModel(FromPrecompiledMixin):
+    @abstractmethod
+    def get_input_spec(self, *args, **kwargs) -> InputSpec:
+        """
+        Returns a map from `{input_name -> (shape, dtype)}`
+        specifying the shape and dtype for each input argument.
+        """
+        pass
+
+    def sample_inputs(self, input_spec: InputSpec | None = None) -> InputsType:
+        """
+        Returns a set of sample inputs for the model.
+
+        For each input name in the model, a list of numpy arrays is provided.
+        If the returned set is batch N, all input names must contain exactly N numpy arrays.
+
+        This is a default implementation that returns a single random data array
+        for each input name based on the shapes and dtypes in `get_input_spec`.
+
+        A subclass may choose to override this and fetch a batch of real input data
+        from a data source.
+        """
+        if not input_spec:
+            input_spec = self.get_input_spec()
+        inputs_dict = {}
+        inputs_list = make_torch_inputs(input_spec)
+        for i, input_name in enumerate(input_spec.keys()):
+            inputs_dict[input_name] = [inputs_list[i].numpy()]
+        return inputs_dict
diff --git a/qai_hub_models/utils/bounding_box_processing.py b/qai_hub_models/utils/bounding_box_processing.py
new file mode 100644
index 00000000..6a7550a8
--- /dev/null
+++ b/qai_hub_models/utils/bounding_box_processing.py
@@ -0,0 +1,265 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from __future__ import annotations
+
+from typing import List, Tuple
+
+import cv2
+import numpy as np
+import torch
+from torchvision.ops import nms
+
+
+def batched_nms(
+    iou_threshold: float,
+    score_threshold: float,
+    boxes: torch.Tensor,
+    scores: torch.Tensor,
+    *gather_additional_args,
+) -> Tuple[List[torch.Tensor], ...]:
+    """
+    Non-maximum suppression over several batches.
+
+    Inputs:
+        iou_threshold: float
+            Intersection over union (IoU) threshold
+
+        score_threshold: float
+            Score threshold (throw away any boxes with scores under this threshold)
+
+        boxes: torch.Tensor
+            Boxes to run NMS on. Shape is [B, N, 4], B == batch, N == num boxes, and 4 == (x1, x2, y1, y2)
+
+        scores: torch.Tensor
+            Scores for each box. Shape is [B, N], range is [0:1]
+
+        *gather_additional_args: torch.Tensor, ...
+            Additional tensor(s) to be gathered in the same way as boxes and scores.
+            In other words, each arg is returned with only the elements for the boxes selected by NMS.
+            Should be shape [B, N, ...]
+
+    Outputs:
+        boxes_out: List[torch.Tensor]
+            Output boxes. This is a list of tensors--one tensor per batch.
+            Each tensor is shape [S, 4], where S == number of selected boxes, and 4 == (x1, x2, y1, y2)
+
+        scores_out: List[torch.Tensor]
+            Output scores. This is a list of tensors--one tensor per batch.
+            Each tensor is shape [S], where S == number of selected boxes.
+
+        *args : List[torch.Tensor], ...
+            "Gathered" additional arguments, if provided.
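+
+    Example (illustrative; a single batch with two overlapping boxes):
+        boxes = torch.tensor([[[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0]]])
+        scores = torch.tensor([[0.9, 0.8]])
+        boxes_out, scores_out = batched_nms(0.5, 0.1, boxes, scores)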
+ """ + scores_out: List[torch.Tensor] = [] + boxes_out: List[torch.Tensor] = [] + args_out: List[List[torch.Tensor]] = ( + [[] for _ in gather_additional_args] if gather_additional_args else [] + ) + + for batch_idx in range(0, boxes.shape[0]): + # Clip outputs to valid scores + batch_scores = scores[batch_idx] + scores_idx = torch.nonzero(scores[batch_idx] >= score_threshold).squeeze(-1) + batch_scores = batch_scores[scores_idx] + batch_boxes = boxes[batch_idx, scores_idx] + batch_args = ( + [arg[batch_idx, scores_idx] for arg in gather_additional_args] + if gather_additional_args + else [] + ) + + if len(batch_scores > 0): + nms_indices = nms(batch_boxes[..., :4], batch_scores, iou_threshold) + batch_boxes = batch_boxes[nms_indices] + batch_scores = batch_scores[nms_indices] + batch_args = [arg[nms_indices] for arg in batch_args] + + boxes_out.append(batch_boxes) + scores_out.append(batch_scores) + for arg_idx, arg in enumerate(batch_args): + args_out[arg_idx].append(arg) + + return boxes_out, scores_out, *args_out + + +def compute_box_corners_with_rotation( + xc: torch.Tensor, + yc: torch.Tensor, + w: torch.Tensor, + h: torch.Tensor, + theta: torch.Tensor, +) -> torch.Tensor: + """ + From the provided information, compute the (x, y) coordinates of the box's corners. + + Inputs: + xc: torch.Tensor + Center of box (x). Shape is [ Batch ] + yc: torch.Tensor + Center of box (y). Shape is [ Batch ] + w: torch.Tensor + Width of box. Shape is [ Batch ] + h: torch.Tensor + Height of box. Shape is [ Batch ] + theta: torch.Tensor + Rotation of box (in radians). Shape is [ Batch ] + + Outputs: + corners: torch.Tensor + Computed corners. Shape is (B x 4 x 2), + where 2 == (x, y) + """ + batch_size = xc.shape[0] + + # Construct unit square + points = torch.tensor([[-1, -1, 1, 1], [-1, 1, -1, 1]], dtype=torch.float32).repeat( + batch_size, 1, 1 + ) # Construct Unit Square. Shape [B, 2, 4], where 2 == (X, Y) + points *= torch.stack((w / 2, h / 2), dim=-1).unsqueeze( + dim=2 + ) # Scale unit square to appropriate height and width + + # Rotate unit square to new coordinate system + R = torch.stack( + ( + torch.stack((torch.cos(theta), -torch.sin(theta)), dim=1), + torch.stack((torch.sin(theta), torch.cos(theta)), dim=1), + ), + dim=1, + ) # Construct rotation matrix + points = R @ points # Apply Rotation + + # Adjust box to center around the original center + points = points + torch.stack((xc, yc), dim=1).unsqueeze(dim=2) + + return points.transpose(-1, -2) + + +def compute_box_affine_crop_resize_matrix( + box_corners: torch.Tensor, output_image_size: Tuple[int, int] +) -> List[np.ndarray]: + """ + Computes the affine transform matrices required to crop, rescale, + and pad the box described by box_corners to fit into an image of the given size without warping. + + Inputs: + box_corners: torch.Tensor + Bounding box corners. These coordinates will be mapped to the output image. Shape is [B, 3, 2], + where B = batch, + 3 = (top left point, bottom left point, top right point) + and 2 = (x, y) + + output_image_size: float + Size of image to which the box should be resized and cropped. + + Outputs: + affines: List[np.ndarray] + Computed affine transform matrices. 
Shape is (2 x 3) + """ + # Define coordinates for translated image + network_input_points = np.array( + [[0, 0], [0, output_image_size[1] - 1], [output_image_size[0] - 1, 0]], + dtype=np.float32, + ) + + # Compute affine transformation that will map the square to the point + affines = [] + for batch in range(box_corners.shape[0]): + src = box_corners[batch][..., :3].detach().numpy() + affines.append(cv2.getAffineTransform(src, network_input_points)) + return affines + + +def box_xywh_to_xyxy(box_cwh: torch.Tensor) -> torch.Tensor: + """ + Convert center, W, H to top left / bottom right bounding box values. + + Inputs: + box_xy: torch.Tensor + Bounding box. Shape is [B, 2, 2] + [[xc, yc], [w, h]] * Batch + + Outputs: torch.Tensor + Output format is [[x0, y0], [x1, y1]] + """ + # Convert Xc, Yc, W, H to min and max bounding box values. + x_center = box_cwh[..., 0, 0] + y_center = box_cwh[..., 0, 1] + w = box_cwh[..., 1, 0] + h = box_cwh[..., 1, 1] + + out = torch.clone(box_cwh) + out[..., 0, 0] = x_center - w / 2.0 # x0 + out[..., 0, 1] = y_center - h / 2.0 # y0 + out[..., 1, 0] = x_center + w / 2.0 # x1 + out[..., 1, 1] = y_center + h / 2.0 # y1 + + return out + + +def box_xyxy_to_xywh( + box_xy: torch.Tensor, +) -> torch.Tensor: + """ + Converts box coordinates to center / width / height notation. + + Inputs: + box_xy: torch.Tensor + Bounding box. Shape is [B, 2, 2], + where B = batch, + 2 = (point 1, point 2), + and 2 = (x, y) + + Outputs: + box_cwh + Bounding box. Shape is [B, 2, 2], + [[xc, yc], [w, h]] * Batch + """ + x0 = box_xy[..., 0, 0] + y0 = box_xy[..., 0, 1] + x1 = box_xy[..., 1, 0] + y1 = box_xy[..., 1, 1] + + out = torch.clone(box_xy) + out[..., 1, 0] = x1 - x0 # w + out[..., 1, 1] = y1 - y0 # h + out[..., 0, 0] = x0 + out[..., 1, 0] / 2 # xc + out[..., 0, 1] = y0 + out[..., 1, 1] / 2 # yc + + return out + + +def apply_directional_box_offset( + offset: float | int | torch.Tensor, + vec_start: torch.Tensor, + vec_end: torch.Tensor, + xc: torch.Tensor, + yc: torch.Tensor, +): + """ + Offset the bounding box defined by [xc, yc] by a pre-determined length. + The offset will be applied in the direction of the supplied vector. + + Inputs: + offset: torch.Tensor + Floating point offset to apply to the bounding box, in absolute values. + vec_start: torch.Tensor + Starting point of the vector. Shape is [B, 2], where 2 == (x, y) + vec_end: torch.Tensor + Ending point of the vector. Shape is [B, 2], where 2 == (x, y) + xc: torch.Tensor + x center of box. + yc: torch.Tensor + y center of box + + Outputs: + No return value; xy and yc are modified in place. + """ + xlen = vec_end[..., 0] - vec_start[..., 0] + ylen = vec_end[..., 1] - vec_start[..., 1] + vec_len = torch.sqrt(torch.float_power(xlen, 2) + torch.float_power(ylen, 2)) + + xc += offset * (xlen / vec_len) + yc += offset * (ylen / vec_len) diff --git a/qai_hub_models/utils/camera_capture.py b/qai_hub_models/utils/camera_capture.py new file mode 100644 index 00000000..fcdc892a --- /dev/null +++ b/qai_hub_models/utils/camera_capture.py @@ -0,0 +1,55 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from typing import Callable + +import cv2 +import numpy as np + +ESCAPE_KEY_ID = 27 + + +def capture_and_display_processed_frames( + frame_processor: Callable[[np.ndarray], np.ndarray], + window_display_name: str, + cap_device: int = 0, +) -> None: + """ + Capture frames from the given input camera device, run them through + the frame processor, and display the outputs in a window with the given name. + + User should press Esc to exit. + + Inputs: + frame_processor: Callable[[np.ndarray], np.ndarray] + Processes frames. + Input and output are numpy arrays of shape (H W C) with BGR channel layout and dtype uint8 / byte. + window_display_name: str + Name of the window used to display frames. + cap_device: int + Identifier for the camera to use to capture frames. + """ + cv2.namedWindow(window_display_name) + capture = cv2.VideoCapture(cap_device) + if not capture.isOpened(): + raise ValueError("Unable to open video capture.") + + frame_count = 0 + has_frame, frame = capture.read() + while has_frame: + frame_count = frame_count + 1 + + # mirror frame + frame = np.ascontiguousarray(frame[:, ::-1, ::-1]) + + # process & show frame + processed_frame = frame_processor(frame) + cv2.imshow(window_display_name, processed_frame[:, :, ::-1]) + + has_frame, frame = capture.read() + key = cv2.waitKey(1) + if key == ESCAPE_KEY_ID: + break + + capture.release() diff --git a/qai_hub_models/utils/compare.py b/qai_hub_models/utils/compare.py new file mode 100644 index 00000000..13ce8b2e --- /dev/null +++ b/qai_hub_models/utils/compare.py @@ -0,0 +1,111 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Dict, List, NamedTuple, Tuple, Union + +import numpy as np +import torch + + +class InfenceMetrics(NamedTuple): + psnr: float + shape: Tuple[int, ...] + + +def torch_inference( + model: torch.nn.Module, sample_inputs: Dict[str, List[np.ndarray]] +) -> List[np.ndarray]: + """ + Performs inference on a torch model given a set of sample inputs. + + Parameters: + model: The torch model. + sample_inputs: Map from input name to list of values for that input. + + Returns: + List of numpy array outputs, + """ + torch_outs: List[List[torch.Tensor]] = [] + input_names = sample_inputs.keys() + for i in range(len(list(sample_inputs.values())[0])): + inputs = {} + for input_name in input_names: + inputs[input_name] = torch.from_numpy(sample_inputs[input_name][i]) + with torch.no_grad(): + out = model(**inputs) + out_tuple = (out,) if isinstance(out, torch.Tensor) else out + for i, out_val in enumerate(out_tuple): + if i == len(torch_outs): + torch_outs.append([]) + torch_outs[i].append(out_val) + return [torch.cat(out_list, dim=0).numpy() for out_list in torch_outs] + + +def compute_psnr( + output_a: Union[torch.Tensor, np.ndarray], + output_b: Union[torch.Tensor, np.ndarray], + eps: float = 1e-5, + eps2: float = 1e-10, +) -> float: + """ + Computes the PSNR between two tensors. 
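+
+    PSNR is computed (as in the implementation below) as
+    20 * log10((max(|b|) + eps) / (RMSE(a, b) + eps2)),
+    where `a` and `b` are the flattened inputs and `b` is treated as the reference signal.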
+    """
+    if not isinstance(output_a, np.ndarray):
+        a = output_a.detach().numpy().flatten()
+    else:
+        a = output_a.flatten()
+    if not isinstance(output_b, np.ndarray):
+        b = output_b.detach().numpy().flatten()
+    else:
+        b = output_b.flatten()
+    max_b = np.abs(b).max()
+    sumdeltasq = 0.0
+    sumdeltasq = ((a - b) * (a - b)).sum()
+    sumdeltasq /= b.size
+    sumdeltasq = np.sqrt(sumdeltasq)
+
+    return 20 * np.log10((max_b + eps) / (sumdeltasq + eps2))
+
+
+def compute_relative_error(expected: np.ndarray, actual: np.ndarray) -> np.ndarray:
+    assert expected.shape == actual.shape
+    return (np.abs(expected - actual) / (np.abs(expected) + 1e-20)).flatten()
+
+
+def compare_psnr(
+    output_a: Union[torch.Tensor, np.ndarray],
+    output_b: Union[torch.Tensor, np.ndarray],
+    psnr_threshold: int,
+    eps: float = 1e-5,
+    eps2: float = 1e-10,
+) -> None:
+    """
+    Raises an error if the PSNR between two tensors is below a threshold.
+    """
+    psnr = compute_psnr(output_a, output_b, eps, eps2)
+    assert psnr > psnr_threshold
+
+
+def generate_comparison_metrics(
+    expected: List[np.ndarray], actual: List[np.ndarray]
+) -> Dict[int, InfenceMetrics]:
+    """
+    Compares the outputs of a model run in two different ways.
+    For example, expected might be run on local cpu and actual run on device.
+
+    Parameters:
+        expected: List of numpy array outputs computed from a ground truth model.
+        actual: List of numpy array outputs computed from an experimental model.
+
+    Returns:
+        A set of metrics representing how close the two sets of outputs are.
+    """
+    metrics = {}
+    for i, (expected_arr, actual_arr) in enumerate(zip(expected, actual)):
+        metrics[i] = InfenceMetrics(
+            compute_psnr(expected_arr, actual_arr), expected_arr.shape
+        )
+    return metrics
diff --git a/qai_hub_models/utils/config_loaders.py b/qai_hub_models/utils/config_loaders.py
new file mode 100644
index 00000000..90b50154
--- /dev/null
+++ b/qai_hub_models/utils/config_loaders.py
@@ -0,0 +1,784 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os +from dataclasses import dataclass +from enum import Enum +from pathlib import Path +from typing import Dict, List, Optional, Tuple, Union + +import requests +import yaml +from qai_hub.util.session import create_session +from schema import And +from schema import Optional as OptionalSchema +from schema import Schema, SchemaError + +from qai_hub_models.utils.asset_loaders import ASSET_CONFIG, QAIHM_WEB_ASSET +from qai_hub_models.utils.base_model import TargetRuntime +from qai_hub_models.utils.path_helpers import ( + MODELS_PACKAGE_NAME, + QAIHM_PACKAGE_NAME, + get_qaihm_models_root, + get_qaihm_package_root, +) + +QAIHM_PACKAGE_ROOT = get_qaihm_package_root() +QAIHM_MODELS_ROOT = get_qaihm_models_root() +QAIHM_DIRS = [ + Path(f.path) + for f in os.scandir(QAIHM_MODELS_ROOT) + if f.is_dir() and "info.yaml" in os.listdir(f) +] +MODEL_IDS = [f.name for f in QAIHM_DIRS] + +HF_AVAILABLE_LICENSES = { + "apache-2.0", + "mit", + "openrail", + "bigscience-openrail-m", + "creativeml-openrail-m", + "bigscience-bloom-rail-1.0", + "bigcode-openrail-m", + "afl-3.0", + "artistic-2.0", + "bsl-1.0", + "bsd", + "bsd-2-clause", + "bsd-3-clause", + "bsd-3-clause-clear", + "c-uda", + "cc", + "cc0-1.0", + "cc0-2.0", + "cc-by-2.5", + "cc-by-3.0", + "cc-by-4.0", + "cc-by-sa-3.0", + "cc-by-sa-4.0", + "cc-by-nc-2.0", + "cc-by-nc-3.0", + "cc-by-nc-4.0", + "cc-by-nd-4.0", + "cc-by-nc-nd-3.0", + "cc-by-nc-nd-4.0", + "cc-by-nc-sa-2.0", + "cc-by-nc-sa-3.0", + "cc-by-nc-sa-4.0", + "cdla-sharing-1.0", + "cdla-permissive-1.0", + "cdla-permissive-2.0", + "wtfpl", + "ecl-2.0", + "epl-1.0", + "epl-2.0", + "etalab-2.0", + "agpl-3.0", + "gfdl", + "gpl", + "gpl-2.0", + "gpl-3.0", + "lgpl", + "lgpl-2.1", + "lgpl-3.0", + "isc", + "lppl-1.3c", + "ms-pl", + "mpl-2.0", + "odc-by", + "odbl", + "openrail++", + "osl-3.0", + "postgresql", + "ofl-1.1", + "ncsa", + "unlicense", + "zlib", + "pddl", + "lgpl-lr", + "deepfloyd-if-license", + "llama2", + "unknown", + "other", +} + + +class FORM_FACTOR(Enum): + PHONE = 0 + TABLET = 1 + IOT = 2 + XR = 3 + + @staticmethod + def from_string(string: str) -> "FORM_FACTOR": + return FORM_FACTOR[string.upper()] + + def __str__(self): + if self == FORM_FACTOR.IOT: + return "IoT" + return self.name.title() + + +class MODEL_DOMAIN(Enum): + COMPUTER_VISION = 0 + AUDIO = 1 + MULTIMODAL = 2 + GENERATIVE_AI = 3 + + @staticmethod + def from_string(string: str) -> "MODEL_DOMAIN": + return MODEL_DOMAIN[string.upper().replace(" ", "_")] + + def __str__(self): + return self.name.title().replace("_", " ") + + +class MODEL_TAG(Enum): + BACKBONE = 0 + REAL_TIME = 1 + FOUNDATION = 2 + QUANTIZED = 3 + LLM = 4 + GENERATIVE_AI = 5 + + @staticmethod + def from_string(string: str) -> "MODEL_TAG": + assert "_" not in string + return MODEL_TAG[string.upper().replace("-", "_")] + + def __str__(self) -> str: + return self.name.replace("_", "-").lower() + + def __repr__(self) -> str: + return self.__str__() + + +class MODEL_STATUS(Enum): + PUBLIC = 0 + PRIVATE = 1 + # proprietary models are released only internally + PROPRIETARY = 2 + + @staticmethod + def from_string(string: str) -> "MODEL_STATUS": + return MODEL_STATUS[string.upper()] + + def __str__(self): + return self.name + + +class MODEL_USE_CASE(Enum): + # Image: 100 - 199 + IMAGE_CLASSIFICATION = 100 + IMAGE_EDITING = 101 + IMAGE_GENERATION = 102 + SUPER_RESOLUTION = 103 + SEMANTIC_SEGMENTATION = 104 + # Ex: 
OCR, image caption + IMAGE_TO_TEXT = 105 + OBJECT_DETECTION = 106 + POSE_ESTIMATION = 107 + + # Audio: 200 - 299 + SPEECH_RECOGNITION = 200 + AUDIO_ENHANCEMENT = 201 + + # Video: 300 - 399 + VIDEO_CLASSIFICATION = 300 + VIDEO_GENERATION = 301 + + # LLM: 400 - 499 + TEXT_GENERATION = 400 + + @staticmethod + def from_string(string: str) -> "MODEL_USE_CASE": + return MODEL_USE_CASE[string.upper().replace(" ", "_")] + + def __str__(self): + return self.name.replace("_", " ").title() + + def map_to_hf_pipeline_tag(self): + """Map our usecase to pipeline-tag used by huggingface.""" + if self.name in {"IMAGE_EDITING", "SUPER_RESOLUTION"}: + return "image-to-image" + if self.name == "SEMANTIC_SEGMENTATION": + return "image-segmentation" + if self.name == "POSE_ESTIMATION": + return "image-classification" + if self.name == "AUDIO_ENHANCEMENT": + return "audio-to-audio" + if self.name == "VIDEO_GENERATION": + return "image-to-video" + if self.name == "IMAGE_GENERATION": + return "unconditional-image-generation" + if self.name == "SPEECH_RECOGNITION": + return "automatic-speech-recognition" + return self.name.replace("_", "-").lower() + + +TFLITE_PATH = "torchscript_onnx_tflite" +QNN_PATH = "torchscript_onnx_qnn" + + +class QAIHMModelPerf: + """Class to read the perf.yaml and parse it for displaying it on HuggingFace.""" + + @dataclass + class ModelRuntimePerformanceDetails: + model_name: str + device_name: str + device_os: str + runtime: TargetRuntime + inference_time_ms: int + peak_memory_bytes: Tuple[int, int] # min, max + compute_unit_counts: Dict[str, int] + + def __init__(self, perf_yaml_path, model_name): + self.model_name = model_name + self.perf_yaml_path = perf_yaml_path + self.skip_overall = False + self.skip_tflite = False + self.skip_qnn = False + self.tflite_row = ( + "| Samsung Galaxy S23 Ultra (Android 13) | Snapdragon® 8 Gen 2 |" + ) + self.qnn_row = "| Samsung Galaxy S23 Ultra (Android 13) | Snapdragon® 8 Gen 2 |" + + if os.path.exists(self.perf_yaml_path): + with open(self.perf_yaml_path, "r") as perf_file: + self.perf_details = yaml.safe_load(perf_file) + num_models = len(self.perf_details["models"]) + + # Get TFLite summary from perf.yaml + try: + self.tflite_summary = [] + for model in self.perf_details["models"]: + self.tflite_summary.append( + model["performance_metrics"][0][TFLITE_PATH] + ) + except Exception: + self.skip_tflite = True + + if not self.skip_overall and not self.skip_tflite: + for num in range(num_models): + if isinstance(self.tflite_summary[num]["inference_time"], str): + self.skip_tflite = True + + # Get QNN summary from perf.yaml + try: + self.qnn_summary = [] + for model in self.perf_details["models"]: + self.qnn_summary.append( + model["performance_metrics"][0][QNN_PATH] + ) + except Exception: + self.skip_qnn = True + if not self.skip_overall and not self.skip_qnn: + for num in range(num_models): + if isinstance(self.qnn_summary[num]["inference_time"], str): + self.skip_qnn = True + else: + self.skip_overall = True + + def _get_runtime_type(self, model_type): + if model_type == "tflite": + return "TFLite" + if model_type == "so": + return "QNN Model Library" + if model_type == "bin": + return "QNN Binary" + raise RuntimeError(f"Unsupported model_type specified {model_type}.") + + def get_row(self, skip, summary_list, initial_row, model_type, has_assets=True): + # Creating a row for performance table. 
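+        # Each generated row follows the markdown table layout used in the
+        # generated Hugging Face model cards:
+        # | device | chipset | runtime | inference time | peak memory range | precision | compute unit | target model |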
+ row = "" + if not skip: + names = self.get_submodel_names() + for summary, name in zip(summary_list, names): + inf_time = summary["inference_time"] + inference_time = f"{inf_time / 1000} ms" + mem_min = round( + summary["estimated_peak_memory_range"]["min"] / 1024 / 1024 + ) + mem_max = round( + summary["estimated_peak_memory_range"]["max"] / 1024 / 1024 + ) + peak_memory_range = f"{mem_min} - {mem_max} MB" + if model_type == "tflite": + self.tflite_inference_time = inference_time + self.tflite_peak_memory_range = peak_memory_range + elif model_type == "so" or model_type == "bin": + self.qnn_inference_time = inference_time + self.qnn_peak_memory_range = peak_memory_range + primary_compute_unit = summary["primary_compute_unit"] + precision = summary["precision"].upper() + base_url = ASSET_CONFIG.get_hugging_face_url(self.model_name) + # For no_assets models, only show model name and no-link + # as there is not target model to download + if has_assets: + target_model = f" [{name}.{model_type}]({base_url}/blob/main/{name}.{model_type})" + else: + target_model = name + + runtime_type = self._get_runtime_type(model_type) + row += ( + initial_row + + f" {runtime_type} | {inference_time} | {peak_memory_range} | {precision} | {primary_compute_unit} | {target_model} \n" + ) + return row + return "" + + def get_tflite_row(self): + # Get TFLite row for a submodel on a device. + return self.get_row( + self.skip_tflite, self.tflite_summary, self.tflite_row, "tflite" + ) + + def get_qnn_row(self, is_precompiled: bool = False, has_assets=True): + # Get QNN row for a submodel on a device. + return self.get_row( + self.skip_qnn, + self.qnn_summary, + self.qnn_row, + "bin" if is_precompiled else "so", + has_assets, + ) + + def body_perf(self, is_precompiled: bool = False, has_assets: bool = True): + # Combine all the rows to make the body of performance table. + if self.skip_tflite: + return self.get_qnn_row(is_precompiled, has_assets) + elif self.skip_qnn: + return self.get_tflite_row() + else: + return self.get_tflite_row() + self.get_qnn_row(is_precompiled, has_assets) + + def compute_unit_summary(self, runtime_path=TFLITE_PATH): + # Get compute unit summary for export script's output. + npu, gpu, cpu = 0, 0, 0 + cu_summary = "" + for model in self.perf_details["models"]: + layer_info = model["performance_metrics"][0][runtime_path]["layer_info"] + npu += layer_info["layers_on_npu"] + gpu += layer_info["layers_on_gpu"] + cpu += layer_info["layers_on_cpu"] + if npu > 0: + cu_summary += f"NPU ({npu})" + if gpu > 0: + cu_summary += f"GPU ({gpu})" + if cpu > 0: + cu_summary += f"CPU ({cpu})" + return cu_summary + + def get_submodel_names_and_ids(self): + # Get the names, TFLite job ids and QNN job ids. + names = self.get_submodel_names() + tflite_job_ids, qnn_job_ids = [], [] + for model in self.perf_details["models"]: + if TFLITE_PATH in model["performance_metrics"][0]: + tflite_job_ids.append( + model["performance_metrics"][0][TFLITE_PATH]["job_id"] + ) + if QNN_PATH in model["performance_metrics"][0]: + qnn_job_ids.append(model["performance_metrics"][0][QNN_PATH]["job_id"]) + return names, tflite_job_ids, qnn_job_ids + + def get_submodel_names(self): + # Get names of all the submodels. 
+ names = [] + for model in self.perf_details["models"]: + names.append(model["name"]) + return names + + def get_perf_details( + self, + runtime: TargetRuntime, + device: str | None = None, + device_os: str | None = None, + ) -> Dict[str, ModelRuntimePerformanceDetails | None]: + """ + Get model performance details for the selected device and runtime. + + If device is None, picks the first device specified in the perf results. + + Returns a dictionary of + { model_component_name : performance details object } + + If there is only one component, model_component_name == model_name. + + The performance details object will be null if the requested + perf details do not exist, or if the perf job failed. + """ + if runtime == TargetRuntime.TFLITE: + rt_name = "torchscript_onnx_tflite" + elif runtime == TargetRuntime.QNN: + rt_name = "torchscript_onnx_qnn" + else: + raise NotImplementedError() + + # Model -> Performance Details + # None == Test did not run. + perf_details: Dict[ + str, QAIHMModelPerf.ModelRuntimePerformanceDetails | None + ] = {} + + for model in self.perf_details["models"]: + name = model["name"] + metrics = model["performance_metrics"] + for device_metrics in metrics: + device_name = device_metrics["reference_device_info"]["name"] + metric_device_os = device_metrics["reference_device_info"]["os"] + + # Verify Device Matches Requested Device + if device and device_name != device: + continue + if device_os and metric_device_os != device_os: + continue + + perf_rt = device_metrics.get(rt_name, None) + + # Inference Time + inf_time = perf_rt["inference_time"] if perf_rt else "null" + if inf_time == "null": + # Compilation or inference failed. + perf_details[name] = None + continue + inf_time /= 1000 + + # Memory + peak_mem = perf_rt["estimated_peak_memory_range"] + peak_mem_bytes: Tuple[int, int] = tuple([peak_mem["min"], peak_mem["max"]]) # type: ignore + + # Layer Info + layer_info = perf_rt["layer_info"] + compute_unit_counts = {} + for layer_name, count in layer_info.items(): + if "layers_on" in layer_name: + if count > 0: + compute_unit_counts[layer_name[-3:].upper()] = count + + perf_details[name] = QAIHMModelPerf.ModelRuntimePerformanceDetails( + model_name=model, + device_name=device_name, + device_os=metric_device_os, + runtime=runtime, + inference_time_ms=inf_time, + peak_memory_bytes=peak_mem_bytes, + compute_unit_counts=compute_unit_counts, + ) + + if name not in perf_details.keys(): + perf_details[name] = None + + return perf_details + + +class QAIHMModelInfo: + def __init__( + self, + name: str, + id: str, + status: MODEL_STATUS, + headline: str, + domain: MODEL_DOMAIN, + description: str, + use_case: MODEL_USE_CASE, + tags: List[MODEL_TAG], + research_paper: str, + research_paper_title: str, + license: str, + source_repo: str, + applicable_scenarios: List[str], + related_models: List[str], + form_factors: List[FORM_FACTOR], + has_static_banner: bool, + has_animated_banner: bool, + code_gen_config: Dict[str, str | bool], + license_type: str, + dataset: List[str], + technical_details: Dict[str, str], + ) -> None: + self.name = name + self.id = id + self.status = status + self.headline = headline + self.domain = domain + self.description = description + self.use_case = use_case + self.tags = tags + self.research_paper = research_paper + self.research_paper_title = research_paper_title + self.license = license + self.license_type = license_type + self.dataset = dataset + self.source_repo = source_repo + self.applicable_scenarios = applicable_scenarios + 
self.related_models = related_models + self.form_factors = form_factors + self.has_static_banner = has_static_banner + self.has_animated_banner = has_animated_banner + self.code_gen_config = code_gen_config + self.technical_details = technical_details + + def validate(self) -> Tuple[bool, Optional[str]]: + """Returns false with a reason if the info spec for this model is not valid.""" + # Validate ID + if self.id not in MODEL_IDS: + return False, f"{self.id} is not a valid QAI Hub Models ID." + if " " in self.id or "-" in self.id: + return False, "Model IDs cannot contain spaces or dashes." + if self.id.lower() != self.id: + return False, "Model IDs must be lowercase." + + # Validate (used as repo name for HF as well) + if " " in self.name: + return False, "Model Name must not have a space." + + # Headline should end with period + if not self.headline.endswith("."): + return False, "Model headlines must end with a period." + + # Quantized models must contain quantized tag + if ("quantized" in self.id) and (MODEL_TAG.QUANTIZED not in self.tags): + return False, f"Quantized models must have quantized tag. tags: {self.tags}" + if ("quantized" not in self.id) and (MODEL_TAG.QUANTIZED in self.tags): + return ( + False, + f"Models with a quantized tag must have 'quantized' in the id. tags: {self.tags}", + ) + + # Validate related models are present + for r_model in self.related_models: + if r_model not in MODEL_IDS: + return False, f"Related model {r_model} is not a valid model ID." + if r_model == self.id: + return False, f"Model {r_model} cannot be related to itself." + + # If paper is arxiv, it should be an abs link + if self.research_paper.startswith("https://arxiv.org/"): + if "/abs/" not in self.research_paper: + return ( + False, + "Arxiv links should be `abs` links, not link directly to pdfs.", + ) + + # If license_type does not match the map, return an error + if self.license_type not in HF_AVAILABLE_LICENSES: + return False, f"license can be one of these: {HF_AVAILABLE_LICENSES}" + + # Web assets exist + if self.status == MODEL_STATUS.PUBLIC and not self.has_static_banner: + return False, "All public models must have a static banner." 
+ + # Required assets exist + if self.status == MODEL_STATUS.PUBLIC: + if not os.path.exists(self.get_package_path() / "info.yaml"): + return False, "All public models must have an info.yaml" + + if self.code_gen_config.get( + "tflite_export_failure_reason", False + ) and self.code_gen_config.get("qnn_export_failure_reason", False): + return False, "Public models must support at least one export path" + + session = create_session() + if self.has_static_banner: + static_banner_url = ASSET_CONFIG.get_web_asset_url( + self.id, QAIHM_WEB_ASSET.STATIC_IMG + ) + if session.head(static_banner_url).status_code != requests.codes.ok: + return False, f"Static banner is missing at {static_banner_url}" + if self.has_animated_banner: + animated_banner_url = ASSET_CONFIG.get_web_asset_url( + self.id, QAIHM_WEB_ASSET.ANIMATED_MOV + ) + if session.head(animated_banner_url).status_code != requests.codes.ok: + return False, f"Animated banner is missing at {animated_banner_url}" + + expected_qaihm_repo = f"qai_hub_models/models/{self.id}" + if expected_qaihm_repo != ASSET_CONFIG.get_qaihm_repo(self.id): + return False, "QAIHM repo not pointing to expected relative path" + + expected_example_use = f"qai_hub_models/models/{self.id}#example--usage" + if expected_example_use != ASSET_CONFIG.get_example_use(self.id): + return False, "Example-usage field not pointing to expected relative path" + + return True, None + + def get_package_name(self): + return f"{QAIHM_PACKAGE_NAME}.{MODELS_PACKAGE_NAME}.{self.id}" + + def get_package_path(self, root: Path = QAIHM_PACKAGE_ROOT): + return get_qaihm_models_root(root) / self.id + + def get_model_definition_path(self): + return os.path.join( + ASSET_CONFIG.get_qaihm_repo(self.id, relative=False), "model.py" + ) + + def get_demo_path(self): + return os.path.join( + ASSET_CONFIG.get_qaihm_repo(self.id, relative=False), "demo.py" + ) + + def get_info_yaml_path(self, root: Path = QAIHM_PACKAGE_ROOT): + return self.get_package_path(root) / "info.yaml" + + def get_hf_pipeline_tag(self): + return self.use_case.map_to_hf_pipeline_tag() + + def get_hugging_face_metadata(self, root: Path = QAIHM_PACKAGE_ROOT): + # Get the metadata for huggingface model cards. + hf_metadata: Dict[str, Union[str, List[str]]] = dict() + hf_metadata["library_name"] = "pytorch" + hf_metadata["license"] = self.license_type + hf_metadata["tags"] = [tag.name.lower() for tag in self.tags] + ["android"] + if self.dataset != []: + hf_metadata["datasets"] = self.dataset + hf_metadata["pipeline_tag"] = self.get_hf_pipeline_tag() + return hf_metadata + + def get_model_details(self): + # Model details. 
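+        # Rendered as a markdown bullet list: the model type, followed by one
+        # sub-bullet per entry in `technical_details` from info.yaml.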
+ details = ( + "- **Model Type:** " + + self.use_case.__str__().lower().capitalize() + + "\n- **Model Stats:**" + ) + for name, val in self.technical_details.items(): + details += f"\n - {name}: {val}" + return details + + def get_perf_yaml_path(self, root: Path = QAIHM_PACKAGE_ROOT): + return self.get_package_path(root) / "perf.yaml" + + def get_code_gen_yaml_path(self, root: Path = QAIHM_PACKAGE_ROOT): + return self.get_package_path(root) / "code-gen.yaml" + + def get_readme_path(self, root: Path = QAIHM_PACKAGE_ROOT): + return self.get_package_path(root) / "README.md" + + def get_hf_model_card_path(self, root: Path = QAIHM_PACKAGE_ROOT): + return self.get_package_path(root) / "HF_MODEL_CARD.md" + + def get_requirements_path(self, root: Path = QAIHM_PACKAGE_ROOT): + return self.get_package_path(root) / "requirements.txt" + + def has_model_requirements(self, root: Path = QAIHM_PACKAGE_ROOT): + return os.path.exists(self.get_requirements_path(root)) + + @staticmethod + def from_model(model_id: str): + schema_path = QAIHM_MODELS_ROOT / model_id / "info.yaml" + code_gen_path = QAIHM_MODELS_ROOT / model_id / "code-gen.yaml" + if not os.path.exists(schema_path): + raise ValueError(f"{model_id} does not exist") + return QAIHMModelInfo.from_yaml(schema_path, code_gen_path) + + @staticmethod + def from_yaml(info_path: str | Path, code_gen_path: str | Path | None = None): + # Load CFG and params + info_yaml = QAIHMModelInfo.load_info_yaml(info_path) + code_gen_config = QAIHMModelInfo.load_code_gen_yaml(code_gen_path) + return QAIHMModelInfo( + info_yaml["name"], + info_yaml["id"], + MODEL_STATUS.from_string(info_yaml["status"]), + info_yaml["headline"], + MODEL_DOMAIN.from_string(info_yaml["domain"]), + info_yaml["description"], + MODEL_USE_CASE.from_string(info_yaml["use_case"]), + [MODEL_TAG.from_string(tag) for tag in info_yaml["tags"]], + info_yaml["research_paper"], + info_yaml["research_paper_title"], + info_yaml["license"], + info_yaml["source_repo"], + info_yaml["applicable_scenarios"], + info_yaml["related_models"], + [FORM_FACTOR.from_string(ff) for ff in info_yaml["form_factors"]], + info_yaml["has_static_banner"], + info_yaml["has_animated_banner"], + code_gen_config, + info_yaml["license_type"], + info_yaml["dataset"], + info_yaml["technical_details"], + ) + + # Schema for info.yaml + INFO_YAML_SCHEMA = Schema( + { + "name": And(str), + "id": And(str), + "status": And(str), + "headline": And(str), + "domain": And(str), + "description": And(str), + "use_case": And(str), + "tags": And(lambda s: len(s) >= 0), + "research_paper": And(str), + "research_paper_title": And(str), + "license": And(str), + "source_repo": And(str), + "technical_details": And(dict), + "applicable_scenarios": And(lambda s: len(s) >= 0), + "related_models": And(lambda s: len(s) >= 0), + "form_factors": And(lambda s: len(s) >= 0), + "has_static_banner": And(bool), + "has_animated_banner": And(bool), + "license_type": And(str), + "dataset": And(list), + } + ) + + # Schema for code-gen.yaml + CODE_GEN_YAML_SCHEMA = Schema( + And( + { + OptionalSchema("has_components", default=""): str, + OptionalSchema("is_aimet", default=False): bool, + OptionalSchema("has_on_target_demo", default=False): bool, + OptionalSchema("qnn_export_failure_reason", default=""): str, + OptionalSchema("tflite_export_failure_reason", default=""): str, + OptionalSchema("has_demo", default=True): bool, + OptionalSchema("check_trace", default=True): bool, + OptionalSchema("default_profile_options", default=""): str, + 
OptionalSchema("default_compile_options", default=""): str, + OptionalSchema("channel_last_input", default=""): str, + OptionalSchema("channel_last_output", default=""): str, + OptionalSchema("outputs_to_skip_validation", default=[]): list, + OptionalSchema("export_test_model_kwargs", default={}): dict, + OptionalSchema("components", default={}): dict, + OptionalSchema("default_components", default=[]): list, + OptionalSchema("skip_tests", default=False): bool, + OptionalSchema("is_precompiled", default=False): bool, + OptionalSchema("no_assets", default=False): bool, + OptionalSchema("torchscript_opt", default=[]): list, + } + ) + ) + + @staticmethod + def load_info_yaml(path: str | Path): + with open(path) as f: + data = yaml.safe_load(f) + try: + # Validate high level-schema + data = QAIHMModelInfo.INFO_YAML_SCHEMA.validate(data) + except SchemaError as e: + assert 0, f"{e.code} in {path}" + return data + + @staticmethod + def load_code_gen_yaml(path: str | Path | None): + if not path or not os.path.exists(path): + return QAIHMModelInfo.CODE_GEN_YAML_SCHEMA.validate({}) # Default Schema + with open(path) as f: + data = yaml.safe_load(f) + try: + # Validate high level-schema + data = QAIHMModelInfo.CODE_GEN_YAML_SCHEMA.validate(data) + except SchemaError as e: + assert 0, f"{e.code} in {path}" + return data diff --git a/qai_hub_models/utils/display.py b/qai_hub_models/utils/display.py new file mode 100644 index 00000000..c628b0ff --- /dev/null +++ b/qai_hub_models/utils/display.py @@ -0,0 +1,98 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os +from pathlib import Path +from typing import Optional + +from PIL.Image import Image +from PIL.ImageShow import IPythonViewer, _viewers # type: ignore + +ALWAYS_DISPLAY_VAR = "QAIHM_ALWAYS_DISPLAY_OUTPUT" + + +def is_running_in_notebook(): + try: + from IPython import get_ipython + + if "IPKernelApp" not in get_ipython().config: # pragma: no cover + return False + except ImportError: + return False + except AttributeError: + return False + return True + + +def save_image(image: Image, base_dir: str, filename: str, desc: str): + os.makedirs(base_dir, exist_ok=True) + filename = os.path.join(base_dir, filename) + image.save(filename) + print(f"Saving {desc} to {filename}") + + +def display_image(image: Image, desc: str = "image") -> bool: + """ + Attempt to display image. + Return true if displaying was attempted without exceptions. + """ + # Display IPython viewer first + # Remote server notebooks will be caught here as well + if is_running_in_notebook(): + for viewer in _viewers: + if isinstance(viewer, IPythonViewer): + viewer.show(image) + return True + + try: + if os.environ.get(ALWAYS_DISPLAY_VAR) == "1" or not ( + os.environ.get("SSH_TTY") or os.environ.get("SSH_CLIENT") + ): + print(f"Displaying {desc}") + image.show() + return True + else: + print( + "\nDemo image display is disabled by default for remote servers. " + f"To override, set `{ALWAYS_DISPLAY_VAR}=1` in your environment.\n" + ) + except Exception: + print("Failure to display demo images displayed on screen.") + print( + "If you are using a notebook environment like Jupyter/Collab, please use %run -m to run the script instead of python -m." 
+ ) + return False + + +def display_or_save_image( + image: Image, + output_dir: Optional[str] = None, + filename: str = "image.png", + desc: str = "image", +) -> bool: + """ + If output_dir is set, save image to disk and return. + Else try to display image. + If displaying image fails, save to disk in a default location. + + Parameters: + image: PIL Image to save. + output_dir: If set, saves image to this directory. + filename: If saving to directory, the filename to use. + desc: Description of what the image is, used in a print statement. + + Returns: + True if displaying was attempted. + """ + if output_dir is not None: + save_image(image, output_dir, filename, desc) + return False + + if display_image(image, desc): + return True + + save_image(image, str(Path.cwd() / "build"), filename, desc) + return False diff --git a/qai_hub_models/utils/draw.py b/qai_hub_models/utils/draw.py new file mode 100644 index 00000000..9352e7ef --- /dev/null +++ b/qai_hub_models/utils/draw.py @@ -0,0 +1,223 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import List, Optional, Tuple + +import cv2 +import numpy +import torch + + +def draw_points( + frame: numpy.ndarray, + points: numpy.ndarray | torch.Tensor, + color: Tuple[int, int, int] = (0, 0, 0), + size: int = 3, +): + """ + Draw the given points on the frame. + + Parameters: + frame: numpy.ndarray + numpy array (H W C x uint8, BGR) + + points: numpy.ndarray | torch.Tensor + array (N, 2) where layout is + [x1, y1] [x2, y2], ... + or + array (N * 2,) where layout is + x1, y1, x2, y2, ... + + color: Tuple[int, int, int] + Color of drawn points (RGB) + + size: int + Size of drawn points + + Returns: + None; modifies frame in place. + """ + n2 = len(points.shape) == 2 + for i in range(0, len(points) if n2 else len(points) // 2): + x, y = points[i] if n2 else (points[i * 2], points[i * 2 + 1]) + cv2.circle(frame, (int(x), int(y)), size, color, thickness=size) + + +def draw_connections( + frame: numpy.ndarray, + points: numpy.ndarray | torch.Tensor, + connections: List[Tuple[int, int]], + color: Tuple[int, int, int] = (0, 0, 0), + size: int = 3, +): + """ + Draw connecting lines between the given points on the frame. + + Parameters: + frame: numpy.ndarray + numpy array (H W C x uint8, BGR) + + points: numpy.ndarray | torch.Tensor + array (N, 2) where layout is + [x1, y1] [x2, y2], ... + or + array (N * 2,) where layout is + x1, y1, x2, y2, ... + + connections: List[Tuple[int, int]] + List of points that should be connected by a line. + Format is [(src point index, dst point index), ...] + + color: Tuple[int, int, int] + Color of drawn points (RGB) + + size: int + Size of drawn connection lines + + Returns: + None; modifies frame in place. 
+ """ + n2 = len(points.shape) == 2 + for connection in connections: + x0, y0 = ( + points[connection[0]] + if n2 + else (points[connection[0] * 2], points[connection[0] * 2 + 1]) + ) + x1, y1 = ( + points[connection[1]] + if n2 + else (points[connection[1] * 2], points[connection[1] * 2 + 1]) + ) + x0, y0 = int(x0), int(y0) + x1, y1 = int(x1), int(y1) + cv2.line(frame, (x0, y0), (x1, y1), color, size) + + +def draw_box_from_corners( + frame: numpy.ndarray, corners: numpy.ndarray | torch.Tensor, color=(0, 0, 0), size=3 +): + """ + Draw a box using the 4 points provided as boundaries. + + Parameters: + frame: numpy.ndarray + numpy array (H W C x uint8, BGR) + + corners: numpy.ndarray | torch.Tensor + array (4, 2) where layout is + [x1, y1] [x2, y2], ... + or + array (8) where layout is + x1, y1, x2, y2 + + color: Tuple[int, int, int] + Color of drawn points and connection lines (BGR) + + size: int + Size of drawn points and connection lines + + Returns: + None; modifies frame in place. + """ + draw_points(frame, corners, color, size) + draw_connections(frame, corners, [(0, 1), (0, 2), (1, 3), (2, 3)], color, size) + + +def draw_box_from_xywh( + frame: numpy.ndarray, + box: numpy.ndarray | torch.Tensor, + color: Tuple[int, int, int] = (0, 0, 0), + size: int = 3, +): + """ + Draw a box using the provided data (center / height / width) to compute the box. + + Parameters: + frame: numpy.ndarray + numpy array (H W C x uint8, BGR) + + box: numpy.ndarray | torch.Tensor + array (4), where layout is + [xcenter, ycenter, h, w] + + color: Tuple[int, int, int] + Color of drawn points and connection lines (RGB) + + size: int + Size of drawn points and connection lines + + Returns: + None; modifies frame in place. + """ + xc, yc, h, w = box + TL = [xc - w // 2, yc - h // 2] + BR = [xc + w // 2, yc + h // 2] + cv2.rectangle(frame, TL, BR, color, size) + + +def draw_box_from_xyxy( + frame: numpy.ndarray, + top_left: numpy.ndarray | torch.Tensor | Tuple[int, int], + bottom_right: numpy.ndarray | torch.Tensor | Tuple[int, int], + color: Tuple[int, int, int] = (0, 0, 0), + size: int = 3, + text: Optional[str] = None, +): + """ + Draw a box using the provided top left / bottom right points to compute the box. + + Parameters: + frame: numpy.ndarray + numpy array (H W C x uint8, BGR) + + box: numpy.ndarray | torch.Tensor + array (4), where layout is + [xc, yc, h, w] + + color: Tuple[int, int, int] + Color of drawn points and connection lines (RGB) + + size: int + Size of drawn points and connection lines BGR channel layout + + text: None | str + Overlay text at the top of the box. + + Returns: + None; modifies frame in place. + """ + if not isinstance(top_left, tuple): + top_left = (int(top_left[0].item()), int(top_left[1].item())) + if not isinstance(bottom_right, tuple): + bottom_right = (int(bottom_right[0].item()), int(bottom_right[1].item())) + cv2.rectangle(frame, top_left, bottom_right, color, size) + if text is not None: + cv2.putText( + frame, + text, + (top_left[0], top_left[1] - 10), + cv2.FONT_HERSHEY_SIMPLEX, + 0.5, + color, + size, + ) + + +def create_color_map(num_classes): + """ + Assign a random color to each class in the dataset to produce a segmentation mask for drawing. + + Inputs: + num_classes: Number of colors to produce. + + Returns: + A list of `num_classes` colors in RGB format. 
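+
+    Example (illustrative; `mask` is a hypothetical (H, W) array of class indices):
+        color_map = create_color_map(21)
+        colored_mask = color_map[mask]  # (H, W, 3) uint8 image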
+ """ + numpy.random.seed(42) # For reproducible results + color_map = numpy.random.randint(0, 256, size=(num_classes, 3), dtype=numpy.uint8) + color_map[0] = [0, 0, 0] # Background class, usually black + return color_map diff --git a/qai_hub_models/utils/huggingface.py b/qai_hub_models/utils/huggingface.py new file mode 100644 index 00000000..d278d95c --- /dev/null +++ b/qai_hub_models/utils/huggingface.py @@ -0,0 +1,47 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os +from pathlib import Path +from typing import List + +from huggingface_hub import HfFileSystem, hf_hub_download + +from qai_hub_models.utils.asset_loaders import ASSET_CONFIG, ModelZooAssetConfig +from qai_hub_models.utils.base_model import TargetRuntime + + +def fetch_huggingface_target_model( + model_name: str, + dst_folder: str | Path, + runtime_path: TargetRuntime = TargetRuntime.TFLITE, + config: ModelZooAssetConfig = ASSET_CONFIG, +) -> List[str]: + fs = HfFileSystem() + hf_path = config.get_huggingface_path(model_name) + + if runtime_path == TargetRuntime.TFLITE: + file_types = ["tflite"] + elif runtime_path == TargetRuntime.QNN: + file_types = ["so", "bin"] + else: + raise NotImplementedError() + + files = [] + for file_type in file_types: + files += fs.glob(os.path.join(hf_path, f"**/*.{file_type}")) + if not files: + raise FileNotFoundError( + f"No compiled assets are available on Huggingface for {model_name} with runtime {runtime_path.name}." + ) + + os.makedirs(dst_folder, exist_ok=True) + paths = [] + for file in files: + path = hf_hub_download(hf_path, file[len(hf_path) + 1 :], local_dir=dst_folder) + paths.append(path) + + return paths diff --git a/qai_hub_models/utils/image_processing.py b/qai_hub_models/utils/image_processing.py new file mode 100644 index 00000000..c1deebf4 --- /dev/null +++ b/qai_hub_models/utils/image_processing.py @@ -0,0 +1,344 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Callable, List, Tuple + +import cv2 +import numpy as np +import torch +import torchvision.transforms as transforms +from PIL.Image import Image +from PIL.Image import fromarray as ImageFromArray +from torch.nn.functional import interpolate, pad +from torchvision import transforms + + +def app_to_net_image_inputs( + pixel_values_or_image: torch.Tensor | np.ndarray | Image | List[Image], +) -> Tuple[List[np.ndarray], torch.Tensor]: + """ + Convert the provided images to application inputs. + ~~This does not change channel order. RGB stays RGB, BGR stays BGR, etc~~ + + Parameters: + pixel_values_or_image: torch.Tensor + PIL image + or + list of PIL images + or + numpy array (H W C x uint8) or (N H W C x uint8) -- both BGR or grayscale channel layout + or + pyTorch tensor (N C H W x fp32, value range is [0, 1]), BGR or grayscale channel layout + + dst_size: (height, width) + Size to which the image should be reshaped. + + Returns: + NHWC_int_numpy_frames: List[numpy.ndarray] + List of numpy arrays (one per input image with uint8 dtype, [H W C] shape, and BGR or grayscale layout. 
+ This output is typically used for use of drawing/displaying images with PIL and CV2 + + NCHW_fp32_torch_frames: torch.Tensor + Tensor of images in fp32 (range 0:1), with shape [Batch, Channels, Height, Width], and BGR or grayscale layout. + + Based on https://github.com/zmurez/MediaPipePyTorch/blob/master/blazebase.py + """ + NHWC_int_numpy_frames: List[np.ndarray] = [] + NCHW_fp32_torch_frames: torch.Tensor + if isinstance(pixel_values_or_image, Image): + pixel_values_or_image = [pixel_values_or_image] + if isinstance(pixel_values_or_image, list): + fp32_frames = [] + for image in pixel_values_or_image: + NHWC_int_numpy_frames.append(np.array(image.convert("RGB"))) + fp32_frames.append(preprocess_PIL_image(image)) + NCHW_fp32_torch_frames = torch.cat(fp32_frames) + elif isinstance(pixel_values_or_image, torch.Tensor): + NCHW_fp32_torch_frames = pixel_values_or_image + for b_img in pixel_values_or_image: + NHWC_int_numpy_frames.append((b_img.permute(1, 2, 0) * 255).byte().numpy()) + else: + assert isinstance(pixel_values_or_image, np.ndarray) + NHWC_int_numpy_frames = ( + [pixel_values_or_image] + if len(pixel_values_or_image.shape) == 3 + else [x for x in pixel_values_or_image] + ) + NCHW_fp32_torch_frames = numpy_image_to_torch(pixel_values_or_image) + + return NHWC_int_numpy_frames, NCHW_fp32_torch_frames + + +def preprocess_PIL_image(image: Image) -> torch.Tensor: + """Convert a PIL image into a pyTorch tensor with range [0, 1] and shape NCHW.""" + transform = transforms.Compose([transforms.PILToTensor()]) # bgr image + img: torch.Tensor = transform(image) # type: ignore + img = img.float().unsqueeze(0) / 255.0 # int 0 - 255 to float 0.0 - 1.0 + return img + + +def preprocess_PIL_image_mask(image_mask: Image) -> torch.Tensor: + """Convert a PIL mask image into a pyTorch tensor with values 0. or 1.""" + transform = transforms.Compose([transforms.PILToTensor()]) + mask = transform(image_mask.convert("L")) + mask = mask.unsqueeze(0).float() + mask = (mask > 1.0) * 1.0 + return mask + + +def numpy_image_to_torch(image: np.ndarray) -> torch.Tensor: + """Convert a Numpy image (dtype uint8, shape [H W C] or [N H W C]) into a pyTorch tensor with range [0, 1] and shape NCHW.""" + image_torch = torch.from_numpy(image) + if len(image.shape) == 3: + image_torch = image_torch.unsqueeze(0) + return image_torch.permute(0, 3, 1, 2).float() / 255.0 + + +def torch_tensor_to_PIL_image(data: torch.Tensor) -> Image: + """ + Convert a Torch tensor (dtype float32) with range [0, 1] and shape CHW into PIL image CHW + """ + out = torch.clip(data, min=0.0, max=1.0) + np_out = (out.permute(1, 2, 0).detach().numpy() * 255).astype(np.uint8) + return ImageFromArray(np_out) + + +def normalize_image_transform() -> Callable: + """ + Returns a torchvision transform that returns a torch tensor normalized according to some constants. + + There are many PyTorch models that expect input images normalized with + these specific constants, so this utility can be re-used across many models. + """ + return transforms.Compose( + [ + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ] + ) + + +def pad_to_square(frame: np.ndarray) -> np.ndarray: + """ + Pad an image or video frame to square dimensions with whitespace. + Assumes the input shape is of format (H, W, C). 
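+
+    Example (an illustrative sketch; the frame size is an assumption):
+        frame = np.zeros((480, 640, 3), dtype=np.uint8)
+        square = pad_to_square(frame)  # shape (640, 640, 3); padded rows are white (255)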
+ """ + h, w, _ = frame.shape + if h < w: + top_pad = (w - h) // 2 + pad_values = ((top_pad, w - h - top_pad), (0, 0), (0, 0)) + else: + top_pad = (h - w) // 2 + pad_values = ((0, 0), (top_pad, h - w - top_pad), (0, 0)) + return np.pad(frame, pad_values, constant_values=255) + + +def resize_pad(image: torch.Tensor, dst_size: Tuple[int, int]): + """ + Resize and pad image to be shape [..., dst_size[0], dst_size[1]] + + Parameters: + image: (..., H, W) + Image to reshape. + + dst_size: (height, width) + Size to which the image should be reshaped. + + Returns: + rescaled_padded_image: torch.Tensor (..., dst_size[0], dst_size[1]) + scale: scale factor between original image and dst_size image, (w, h) + pad: pixels of padding added to the rescaled image: (left_padding, top_padding) + + Based on https://github.com/zmurez/MediaPipePyTorch/blob/master/blazebase.py + """ + height, width = image.shape[-2:] + dst_frame_height, dst_frame_width = dst_size + + h_ratio = dst_frame_height / height + w_ratio = dst_frame_width / width + if width * h_ratio > dst_frame_height: + scale = w_ratio + else: + scale = h_ratio + + import math + + new_height = math.floor(height * scale) + new_width = math.floor(width * scale) + pad_h = dst_frame_height - new_height + pad_w = dst_frame_width - new_width + + pad_top = int(pad_h // 2) + pad_bottom = int(pad_h // 2 + pad_h % 2) + pad_left = int(pad_w // 2) + pad_right = int(pad_w // 2 + pad_w % 2) + + rescaled_image = interpolate( + image, size=[int(new_height), int(new_width)], mode="bilinear" + ) + rescaled_padded_image = pad( + rescaled_image, (pad_left, pad_right, pad_top, pad_bottom) + ) + padding = (pad_left, pad_top) + + return rescaled_padded_image, scale, padding + + +def undo_resize_pad( + image: torch.Tensor, orig_size_wh: Tuple[int, int], padding: Tuple[int, int] +): + """ + Undos the efffect of resize_pad. Instead of scale, the original size + (in order width, height) is provided to prevent an off-by-one size. + """ + width, height = orig_size_wh + cropped_image = image[ + ..., padding[1] : padding[1] + height, padding[0] : padding[0] + width + ] + + rescaled_image = interpolate(cropped_image, size=[height, width], mode="bilinear") + + return rescaled_image + + +def pil_resize_pad( + image: Image, dst_size: Tuple[int, int] +) -> Tuple[Image, float, Tuple[int, int]]: + torch_image = preprocess_PIL_image(image) + torch_out_image, scale, padding = resize_pad( + torch_image, + dst_size, + ) + pil_out_image = torch_tensor_to_PIL_image(torch_out_image[0]) + return (pil_out_image, scale, padding) + + +def pil_undo_resize_pad( + image: Image, orig_size_wh: Tuple[int, int], padding: Tuple[int, int] +) -> Image: + torch_image = preprocess_PIL_image(image) + torch_out_image = undo_resize_pad(torch_image, orig_size_wh, padding) + pil_out_image = torch_tensor_to_PIL_image(torch_out_image[0]) + return pil_out_image + + +def denormalize_coordinates( + coordinates: torch.Tensor, + input_img_size: Tuple[int, int], + scale: float = 1.0, + pad: Tuple[int, int] = (0, 0), +) -> None: + """ + Maps detection coordinates from [0,1] to coordinates in the original image. + + This function can be exported and run inside inference frameworks if desired. + + Note: If included in the model, this code is likely to be unfriendly to quantization. + This is because of the high range and variability of the output tensor. + + For best quantization accuracy, this code should be run separately from the model, + or the model should de-quantize activations before running these layers. 
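+
+    As a sketch, the per-axis mapping applied here is (names follow the arguments below):
+
+        original = (normalized * network_input_size - padding) / scale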
+ + Inputs: + coordinates: [..., 2] tensor + coordinates. Range must be [0, 1] + + input_img_size: Tuple(int, int) + The size of the tensor that was fed to the NETWORK (NOT the original image size). + H / W is the same order as coordinates. + + scale: float + Scale factor that to resize the image to be fed to the network. + + pad: Tuple(int, int) + Padding used during resizing of input image to network input tensor. + This is the absolute # of padding pixels in the network input tensor, NOT in the original image. + H / W is in the same order as coordinates. + + Outputs: + coordinates: [..., m] tensor, where m is always (y0, x0) + The absolute coordinates of the box in the original image. + The "coordinates" input is modified in place. + """ + img_0, img_1 = input_img_size + pad_0, pad_1 = pad + + coordinates[..., 0] = ((coordinates[..., 0] * img_0 - pad_0) / scale).int() + coordinates[..., 1] = ((coordinates[..., 1] * img_1 - pad_1) / scale).int() + + +def apply_batched_affines_to_frame( + frame: np.ndarray, affines: List[np.ndarray], output_image_size: Tuple[int, int] +) -> np.ndarray: + """ + Generate one image per affine applied to the given frame. + I/O is numpy since this uses cv2 APIs under the hood. + + Inputs: + frame: np.ndarray + Frame on which to apply the affine. Shape is [ H W C ], dtype must be np.byte. + affines: List[np.ndarray] + List of 2x3 affine matrices to apply to the frame. + output_image_size: torch.Tensor + Size of each output frame. + + Outputs: + images: np.ndarray + Computed images. Shape is [B H W C] + """ + assert ( + frame.dtype == np.byte or frame.dtype == np.uint8 + ) # cv2 does not work correctly otherwise. Don't remove this assertion. + imgs = [] + for affine in affines: + img = cv2.warpAffine(frame, affine, output_image_size) + imgs.append(img) + return np.stack(imgs) + + +def apply_affine_to_coordinates( + coordinates: torch.Tensor, affine: torch.Tensor +) -> torch.Tensor: + """ + Apply the given affine matrix to the given coordinates. + + Inputs: + coordinates: torch.Tensor + Coordinates on which to apply the affine. Shape is [ ..., 2 ], where 2 == [X, Y] + affines: torch.Tensor + Affine matrix to apply to the coordinates. + + Outputs: + Transformed coordinates. Shape is [ ..., 2 ], where 2 == [X, Y] + """ + return (affine[:, :2] @ coordinates.T + affine[:, 2:]).T + + +def compute_vector_rotation( + vec_start: torch.Tensor, + vec_end: torch.Tensor, + offset_rads: float | torch.Tensor = 0, +) -> torch.Tensor: + """ + From the given vector, compute the rotation of the vector with added offset. + + Inputs: + vec_start: torch.Tensor + Starting point of the vector. Shape is [B, 2], where 2 == (x, y) + vec_end: torch.Tensor + Ending point of the vector. Shape is [B, 2], where 2 == (x, y) + offset_rads: float | torch.Tensor + Offset to subtract from the rotation calculation. + Can be size [1] or [ Batch ] + + Outputs: + theta: computed rotation angle in radians. Shape is [Batch] + """ + return ( + torch.atan2( + vec_start[..., 1] - vec_end[..., 1], vec_start[..., 0] - vec_end[..., 0] + ) + - offset_rads + ) diff --git a/qai_hub_models/utils/inference.py b/qai_hub_models/utils/inference.py new file mode 100644 index 00000000..ae5d9f6f --- /dev/null +++ b/qai_hub_models/utils/inference.py @@ -0,0 +1,289 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os +import tempfile +from typing import List, Tuple + +import numpy as np +import qai_hub as hub +import torch +from qai_hub.public_rest_api import DatasetEntries + +from qai_hub_models.utils.base_model import BaseModel, SourceModelFormat, TargetRuntime +from qai_hub_models.utils.input_spec import InputSpec +from qai_hub_models.utils.qai_hub_helpers import ( + transpose_channel_first_to_last, + transpose_channel_last_to_first, +) +from qai_hub_models.utils.qnn_helpers import is_qnn_hub_model + +try: + from qai_hub_models.utils.quantization_aimet import AIMETQuantizableMixin +except NotImplementedError: + AIMETQuantizableMixin = None # type: ignore + + +def prepare_compile_zoo_model_to_hub( + model: BaseModel, + source_model_format: SourceModelFormat, + target_runtime: TargetRuntime, + output_path: str = "", + input_spec: InputSpec | None = None, + check_trace: bool = True, + prepare_compile_options_only: bool = False, +) -> Tuple[str | None, str]: + """ + Args: + + - (source_model_format, target_runtime): One of the followings + + (1) (ONNX, QNN) + + (a) For fp32 model, torch -> onnx -> qnn. + + (b) For AIMET, torch -> onnx + aimet encodings -> qnn + + (2) (ONNX, TFLITE) + + (a) For fp32, torch (fp32) -> onnx -> tflite, + + (b) For quantized, torch(AIMET) -> onnx + aimet .encodings -> tflite + (via qnn-onnx-converter). + + (3) (TORCHSCRIPT, TFLITE) + + (a) Fp32: Invalid option for model not subclass of AIMETQuantizableMixin + + (b) For AIMETQuantizableMixin subclass, torch(AIMET) -> + torchscript with embedded quantizer -> tflite + + (4) (TORCHSCRIPT, QNN) + + (a) For fp32, torch -> qnn (via qnn-torch-converter, aka + --use_qnn_pytorch_converter flag in Hub) + + (b) For AIMETQuantizableMixin subclass, torch(AIMET) -> + torchscript with embedded quantizer -> qnn (via + qnn-pytorch-converter) + + Returns: + + Path to source model that can be used directly with hub.upload_model or + hub.submit_compile_job. 
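+
+    Example (an illustrative sketch; `MyModel` is a hypothetical BaseModel subclass
+    and the output path is arbitrary):
+
+        model = MyModel.from_pretrained()
+        source_model, options = prepare_compile_zoo_model_to_hub(
+            model,
+            source_model_format=SourceModelFormat.TORCHSCRIPT,
+            target_runtime=TargetRuntime.TFLITE,
+            output_path="build/my_model",
+        )
+        compile_job = hub.submit_compile_job(
+            model=source_model,
+            input_specs=model.get_input_spec(),
+            device=hub.Device("Samsung Galaxy S23"),
+            options=options,
+        )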
+ """ + is_aimet = AIMETQuantizableMixin is not None and isinstance( + model, AIMETQuantizableMixin + ) + + model_name = model.__class__.__name__ + + compilation_options = model.get_hub_compile_options(target_runtime) + + if is_aimet: + if source_model_format == SourceModelFormat.ONNX: + + def export_model_func(): + return model.convert_to_onnx_and_aimet_encodings( + output_path, model_name=model_name + ) + + elif ( + source_model_format == SourceModelFormat.TORCHSCRIPT + and target_runtime == TargetRuntime.TFLITE + ): + + def export_model_func(): + traced_model = model.convert_to_quantized_torchscript( + input_spec=input_spec, check_trace=check_trace + ) + model_path = os.path.join(output_path, model_name + ".pt") + os.makedirs(output_path, exist_ok=True) + torch.jit.save(traced_model, model_path) + return model_path + + else: # Torchscript and QNN + + def export_model_func(): + exported_model = model.convert_to_torchscript_and_aimet_encodings( # type: ignore + output_path, + model_name=model_name, + input_spec=input_spec, + ) + return exported_model + + else: # fp32 + + def export_model_func(): + traced_model = model.convert_to_torchscript( + input_spec=input_spec, check_trace=check_trace + ) + model_path = os.path.join(output_path, model_name + ".pt") + os.makedirs(output_path, exist_ok=True) + torch.jit.save(traced_model, model_path) + return model_path + + if ( + target_runtime == TargetRuntime.TFLITE + and source_model_format == SourceModelFormat.ONNX + ): + pass # default is good + + if prepare_compile_options_only: + return None, compilation_options + else: + return export_model_func(), compilation_options + + +def compile_zoo_model_to_hub( + model: BaseModel, + device: hub.Device, + source_model_format: SourceModelFormat, + target_runtime: TargetRuntime, + calibration_data: DatasetEntries | None = None, + input_spec: InputSpec | None = None, + inference_options: str = "", + check_trace: bool = True, +) -> HubModel: + """ + Similar to `prepare_compile_zoo_model_to_hub`, but also performs the + compilation on AI Hub and construct a HubModel object. + """ + + if input_spec is None: + input_spec = model.get_input_spec() + + model_name = model.__class__.__name__ + + with tempfile.TemporaryDirectory() as tmp_dir: + assert tmp_dir is not None + source_model, compilation_options = prepare_compile_zoo_model_to_hub( + model=model, + source_model_format=source_model_format, + target_runtime=target_runtime, + output_path=tmp_dir, + check_trace=check_trace, + ) + + compile_job = hub.submit_compile_job( + model=source_model, + input_specs=input_spec, + device=device, + name=f"{model_name}_{source_model_format.name}_{target_runtime.name}", + options=compilation_options, + calibration_data=calibration_data, + ) + assert isinstance(compile_job, hub.CompileJob) + if not compile_job.wait().success: + job_msg = compile_job.get_status().message or "(no job failure message)" + raise ValueError(f"Compile job {compile_job} failed: {job_msg}") + + hub_model = compile_job.get_target_model() + assert hub_model is not None + input_names = list(model.get_input_spec().keys()) + return HubModel( + hub_model, + input_names, + device, + inference_options=inference_options, + ) + + +class HubModel: + """ + Class that behaves like a pytorch model except when called, it runs an + inference job on hub and returns a torch output. + + Intended to be passed as in input to app.py to run an app on-device. + + Parameters: + input_names: List of input names to the model. + device: Device on which to execute inference. 
+ hub_model_id: ID of Model stored in hub that will be used to run inference. + model: If hub_model_id is absent, this model is compiled and used for inference. + + Returns: + Callable that mimics the I/O of a torch model and evaluates inference on device. + """ + + def __init__( + self, + model: hub.Model, + input_names: List[str], + device: hub.Device, + inference_options: str = "", + ): + self.model = model + self.input_names = input_names + self.device = device + self.inference_options = inference_options + + def __call__( + self, + *input_tensors: torch.Tensor + | List[torch.Tensor] + | hub.Dataset + | DatasetEntries, + ) -> torch.Tensor | Tuple[torch.Tensor, ...]: + inputs: hub.Dataset | DatasetEntries + if len(input_tensors) == 1 and isinstance(input_tensors[0], hub.Dataset): + inputs = input_tensors[0] + else: + # Upload dataset + inputs = {} + for name, tensor in zip(self.input_names, input_tensors): + if isinstance(tensor, (list, tuple)): + inputs[name] = [t.detach().numpy() for t in tensor] # type: ignore + else: + inputs[name] = [tensor.detach().numpy()] # type: ignore + target_runtime = ( + TargetRuntime.QNN if is_qnn_hub_model(self.model) else TargetRuntime.TFLITE + ) + + channel_last_input, channel_last_output = "", "" + if self.model.producer is not None: + model_options = self.model.producer.options.strip().split() + for option_num in range(len(model_options)): + if model_options[option_num] == "--force_channel_last_input": + channel_last_input = model_options[option_num + 1] + if model_options[option_num] == "--force_channel_last_output": + channel_last_output = model_options[option_num + 1] + if channel_last_input != "": + inputs = transpose_channel_first_to_last( + channel_last_input, inputs, target_runtime + ) + + inference_job = hub.submit_inference_job( + model=self.model, + inputs=inputs, + device=self.device, + name=f"{self.model.name}_demo_inference", + options=self.inference_options, + ) + assert isinstance(inference_job, hub.InferenceJob) + if not inference_job.wait().success: + job_msg = inference_job.get_status().message or "(no job failure message)" + raise ValueError(f"Inference job {inference_job} failed: {job_msg}") + + output_ds_handle = inference_job.get_output_dataset() + assert output_ds_handle is not None + output_dataset = output_ds_handle.download() + + if channel_last_output != "": + output_dataset = transpose_channel_last_to_first( + channel_last_output, + output_dataset, # type: ignore + target_runtime, + ) # type: ignore + + output_torch = [ + torch.from_numpy(np.concatenate(outputs, axis=0)) + for outputs in output_dataset.values() # type: ignore + ] + + if len(output_torch) == 1: + return output_torch[0] + return tuple(output_torch) diff --git a/qai_hub_models/utils/input_spec.py b/qai_hub_models/utils/input_spec.py new file mode 100644 index 00000000..0944724f --- /dev/null +++ b/qai_hub_models/utils/input_spec.py @@ -0,0 +1,36 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from typing import Dict, List, Optional, Tuple + +import torch + +# PyTorch trace doesn't capture the input specs. Hence we need an additional +# InputSpec (name -> (shape, type)) when submitting profiling job to Qualcomm AI Hub. 
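+# For example (illustrative shapes): {"image": ((1, 3, 224, 224), "float32")}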
+# This is a subtype of qai_hub.InputSpecs +InputSpec = Dict[str, Tuple[Tuple[int, ...], str]] + + +def str_to_torch_dtype(s): + return dict( + int32=torch.int32, + float32=torch.float32, + )[s] + + +def make_torch_inputs(spec: InputSpec, seed: Optional[int] = 42) -> List[torch.Tensor]: + """Make sample torch inputs from input spec""" + torch_input = [] + generator = None + if seed is not None: + generator = torch.Generator() + generator.manual_seed(seed) + for sp in spec.values(): + torch_dtype = str_to_torch_dtype(sp[1]) + if sp[1] in {"int32"}: + t = torch.randint(10, sp[0], generator=generator).to(torch_dtype) + else: + t = torch.rand(sp[0], generator=generator).to(torch_dtype) + torch_input.append(t) + return torch_input diff --git a/qai_hub_models/utils/measurement.py b/qai_hub_models/utils/measurement.py new file mode 100644 index 00000000..cf51c776 --- /dev/null +++ b/qai_hub_models/utils/measurement.py @@ -0,0 +1,135 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os +import tempfile +from pathlib import Path +from typing import List, Union + +import numpy as np +import qai_hub as hub + + +def display_with_sig_figs(num: float, num_sig_figs: int = 3) -> str: + """ + Displays the given number as a string with the appropriate number of + significant figures. Example: + display_with_sig_figs(1234.2, num_sig_figs=3) -> "1230" + Parameters: + num: Number to display. + num_sig_figs: How many sig figs to use. + """ + rounded_num = float(f"{num:.{num_sig_figs}g}") + num_digits = len(str(int(rounded_num))) + + # Only display as many numbers after the decimal point to fit number of sig figs + return f"{rounded_num:.{max(0, num_sig_figs - num_digits)}f}" + + +def get_formatted_size(size: float, units: List[str], unit_step_size: float) -> str: + """ + Formats the number according to the units provided. For example: + format_size(3600, units=["B", "KB", ...], unit_step_size=1024.0) + would return "3.6KB" + Parameters: + num: Raw count of size. + units: A list of increasing unit sizes (e.g. ["B", "KB", ...]) + unit_step_size: The ratio in size between successive units. + """ + + unit_index = 0 + + while size >= unit_step_size and unit_index < len(units) - 1: + size /= unit_step_size + unit_index += 1 + + return f"{display_with_sig_figs(size)}{units[unit_index]}" + + +def get_checkpoint_file_size(model_path: str, as_str: bool = True) -> Union[str, int]: + """ + Computes how much memory the model checkpoint consumes. + Parameters: + model_path: Path to the model checkpoint file. + as_str: Whether to return the result as an int or a string formatted to 2 sig figs. + """ + num_bytes = os.path.getsize(model_path) + + if not (as_str): + return num_bytes + + return get_formatted_size(num_bytes, [" B", " KB", " MB", " GB", " TB"], 1024.0) + + +def get_tflite_unique_parameters( + model_path: str, as_str: bool = True +) -> Union[str, int]: + """ + TFLite parameters are defined at two levels: Tensors and Buffers + + Only tensors can tell us how many parameters, but we do not want to over-count + tensors that point to the same buffers. So, we keep track of all buffers + we have counted through tensors. 
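+
+    Example (an illustrative sketch; the path and the reported values are assumptions):
+        get_tflite_unique_parameters("build/model.tflite")                # e.g. "3.49M"
+        get_tflite_unique_parameters("build/model.tflite", as_str=False)  # e.g. 3487816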
+ """ + from tensorflow.lite.python import schema_py_generated as schema_fb + + with open(model_path, "rb") as f: + tflite_model = f.read() + model_obj = schema_fb.Model.GetRootAsModel(tflite_model, 0) + model = schema_fb.ModelT.InitFromObj(model_obj) + + parameter_cnt = 0 + buffers_counted = set() + for graph in model.subgraphs: + for tensor in graph.tensors: + buf_index = tensor.buffer + + buffer = model.buffers[buf_index] + if buffer.data is not None: + if buf_index not in buffers_counted: + parameter_cnt += np.prod(tensor.shape) + buffers_counted.add(buf_index) + + if not as_str: + return parameter_cnt + + return get_formatted_size(parameter_cnt, ["", "K", "M", "B", "T"], 1000.0) + + +def get_model_size_mb(hub_model: hub.Model) -> float: + """Return target model size in MB. This is a special case for ease of + testing""" + assert hub_model is not None + with tempfile.TemporaryDirectory() as tmp_dir: + download_path = Path(tmp_dir) / "model" + # Download the model into the temporary directory + hub_model.download(download_path) # type: ignore + size_mb = get_disk_size(download_path, unit="MB") + return size_mb + + +def get_disk_size(path: str | Path, unit: str = "byte") -> float: + """ + Returns file or directory size in `unit` + + Args: + - unit: One of ["byte", "MB"] + """ + if os.path.isdir(path): + # Traverse the directory and add up the file sizes. + total_size = 0 + for dirpath, dirnames, filenames in os.walk(path): + for f in filenames: + fp = os.path.join(dirpath, f) + # skip if it is symbolic link + if not os.path.islink(fp): + total_size += os.path.getsize(fp) + else: + total_size = os.path.getsize(path) + + if unit == "MB": + total_size /= 2**20 # type: ignore + return total_size diff --git a/qai_hub_models/utils/model_adapters.py b/qai_hub_models/utils/model_adapters.py new file mode 100644 index 00000000..73cbb30d --- /dev/null +++ b/qai_hub_models/utils/model_adapters.py @@ -0,0 +1,42 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from __future__ import annotations
+
+from typing import Tuple
+
+import numpy as np
+import torch
+
+
+def flatten(obj):
+    """Flatten a nested list or tuple into a flat list."""
+    tgt_type = (list, tuple)  # targeted types
+    flattened_list = []
+    for item in obj:
+        if isinstance(item, tgt_type):
+            flattened_list.extend(flatten(item))  # recurse into nested containers
+        else:
+            flattened_list.append(item)
+    return flattened_list
+
+
+class TorchNumpyAdapter:
+    def __init__(self, base_model: torch.jit.ScriptModule | torch.nn.Module):
+        """
+        Wraps torch models to use numpy inputs / outputs.
+        """
+        assert isinstance(base_model, (torch.jit.ScriptModule, torch.nn.Module))
+        self.base_model = base_model
+
+    def __call__(self, *args) -> Tuple[np.ndarray, ...]:
+        input_data = tuple(torch.from_numpy(t) for t in args)
+        res = self.base_model(*input_data)
+        if isinstance(res, torch.Tensor):
+            output = res.detach().numpy()
+        else:
+            output = tuple(t.detach().numpy() for t in flatten(res))
+        if isinstance(output, tuple) and len(output) == 1:
+            return output[0]
+        return output
diff --git a/qai_hub_models/utils/model_card.py b/qai_hub_models/utils/model_card.py
new file mode 100644
index 00000000..91682146
--- /dev/null
+++ b/qai_hub_models/utils/model_card.py
@@ -0,0 +1,306 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+import datetime
+from dataclasses import dataclass
+from enum import Enum
+from typing import Any, Dict, List, Optional, Union
+
+import qai_hub as hub
+
+
+def chipset_marketting_name(chipset) -> str:
+    """Sanitize a chipset name to match its marketing name."""
+    chip = [word.capitalize() for word in chipset.split("-")]
+    details_to_remove = []
+    for i in range(len(chip)):
+        if chip[i] == "8gen3":
+            chip[i] = "8 Gen 3"
+        elif chip[i] == "8gen2":
+            chip[i] = "8 Gen 2"
+        elif chip[i] == "8gen1":
+            chip[i] = "8 Gen 1"
+        elif chip[i] == "Snapdragon":
+            # Marketing name for Qualcomm Snapdragon is Snapdragon®
+            chip[i] = "Snapdragon®"
+        elif chip[i] == "Qualcomm":
+            details_to_remove.append(chip[i])
+
+    for detail in details_to_remove:
+        chip.remove(detail)
+    return " ".join(chip)
+
+
+class MODEL_CARD_RUNTIMES(Enum):
+    """Runtime to be stored in the model card."""
+
+    TORCHSCRIPT_ONNX_TFLITE = 100
+    TORCHSCRIPT_ONNX_QNN = 101
+
+    @staticmethod
+    def from_string(string: str) -> "MODEL_CARD_RUNTIMES":
+        return MODEL_CARD_RUNTIMES["TORCHSCRIPT_ONNX_" + string.upper()]
+
+
+@dataclass
+class ModelRun:
+    model_id: str
+    profile_job_id: str
+    runtime: MODEL_CARD_RUNTIMES
+
+    def chipset(self) -> Optional[str]:
+        """Chipset the job was run on."""
+        if self.profile_job is not None:
+            hub_device = self.profile_job.device
+            for attr in hub_device.attributes:
+                if attr.startswith("chipset:qualcomm"):
+                    return attr.split(":")[1]
+        return ""
+
+    @property
+    def profile_job(self):
+        """Get the hub.ProfileJob object."""
+        if len(self.profile_job_id) > 0:
+            return hub.get_job(self.profile_job_id)
+        return None
+
+    def job_status(self) -> str:
+        """Get the job status of the profile job."""
+        if self.profile_job is not None:
+            if self.profile_job.get_status().success:
+                return "Passed"
+            elif self.profile_job.get_status().failure:
+                return "Failed"
+        return "Skipped"
+
+    @property
+    def quantized(self) -> str:
+        """Quantized models are marked
so precision can be correctly recorded.""" + return "Yes" if self.model_id.endswith("_quantized") else "No" + + @property + def profile_results(self): + """Profile results from profile job.""" + if self.job_status() == "Passed": + return self.profile_job.download_profile() + return None + + def get_inference_time(self) -> Union[float, str]: + """Get the inference time from the profile job.""" + if self.profile_results is not None: + return float( + self.profile_results["execution_summary"]["estimated_inference_time"] + ) + return "null" + + def get_throughput(self) -> Union[float, str]: + """Get the throughput from the profile job.""" + if not isinstance(self.get_inference_time(), str): + return 1000000 / self.get_inference_time() # type: ignore + return "null" + + def get_layer_info(self, unit: str) -> int: + """Count layers per compute unit.""" + if self.profile_results is not None: + count: int = 0 + count = sum( + 1 + for detail in self.profile_results["execution_detail"] + if detail["compute_unit"] == unit + ) + return count + return 0 + + def npu(self) -> Any: + """Get number of layers running on NPU.""" + return self.get_layer_info("NPU") if self.profile_results is not None else 0 + + def gpu(self) -> Any: + """Get number of layers running on GPU.""" + return self.get_layer_info("GPU") if self.profile_results is not None else 0 + + def cpu(self) -> Any: + """Get number of layers running on CPU.""" + return self.get_layer_info("CPU") if self.profile_results is not None else 0 + + def total(self) -> Any: + """Get the total number of layers.""" + return self.npu() + self.gpu() + self.cpu() + + def primary_compute_unit(self) -> str: + """Get the primary compute unit.""" + layers_npu = self.npu() + layers_gpu = self.gpu() + layers_cpu = self.cpu() + + if layers_npu == 0 and layers_gpu == 0 and layers_cpu == 0: + return "null" + compute_unit_for_most_layers = max(layers_cpu, layers_gpu, layers_npu) + if compute_unit_for_most_layers == layers_npu: + return "NPU" + elif compute_unit_for_most_layers == layers_gpu: + return "GPU" + return "CPU" + + def get_peak_memory_range(self) -> Dict[str, int]: + """Get the estimated peak memory range.""" + if self.profile_results is not None: + low, high = self.profile_results["execution_summary"][ + "inference_memory_peak_range" + ] + return dict(min=low, max=high) + return dict(min=0, max=0) + + def precision(self) -> str: + """Get the precision of the model based on the run.""" + if self.profile_results is not None: + compute_unit = self.primary_compute_unit() + if compute_unit == "CPU": + return "fp32" + if self.quantized == "Yes": + return "int8" + return "fp16" + return "null" + + +@dataclass +class ModelPerf: + model_runs: List[ModelRun] + + def supported_chipsets(self, chips) -> List[str]: + """Return all the supported chipsets given the chipset it works on.""" + supported_chips = chips + for chip in chips: + if chip == "qualcomm-snapdragon-8gen2": + supported_chips.extend( + ["qualcomm-snapdragon-8gen1", "qualcomm-snapdragon-888"] + ) + if chip == "qualcomm-snapdragon-855": + supported_chips.extend( + ["qualcomm-snapdragon-845", "qualcomm-snapdragon-865"] + ) + return supported_chips + + def supported_chipsets_santized(self, chips) -> List[str]: + """Santize the chip name passed via hub.""" + chips = [chip for chip in chips if chip != ""] + return sorted( + list( + set( + [ + chipset_marketting_name(chip) + for chip in self.supported_chipsets(chips) + ] + ) + ) + ) + + def supported_devices(self, chips) -> List[str]: + """Return all the 
supported devicesgiven the chipset being used.""" + supported_devices = [] + for chip in self.supported_chipsets(chips): + supported_devices.extend( + [ + device.name + for device in hub.get_devices(attributes=f"chipset:{chip}") + ] + ) + supported_devices.extend( + [ + "Google Pixel 3", + "Google Pixel 3a", + "Google Pixel 4", + "Google Pixel 3a XL", + "Google Pixel 4a", + "Google Pixel 5a 5G", + ] + ) + return sorted(list(set(supported_devices))) + + def supported_oses(self) -> List[str]: + """Return all the supported operating systems.""" + return ["Android"] + + def reference_device_info(self) -> Dict[str, str]: + """Return a reference ID.""" + chipset = "qualcomm-snapdragon-8gen2" + hub_device = hub.get_devices("Samsung Galaxy S23 Ultra")[0] + device_name = hub_device.name + os_version = hub_device.os + os_name, form_factor, manufacturer = "", "", "" + for attr in hub_device.attributes: + if attr.startswith("vendor"): + manufacturer = attr.split(":")[-1] + if attr.startswith("format"): + form_factor = attr.split(":")[-1] + if attr.startswith("os"): + os_name = attr.split(":")[-1].capitalize() + chipset = chipset_marketting_name(chipset) + device_info = dict( + name=device_name, + os=os_version, + form_factor=form_factor.capitalize(), + os_name=os_name, + manufacturer=manufacturer.capitalize(), + chipset=chipset, + ) + return device_info + + def performance_metrics(self): + """Performance metrics as per model card.""" + perf_card = dict() + + # Figure out unique models in various baselines + unique_model_ids = [] + chips = [] + for run in self.model_runs: + if run.model_id not in unique_model_ids: + unique_model_ids.append(run.model_id) + if run.chipset not in chips: + chips.append(run.chipset()) + + perf_card["aggregated"] = dict( + supported_oses=self.supported_oses(), + supported_devices=self.supported_devices(chips), + supported_chipsets=self.supported_chipsets_santized(chips), + ) + + perf_per_model = [] + + for mid in unique_model_ids: + perf_per_device = [] + # Calculate per data per runtime + perf_per_runtime = dict() + for run in self.model_runs: + if run.model_id == mid: + runtime_name = run.runtime.name.lower() + perf_per_runtime[runtime_name] = dict( + inference_time=run.get_inference_time(), + throughput=run.get_throughput(), + estimated_peak_memory_range=run.get_peak_memory_range(), + primary_compute_unit=run.primary_compute_unit(), + precision=run.precision(), + layer_info=dict( + layers_on_npu=run.npu(), + layers_on_gpu=run.gpu(), + layers_on_cpu=run.cpu(), + total_layers=run.total(), + ), + job_id=run.profile_job_id, + job_status=run.job_status(), + ) + + # Per model, the device used and timestamp for model card + perf_per_runtime["reference_device_info"] = self.reference_device_info() + perf_per_runtime["timestamp"] = datetime.datetime.utcnow().isoformat() + "Z" + + perf_per_device.append(perf_per_runtime) + + perf_model = dict(name=mid, performance_metrics=perf_per_device) + perf_model["name"] = mid + perf_per_model.append(perf_model) + + # Perf card with multiple models + perf_card["models"] = perf_per_model + return perf_card diff --git a/qai_hub_models/utils/path_helpers.py b/qai_hub_models/utils/path_helpers.py new file mode 100644 index 00000000..e1157e1b --- /dev/null +++ b/qai_hub_models/utils/path_helpers.py @@ -0,0 +1,32 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from pathlib import Path
+from typing import Optional
+
+MODELS_PACKAGE_NAME = "models"
+QAIHM_PACKAGE_NAME = "qai_hub_models"
+
+
+def get_all_models():
+    zoo_root = get_qaihm_models_root()
+    all_models = []
+    for subdir in zoo_root.iterdir():
+        if not subdir.is_dir():
+            continue
+        # Heuristic to see if this is a model we should generate export.py for.
+        if (subdir / "model.py").exists() and (subdir / "test.py").exists():
+            all_models.append(subdir.name)
+    return all_models
+
+
+def get_qaihm_package_root() -> Path:
+    """Get local path to qaihm package root."""
+    return Path(__file__).parent.parent
+
+
+def get_qaihm_models_root(package_root: Optional[Path] = None) -> Path:
+    if package_root is None:
+        package_root = get_qaihm_package_root()
+    return package_root / MODELS_PACKAGE_NAME
diff --git a/qai_hub_models/utils/perf_summary.py b/qai_hub_models/utils/perf_summary.py
new file mode 100644
index 00000000..38202f39
--- /dev/null
+++ b/qai_hub_models/utils/perf_summary.py
@@ -0,0 +1,257 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+import datetime
+import os
+from typing import Dict, List, Tuple
+
+from prettytable import PrettyTable
+
+RUNTIMES_TO_COMPARE = ["torchscript_onnx_qnn", "torchscript_onnx_tflite"]
+
+
+class PerformanceSummary:
+    """
+    Generates a summary comparing two 'performance_metrics' sections from perf.yaml.
+
+    The summary is bucketed by 'perf_buckets' to report differences in decreasing order:
+        - "INF" -> Inference failures toggled (a model started passing or failing).
+        - 10 -> Speedup difference >= 10, and so on ...
+
+    Why use speedup difference?
+        - Speedup is relative to a baseline measured with similar constraints and changes.
+        - Speedup difference gives a general sense of how Tetra performance diverged w.r.t. the baseline at that point.
+
+    What to capture in the summary (Summary of Interest):
+        1. Inferences that started to fail or work (Speedup = "INF")
+        2. Speedup difference >= 0.1 (check models closely from higher buckets)
+        3. Missing devices (new runs missing data for certain devices)
+        4. New models (models with new perf.yamls)
+        5.
Empty perf reports (models with no passing jobs) + """ + + def __init__(self): + # List of new reports being added + self.new_perf_report: List[Tuple[str]] = [] + + # Device present in previous run, but missing in new + self.missing_devices: List = [] + + # Perf report with no passing job + self.empty_perf_report: List[Tuple[str]] = [] + + # Perf buckets to track + self.perf_buckets = ["inf", 10, 5, 2, 1.5, 1.3, 1.2, 1.1, 1.05, 1.03] + + # Only track PerfSummary for Android + self.tracked_oses: List = ["Android"] + + # Map of perf_bucket -> List of tuple of progression summary entry + self.progressions: Dict = {} + + # Map of perf_bucket -> List of tuple of regression summary entry + self.regressions: Dict = {} + + for each in self.perf_buckets: + self.progressions[each] = [] + self.regressions[each] = [] + + def add_missing_model(self, model_id: str): + self.new_perf_report.append((model_id,)) + + def _format_speedup(self, num): + if isinstance(num, str): + return num + return float(format(num, ".5f")) + + def update_summary(self, model_id: str, previous_report, new_report): + prev_perf_metrics = {} + new_perf_metrics = {} + + # Create chipset to perf metric + for i in range(len(previous_report["models"])): + for j in range(len(new_report["models"])): + if ( + previous_report["models"][i]["name"] + == new_report["models"][j]["name"] + ): + for prev_metric in previous_report["models"][i][ + "performance_metrics" + ]: + if "chipset" in prev_metric["reference_device_info"]: + ref_device = prev_metric["reference_device_info"]["chipset"] + prev_perf_metrics[ref_device] = prev_metric + + for new_metric in new_report["models"][j]["performance_metrics"]: + if "chipset" in new_metric["reference_device_info"]: + ref_device = new_metric["reference_device_info"]["chipset"] + new_perf_metrics[ref_device] = new_metric + + if len(prev_perf_metrics) == 0 or len(new_perf_metrics) == 0: + self.empty_perf_report.append((model_id,)) + + for device in prev_perf_metrics.keys(): + device_info = prev_perf_metrics[device]["reference_device_info"] + if device_info["os_name"] not in self.tracked_oses: + continue + + # Case 3: Chipset is missing in new data + if device not in new_perf_metrics: + self.missing_devices.append((model_id, device)) + continue + + for runtime_type in RUNTIMES_TO_COMPARE: + prev_inference_time = prev_perf_metrics[device][runtime_type][ + "inference_time" + ] + new_inference_time = new_perf_metrics[device][runtime_type][ + "inference_time" + ] + if new_inference_time == prev_inference_time: + continue + + if new_inference_time == "null" or prev_inference_time == "null": + # Case 1: Model either failed to infer or had a successful run + summary_entry = ( + model_id, + runtime_type, + "inf", + self._format_speedup(new_inference_time), + self._format_speedup(prev_inference_time), + device_info["chipset"], + device_info["os"], + ) + + if new_inference_time == "null": + self.regressions["inf"].append(summary_entry) + else: + self.progressions["inf"].append(summary_entry) + continue + + # Case 2: Bucketize speedup difference + progression_speedup = float(prev_inference_time) / float( + new_inference_time + ) + regression_speedup = float(new_inference_time) / float( + prev_inference_time + ) + is_progression = progression_speedup >= 1 + speedup = progression_speedup if is_progression else regression_speedup + + for bucket in self.perf_buckets[1:]: + if bucket <= speedup: + summary = ( + model_id, + runtime_type, + self._format_speedup(speedup), + self._format_speedup(new_inference_time), + 
self._format_speedup(prev_inference_time), + device_info["chipset"], + device_info["os"], + ) + if is_progression: + self.progressions[bucket].append(summary) + else: + self.regressions[bucket].append(summary) + break + + def _get_summary_table(self, bucket_id, get_progressions=True): + """ + Returns Summary Table for given bucket + Args: + bucket_id : bucket_id from perf_buckets + """ + table = PrettyTable( + [ + "Model ID", + "Runtime", + "Kx faster" if get_progressions else "Kx slower", + "New Inference time", + "Prev Inference time", + "Chipset", + "OS", + ] + ) + data = self.progressions if get_progressions else self.regressions + rows = data[bucket_id] + rows.sort(key=lambda k: k[2]) + table.add_rows(rows) + return table + + def _has_perf_changes(self): + """Returns True if there are perf changes""" + for _, val in self.progressions.items(): + if len(val) > 0: + return True + for _, val in self.regressions.items(): + if len(val) > 0: + return True + return False + + def print_summary(self): + """ + Prints Perf change summary captured so far. + """ + + file_unique_name = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") + test_results_path = os.path.join("build", "test-results") + os.makedirs(test_results_path, exist_ok=True) + summary_file_path = os.path.join( + test_results_path, f"perf-summary-{file_unique_name}.txt" + ) + + with open(summary_file_path, "w") as sf: + sf.write("================= Perf Change Summary =================") + if self._has_perf_changes(): + sf.write("\n\n----------------- Regressions -----------------\n") + # Dumps Point 1 and 2 from Summary of Interest + # 1. Inferences that started to fail (Speedup = "INF") + # 2. Slower than previous run + for bucket in self.perf_buckets: + if len(self.regressions[bucket]) > 0: + sf.write( + f"\n----------------- >= {bucket}x slower -----------------\n" + ) + sf.write( + str(self._get_summary_table(bucket, get_progressions=False)) + ) + + sf.write("\n\n----------------- Progressions -----------------\n") + + # Dumps Point 1 and 2 from Summary of Interest + # 1. Inferences that started to work (Speedup = "INF") + # 2. Faster than previous run + for bucket in self.perf_buckets: + if len(self.progressions[bucket]) > 0: + sf.write( + f"\n----------------- >= {bucket}x faster -----------------\n" + ) + sf.write(str(self._get_summary_table(bucket))) + else: + sf.write("\nNo significant changes observed.") + + if len(self.missing_devices) > 0: + # 3. Missing devices (New runs missing data for certain devices) + sf.write("\n----------------- Missing devices -----------------\n") + table = PrettyTable(["Model ID", "Missing Device"]) + table.add_rows(self.missing_devices) + sf.write(str(table)) + + if len(self.new_perf_report) > 0: + # 4. New Models (Models that did not have perf.yaml previously) + sf.write("\n----------------- New models -----------------\n") + table = PrettyTable(["Model ID"]) + table.add_rows(self.new_perf_report) + sf.write(str(table)) + + if len(self.empty_perf_report) > 0: + # 5. 
Empty reports (Models with no passing jobs) + sf.write( + "\n----------------- Empty reports (No passing jobs) -----------------\n" + ) + table = PrettyTable(["Model ID"]) + table.add_rows(self.empty_perf_report) + sf.write(str(table)) + + print(f"Perf change summary written to {summary_file_path}") diff --git a/qai_hub_models/utils/printing.py b/qai_hub_models/utils/printing.py new file mode 100644 index 00000000..8c09e9ba --- /dev/null +++ b/qai_hub_models/utils/printing.py @@ -0,0 +1,134 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from collections import Counter +from pathlib import Path +from typing import Any, Dict, List, Optional + +import numpy as np +import qai_hub as hub +from prettytable import PrettyTable +from qai_hub.client import SourceModelType + +from qai_hub_models.utils.base_model import TargetRuntime +from qai_hub_models.utils.compare import generate_comparison_metrics +from qai_hub_models.utils.config_loaders import QAIHMModelPerf +from qai_hub_models.utils.qnn_helpers import is_qnn_hub_model + +_INFO_DASH = "-" * 60 + + +def print_inference_metrics( + inference_job: hub.InferenceJob, + inference_result: Dict[str, List[np.ndarray]], + torch_out: List[np.ndarray], + outputs_to_skip: Optional[List[int]] = None, +) -> None: + inference_data = [ + np.concatenate(outputs, axis=0) for outputs in inference_result.values() + ] + output_names = list(inference_result.keys()) + metrics = generate_comparison_metrics(torch_out, inference_data) + print( + f"\nComparing on-device vs. local-cpu inference for {inference_job.name.title()}." 
+ ) + + table = PrettyTable(align="l") + table.field_names = ["Name", "Shape", "Peak Signal-to-Noise Ratio (PSNR)"] + outputs_to_skip = outputs_to_skip or [] + i = 0 + while i in metrics or i in outputs_to_skip: + if i in outputs_to_skip or np.prod(np.array(metrics[i].shape)) < 5: + table.add_row([output_names[i], metrics[i].shape, "Skipped"]) + i += 1 + continue + table.add_row([output_names[i], metrics[i].shape, f"{metrics[i].psnr:.4g} dB"]) + i += 1 + + print(table.get_string()) + last_line = f"More details: {inference_job.url}" + print(last_line) + + +def print_profile_metrics_from_job( + profile_job: hub.ProfileJob, + profile_data: Dict[str, Any], +): + compute_unit_counts = Counter( + [op.get("compute_unit", "UNK") for op in profile_data["execution_detail"]] + ) + execution_summary = profile_data["execution_summary"] + inference_time_ms = execution_summary["estimated_inference_time"] / 1000 + peak_memory_bytes = execution_summary["inference_memory_peak_range"] + print(f"\n{_INFO_DASH}") + print(f"Performance results on-device for {profile_job.name.title()}.") + print(_INFO_DASH) + + if profile_job.model.model_type == SourceModelType.TFLITE: + runtime = TargetRuntime.TFLITE + elif is_qnn_hub_model(profile_job.model): + runtime = TargetRuntime.QNN + else: + raise NotImplementedError() + + print_profile_metrics( + QAIHMModelPerf.ModelRuntimePerformanceDetails( + profile_job.model.name, + profile_job.device.name, + profile_job.device.os, + runtime, + inference_time_ms, + peak_memory_bytes, + compute_unit_counts, + ) + ) + print(_INFO_DASH) + last_line = f"More details: {profile_job.url}\n" + print(last_line) + + +def print_profile_metrics( + details: QAIHMModelPerf.ModelRuntimePerformanceDetails, +): + inf_time = details.inference_time_ms + peak_memory_bytes = f"[{round(details.peak_memory_bytes[0] / 1e6)}, {round(details.peak_memory_bytes[1] / 1e6)}]" + num_ops = sum(details.compute_unit_counts.values()) + compute_units = [ + f"{unit} ({num_ops} ops)" + for unit, num_ops in details.compute_unit_counts.items() + ] + + rows = [ + ["Device", f"{details.device_name} ({details.device_os})"], + ["Runtime", f"{details.runtime.name}"], + [ + "Estimated inference time", + "less than 0.1ms" if inf_time < 0.1 else f"{inf_time}", + ], + ["Estimated peak memory usage", f"{peak_memory_bytes}"], + ["Total # Ops", f"{num_ops}"], + ["Compute Unit(s)", " ".join(compute_units)], + ] + table = PrettyTable(align="l", header=False, border=False, padding_width=0) + for row in rows: + table.add_row([row[0], f": {row[1]}"]) + print(table.get_string()) + + +def print_on_target_demo_cmd( + compile_job: hub.CompileJob, model_folder: Path, device: str +) -> None: + """ + Outputs a command that will run a model's demo script via inference job. + """ + assert compile_job.wait().success + print("\nRun this model on a hosted device on sample data using:") + target_model = compile_job.get_target_model() + assert target_model is not None + print( + f"python {model_folder / 'demo.py'} " + "--on-device " + f"--hub-model-id {target_model.model_id} " + f'--device "{device}"\n' + ) diff --git a/qai_hub_models/utils/qai_hub_helpers.py b/qai_hub_models/utils/qai_hub_helpers.py new file mode 100644 index 00000000..c44f6085 --- /dev/null +++ b/qai_hub_models/utils/qai_hub_helpers.py @@ -0,0 +1,160 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os +from typing import Any, Dict, List, Union + +import numpy as np +import qai_hub as hub +from qai_hub.client import APIException, UserError + +from qai_hub_models.utils.asset_loaders import ASSET_CONFIG +from qai_hub_models.utils.base_model import TargetRuntime +from qai_hub_models.utils.config_loaders import QAIHMModelPerf +from qai_hub_models.utils.huggingface import fetch_huggingface_target_model +from qai_hub_models.utils.printing import print_profile_metrics + + +def transpose_channel( + io_names: str, + inputs: Union[hub.Dataset, Dict[str, Any]], + target_runtime: "TargetRuntime", + first_to_last: bool, +): + + min_dim = 4 if target_runtime == TargetRuntime.QNN else 3 + io_names_list = io_names.strip().split(",") + target = dict() + + assert isinstance(inputs, dict) + for name, array in inputs.items(): + if len(array[0].shape) < min_dim or len(array[0].shape) > 5: + target[name] = array + elif name in io_names_list: + transpose_order = list(range(len(array[0].shape))) + if first_to_last: + transpose_order.append(transpose_order.pop(-3)) + else: + transpose_order.insert(-2, transpose_order.pop(-1)) + target[name] = [np.transpose(arr, transpose_order) for arr in array] + else: + target[name] = array + return target + + +def transpose_channel_first_to_last( + io_names: str, + sample_inputs: Union[hub.Dataset, Dict[str, Any]], + target_runtime: "TargetRuntime", +) -> Dict[str, List[np.ndarray]]: + return transpose_channel(io_names, sample_inputs, target_runtime, True) + + +def transpose_channel_last_to_first( + io_names: str, + job_outputs: Union[hub.Dataset, Dict[str, Any]], + target_runtime: "TargetRuntime", +) -> Dict[str, List[np.ndarray]]: + return transpose_channel(io_names, job_outputs, target_runtime, False) + + +def can_access_qualcomm_ai_hub(): + try: + hub.get_devices() + except APIException: + return False + except UserError: + return False + return True + + +_AIHUB_URL = "https://aihub.qualcomm.com" +_AIHUB_NAME = "Qualcomm® AI Hub" +_WARNING_DASH = "=" * 114 +_INFO_DASH = "-" * 55 + + +def export_without_hub_access( + model_id: str, + model_display_name: str, + device_name: str, + skip_profiling: bool, + skip_inferencing: bool, + skip_downloading: bool, + skip_summary: bool, + output_path: str, + target_runtime: TargetRuntime, + compile_options: str, + profile_options: str, + components: List[str] | None = None, +) -> List[str] | None: + print(_WARNING_DASH) + print( + f"Unable to find a valid API token for {_AIHUB_NAME}. Using results from a previous job run on the same device.\n" + f"To get access to the complete experience, please sign-up for access at {_AIHUB_URL}." + ) + print(_WARNING_DASH) + + if compile_options or profile_options: + raise RuntimeError( + f"Jobs with `compile_options` or `profile_options` can only be run with {_AIHUB_NAME} access." + ) + + if not skip_profiling and not skip_summary: + print("") + + missing_perf = True + # Components in perf.yaml don't yet have the same name as their code generated names. 
+    if not components:
+        perf_yaml_path = os.path.join(
+            os.path.dirname(os.path.dirname(__file__)),
+            "models",
+            model_id,
+            "perf.yaml",
+        )
+        if os.path.exists(perf_yaml_path):
+            parsed_perf = QAIHMModelPerf(perf_yaml_path, model_id).get_perf_details(
+                target_runtime, device_name
+            )
+            missing_perf = None in parsed_perf.values()
+
+        if not missing_perf:
+            print(f"Profiling Results for {model_display_name}\n{_INFO_DASH}")
+            for model_name, perf in parsed_perf.items():
+                assert perf is not None  # for mypy
+                print_profile_metrics(perf)
+
+    if missing_perf:
+        print(
+            f"Cannot obtain results for Device({device_name}) with runtime {target_runtime.name} without an API token.\n"
+            f"Please sign-up for {_AIHUB_NAME} to run this configuration on hosted devices."
+        )
+
+    print("")
+
+    if not skip_inferencing and not skip_summary:
+        print(
+            f"\nSkipping on-device numerical validation. "
+            f"Please sign-up for {_AIHUB_NAME} to perform numerical validation on hosted devices."
+        )
+
+    paths = []
+    if not skip_downloading:
+        print("")
+        print(
+            f"Downloading model(s) from a previous job on {_AIHUB_NAME}.\n"
+            f"More details are available on Hugging Face: {ASSET_CONFIG.get_hugging_face_url(model_display_name)}"
+        )
+        try:
+            paths = fetch_huggingface_target_model(
+                model_display_name, output_path, target_runtime
+            )
+            print(f"Deployable model(s) saved to: {paths}")
+        except Exception as e:
+            print(f"Download failure: {e}")
+        print("")
+
+    return paths
diff --git a/qai_hub_models/utils/qnn_helpers.py b/qai_hub_models/utils/qnn_helpers.py
new file mode 100644
index 00000000..51d9e255
--- /dev/null
+++ b/qai_hub_models/utils/qnn_helpers.py
@@ -0,0 +1,46 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import Dict, List
+
+import torch
+from qai_hub.client import Job, Model, SourceModelType
+
+
+def onnx_elem_type_to_str(elem_type: int) -> str:
+    if elem_type == 1:
+        return "float32"
+    elif elem_type == 2:
+        return "uint8"
+    elif elem_type == 3:
+        return "int8"
+    elif elem_type == 6:
+        return "int32"  # ONNX TensorProto elem_type 6 is INT32
+    elif elem_type == 10:
+        return "float16"
+    raise ValueError("Unsupported elem_type.")
+
+
+def load_encodings(output_path: Path, model_name: str) -> Dict:
+    encodings_file = output_path / f"{model_name}.aimet" / f"{model_name}.encodings"
+    with open(encodings_file) as f:
+        encodings = json.load(f)
+    return encodings["activation_encodings"]
+
+
+def get_qnn_inputs(compile_job: Job, sample_inputs: Dict[str, List[torch.Tensor]]):
+    # Pair the compiled model's input names (in order) with the provided sample inputs.
+    return dict(zip(compile_job.target_shapes.keys(), sample_inputs.values()))
+
+
+def is_qnn_hub_model(model: Model):
+    return model.model_type in [
+        SourceModelType.QNN_BIN,
+        SourceModelType.QNN_LIB_AARCH64_ANDROID,
+        SourceModelType.QNN_LIB_X86_64_LINUX,
+    ]
diff --git a/qai_hub_models/utils/quantization.py b/qai_hub_models/utils/quantization.py
new file mode 100644
index 00000000..78f87d52
--- /dev/null
+++ b/qai_hub_models/utils/quantization.py
@@ -0,0 +1,61 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +from typing import Optional + +import torch +from torch.utils.data import DataLoader + +from qai_hub_models.utils.asset_loaders import CachedWebAsset, load_torch + +IMAGE_QUANTIZATION_SAMPLES_URL = CachedWebAsset.from_asset_store( + "/quantization/image_quantization_samples.pt" +) + + +def make_image_sample_data_loader() -> DataLoader: + img_tensor = get_image_quantization_samples() + tensor_dataset = torch.utils.data.TensorDataset(img_tensor) + return DataLoader(tensor_dataset, batch_size=32) + + +def get_image_quantization_samples( + quantization_samples_path: Optional[str] = None, +) -> torch.Tensor: + """ + Loads a tensor of sample input image data from the specified path. + This data is intended to be used for post-training quantization. + + If no path is provided, the method returns a default tensor containing + data from images fetched from the Google OpenImages dataset. + + The default tensor has shape (50, 3, 224, 224). Here is the code to produce + the default tensor: + + ``` + import fiftyone.zoo as foz + from PIL import Image + import torch + from qai_hub_models.models._shared.imagenet_classifier.app import preprocess_image + + image_dataset = foz.load_models_dataset( + "open-images-v6", + split="validation", + max_samples=50, + shuffle=True, + ) + + tensors = [] + for sample in image_dataset: + img = Image.open(sample.filepath) + tensors.append(preprocess_image(img)) + + final_tensor = torch.cat(tensors, dim=0) + + torch.save(final_tensor, "imagenet_quantization_samples.pt") + ``` + """ + return load_torch(quantization_samples_path or IMAGE_QUANTIZATION_SAMPLES_URL) diff --git a/qai_hub_models/utils/quantization_aimet.py b/qai_hub_models/utils/quantization_aimet.py new file mode 100644 index 00000000..e70ddc33 --- /dev/null +++ b/qai_hub_models/utils/quantization_aimet.py @@ -0,0 +1,279 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +try: + from aimet_torch import onnx_utils + from aimet_torch.qc_quantize_op import QcQuantizeWrapper + from aimet_torch.quantsim import QuantizationSimModel +except (ImportError, ModuleNotFoundError): + raise NotImplementedError( + "AIMET must be installed to load quantized models. " + "Install AIMET via the instructions here: " + "https://quic.github.io/aimet-pages/releases/latest/install/index.html" + ) + +import os +import shutil +import tempfile +from pathlib import Path +from typing import Any +from zipfile import ZipFile + +import torch +from qai_hub.client import DatasetEntries + +from qai_hub_models.evaluators.base_evaluators import ( + BaseEvaluator, + _DataLoader, + _for_each_batch, +) +from qai_hub_models.utils.base_model import ( + BaseModel, + InputSpec, + SourceModelFormat, + TargetRuntime, +) +from qai_hub_models.utils.input_spec import make_torch_inputs + + +class AIMETQuantizableMixin: + """ + This mixin provides quantization support with Qualcomm's AIMET package. 
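+
+    Typical usage (a sketch; `MyQuantizableModel` is a hypothetical subclass that also
+    derives from BaseModel and supplies a QuantizationSimModel):
+
+        model = MyQuantizableModel.from_pretrained()
+        model.quantize(make_image_sample_data_loader(), num_samples=50)
+        zip_path = model.convert_to_onnx_and_aimet_encodings("build/export")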
+ """ + + def __init__( + self, + sim_model: QuantizationSimModel, + needs_onnx_direct_aimet_export: bool = False, + ): + self.quant_sim = sim_model + self.needs_onnx_direct_aimet_export = needs_onnx_direct_aimet_export + + def preferred_hub_source_model_format( + self, target_runtime: TargetRuntime + ) -> SourceModelFormat: + if target_runtime == TargetRuntime.QNN: + return SourceModelFormat.ONNX + else: + return SourceModelFormat.TORCHSCRIPT + + def quantize( + self, + data: _DataLoader, + num_samples: int | None = None, + evaluator: BaseEvaluator | None = None, + device: str = "cpu", + requantize_model_weights=False, + ) -> float | None: + """ + Re-compute quantization encodings for this model with the given dataset and model evaluator. + + This model will be updated with a new set of quantization parameters. Future calls to + forward() and export_...() will take these quantization parameters into account. + + Parameters: + data: torch DataLoader | Collection + Data loader for the dataset to use for evaluation. + If an evaluator is __NOT__ provided (see "evaluator" parameter), the iterator must return + inputs: Collection[torch.Tensor] | torch.Tensor + + otherwise, if an evaluator __IS__ provided, the iterator must return + tuple( + inputs: Collection[torch.Tensor] | torch.Tensor, + ground_truth: Collection[torch.Tensor] | torch.Tensor] + ) + + num_samples: int | None + Number of samples to use for evaluation. One sample is one iteration from iter(data). + If none, defaults to the number of samples in the dataset. + + evaluator: BaseModelEvaluator | None + Evaluator to populate while quantizing the data. + If not provided, an evaluator is not used. + + device: str + Name of device on which inference should be run. + + requantize_model_weights: bool + If a weight is quantized, recompute its quantization parameters. + + Returns: + If an evaluator is provided, returns its accuracy score. No return value otherwise. + """ + assert isinstance(self, BaseModel) + if not evaluator: + evaluator = self.get_evaluator() + + # Enable or disable quantization for model parameters (model weights). + # Activations are always re-quantized. + for quant_module in self.quant_sim.model.modules(): + if isinstance(quant_module, QcQuantizeWrapper): + for param_quantizer in quant_module.param_quantizers.values(): + if not requantize_model_weights: + try: + param_quantizer.freeze_encoding() + except RuntimeError: + # Encoding is not set, so it can't be frozen. + pass + else: + # Un-freeze the quantizer. + param_quantizer._is_encoding_frozen = False + + # Reset evaluator if applicable + if evaluator: + evaluator.reset() + + # Define evaluator function for this model. + def evaluator_func(model: torch.nn.Module, args): + # This function is defined because AIMET does not unwrap + # the arguments you pass to `compute_encodings`. + return ( + evaluator.add_from_dataset(model, *args) + if evaluator + else _for_each_batch(model, *args) + ) + + # Compute the new encodings. + self.quant_sim.compute_encodings(evaluator_func, [data, num_samples, device]) + + # Return accuracy score if applicable + return evaluator.get_accuracy_score() if evaluator else None + + def convert_to_torchscript_and_aimet_encodings( + self, + output_dir: str | Path, + input_spec: InputSpec | None = None, + model_name: str | None = None, + ) -> str: + """ + Converts the torch module to a zip file containing an + unquantized torchscript trace and an aimet quantization encodings file. 
+ """ + if model_name is None: + model_name = self.__class__.__name__ + if not input_spec: + input_spec = self._get_input_spec_ts() + + os.makedirs(output_dir, exist_ok=True) + zip_path = os.path.join(output_dir, f"{model_name}.aimet.zip") + base_dir = Path(f"{model_name}.aimet") + base_path = Path(output_dir) / base_dir + if base_path.exists(): + shutil.rmtree(base_path) + os.makedirs(base_path) + self.quant_sim.export( + str(base_path), + model_name, + tuple(make_torch_inputs(input_spec)), + export_to_torchscript=True, + ) + + # AIMET exports GraphModule. Convert it to ScriptModule + fx_graph_path = base_path / f"{model_name}.pth" + fx_graph = torch.load(fx_graph_path) + script_module = torch.jit.trace(fx_graph, tuple(make_torch_inputs(input_spec))) + torch.jit.save(script_module, base_path / f"{model_name}.pt") + + with ZipFile(zip_path, "w") as zip_object: + zip_object.write(base_path, base_dir) + zip_object.write( + base_path / f"{model_name}.pt", base_dir / f"{model_name}.pt" + ) + zip_object.write( + base_path / f"{model_name}_torch.encodings", + base_dir / f"{model_name}_torch.encodings", + ) + + return zip_path + + def convert_to_onnx_and_aimet_encodings( + self, + output_dir: str | Path, + input_spec: InputSpec | None = None, + model_name: str | None = None, + ) -> str: + """ + Converts the torch module to a zip file containing an + unquantized ONNX model and an aimet quantization encodings file. + """ + if model_name is None: + model_name = self.__class__.__name__ + if not input_spec: + input_spec = self._get_input_spec_ts() + + os.makedirs(output_dir, exist_ok=True) + zip_path = os.path.join(output_dir, f"{model_name}.aimet.zip") + base_dir = Path(f"{model_name}.aimet") + base_path = Path(output_dir) / base_dir + if base_path.exists(): + shutil.rmtree(base_path) + os.makedirs(base_path) + + onnx_utils.EXPORT_TO_ONNX_DIRECT = self.needs_onnx_direct_aimet_export + self.quant_sim.export( + str(base_path), + model_name, + tuple(make_torch_inputs(input_spec)), + onnx_export_args=dict(input_names=[name for name in input_spec]), + ) + + onnx_file_name = f"{model_name}.onnx" + encodings_file_name = f"{model_name}.encodings" + with ZipFile(zip_path, "w") as zip_object: + zip_object.write(base_path, base_dir) + zip_object.write( + base_path / onnx_file_name, os.path.join(base_dir, onnx_file_name) + ) + zip_object.write( + base_path / encodings_file_name, + os.path.join(base_dir, encodings_file_name), + ) + + return zip_path + + def convert_to_torchscript(*args, **kwargs): + """Block users from calling convert_to_torchscript() on quantized models, since python will call both parent classes.""" + raise NotImplementedError( + "This model is quantized. Use `model.convert_to_quantized_torchscript` instead!" + ) + + def convert_to_quantized_torchscript( + self, input_spec: InputSpec | None = None, check_trace: bool = True + ) -> Any: + """ + Converts the torch module to a quantized torchscript trace. + """ + if not input_spec: + input_spec = self._get_input_spec_ts() + + with tempfile.TemporaryDirectory() as tempdir: + self.quant_sim.export( + tempdir, + "model", + tuple(make_torch_inputs(input_spec)), + export_to_torchscript=True, + use_embedded_encodings=True, + ) + return torch.jit.load(f"{tempdir}/model_embedded.torchscript.pth") + + def get_calibration_data( + self, + target_runtime: TargetRuntime, + input_spec: InputSpec | None = None, + ) -> DatasetEntries | None: + """ + Calibration dataset for this model and input spec. + Default behavior is randomized input in range [0, 1]. 
+ """ + if not input_spec: + input_spec = self._get_input_spec_ts() + inputs = make_torch_inputs(input_spec) + return {k: v.numpy() for k, v in zip(input_spec.keys(), inputs)} + + def _get_input_spec_ts(self, *args, **kwargs) -> InputSpec: + """Type safe version of get_input_spec.""" + assert isinstance(self, BaseModel) + return self.get_input_spec(*args, **kwargs) diff --git a/qai_hub_models/utils/testing.py b/qai_hub_models/utils/testing.py new file mode 100644 index 00000000..998d9594 --- /dev/null +++ b/qai_hub_models/utils/testing.py @@ -0,0 +1,97 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import numpy as np +import pytest + +from qai_hub_models.utils.asset_loaders import always_answer_prompts + + +def skip_clone_repo_check(func): + """ + When running QAI Hub Models functions, the user sometimes needs to type "y" + before the repo is cloned. When testing in CI, we want to skip this check. + + Add this function as a decorator to any test function that needs to bypass this. + + @skip_clone_repo_check + def test_fn(): + ... + """ + + def wrapper(*args, **kwargs): + with always_answer_prompts(True): + return func(*args, **kwargs) + + return wrapper + + +@pytest.fixture +def skip_clone_repo_check_fixture(): + with always_answer_prompts(True): + yield + + +def assert_most_same(arr1: np.ndarray, arr2: np.ndarray, diff_tol: float) -> None: + """ + Checks whether most values in the two numpy arrays are the same. + + Particularly for image models, slight differences in the PIL/cv2 envs + may cause image <-> tensor conversion to be slightly different. + + Instead of using np.assert_allclose, this may be a better way to test image outputs. + + Parameters: + arr1: First input image array. + arr2: Second input image array. + diff_tol: Float in range [0,1] representing percentage of values + that can be different while still having the assertion pass. + + Raises: + AssertionError if input arrays are different size, + or too many values are different. + """ + + different_values = arr1 != arr2 + assert ( + np.mean(different_values) <= diff_tol + ), f"More than {diff_tol * 100}% of values were different." + + +def assert_most_close( + arr1: np.ndarray, + arr2: np.ndarray, + diff_tol: float, + rtol: float = 0.0, + atol: float = 0.0, +) -> None: + """ + Checks whether most values in the two numpy arrays are close. + + Particularly for image models, slight differences in the PIL/cv2 envs + may cause image <-> tensor conversion to be slightly different. + + Instead of using np.assert_allclose, this may be a better way to test image outputs. + + Parameters: + arr1: First input image array. + arr2: Second input image array. + diff_tol: Float in range [0,1] representing percentage of values + that can be different while still having the assertion pass. + atol: See rtol documentation. + rtol: Two values a, b are considered close if the following expresion is true + `absolute(a - b) <= (atol + rtol * absolute(b))` + Documentation copied from `np.isclose`. + + Raises: + AssertionError if input arrays are different size, + or too many values are not close. + """ + + not_close_values = ~np.isclose(arr1, arr2, atol=atol, rtol=rtol) + assert ( + np.mean(not_close_values) <= diff_tol + ), f"More than {diff_tol * 100}% of values were not close." 
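The two assertion helpers in qai_hub_models/utils/testing.py above are easiest to read from a call site. Below is a minimal, self-contained sketch (not part of the change set; the test name, array shapes, and tolerance values are illustrative) of how `assert_most_close` and the `skip_clone_repo_check` decorator might be combined in a model test:

```
# Illustrative only: exercises the helpers introduced in qai_hub_models/utils/testing.py.
import numpy as np

from qai_hub_models.utils.testing import assert_most_close, skip_clone_repo_check


@skip_clone_repo_check  # auto-answers "y" to any repo-clone prompt raised during the test
def test_outputs_mostly_match():
    np.random.seed(0)
    # Stand-ins for a reference output and an on-device output of an image model.
    reference = np.random.rand(1, 3, 224, 224).astype(np.float32)
    candidate = reference.copy()
    # Perturb ~0.07% of the values to mimic small device-side numeric drift.
    candidate[0, 0, 0, :100] += 0.5

    # Passes as long as no more than 1% of values fall outside atol/rtol.
    assert_most_close(reference, candidate, diff_tol=0.01, atol=1e-5)
```

`assert_most_same` follows the same pattern but compares elements for exact equality rather than closeness.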
diff --git a/redame-assets/overview.jpg b/redame-assets/overview.jpg deleted file mode 100644 index 948fdbca..00000000 Binary files a/redame-assets/overview.jpg and /dev/null differ diff --git a/repolint.json b/repolint.json deleted file mode 100644 index 565d670a..00000000 --- a/repolint.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "extends": "https://raw.githubusercontent.com/quic/.github/main/repolint.json", - "rules": { - "source-license-headers-exist": { - "level": "error", - "rule": { - "type": "file-starts-with", - "options": { - "globsAll": [ - "**/*.py", - "**/*.js", - "**/*.c", - "**/*.cc", - "**/*.cpp", - "**/*.h", - "**/*.ts", - "**/*.sh", - "**/*.rs", - "**/*.java" - ], - "skip-paths-matching": { - "patterns": [ - "babel.config.js", - "build\/", - "jest.config.js", - "node_modules\/", - "types\/", - "models-accuracy/super_resolution/quicksrnet/dataloader/imresize.py", - "models-accuracy/super_resolution/quicksrnet/model/imresize.py" - ] - }, - "lineCount": 40, - "patterns": [ - "Copyright.*Qualcomm Innovation Center, Inc|©.*Qualcomm Innovation Center, Inc", - "SPDX-License-Identifier" - ], - "flags": "i" - } - } - } - } -} \ No newline at end of file diff --git a/scripts/build_and_test.py b/scripts/build_and_test.py new file mode 100755 index 00000000..78a7a6af --- /dev/null +++ b/scripts/build_and_test.py @@ -0,0 +1,616 @@ +#!/usr/bin/env python3 + +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import argparse +import glob +import logging +import os +import sys +import textwrap +from typing import Callable, List, Optional + +from tasks.changes import ( + get_all_models, + get_changed_models, + get_models_to_run_general_tests, + get_models_to_test_export, + get_models_with_changed_definitions, + get_models_with_export_file_changes, +) +from tasks.constants import VENV_PATH +from tasks.github import set_github_output +from tasks.plan import ( + ALL_TASKS, + PUBLIC_TASKS, + SUMMARIZERS, + TASK_DEPENDENCIES, + Plan, + depends, + public_task, + summarizer, + task, +) +from tasks.release import ReleaseTask +from tasks.task import ( + COVERAGE_DIR, + TEST_RESULTS_DIR, + ConditionalTask, + ListTasksTask, + NoOpTask, + RunCommandsWithVenvTask, + Task, +) +from tasks.test import ( + PyTestE2eHubTask, + PyTestModelsTask, + PyTestScriptsTask, + PyTestUtilsTask, +) +from tasks.util import can_support_aimet, echo, run, run_with_venv_and_get_output +from tasks.venv import CreateVenvTask, SyncLocalQAIHMVenvTask + + +def get_coverage_reports(): + return glob.glob(os.path.join(COVERAGE_DIR, ".coverage.*")) + + +def parse_arguments(): + parser = argparse.ArgumentParser( + description="Build and test all the things.", + formatter_class=argparse.RawTextHelpFormatter, + ) + + parser.add_argument( + "--task", + "--tasks", + dest="legacy_task", + type=str, + help="[deprecated] Comma-separated list of tasks to run; use --task=list to list all tasks.", + ) + parser.add_argument( + "task", + type=str, + nargs="*", + help='Task(s) to run. Specify "list" to show all tasks.', + ) + + parser.add_argument( + "--only", + action="store_true", + help="Run only the listed task(s), skipping any dependencies.", + ) + + parser.add_argument( + "--print-task-graph", + action="store_true", + help="Print the task library in DOT format and exit. 
Combine with --task to highlight what would run.", + ) + + parser.add_argument( + "--python", + type=str, + default="python3.8", + help="Python executable path or name (only used when creating the venv).", + ) + + parser.add_argument( + "--venv", + type=str, + metavar="...", + default=VENV_PATH, + help=textwrap.dedent( + """\ + [optional] Use the virtual env at the specified path. + - Creates a virtual env at that path if none exists. + - If omitted, creates and uses a virtual environment at """ + + VENV_PATH + + """ + - If [none], does not create or activate a virtual environment. + """ + ), + ) + + parser.add_argument( + "--dry-run", action="store_true", help="Print the plan, rather than running it." + ) + + parser.add_argument( + "--defer-coverage-report", + action="store_true", + help=textwrap.dedent( + """\ + Skip coverage report and keep coverage files. These files will + be included in subsequent runs to build_and_test.py that do not + defer the report. This helps produce a single report from a + series of separate build_and_test.py commands. + """ + ), + ) + + args = parser.parse_args() + if args.legacy_task: + args.task.extend(args.legacy_task.split(",")) + delattr(args, "legacy_task") + return args + + +class TaskLibrary: + def __init__( + self, + python_executable: str, + venv_path: Optional[str], + defer_coverage_report: bool = False, + ) -> None: + self.python_executable = python_executable + self.venv_path = venv_path + self.defer_coverage_report = defer_coverage_report + + @staticmethod + def to_dot(highlight: List[str] = []) -> str: + elements: List[str] = [] + for tsk in ALL_TASKS: + task_attrs: List[str] = [] + if tsk in PUBLIC_TASKS: + task_attrs.append("style=filled") + if tsk in highlight: + task_attrs.append("penwidth=4.0") + if len(task_attrs) > 0: + elements.append(f"{tsk} [{' '.join(task_attrs)}]") + else: + elements.append(tsk) + for tsk in TASK_DEPENDENCIES: + for dep in TASK_DEPENDENCIES[tsk]: + elements.append(f"{tsk} -> {dep}") + elements_str = "\n".join([f" {element};" for element in elements]) + return f"digraph {{\n{elements_str}\n}}" + + @public_task("Print a list of commonly used tasks; see also --task=list_all.") + @depends(["list_public"]) + def list(self, plan: Plan) -> str: + return plan.add_step("list", NoOpTask()) + + @task + def list_all(self, plan: Plan) -> str: + return plan.add_step("list_all", ListTasksTask(ALL_TASKS)) + + @task + def list_public(self, plan: Plan) -> str: + return plan.add_step("list_public", ListTasksTask(PUBLIC_TASKS)) + + @public_task("precheckin") + @depends( + [ + "test_utils", + "test_scripts", + "test_changed_models", + ] + ) + def precheckin(self, plan: Plan) -> str: + # Excludes export tests, and uses the same environment for each model. + return plan.add_step("precheckin", NoOpTask()) + + @public_task("precheckin_long") + @depends( + [ + "test_utils", + "test_scripts", + "test_changed_models_long", + ] + ) + def precheckin_long(self, plan: Plan) -> str: + # Includes export tests, and creates a fresh environment for each model. 
+ return plan.add_step("precheckin_long", NoOpTask()) + + @public_task("all_tests") + @depends( + [ + "test_utils", + "test_scripts", + "test_all_models", + "test_e2e_on_hub", + ] + ) + def all_tests(self, plan: Plan) -> str: + return plan.add_step("all_tests", NoOpTask()) + + @public_task("all_tests_long") + @depends( + [ + "test_utils", + "test_scripts", + "test_all_models_long", + "test_e2e_on_hub", + ] + ) + def all_tests_long(self, plan: Plan) -> str: + return plan.add_step("all_tests_long", NoOpTask()) + + @task + def create_venv(self, plan: Plan, step_id: str = "create_venv") -> str: + return plan.add_step( + step_id, + ConditionalTask( + group_name=None, + condition=lambda: self.venv_path is None + or os.path.exists(self.venv_path), + true_task=NoOpTask("Not creating/activating any virtual environment."), + false_task=CreateVenvTask(self.venv_path, self.python_executable), + ), + ) + + @public_task("Install dependencies for model zoo.") + @depends(["create_venv"]) + def install_deps(self, plan: Plan, step_id: str = "install_deps") -> str: + return plan.add_step( + step_id, + SyncLocalQAIHMVenvTask( + self.venv_path, + ["dev"], + can_support_aimet(), + ), + ) + + @task + def clean_pip(self, plan: Plan) -> str: + class CleanPipTask(Task): + def __init__(self, venv_path: Optional[str]) -> None: + super().__init__("Deleting python packages") + self.venv_path = venv_path + + def does_work(self) -> bool: + return True + + def run_task(self) -> None: + if self.venv_path is not None: + # Some sanity checking to make sure we don't accidentally "rm -rf /" + if not self.venv_path.startswith(os.environ["HOME"]): + run(f"rm -rI {self.venv_path}") + else: + run(f"rm -rf {self.venv_path}") + + return plan.add_step("clean_pip", CleanPipTask(self.venv_path)) + + @public_task("Run tests for common utilities.") + @depends(["install_deps"]) + def test_utils(self, plan: Plan, step_id: str = "test_utils") -> str: + return plan.add_step(step_id, PyTestUtilsTask(self.venv_path)) + + @public_task("Run tests for common scripts.") + @depends(["install_deps"]) + def test_scripts(self, plan: Plan, step_id: str = "test_scripts") -> str: + return plan.add_step( + step_id, + PyTestScriptsTask(self.venv_path), + ) + + @public_task( + "Run most tests for only added/modified models in Model Zoo. Includes most tests, uses shared global cache, and uses the same environment for each model." + ) + @depends(["install_deps"]) + def test_changed_models( + self, plan: Plan, step_id: str = "test_changed_models" + ) -> str: + changed_model_defs = set( + get_models_with_changed_definitions() + ) # model.py changed + export_changed_models = set( + get_models_with_export_file_changes() + ) # export.py or test_generated.py changed + + # Get the set of models for which export changed and model defs changed + model_and_export_changed = changed_model_defs & export_changed_models + if len(model_and_export_changed) > 0: + # Don't bother testing all models for export. + # Just test the export for the models whose definitions changed. + export_models = model_and_export_changed + elif len(export_changed_models) > 0: + # This is true when `export.py` or `test_generated.py` are mass-changed, + # but no model definitions actually changed. That means this was a mass-change + # to the export scripts. + # + # Just use 1 model as a sample to test the export. This makes CI significantly faster. 
+ export_models = set([next(iter(export_changed_models))]) + else: + export_models = set() + + # Set of models to run general tests + models_to_run_tests = set( + get_models_to_run_general_tests() + ) # demo.py or model.py changed + models_to_run_tests = ( + models_to_run_tests | export_models + ) # export tests can only run alongside general model tests + + return plan.add_step( + step_id, + PyTestModelsTask( + self.python_executable, + models_to_run_tests, + export_models, + self.venv_path, + venv_for_each_model=False, + use_shared_cache=True, + ), + ) + + @public_task( + "Run all tests for only added/modified models in Model Zoo. Includes all tests, and creates a fresh environment for each model." + ) + @depends(["install_deps"]) + def test_changed_models_long( + self, plan: Plan, step_id: str = "test_changed_models_long" + ) -> str: + default_test_models = ["mobilenet_v2", "googlenet"] + return plan.add_step( + step_id, + PyTestModelsTask( + self.python_executable, + get_changed_models() or default_test_models, + get_models_to_test_export() or default_test_models, + self.venv_path, + venv_for_each_model=True, + use_shared_cache=False, + ), + ) + + @public_task("Run tests for all models in Model Zoo.") + @depends(["install_deps"]) + def test_all_models(self, plan: Plan, step_id: str = "test_all_models") -> str: + # Excludes export tests, and uses the same environment for each model. + all_models = get_all_models() + return plan.add_step( + step_id, + PyTestModelsTask( + self.python_executable, + all_models, + [], + self.venv_path, + venv_for_each_model=False, + use_shared_cache=True, + ), + ) + + @public_task("Run profile jobs for all models in Model Zoo.") + @depends(["install_deps"]) + def test_profile_all_models( + self, plan: Plan, step_id: str = "test_profile_all_models" + ) -> str: + all_models = get_all_models() + return plan.add_step( + step_id, + PyTestModelsTask( + self.python_executable, + all_models, + all_models, + self.venv_path, + venv_for_each_model=False, + use_shared_cache=True, + export_func="profile", + skip_standard_unit_test=True, + ), + ) + + @public_task("Run tests for all models in Model Zoo.") + @depends(["install_deps"]) + def test_all_models_long( + self, plan: Plan, step_id: str = "test_all_models_long" + ) -> str: + # Includes export tests, and creates a fresh environment for each model. 
+ all_models = get_all_models() + return plan.add_step( + step_id, + PyTestModelsTask( + self.python_executable, + all_models, + all_models, + self.venv_path, + venv_for_each_model=True, + use_shared_cache=False, + ), + ) + + @public_task("Run e2e tests against Hub") + @depends(["install_deps"]) + def test_e2e_on_hub(self, plan: Plan, step_id: str = "test_e2e_on_hub") -> str: + return plan.add_step( + step_id, + PyTestE2eHubTask(self.venv_path), + ) + + @summarizer + def test_report_coverage(self, plan: Plan) -> str: + defer_coverage_report = self.defer_coverage_report + + class RunCoverageTask(Task): + def __init__(self, venv_path: Optional[str]) -> None: + super().__init__("Report Coverage") + self.venv_path = venv_path + + def does_work(self) -> bool: + return True + + def run_task(self) -> None: + coverage_reports = get_coverage_reports() + all_reports = '"' + '" "'.join(coverage_reports) + '"' + RunCommandsWithVenvTask( + group_name=None, + venv=self.venv_path, + commands=[ + f"coverage combine {all_reports}", + "coverage report", + f'coverage html -d "{TEST_RESULTS_DIR}/html"', + ], + ).run() + coverage = run_with_venv_and_get_output( + self.venv_path, + "coverage report | tail -1 | sed 's/[[:blank:]]*$//;s/.*[[:blank:]]//'", + ) + set_github_output("coverage", coverage) + + class ReportCoverageTask(ConditionalTask): + def __init__(self, venv_path: Optional[str]) -> None: + super().__init__( + group_name=None, + condition=lambda: len(get_coverage_reports()) == 0 + or defer_coverage_report, + true_task=NoOpTask(), + false_task=RunCoverageTask(venv_path), + ) + + def does_work(self) -> bool: + return True + + return plan.add_step("test_report_coverage", ReportCoverageTask(self.venv_path)) + + @public_task("Release QAIHM (build repo & wheel, push repo & wheel)") + @depends(["install_deps"]) + def release(self, plan: Plan, step_id: str = "release") -> str: + return plan.add_step( + step_id, + ReleaseTask( + self.venv_path, + self.python_executable, + build_repository=True, + push_repository=True, + build_wheel=True, + publish_wheel=True, + ), + ) + + @public_task("Mock Release QAIHM (build repo & wheel, but do not push them)") + @depends(["install_deps"]) + def mock_release(self, plan: Plan, step_id: str = "mock_release") -> str: + return plan.add_step( + step_id, + ReleaseTask( + self.venv_path, + self.python_executable, + build_repository=True, + push_repository=False, + build_wheel=True, + publish_wheel=False, + ), + ) + + # This taks has no depedencies and does nothing. It will still trigger + # summarizer, so it can be used to finalize a coverage report. + @task + def nop(self, plan: Plan) -> str: + return plan.add_step("nop", NoOpTask()) + + +def plan_from_dependencies( + main_tasks: List[str], + python_executable: str, + venv_path: Optional[str], + defer_coverage_report: bool = False, +) -> Plan: + task_library = TaskLibrary( + python_executable, + venv_path, + defer_coverage_report=defer_coverage_report, + ) + plan = Plan() + + # We always run summarizers, which perform conditional work on the output + # of other steps. + work_list = SUMMARIZERS + + # The work list is processed as a stack, so LIFO. We reverse the user-specified + # tasks so that they (and their dependencies) can be expressed in a natural order. 
+ work_list.extend(reversed(main_tasks)) + + for task_name in work_list: + if not hasattr(task_library, task_name): + echo(f"Task '{task_name}' does not exist.", file=sys.stderr) + sys.exit(1) + + while len(work_list) > 0: + task_name = work_list.pop() + unfulfilled_deps: List[str] = [] + for dep in TASK_DEPENDENCIES.get(task_name, []): + if not plan.has_step(dep): + unfulfilled_deps.append(dep) + if not hasattr(task_library, dep): + echo( + f"Non-existent task '{dep}' was declared as a dependency for '{task_name}'.", + file=sys.stderr, + ) + sys.exit(1) + if len(unfulfilled_deps) == 0: + # add task_name to plan + task_adder: Callable[[Plan], str] = getattr(task_library, task_name) + task_adder(plan) + else: + # Look at task_name again later when its deps are satisfied + work_list.append(task_name) + work_list.extend(reversed(unfulfilled_deps)) + + return plan + + +def plan_from_task_list( + tasks: List[str], + python_executable: str, + venv_path: Optional[str], + defer_coverage_report: bool = False, +) -> Plan: + task_library = TaskLibrary( + python_executable, + venv_path, + defer_coverage_report=defer_coverage_report, + ) + plan = Plan() + for task_name in tasks: + # add task_name to plan + task_adder: Callable[[Plan], str] = getattr(task_library, task_name) + task_adder(plan) + return plan + + +def build_and_test(): + log_format = "[%(asctime)s] [bnt] [%(levelname)s] %(message)s" + logging.basicConfig(level=logging.DEBUG, format=log_format) + + args = parse_arguments() + + venv_path = args.venv if args.venv != "none" else None + python_executable = args.python + + plan = Plan() + + if len(args.task) > 0: + planner = plan_from_task_list if args.only else plan_from_dependencies + plan = planner( + args.task, + python_executable, + venv_path, + defer_coverage_report=args.defer_coverage_report, + ) + + if args.print_task_graph: + print(TaskLibrary.to_dot(plan.steps)) + sys.exit(0) + elif len(args.task) == 0: + echo("At least one task or --print-task-graph is required.") + + if args.dry_run: + plan.print() + else: + caught = None + try: + plan.run() + except Exception as ex: + caught = ex + print() + plan.print_report() + print() + if caught: + raise caught + + +if __name__ == "__main__": + build_and_test() diff --git a/scripts/ci/git-credential-helper.sh b/scripts/ci/git-credential-helper.sh new file mode 100644 index 00000000..1a294a88 --- /dev/null +++ b/scripts/ci/git-credential-helper.sh @@ -0,0 +1,3 @@ +#!/bin/bash +echo username="$GIT_USER" +echo password="$GIT_PASSWORD" diff --git a/scripts/examples/conftest.py b/scripts/examples/conftest.py new file mode 100644 index 00000000..b1c7a380 --- /dev/null +++ b/scripts/examples/conftest.py @@ -0,0 +1,25 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import pytest + + +def pytest_addoption(parser): + parser.addoption( + "--on-device", action="store_true", default=False, help="Run on-device tests" + ) + + +def pytest_configure(config): + config.addinivalue_line("markers", "on_device: Tests running Hub inference jobs") + + +def pytest_collection_modifyitems(config, items): + if config.getoption("--on-device"): + # --on-device given in cli: do not skip on_device tests + return + skip_on_device = pytest.mark.skip(reason="need --on-device option to run") + for item in items: + if "on_device" in item.keywords: + item.add_marker(skip_on_device) diff --git a/scripts/examples/quantize_deeplabv3.py b/scripts/examples/quantize_deeplabv3.py new file mode 100644 index 00000000..8d3d62ca --- /dev/null +++ b/scripts/examples/quantize_deeplabv3.py @@ -0,0 +1,54 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +""" +This is a sample script showing how to take a AIMET model zoo model without +pre-computed activations, and compute those activations using QAIHM. + +This script assumes the model is added to QAIHM, but is missing quantization parameters. +""" +import argparse +import os + +from aimet_zoo_torch.deeplabv3.dataloader import get_dataloaders_and_eval_func + +from qai_hub_models.models.deeplabv3_plus_mobilenet_quantized.model import ( + MODEL_ID, + DeepLabV3PlusMobileNetQuantizable, +) +from qai_hub_models.utils.asset_loaders import CachedWebModelAsset + +if __name__ == "__main__": + # Args + parser = argparse.ArgumentParser() + parser.add_argument( + "--voc-path", + required=True, + help="Local path to VOCdevkit/VOC2012. VOC Devkit can be found here http://host.robots.ox.ac.uk/pascal/VOC/voc2012/#devkit", + ) + parser.add_argument( + "--num-iter", type=int, default=None, help="Number of dataset iterations to use" + ) + args = parser.parse_args() + + # Load model. + train_loader, _, _ = get_dataloaders_and_eval_func(args.voc_path) + + # You can skip loading parameters in from_pretrained() if you haven't generated them yet. + m = DeepLabV3PlusMobileNetQuantizable.from_pretrained() + + # Load adaround (weight-only) encodings from the AIMET zoo + weight_encodings = CachedWebModelAsset( + "https://github.com/quic/aimet-model-zoo/releases/download/torch_dlv3_w8a8_pc/deeplabv3+w8a8_tfe_perchannel_param.encodings", + "example_scripts", + "1", + "deeplabv3+w8a8_tfe_perchannel_param.encodings", + ) + m.quant_sim.set_and_freeze_param_encodings(weight_encodings.fetch()) + + # Quantize activations + m.quantize(train_loader, args.num_iter, m.get_evaluator()) + + # Export encodings + m.convert_to_torchscript_and_aimet_encodings(os.getcwd(), model_name=MODEL_ID) diff --git a/scripts/examples/quantize_imagenet_classifier.py b/scripts/examples/quantize_imagenet_classifier.py new file mode 100644 index 00000000..45fb88ce --- /dev/null +++ b/scripts/examples/quantize_imagenet_classifier.py @@ -0,0 +1,68 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +""" +This is a sample script showing how to take a AIMET model zoo model without +pre-computed activations, and compute those activations using QAIHM. +This script assumes the model is added to QAIHM, but is missing quantization parameters. +""" +import argparse +import importlib +from pathlib import Path + +import torch +from torch.utils.data import DataLoader + +from qai_hub_models.datasets.imagenette import ImagenetteDataset + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--num-iter", type=int, default=1, help="Number of batches to use." + ) + parser.add_argument( + "--batch-size", + type=int, + default=8, + help="Batch size to use on each iteration.", + ) + parser.add_argument( + "--output-dir", + type=str, + default=None, + help="Directory where encodings should be stored. Defaults to ./build.", + ) + parser.add_argument( + "--output-name", + type=str, + default=None, + help="Encodings filename. Defaults to _encodings.", + ) + parser.add_argument( + "--model", + type=str, + required=True, + help="Name of the model folder to compute encodings.", + ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="Manual seed to ensure reproducibility for quantization.", + ) + args = parser.parse_args() + module = importlib.import_module(f"qai_hub_models.models.{args.model}") + + dataset = ImagenetteDataset() + torch.manual_seed(args.seed) + dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True) + + model = module.Model.from_pretrained(aimet_encodings=None) + + accuracy = model.quantize(dataloader, args.num_iter, model.get_evaluator()) + print(f"Accuracy: {accuracy * 100:.3g}%") + + output_path = args.output_dir or str(Path() / "build") + output_name = args.output_name or f"{module.MODEL_ID}_encodings" + model.quant_sim.save_encodings_to_json(output_path, output_name) diff --git a/scripts/examples/quantize_superresolution.py b/scripts/examples/quantize_superresolution.py new file mode 100644 index 00000000..02f9caf2 --- /dev/null +++ b/scripts/examples/quantize_superresolution.py @@ -0,0 +1,68 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +""" +This is a sample script showing how to take a AIMET model zoo model without +pre-computed activations, and compute those activations using QAISM. + +This script assumes the model is added to QAISM, but is missing quantization parameters. +""" +import argparse +import importlib +from pathlib import Path + +import torch +from torch.utils.data import DataLoader + +from qai_hub_models.datasets.bsd300 import BSD300Dataset + +from qai_hub_models.utils.quantization_aimet import ( # isort: skip + AIMETQuantizableMixin, +) + +if __name__ == "__main__": + # Args + parser = argparse.ArgumentParser() + parser.add_argument( + "--num-iter", type=int, default=1, help="Number of batches to use." + ) + parser.add_argument( + "--batch-size", + type=int, + default=128, + help="Batch size to use on each iteration.", + ) + parser.add_argument( + "--model", + type=str, + default="sesr_m5_quantized", + help="Name of the model folder to compute encodings. 
This script expects a super resolution model with a scaling parameter, eg SESR M5 Quantized.", + ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="Manual seed to ensure reproducibility for quantization.", + ) + args = parser.parse_args() + module = importlib.import_module(f"qai_hub_models.models.{args.model}") + + # Load dataset + dataset = BSD300Dataset(scaling_factor=module.model.SCALING_FACTOR) + torch.manual_seed(args.seed) + # Pass it to the dataloader + dataloader = DataLoader( + dataset, batch_size=args.batch_size, shuffle=True, drop_last=False + ) + + # Load model and confirm it's a quantizable type. + model = module.Model.from_pretrained(aimet_encodings=None) + assert isinstance(model, AIMETQuantizableMixin) + + # Quantize activations + accuracy = model.quantize(dataloader, args.num_iter, model.get_evaluator()) + print(f"PSNR: {accuracy}") + + # Export encodings + model.quant_sim.save_encodings_to_json(Path() / "build", module.MODEL_ID) diff --git a/scripts/examples/test_numerics_mobilenet_v2_quantized.py b/scripts/examples/test_numerics_mobilenet_v2_quantized.py new file mode 100644 index 00000000..55ba699d --- /dev/null +++ b/scripts/examples/test_numerics_mobilenet_v2_quantized.py @@ -0,0 +1,177 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +""" +Run it with pytest --on-device +""" +from typing import Tuple + +import numpy as np +import pytest +import qai_hub as hub +import torch +from torch.utils.data import DataLoader, random_split +from tqdm import tqdm + +from qai_hub_models.datasets.imagenette import ImagenetteDataset +from qai_hub_models.models.mobilenet_v2_quantized.model import MobileNetV2Quantizable +from qai_hub_models.utils.inference import compile_zoo_model_to_hub +from qai_hub_models.utils.measurement import get_model_size_mb + + +def on_device(func): + # Skip tests if '--on-device' is not in the command line arguments + return pytest.mark.skipif( + "'--on-device' not in sys.argv", reason="needs --on-device option to run" + )(func) + + +@pytest.fixture(scope="module") +def data_loaders(): + dataset = ImagenetteDataset() + calib_len = int(0.1 * len(dataset)) + test_len = len(dataset) - calib_len + # Deterministic random split + calib_dataset, test_dataset = random_split( + dataset, [calib_len, test_len], generator=torch.Generator().manual_seed(42) + ) + calib_loader = DataLoader(calib_dataset, batch_size=32, shuffle=False) + test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False) + return calib_loader, test_loader + + +@pytest.fixture(scope="module") +def test_data(data_loaders) -> Tuple[torch.Tensor, torch.Tensor, hub.Dataset]: + calib_loader, test_loader = data_loaders + num_test = 1000 + + img_batches, label_batches = [], [] + total_samples = 0 + for images, labels in tqdm(test_loader): + img_batches.append(images) + label_batches.append(labels) + total_samples += images.size(0) + if total_samples >= 1000: + break + img_test = torch.cat(img_batches, dim=0)[:num_test] + label_test = torch.cat(label_batches, dim=0)[:num_test] + input_name = list( + MobileNetV2Quantizable.from_pretrained(aimet_encodings=None) + .get_input_spec() + .keys() + )[0] + data_entries = {input_name: np.split(img_test.numpy(), img_test.shape[0])} + hub_ds = hub.upload_dataset(data_entries) + return img_test, label_test, hub_ds + + +def 
test_dataloader_is_deterministic(data_loaders): + """Test that the calibration-test split and the loading are deterministic""" + calib_loader, test_loader = data_loaders + img, labels = next(iter(calib_loader)) + expected_calib_labels = [701, 569, 482, 571, 482] + assert labels[:5].tolist() == expected_calib_labels + + expected_test_labels = [569, 0, 217, 571, 701] + img, labels = next(iter(test_loader)) + assert labels[:5].tolist() == expected_test_labels + + +@pytest.fixture(scope="module") +def quantized_model(data_loaders, test_data): + """ + Create encoding from calibration data and returned quantized model with + validated off-target accuracy computed on QuantSim + """ + img_test, label_test, hub_dataset = test_data + calib_loader, test_loader = data_loaders + model = MobileNetV2Quantizable.from_pretrained(aimet_encodings=None) + + # Calibration in quantization + num_calib_batches = 3 + calib_accuracy = model.quantize( + calib_loader, num_calib_batches, evaluator=model.get_evaluator() + ) + np.testing.assert_allclose(0.76, calib_accuracy, atol=0.01) + + # QuantSim evaluation on eval set + evaluator = model.get_evaluator() + + batch_size = 32 + for i in tqdm(list(range(0, img_test.size(0), batch_size)), desc="QuantSim eval"): + img_batch = img_test[i : i + batch_size] + label_batch = label_test[i : i + batch_size] + + sim_out = model(img_batch).detach() + evaluator.add_batch(sim_out, label_batch) + + sim_acc = evaluator.get_accuracy_score() + print(f"{sim_acc=}") + np.testing.assert_allclose(0.78125, sim_acc, atol=0.01) + return model + + +@on_device +@pytest.mark.parametrize( + "target_runtime,hub_needs_calib_data,expected_size_mb,expected_acc", + [ + ("onnx-tflite", False, 3.806, 0), + ("torch-tflite", False, 7.0891, 0.719), + ("onnx-qnn", False, 3.844, 0.76), + ("torch-qnn", True, 3.82, 0.7618), + ], +) +def test_make_encoding_w8a8_accuracy( + quantized_model, + data_loaders, + target_runtime, + hub_needs_calib_data, + expected_size_mb, + expected_acc, + test_data, +): + """ + 1. Export and compile quantized_model on Hub. + 2. Run inference on Hub on test. + + Note: We don't run profile job to get perf here but leave that to the score card. 
+ """ + model = quantized_model + + img_test, label_test, hub_dataset = test_data + calib_loader, test_loader = data_loaders + + # calibration data + calibration_data = None + if hub_needs_calib_data: + # AIMET export has missing encoding and needs calibration data + num_calib_batches = 3 + calib_imgs = [] + for b, (img_calib, labels) in enumerate(iter(calib_loader)): + if b >= num_calib_batches: + break + img_np = img_calib.numpy() + calib_imgs.extend(np.split(img_np, img_np.shape[0])) + calibration_data = {list(model.get_input_spec().keys())[0]: calib_imgs} + + # On-device inference + device = hub.Device("Samsung Galaxy S23") + hub_model = compile_zoo_model_to_hub( + model=model, + device=device, + target_runtime=target_runtime, + calibration_data=calibration_data, + ) + + # Make sure model is quantized + tgt_model_size_mb = get_model_size_mb(hub_model.model) + np.testing.assert_allclose(expected_size_mb, tgt_model_size_mb, rtol=0.1) + + # Check on-device accuracy + hub_out = hub_model(hub_dataset) + evaluator = model.get_evaluator() + evaluator.add_batch(hub_out, label_test) + hub_acc = evaluator.get_accuracy_score() + print(f"{target_runtime=}, {hub_acc=}") + np.testing.assert_allclose(expected_acc, hub_acc, atol=0.01) diff --git a/scripts/examples/yolov6_evaluation.py b/scripts/examples/yolov6_evaluation.py new file mode 100644 index 00000000..6ecb2fe7 --- /dev/null +++ b/scripts/examples/yolov6_evaluation.py @@ -0,0 +1,39 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +""" +This is a sample script showing how to take a AIMET model zoo model without +pre-computed activations, and compute those activations using QAIHM. +This script assumes the model is added to QAIHM, but is missing quantization parameters. +Packages to install: pycocotools, object-detection-metrics==0.4.post1, shapely +""" + +from torch.utils.data import DataLoader + +from qai_hub_models.datasets.coco import CocoDataset, collate_fn +from qai_hub_models.evaluators.detection_evaluator import DetectionEvaluator +from qai_hub_models.models.yolov6.model import YoloV6 + +if __name__ == "__main__": + # Load dataset. + dataset = CocoDataset() + # Pass it to data loader + dataloader = DataLoader( + dataset, batch_size=1, shuffle=True, collate_fn=collate_fn, drop_last=False + ) + + # Load model + model = YoloV6.from_pretrained() + + # Instantiate the evaluator + evaluator = DetectionEvaluator( + image_height=640, + image_width=640, + nms_score_threshold=0.3, + nms_iou_threshold=0.5, + ) + + # Pass batches of data through the model. + evaluator.add_from_dataset(model, dataloader, eval_iterations=1000) + print(f"mAP: {evaluator.mAP:.1%}") diff --git a/scripts/quantize_ffnet.py b/scripts/quantize_ffnet.py new file mode 100644 index 00000000..e0786b03 --- /dev/null +++ b/scripts/quantize_ffnet.py @@ -0,0 +1,81 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+import argparse
+from pathlib import Path
+
+import torch
+
+from qai_hub_models.models._shared.cityscapes_segmentation.app import (
+    _load_cityscapes_loader,
+)
+from qai_hub_models.models.ffnet_40s_quantized.model import FFNet40SQuantizable
+from qai_hub_models.models.ffnet_54s_quantized.model import FFNet54SQuantizable
+from qai_hub_models.models.ffnet_78s_quantized.model import FFNet78SQuantizable
+
+FFNET_VARIANTS = {
+    "ffnet_40s": FFNet40SQuantizable,
+    "ffnet_54s": FFNet54SQuantizable,
+    "ffnet_78s": FFNet78SQuantizable,
+}
+
+
+"""
+This is a sample script showing how to take an AIMET model zoo model without
+pre-computed activations, and compute those activations using QAIHM.
+
+This script assumes the model is added to QAIHM, but is missing quantization parameters.
+"""
+if __name__ == "__main__":
+    # Args
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--variant",
+        choices=FFNET_VARIANTS.keys(),
+        required=True,
+        help="FFNet variant",
+    )
+    parser.add_argument(
+        "--cityscapes-path",
+        required=True,
+        help="Local path to Cityscapes (where leftImg8bit_trainvaltest.zip and gtFine_trainvaltest.zip are unzipped). Download from https://www.cityscapes-dataset.com/downloads/",
+    )
+    parser.add_argument(
+        "--output-dir",
+        type=str,
+        default=None,
+        help="Directory where encodings should be stored. Defaults to ./build.",
+    )
+    parser.add_argument(
+        "--output-name",
+        type=str,
+        default=None,
+        help="Encodings filename. Defaults to _encodings.",
+    )
+    parser.add_argument(
+        "--num-iter", type=int, default=None, help="Number of dataset iterations to use"
+    )
+    parser.add_argument(
+        "--seed",
+        type=int,
+        default=42,
+        help="Manual seed to ensure reproducibility for quantization.",
+    )
+    args = parser.parse_args()
+
+    torch.manual_seed(args.seed)
+
+    # Load data loader
+    loader = _load_cityscapes_loader(args.cityscapes_path)
+
+    # Load model (with trained unquantized weights and without encodings)
+    FFNetQuantizable_cls = FFNET_VARIANTS[args.variant]
+    model = FFNetQuantizable_cls.from_pretrained(aimet_encodings=None)
+
+    # Quantize weights and activations
+    model.quantize(loader, num_samples=args.num_iter, requantize_model_weights=True)
+
+    output_path = args.output_dir or str(Path() / "build")
+    output_name = args.output_name or f"{args.variant}_quantized_encodings"
+    model.quant_sim.save_encodings_to_json(output_path, output_name)
diff --git a/scripts/tasks/changes.py b/scripts/tasks/changes.py
new file mode 100644
index 00000000..dbbf2fcb
--- /dev/null
+++ b/scripts/tasks/changes.py
@@ -0,0 +1,222 @@
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+import os
+from typing import Iterable, Set
+
+from .constants import (
+    PY_PACKAGE_MODELS_ROOT,
+    PY_PACKAGE_RELATIVE_MODELS_ROOT,
+    PY_PACKAGE_RELATIVE_SRC_ROOT,
+    REPO_ROOT,
+)
+from .github import on_github
+from .util import new_cd, run, run_and_get_output
+
+
+def get_python_import_expression(filepath: str) -> str:
+    """
+    Given a filepath, return the expression used to import the file
+    in other modules.
+ + For example, qiasm_model_zoo/models/trocr/model.py -> + qiasm_model_zoo.models.trocr.model + """ + + rel_path = os.path.relpath(filepath, PY_PACKAGE_RELATIVE_SRC_ROOT) + init_suffix = "/__init__.py" + if rel_path.endswith(init_suffix): + rel_path = rel_path[: -len(init_suffix)] + else: + rel_path = rel_path[: -len(".py")] + return rel_path.replace("/", ".") + + +def resolve_affected_models( + changed_files, + include_model: bool = True, + include_demo: bool = True, + include_export: bool = True, + include_tests: bool = True, + include_generated_tests: bool = True, +) -> Iterable[str]: + """ + Given a list of changed python files, performs a Depth-First Search (DFS) + over the qai_hub_models directory to figure out which directories were affected. + + The source nodes are the files that were directly changed, and there's + an edge from file A to file B if file B imports from file A. + + Note: If a zoo module is imported using a relative path, the dependency will not + be detected. Imports should be done using "from qai_stac_models." + in order to detect that current file depends on . + + changed_files: List of filepaths to files that changed. Paths are + relative to the root of this repository. + """ + seen: Set[str] = set() + while len(changed_files) > 0: + # Pop off stack + curr_file = changed_files.pop() + seen.add(curr_file) + + file_import = get_python_import_expression(curr_file) + grep_out = run_and_get_output( + f"grep -r --include='*.py' '{file_import}' {PY_PACKAGE_RELATIVE_SRC_ROOT}", + check=False, + ) + if grep_out.strip() == "": + continue + + # Determine which files depend on the current file, and thus + # also may be affected by the current change + # i.e. resolve the edges of the current node for DFS + dependent_files = set() + for grep_result in grep_out.strip().split("\n"): + dependent_file = grep_result.split(":")[0] + dependent_files.add(dependent_file) + + # Add new nodes to stack + for dependent_file in dependent_files: + if dependent_file not in seen: + changed_files.append(dependent_file) + + changed_models = set() + for f in seen: + if f.startswith(PY_PACKAGE_RELATIVE_MODELS_ROOT): + if not include_model and os.path.basename(f) == "model.py": + continue + if not include_export and os.path.basename(f) == "export.py": + continue + if not include_tests and os.path.basename(f) == "test.py": + continue + if ( + not include_generated_tests + and os.path.basename(f) == "test_generated.py" + ): + continue + if not include_demo and os.path.basename(f) == "demo.py": + continue + + model_name = f[len(PY_PACKAGE_RELATIVE_MODELS_ROOT) :].split("/")[1] + if os.path.exists( + os.path.join(PY_PACKAGE_MODELS_ROOT, model_name, "model.py") + ): + changed_models.add(model_name) + return changed_models + + +def get_changed_files_in_package() -> Iterable[str]: + """ + Returns the list of changed files in zoo based on git tracking. + """ + with new_cd(REPO_ROOT): + os.makedirs("build/model-zoo/", exist_ok=True) + changed_files_path = "build/changed-qaihm-files.txt" + if not on_github(): + run( + f"git diff $(git merge-base --fork-point origin/main) --name-only > {changed_files_path}" + ) + if os.path.exists(changed_files_path): + with open(changed_files_path, "r") as f: + return [ + file + for file in f.read().split("\n") + if file.startswith(PY_PACKAGE_RELATIVE_SRC_ROOT) + and file.endswith(".py") + ] + return [] + + +def get_models_to_test_export() -> Iterable[str]: + """ + The models for which to test export (i.e. compilation to .tflite). 
+ Current heuristic is to only do this for models where model.py or + export.py changed. + """ + return get_changed_models( + include_model=True, + include_demo=False, + include_export=True, + include_tests=False, + include_generated_tests=True, + ) + + +def get_models_with_export_file_changes() -> Iterable[str]: + """ + The models for which to test export (i.e. compilation to .tflite). + Current heuristic is to only do this for models where model.py or + export.py changed. + """ + return get_changed_models( + include_model=False, + include_demo=False, + include_export=True, + include_tests=False, + include_generated_tests=True, + ) + + +def get_models_with_changed_definitions() -> Iterable[str]: + """ + The models for which to run non-generated (demo / model) tests. + """ + return get_changed_models( + include_model=True, + include_demo=False, + include_export=False, + include_tests=False, + include_generated_tests=False, + ) + + +def get_models_to_run_general_tests() -> Iterable[str]: + """ + The models for which to run non-generated (demo / model) tests. + """ + return get_changed_models( + include_model=True, + include_demo=True, + include_export=False, + include_tests=True, + include_generated_tests=False, + ) + + +def get_changed_models( + include_model: bool = True, + include_demo: bool = True, + include_export: bool = True, + include_tests: bool = True, + include_generated_tests: bool = True, +) -> Iterable[str]: + """ + Resolve which models within zoo have changed to figure which ones need to be tested. + + First figures out which files have changed and then does a recursive search + through all files that import from changed files. Then filters the final list + to model directories to know which ones that need to be tested. + + Returns a list of model IDs (folder names) that have changed. + """ + return resolve_affected_models( + get_changed_files_in_package(), + include_model, + include_demo, + include_export, + include_tests, + include_generated_tests, + ) + + +def get_all_models() -> Iterable[str]: + """ + Resolve model IDs (folder names) of all models in QAIHM. + """ + model_names = set() + for model_name in os.listdir(PY_PACKAGE_MODELS_ROOT): + if os.path.exists(os.path.join(PY_PACKAGE_MODELS_ROOT, model_name, "model.py")): + model_names.add(model_name) + return model_names diff --git a/scripts/tasks/constants.py b/scripts/tasks/constants.py new file mode 100644 index 00000000..62ac36aa --- /dev/null +++ b/scripts/tasks/constants.py @@ -0,0 +1,34 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import os + +from .util import run_and_get_output + +# Env Variable +STORE_ROOT_ENV_VAR = "QAIHM_STORE_ROOT" + +# Repository +REPO_ROOT = run_and_get_output("git rev-parse --show-toplevel") +VENV_PATH = os.path.join(REPO_ROOT, "qaihm-dev") +BUILD_ROOT = os.path.join(REPO_ROOT, "build") + +# Dependent Wheels +QAI_HUB_LATEST_PATH = os.path.join(BUILD_ROOT, "qai_hub-latest-py3-none-any.whl") + +# Package paths relative to repository root +PY_PACKAGE_RELATIVE_SRC_ROOT = "qai_hub_models" +PY_PACKAGE_RELATIVE_MODELS_ROOT = os.path.join(PY_PACKAGE_RELATIVE_SRC_ROOT, "models") + +# Absolute package paths +PY_PACKAGE_INSTALL_ROOT = REPO_ROOT +PY_PACKAGE_SRC_ROOT = os.path.join( + PY_PACKAGE_INSTALL_ROOT, PY_PACKAGE_RELATIVE_SRC_ROOT +) +PY_PACKAGE_LOCAL_CACHE = os.environ.get( + STORE_ROOT_ENV_VAR, os.path.join(os.path.expanduser("~"), ".qaihm") +) +PY_PACKAGE_MODELS_ROOT = os.path.join( + PY_PACKAGE_INSTALL_ROOT, PY_PACKAGE_RELATIVE_MODELS_ROOT +) diff --git a/scripts/tasks/github.py b/scripts/tasks/github.py new file mode 100644 index 00000000..1ae9e038 --- /dev/null +++ b/scripts/tasks/github.py @@ -0,0 +1,29 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import os + +from .util import Colors, echo + + +def on_github(): + return "GITHUB_ACTION" in os.environ + + +def start_group(group_name): + if on_github(): + echo(f"::group::{group_name}") + else: + echo(f"{Colors.GREEN}{group_name}{Colors.OFF}") + + +def end_group(): + if on_github(): + echo("::endgroup::") + + +def set_github_output(key, value): + if on_github(): + with open(os.environ["GITHUB_OUTPUT"], "a") as fh: + print(f"{key}={value}", file=fh) diff --git a/scripts/tasks/plan.py b/scripts/tasks/plan.py new file mode 100644 index 00000000..fb707672 --- /dev/null +++ b/scripts/tasks/plan.py @@ -0,0 +1,171 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import datetime +import functools +import re +import time +from typing import Callable, Dict, List, Optional, Tuple + +from .task import Task +from .util import echo + +ALL_TASKS: List[str] = [] +PUBLIC_TASKS: List[str] = [] +TASK_DEPENDENCIES: Dict[str, List[str]] = {} +TASK_DESCRIPTIONS: Dict[str, str] = {} +SUMMARIZERS: List[str] = [] + + +def task(func): + ALL_TASKS.append(func.__name__) + return func + + +def public_task(description: str): + def add_task(func): + PUBLIC_TASKS.append(func.__name__) + TASK_DESCRIPTIONS[func.__name__] = description + task(func) + return func + + return add_task + + +def depends(deps: List[str]): + def add_dep(func): + TASK_DEPENDENCIES[func.__name__] = deps + return func + + return add_dep + + +def summarizer(func): + SUMMARIZERS.append(func.__name__) + return func + + +class Step: + """A named Task within a Plan.""" + + def __init__(self, step_id: str, task: Task): + self._step_id = step_id + self._task = task + + def __repr__(self) -> str: + return self._step_id + + @property + def step_id(self) -> str: + return self._step_id + + @property + def task(self) -> Task: + return self._task + + +class Plan: + """An ordered list of Tasks to execute.""" + + _steps: List[Step] + _skips: List[re.Pattern] + _plan_duration = Optional[datetime.timedelta] + _step_durations: List[Tuple[str, datetime.timedelta]] + + def __init__(self) -> None: + self._steps = [] + self._skips = [] + self._plan_duration = None + self._step_durations = [] + + def add_step(self, step_id: str, task: Task) -> str: + if self.count_step(step_id) > 10: + raise RuntimeError( + f"Refusing to add step '{step_id}' more than 10 times. Perhaps the planner is in an infinite loop?" 
+ ) + self._steps.append(Step(step_id, task)) + return step_id + + def count_step(self, step_id: str) -> int: + step_count = 0 + for s in self._steps: + if s.step_id == step_id: + step_count += 1 + return step_count + + def for_each(self, func: Callable[[str, Task], None]) -> None: + for s in self._steps: + func(s.step_id, s.task) + + def has_step(self, step_id: str) -> bool: + for s in self._steps: + if s.step_id == step_id: + return True + return False + + def is_skipped(self, step_id: str) -> bool: + return any([r.match(step_id) for r in self._skips]) + + def print(self) -> None: + for step in self._steps: + step_msg = step.step_id + if not step.task.does_work(): + step_msg += " (no-op)" + if self.is_skipped(step.step_id): + step_msg += " (skipped)" + print(step_msg) + + def print_report(self) -> None: + """Print a report on how long steps in the plan took.""" + + if len(self._step_durations) < 1: + return + + step_id_lens = [len(s) for s, d in self._step_durations] + max_step_id_len = functools.reduce(lambda a, b: a if a > b else b, step_id_lens) # type: ignore + print(f"{'Step':^{max_step_id_len}} {'Duration':^14}") + print(f"{'-':-^{max_step_id_len}} {'-':-^14}") + for step_id, duration in self._step_durations: + print(f"{step_id:<{max_step_id_len}} {str(duration):<14}") + if self._plan_duration: + print(f"{'-':-^{max_step_id_len}} {'-':-^14}") + print(f"{'Total':<{max_step_id_len}} {str(self._plan_duration):<14}") + + def run(self) -> None: + start_time = time.monotonic() + + def run_task(step_id: str, task: Task) -> None: + if self.is_skipped(step_id): + echo(f"Skipping {step_id}") + else: + step_start_time = time.monotonic() + + caught: Optional[Exception] = None + try: + task.run() + except Exception as ex: + caught = ex + step_end_time = time.monotonic() + if task.does_work(): + self._step_durations.append( + ( + step_id, + datetime.timedelta(seconds=step_end_time - step_start_time), + ) + ) + if caught is not None: + raise caught + + try: + self.for_each(run_task) + finally: + end_time = time.monotonic() + self._plan_duration = datetime.timedelta(seconds=end_time - start_time) + + def skip(self, pattern: str) -> None: + self._skips.append(re.compile(pattern)) + + @property + def steps(self) -> List[str]: + return [s.step_id for s in self._steps] diff --git a/scripts/tasks/release.py b/scripts/tasks/release.py new file mode 100644 index 00000000..00289045 --- /dev/null +++ b/scripts/tasks/release.py @@ -0,0 +1,260 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os +import pathlib +import shutil +from typing import Dict, Optional + +from .constants import BUILD_ROOT +from .task import CompositeTask +from .venv import ( + CreateVenvTask, + RunCommandsTask, + RunCommandsWithVenvTask, + SyncLocalQAIHMVenvTask, +) + +qaihm_path = pathlib.Path(__file__).parent.parent.parent / "qai_hub_models" +version_path = qaihm_path / "_version.py" +version_locals: Dict[str, str] = {} +exec(open(version_path).read(), version_locals) +__version__ = version_locals["__version__"] + +DEFAULT_RELEASE_DIRECTORY = "./build/release" +RELEASE_DIRECTORY_VARNAME = "QAIHM_RELEASE_DIR" +REMOTE_REPOSITORY_URL_VARNAME = "QAIHM_REMOTE_URL" +PYPI_VARNAME = "QAIHM_PYPI_URL" + + +def _get_release_dir(): + """Get the path to the release directory.""" + return os.environ.get(RELEASE_DIRECTORY_VARNAME, DEFAULT_RELEASE_DIRECTORY) + + +def _get_release_repository_dir(): + """Get the path to the repository root in the release directory.""" + return os.path.join(_get_release_dir(), "repository") + + +def _get_wheel_dir(): + """Get the path to the wheels folder in the release directory.""" + return os.path.join(_get_release_dir(), "wheel") + + +class ReleaseTask(CompositeTask): + """ + Create a public version of the repository. + """ + + def __init__( + self, + venv: Optional[str], + python_executable: Optional[str], + build_repository: bool = True, + push_repository: bool = True, + build_wheel: bool = True, + publish_wheel: bool = True, + ): + # Verify environment variables first + if push_repository and REMOTE_REPOSITORY_URL_VARNAME not in os.environ: + raise ValueError( + f"Specify a remote repository by setting env var '${REMOTE_REPOSITORY_URL_VARNAME}'" + ) + if publish_wheel and PYPI_VARNAME not in os.environ: + raise ValueError(f"Specify a pypi by setting env var '${PYPI_VARNAME}'") + + # Do the release + tasks = [] + if build_repository: + tasks.append(BuildPublicRepositoryTask(venv, python_executable)) + if build_wheel: + tasks.append(BuildWheelTask(venv, python_executable)) + if push_repository: + tasks.append(PushRepositoryTask()) + if publish_wheel: + tasks.append(PublishWheelTask(venv, python_executable)) + + super().__init__(f"Release QAIHM {__version__}", tasks) + + +class BuildPublicRepositoryTask(CompositeTask): + """ + Create a public version of the repository. + """ + + def __init__(self, venv: Optional[str], python_executable: Optional[str]): + tasks = [] + + if not venv: + # Create Venv + venv = os.path.join(BUILD_ROOT, "test", "release_venv") + tasks.append(CreateVenvTask(venv, python_executable)) + tasks.append(SyncLocalQAIHMVenvTask(venv, ["dev"], include_aimet=False)) + + # Setup output directories + release_dir = _get_release_dir() + repo_output_dir = _get_release_repository_dir() + if os.path.exists(repo_output_dir): + shutil.rmtree(repo_output_dir) + + # Build Public Repository + tasks.append( + RunCommandsWithVenvTask( + "Run Release Script", + venv=venv, + env=os.environ, + commands=[ + f"python qai_hub_models/scripts/build_release.py --output-dir {repo_output_dir}" + ], + ) + ) + + super().__init__(f"Build Public Repository in: {release_dir}", tasks) + + +class PushRepositoryTask(CompositeTask): + """ + Publishes the repository in the provided release directory. + If no directory is provided, assumes the release directory defined above. 
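+
+    The push target is read from the QAIHM_REMOTE_URL environment variable;
+    the release is committed to the remote's main branch and tagged v<version>.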
+ """ + + def __init__(self): + tasks = [] + + # Remote URL + remote_url = os.environ.get(REMOTE_REPOSITORY_URL_VARNAME, None) + if not remote_url: + raise ValueError( + f"Specify a remote by setting envrionment variable '${REMOTE_REPOSITORY_URL_VARNAME}'" + ) + + env = os.environ.copy() + env["QAIHM_TAG"] = f"v{__version__}" + commands = [ + "git init", + ] + + # Git Credential Setup (Optional) + if "QAIHM_GIT_NAME" in env: + commands.append('git config --local user.name "${QAIHM_GIT_NAME}"') + if "QAIHM_REPO_GH_EMAIL" in env: + commands.append('git config --local user.email "${QAIHM_REPO_GH_EMAIL}"') + if "QAIHM_GIT_CRED_HELPER" in env: + commands.append( + 'git config --local credential.helper "!f() { sleep 1; ${QAIHM_GIT_CRED_HELPER}; }; f"' + ) + + commands += [ + # Fetch origin + f"git remote add origin {remote_url}", + "git fetch origin", + # Checkout and commit main + "git reset origin/main", # this checks out main "symbolically" (no on-disk source tree changes) + "git add -u", # Remove any deleted files from the index + "git add -f *", + """git commit -m "$QAIHM_TAG + +Signed-off-by: $QAIHM_REPO_GH_SIGN_OFF_NAME <$QAIHM_REPO_GH_EMAIL>" """, + # Verify Tag does not exist + "if [ $(git tag -l '$QAIHM_TAG') ];" + "then echo 'Tag $QAIHM_TAG already exists. Aborting release.';" + "exit 1;" + "fi", + # Push to remote + "git push -u origin HEAD:main", + "git tag $QAIHM_TAG", + "git push --tags", + ] + + # Push Release + tasks.append( + RunCommandsTask( + "Push Release", + env=env, + cwd=_get_release_repository_dir(), + commands=commands, + ) + ) + + super().__init__(f"Push Release to {remote_url}", tasks) + + +class BuildWheelTask(CompositeTask): + """ + Creates a wheel from the provided directory. + If no directory is provided, assumes the release directory defined above. + """ + + def __init__(self, venv: Optional[str], python_executable: Optional[str]): + tasks = [] + + if not venv: + # Create Venv + venv = os.path.join(BUILD_ROOT, "test", "release_venv") + tasks.append(CreateVenvTask(venv, python_executable)) + tasks.append(SyncLocalQAIHMVenvTask(venv, ["dev"], include_aimet=False)) + + # Build Wheel + repo_dir = _get_release_repository_dir() + wheel_dir = _get_wheel_dir() + relative_wheel_dir = os.path.relpath(wheel_dir, repo_dir) + + if os.path.exists(wheel_dir): + shutil.rmtree(wheel_dir) + + tasks.append( + RunCommandsWithVenvTask( + "Build Wheel", + venv=venv, + env=os.environ, + commands=[ + f"cd {repo_dir} && " + f"python setup.py " + f"build --build-base {relative_wheel_dir} " + f"egg_info --egg-base {relative_wheel_dir} " + f"bdist_wheel --dist-dir {relative_wheel_dir}", + ], + ) + ) + + super().__init__(f"Build Wheel to: {wheel_dir}", tasks) + + +class PublishWheelTask(CompositeTask): + """ + Releases a wheel from the provided directory. + If no directory is provided, assumes the release directory defined above. 
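+
+    The built wheel is uploaded with twine to the package index given by the
+    QAIHM_PYPI_URL environment variable.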
+ """ + + def __init__(self, venv: Optional[str], python_executable: Optional[str]): + tasks = [] + + if not venv: + # Create Venv + venv = os.path.join(BUILD_ROOT, "test", "release_venv") + tasks.append(CreateVenvTask(venv, python_executable)) + tasks.append(SyncLocalQAIHMVenvTask(venv, ["dev"], include_aimet=False)) + + pypi = os.environ.get(PYPI_VARNAME, None) + if not pypi: + raise ValueError( + f"Set desired pypi via environment variable '${PYPI_VARNAME}'" + ) + + tasks.append( + RunCommandsWithVenvTask( + "Build Wheel", + venv=venv, + env=os.environ, + commands=[ + "pip install twine", + f"twine upload --repository-url {pypi} {os.path.join(_get_wheel_dir(), '*.whl')}", + ], + ) + ) + + super().__init__(f"Releasing Wheels in {pypi}", tasks) diff --git a/scripts/tasks/task.py b/scripts/tasks/task.py new file mode 100644 index 00000000..ae4cc754 --- /dev/null +++ b/scripts/tasks/task.py @@ -0,0 +1,261 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import os +import subprocess +from abc import ABC, abstractmethod +from pathlib import Path +from typing import Callable, Dict, List, Optional, Union + +from .github import end_group, start_group +from .util import BASH_EXECUTABLE, default_parallelism, echo, have_root + +REPO_ROOT = Path(__file__).parent.parent.parent +TEST_RESULTS_DIR = os.path.join(REPO_ROOT, "build", "test-results") +COVERAGE_DIR = os.path.join(REPO_ROOT, "build", "test-coverage") + + +class Task(ABC): + def __init__(self, group_name: Optional[str]) -> None: + self.group_name = group_name + + @abstractmethod + def does_work(self) -> bool: + """ + Return True if this task actually does something (e.g., runs commands). + """ + + @abstractmethod + def run_task(self) -> None: + """ + Entry point for implementations: perform the task's action. + """ + + def run(self) -> None: + """ + Entry point for callers: perform any startup/teardown tasks and call run_task. + """ + if self.group_name: + start_group(self.group_name) + self.run_task() + if self.group_name: + end_group() + + +class FailTask(Task): + """A Task that unconditionally fails.""" + + def __init__(self, message: str) -> None: + super().__init__(group_name=None) + self._message = message + + def does_work(self) -> bool: + return True + + def run_task(self) -> None: + raise RuntimeError(self._message) + + +class ListTasksTask(Task): + def __init__(self, tasks: List[str]) -> None: + super().__init__(group_name=None) + self.tasks = tasks + + def does_work(self) -> bool: + return False + + def run_task(self) -> None: + from . import plan + + for task_name in sorted(self.tasks): + print(task_name) + description = plan.TASK_DESCRIPTIONS.get(task_name, None) + if description: + print(f" {description}") + + +class NoOpTask(Task): + """A Task that does nothing.""" + + def __init__(self, group_name: Optional[str] = None) -> None: + super().__init__(group_name=group_name) + + def does_work(self) -> bool: + return False + + def run_task(self) -> None: + pass + + +class RunCommandsTask(Task): + """ + A Task that runs a list of commands using the shell. 
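+
+    Each command runs through bash with check=True, so a non-zero exit status
+    raises subprocess.CalledProcessError and aborts the enclosing plan.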
+ """ + + def __init__( + self, + group_name: Optional[str], + commands: Union[List[str], str], + as_root: bool = False, + env: Optional[Dict[str, str]] = None, + cwd: Optional[str] = None, + ) -> None: + super().__init__(group_name) + if isinstance(commands, str): + self.commands = [commands] + else: + self.commands = commands + + if as_root and not have_root(): + self.commands = [f"sudo {c}" for c in commands] + + self.cwd = cwd + self.env = env + + def does_work(self) -> bool: + return True + + def run_task(self) -> None: + for command in self.commands: + self._run_command(command) + + def _run_command(self, command: str) -> None: + echo(f"bnt $ {command}") + subprocess.run( + command, + shell=True, + check=True, + cwd=self.cwd, + env=self.env, + executable=BASH_EXECUTABLE, + ) + + +class RunCommandsWithVenvTask(RunCommandsTask): + """ + A Task that runs a list of commands using the shell with a specific Python + virtual environment enabled. + """ + + def __init__( + self, + group_name: Optional[str], + venv: Optional[str], + commands: Union[List[str], str], + env: Optional[Dict[str, str]] = None, + ) -> None: + super().__init__(group_name, commands, env=env) + self.venv = venv + + def run_task(self) -> None: + for command in self.commands: + if self.venv is not None: + venv_command = f"source {self.venv}/bin/activate && {command}" + echo(f"bnt $ {venv_command}") + subprocess.run( + venv_command, + shell=True, + check=True, + executable=BASH_EXECUTABLE, + env=self.env, + ) + else: + self._run_command(command) + + +class PyTestTask(RunCommandsWithVenvTask): + """A task to run pytest""" + + def __init__( + self, + group_name: Optional[str], + venv: Optional[str], + files_or_dirs: str, + report_name: str, + ignore: Optional[Union[str, List[str]]] = None, + omit: Optional[Union[str, List[str]]] = None, + parallel: Optional[Union[bool, int]] = None, + extra_args: Optional[str] = None, + env: Optional[Dict[str, str]] = None, + skip_coverage: bool = False, + ) -> None: + pytest_options = f"--name={report_name}" + + if omit is not None: + pytest_options += f" --omit={omit}" + + if ignore: + if isinstance(ignore, str): + ignore = [ignore] + ignores = [f"--ignore={i}" for i in ignore] + pytest_options += f" {' '.join(ignores)}" + + if parallel: + if isinstance(parallel, bool): + parallel = default_parallelism() + pytest_options += f" -n {parallel}" + # Don't run tests that don't support parallelism + pytest_options += ' -m "not serial"' + + pytest_options += " -ra -vvv" + + if extra_args: + pytest_options += f" {extra_args}" + + if skip_coverage: + pytest_options += " --no-cov" + + pytest_options += f" {files_or_dirs}" + + command = f"{REPO_ROOT}/scripts/util/pytest_with_coverage.sh {pytest_options} " + + super().__init__(group_name, venv, command, env) + + +class CompositeTask(Task): + """ + A Task composed of a list of other Tasks. + """ + + def __init__(self, group_name: Optional[str], tasks: List[Task]) -> None: + super().__init__(group_name) + self.tasks = tasks + + def does_work(self) -> bool: + return any([t.does_work() for t in self.tasks]) + + def run_task(self) -> None: + for task in self.tasks: + task.run() + + +class ConditionalTask(Task): + """ + A Task that runs one of two alternatives, depending on the result of + a predicate function call. 
+ """ + + def __init__( + self, + group_name: Optional[str], + condition: Callable[[], bool], + true_task: Task, + false_task: Task, + ) -> None: + super().__init__(group_name) + self.condition = condition + self.true_task = true_task + self.false_task = false_task + + def does_work(self) -> bool: + if self.condition(): + return self.true_task.does_work() + else: + return self.false_task.does_work() + + def run_task(self) -> None: + if self.condition(): + self.true_task.run() + else: + self.false_task.run() diff --git a/scripts/tasks/test.py b/scripts/tasks/test.py new file mode 100644 index 00000000..84246a4c --- /dev/null +++ b/scripts/tasks/test.py @@ -0,0 +1,253 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os +from tempfile import TemporaryDirectory +from typing import Iterable, Optional + +from .constants import ( + BUILD_ROOT, + PY_PACKAGE_MODELS_ROOT, + PY_PACKAGE_SRC_ROOT, + STORE_ROOT_ENV_VAR, +) +from .task import CompositeTask, PyTestTask, RunCommandsTask +from .util import can_support_aimet, model_needs_aimet +from .venv import ( + CreateVenvTask, + SyncLocalQAIHMVenvTask, + SyncModelRequirementsVenvTask, + SyncModelVenvTask, +) + + +class PyTestUtilsTask(PyTestTask): + """ + Pytest utils. + """ + + def __init__(self, venv: Optional[str]): + super().__init__( + "Test Utils", + venv=venv, + report_name="utils-tests", + files_or_dirs=f"{PY_PACKAGE_SRC_ROOT}/test/test_utils", + parallel=True, + ) + + +class PyTestScriptsTask(PyTestTask): + """ + Pytest scripts. + """ + + def __init__(self, venv: Optional[str]): + super().__init__( + group_name="Test Scripts", + venv=venv, + report_name="scripts-tests", + files_or_dirs=f"{PY_PACKAGE_SRC_ROOT}/scripts", + parallel=True, + ) + + +class PyTestE2eHubTask(CompositeTask): + """ + Runs e2e tests on Hub that's not specific to any model. + """ + + def __init__(self, venv: Optional[str]): + # Create temporary directory for storing cloned & downloaded test artifacts. + with TemporaryDirectory() as tmpdir: + env = os.environ.copy() + env[STORE_ROOT_ENV_VAR] = tmpdir + + # Standard Test Suite + tasks = [ + PyTestTask( + group_name="E2e on Hub", + venv=venv, + report_name="e2e-on-hub", + files_or_dirs=f"{PY_PACKAGE_SRC_ROOT}/test/e2e/", + parallel=False, + env=env, + ) + ] + super().__init__("E2e on Hub Tests", tasks) + + +class PyTestModelTask(CompositeTask): + """ + Run all tests for a single model. + """ + + def __init__( + self, + model_name: str, + python_executable: str, + test_export: bool, + venv: str + | None = None, # If None, creates a fresh venv for each model instead of using 1 venv for all models. + use_shared_cache=False, # If True, uses a shared cache rather than the global QAIHM cache. 
+ export_func: str = "compile", + skip_standard_unit_test: bool = False, + ): + tasks = [] + + if model_needs_aimet(model_name) and not can_support_aimet(): + tasks.append( + RunCommandsTask( + f"Skip Model {model_name}", + f'echo "Skipping Tests For Model {model_name} -- AIMET is required, but AIMET is not supported on this platform."', + ) + ) + else: + # Create test environment + if not venv: + model_venv = os.path.join(BUILD_ROOT, "test", "model_envs", model_name) + tasks.append(CreateVenvTask(model_venv, python_executable)) + # Creates a new environment from scratch + tasks.append( + SyncModelVenvTask(model_name, model_venv, include_dev_deps=True) + ) + else: + model_venv = venv + # Only install requirements.txt into existing venv + tasks.append( + SyncModelRequirementsVenvTask( + model_name, model_venv, pip_force_install=False + ) + ) + + # Create temporary directory for storing cloned & downloaded test artifacts. + with TemporaryDirectory() as tmpdir: + env = os.environ.copy() + if not use_shared_cache: + env[STORE_ROOT_ENV_VAR] = tmpdir + + # Standard Test Suite + model_dir = os.path.join(PY_PACKAGE_MODELS_ROOT, model_name) + model_test_without_export = os.path.join(model_dir, "test.py") + if ( + os.path.exists(model_test_without_export) + and not skip_standard_unit_test + ): + tasks.append( + PyTestTask( + group_name=f"Model: {model_name}", + venv=model_venv, + report_name=f"model-{model_name}-tests", + files_or_dirs=model_test_without_export, + parallel=False, + extra_args="-s", + env=env, + ) + ) + + # Export Test Suite + if test_export and os.path.isfile( + os.path.join(model_dir, "test_generated.py") + ): + tasks.append( + PyTestTask( + group_name=f"Model Export: ({model_name})", + venv=model_venv, + report_name=f"model-export-{model_name}-tests", + files_or_dirs=model_dir, + parallel=False, + extra_args=f"-s -m {export_func}", + env=env, + ) + ) + + if not venv: + tasks.append( + RunCommandsTask( + f"Remove virtual environment at {model_venv}", + f"rm -rf {model_venv}", + ) + ) + + super().__init__(f"Model Tests: {model_name}", [task for task in tasks]) + + +class PyTestModelsTask(CompositeTask): + """ + Run tests for the provided set of models. + """ + + def __init__( + self, + python_executable: str, + models_for_testing: Iterable[str], + models_to_test_export: Iterable[str], + base_test_venv: str | None = None, # Env with QAIHM installed + venv_for_each_model: bool = True, # Create a fresh venv for each model instead of using the base test venv instead. + use_shared_cache: bool = False, # Use the global QAIHM cache rather than a temporary one for tests. + export_func: str = "compile", + skip_standard_unit_test: bool = False, + ): + tasks = [] + + # Whether or not export tests will be run asynchronously + # (submit all jobs for all models at once, rather than one model at a time). + test_hub_async: bool = os.environ.get("TEST_HUB_ASYNC", 0) + + if test_hub_async and export_func == "compile": + # Clean previous (cached) compile test jobs. 
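+            # The redirection below truncates COMPILE_JOBS_FILE so the later
+            # "Verify Compile Jobs Success" step only sees jobs from this run.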
+ tasks.append( + RunCommandsTask( + "Delete stored compile jobs from past test runs.", + f"> {os.environ['COMPILE_JOBS_FILE']}", + ) + ) + + has_venv = base_test_venv is not None + if not has_venv and (not venv_for_each_model or test_hub_async): + # Create Venv + base_test_venv = os.path.join(BUILD_ROOT, "test", "base_venv") + tasks.append(CreateVenvTask(base_test_venv, python_executable)) + tasks.append( + SyncLocalQAIHMVenvTask(base_test_venv, ["dev"], include_aimet=False) + ) + + print(f"Tests to be run for directories: {models_for_testing}") + for model_name in models_for_testing: + # Run standard test suite for this model. + tasks.append( + PyTestModelTask( + model_name, + python_executable, + model_name in models_to_test_export, + venv=None if venv_for_each_model else base_test_venv, + use_shared_cache=use_shared_cache, + export_func=export_func, + skip_standard_unit_test=skip_standard_unit_test, + ) + ) + + if test_hub_async and export_func == "compile": + # Wait for compile test jobs to finish; verify success + tasks.append( + PyTestTask( + group_name="Verify Compile Jobs Success", + venv=base_test_venv, + report_name="compile-jobs-success", + files_or_dirs=os.path.join( + PY_PACKAGE_SRC_ROOT, "test", "test_async_compile_jobs.py" + ), + parallel=False, + extra_args="-s", + ) + ) + + if not has_venv: + # Cleanup venv + tasks.append( + RunCommandsTask(base_test_venv, f"rm -rf {base_test_venv}") + ) + + super().__init__("All Per-Model Tests", [task for task in tasks]) diff --git a/scripts/tasks/util.py b/scripts/tasks/util.py new file mode 100644 index 00000000..5f6bc438 --- /dev/null +++ b/scripts/tasks/util.py @@ -0,0 +1,131 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import contextlib +import os +import platform +import subprocess +import sys + + +class Colors: + GREEN = "\033[0;32m" + RED = "\033[0;31m" + YELLOW = "\033[0;33m" + OFF = "\033[0m" + + +@contextlib.contextmanager +def new_cd(x): + d = os.getcwd() + + # This could raise an exception, but it's probably + # best to let it propagate and let the caller + # deal with it, since they requested x + os.chdir(x) + + try: + yield + + finally: + # This could also raise an exception, but you *really* + # aren't equipped to figure out what went wrong if the + # old working directory can't be restored. + os.chdir(d) + + +def can_support_aimet(platform: str = sys.platform) -> bool: + return ( + platform == "linux" or platform == "linux2" + ) and sys.version_info.minor == 8 # python 3.8 only + + +def model_needs_aimet(model_name: str) -> bool: + return "quantized" in model_name.lower() + + +def default_parallelism() -> int: + """A conservative number of processes across which to spread pytests desiring parallelism.""" + from .github import on_github # avoid circular import + + cpu_count = os.cpu_count() + if not cpu_count: + return 1 + + # In CI, saturate the machine + if on_github(): + return cpu_count + + # When running locally, leave a little CPU for other uses + return max(1, int(cpu_count - 2)) + + +# Convenience function for printing to stdout without buffering. 
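+# Any extra keyword arguments are forwarded to print().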
+def echo(value, **args): + print(value, flush=True, **args) + + +def have_root() -> bool: + return os.geteuid() == 0 + + +def on_linux(): + return platform.uname().system == "Linux" + + +def on_mac(): + return platform.uname().system == "Darwin" + + +def process_output(command): + return command.stdout.decode("utf-8").strip() + + +def run(command): + return subprocess.run(command, shell=True, check=True, executable=BASH_EXECUTABLE) + + +def run_and_get_output(command, check=True): + return process_output( + subprocess.run( + command, + stdout=subprocess.PIPE, + shell=True, + check=check, + executable=BASH_EXECUTABLE, + ) + ) + + +def run_with_venv(venv, command, env=None): + if venv is not None: + subprocess.run( + f"source {venv}/bin/activate && {command}", + shell=True, + check=True, + executable=BASH_EXECUTABLE, + env=env, + ) + else: + run(command) + + +def run_with_venv_and_get_output(venv, command): + if venv is not None: + return process_output( + subprocess.run( + f"source {venv}/bin/activate && {command}", + stdout=subprocess.PIPE, + shell=True, + check=True, + executable=BASH_EXECUTABLE, + ) + ) + else: + return run_and_get_output(command) + + +BASH_EXECUTABLE = process_output( + subprocess.run("which bash", stdout=subprocess.PIPE, shell=True, check=True) +) diff --git a/scripts/tasks/venv.py b/scripts/tasks/venv.py new file mode 100644 index 00000000..d3634f5d --- /dev/null +++ b/scripts/tasks/venv.py @@ -0,0 +1,164 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +from __future__ import annotations + +import os +import subprocess +from typing import Iterable + +from .constants import ( + PY_PACKAGE_INSTALL_ROOT, + PY_PACKAGE_MODELS_ROOT, + PY_PACKAGE_SRC_ROOT, + QAI_HUB_LATEST_PATH, + REPO_ROOT, +) +from .task import CompositeTask, RunCommandsTask, RunCommandsWithVenvTask +from .util import can_support_aimet, model_needs_aimet + + +class CreateVenvTask(RunCommandsTask): + def __init__(self, venv_path: str, python_executable: str) -> None: + super().__init__( + f"Creating virtual environment at {venv_path}", + f"source {REPO_ROOT}/scripts/util/env_create.sh --python={python_executable} --venv={venv_path} --no-sync", + ) + + +def is_package_installed(package_name: str, venv_path: str | None = None) -> bool: + if venv_path is not None: + command = f'. {venv_path}/bin/activate && python -c "import {package_name}"' + else: + command = f'python -c "import {package_name}"' + + try: + subprocess.check_call(command, shell=True) + return True + except subprocess.CalledProcessError: + return False + + +class SyncLocalQAIHMVenvTask(CompositeTask): + """Sync the provided environment with local QAIHM and the provided extras.""" + + def __init__( + self, + venv_path: str | None, + extras: Iterable[str] = [], + include_aimet: bool = can_support_aimet(), + ) -> None: + tasks = [] + + # Install AIMET first to avoid installing two versions of torch (one from AIMET, one from QAIHM). 
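+        # AIMET is only installed on Linux with Python 3.8 (see can_support_aimet);
+        # on other platforms the install step is replaced with a warning.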
+ if include_aimet: + if can_support_aimet(): + if is_package_installed("aimet_torch", venv_path): + tasks.append( + RunCommandsTask( + group_name="AIMET Installation Warning", + commands=[ + 'echo "WARNING: Skipping AIMET Install because it is already installed."' + ], + ) + ) + else: + tasks.append( + RunCommandsWithVenvTask( + group_name="Install AIMET", + venv=venv_path, + commands=[ + f'"{PY_PACKAGE_SRC_ROOT}/scripts/install_aimet_cpu.sh"' + ], + ) + ) + + else: + tasks.append( + RunCommandsTask( + group_name="AIMET Installation Warning", + commands=[ + 'echo "WARNING: Skipping AIMET Install because it is not supported on this platform."' + ], + ) + ) + + qai_hub_wheel_url = os.environ.get("QAI_HUB_WHEEL_URL", None) + if not is_package_installed("qai_hub", venv_path): + if qai_hub_wheel_url is None: + if os.path.exists(QAI_HUB_LATEST_PATH): + qai_hub_wheel_url = QAI_HUB_LATEST_PATH + + if qai_hub_wheel_url: + # Install local QAI Hub wheel if it exists, instead of pulling it from PyPi. + tasks.append( + RunCommandsWithVenvTask( + group_name="Install QAI Hub (Pre-Release)", + venv=venv_path, + commands=[f'pip install "{qai_hub_wheel_url}"'], + ) + ) + + extras_str = f"[{','.join(extras)}]" if extras else "" + tasks.append( + RunCommandsWithVenvTask( + group_name=f"Install QAIHM{extras_str}", + venv=venv_path, + commands=[ + f'pip install -e "{PY_PACKAGE_INSTALL_ROOT}{extras_str}" -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.13/index.html' + ], + ) + ) + + super().__init__( + f"Create Local QAIHM{extras_str} Virtual Environment at {venv_path}", + [task for task in tasks], + ) + + +class SyncModelVenvTask(SyncLocalQAIHMVenvTask): + """Sync the provided environment with local QAIHM and the provided extras needed for the model_name.""" + + def __init__( + self, + model_name, + venv_path, + include_dev_deps: bool = False, + only_model_requirements: bool = False, + ) -> None: + extras = [] + if include_dev_deps: + extras.append("dev") + if os.path.exists( + os.path.join(PY_PACKAGE_MODELS_ROOT, model_name, "requirements.txt") + ): + extras.append(model_name) + + super().__init__( + venv_path, + extras, + model_needs_aimet(model_name), + ) + + +class SyncModelRequirementsVenvTask(RunCommandsWithVenvTask): + """Sync the provided environment with requirements from model_name's requirements.txt. + Will not re-install QAI Hub Models. Intended for speeding up CI compared to building an entirely new env for each model.""" + + def __init__(self, model_name, venv_path, pip_force_install: bool = True) -> None: + requirements_txt = os.path.join( + PY_PACKAGE_MODELS_ROOT, model_name, "requirements.txt" + ) + if os.path.exists(requirements_txt): + commands = [ + f'pip install {"--force-reinstall" if pip_force_install else None} -r "{requirements_txt}" -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.13/index.html' + ] + else: + commands = [] + + super().__init__( + group_name=f"Install Model Requirements for {model_name}", + venv=venv_path, + commands=commands, + ) diff --git a/scripts/util/common.sh b/scripts/util/common.sh new file mode 100644 index 00000000..0c7ac1d2 --- /dev/null +++ b/scripts/util/common.sh @@ -0,0 +1,99 @@ +# Common utilities + +# shellcheck disable=SC2034 # various definitions appear unused in this included source. 
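+#
+# This file is sourced by the other scripts in scripts/util; it provides
+# colored logging helpers, bash strict-mode setup, and a sudo wrapper.
+#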
+ +REPO_ROOT=$(git rev-parse --show-toplevel) + +COLOR_GREEN='\033[0;32m' +COLOR_GREY='\033[0;37m' +COLOR_RED='\033[0;31m' +COLOR_RED_BOLD='\033[0;1;31m' +COLOR_RED_REVERSED_VIDEO='\033[0;7;31m' +COLOR_YELLOW='\033[0;33m' +COLOR_YELLOW_BOLD='\033[0;1;33m' +COLOR_OFF='\033[0m' + +FORMAT_BOLD='\033[0;1m' +FORMAT_UNDERLINED='\033[0;4m' +FORMAT_BLINKING='\033[0;5m' +FORMAT_REVERSE_VIDEO='\033[0;7m' + +# +# Emit a message to stderr. +# +function log_err() { + echo -e "${COLOR_RED_REVERSED_VIDEO}$*${COLOR_OFF}" 1>&2 +} + +# +# Emit a message to stderr. +# +function log_warn() { + echo -e "${COLOR_YELLOW_BOLD}$*${COLOR_OFF}" 1>&2 +} + +# +# Emit a message to stderr. +# +function log_info() { + echo -e "${FORMAT_BOLD}$*${COLOR_OFF}" 1>&2 +} + +# +# Emit a message to stderr. +# +function log_debug() { + echo -e "${COLOR_GREY}$*${COLOR_OFF}" 1>&2 +} + +# +# Emit a log message and exit with non-zero return. +# +function die() { + log_err "$*" + exit 1 +} + +# +# Run something as root, using sudo if necessary. +# +function run_as_root() +{ + # Don't use sudo if user is root already (e.g., in docker) + if [ "${EUID}" -eq 0 ]; then + log_debug "We're already root; running ${*} without sudo." + "${@}" + else + log_debug "We're ${EUID}; running ${*} via sudo." + sudo "${@}" + fi +} + +# +# Enable trace logging via set -x +# Args +# 1: [Default: $QAIHM_BUILD_XTRACE] If set to non-empty, enable tracing +# +# shellcheck disable=SC2120 +function set_xtrace() { + local enable="${1:-${QAIHM_BUILD_XTRACE:-}}" + if [ -n "${enable}" ]; then + set -x + fi +} + +# +# Enable bash strict mode and conditionally set -x. +# @see http://redsymbol.net/articles/unofficial-bash-strict-mode/ +# @see set_xtrace +# +function set_strict_mode() { + set -euo pipefail + set_xtrace +} + +function pretty_print_arr() { + arr=("$@") + + echo "[$(echo "${arr[@]}" | tr ' ' ',')]" +} diff --git a/scripts/util/env_create.sh b/scripts/util/env_create.sh new file mode 100755 index 00000000..b315026e --- /dev/null +++ b/scripts/util/env_create.sh @@ -0,0 +1,48 @@ +# shellcheck source=/dev/null # we are statically sourcing a script. +# This can be sourced and hence does not specify an interpreter. + +orig_flags=$- + +set -e + +# Path to the virtual environment, relative to the repository root. +ENV_PATH="qaihm-dev" + +SYNC=1 + +PYTHON="python3.8" + +# command flag options +# Parse command line configure flags ------------------------------------------ +while [ $# -gt 0 ] + do case $1 in + --venv=*) ENV_PATH=${1##--venv=} ;; + --no-sync) SYNC=0 ;; + --python=*) PYTHON=${1##--python=} ;; + *) echo "Bad opt $1." && exit 1;; + esac + shift +done + +if [ ! -d "$ENV_PATH" ]; then + mkdir -p "$(dirname "$ENV_PATH")" + + echo "Creating virtual env $ENV_PATH." + $PYTHON -m venv "$ENV_PATH" + + echo "Activating virtual env." + source "$ENV_PATH/bin/activate" +else + source "$ENV_PATH/bin/activate" + echo "Env created already. Skipping creation." +fi + +if [ $SYNC -eq 1 ]; then + source scripts/util/env_sync.sh --venv="$ENV_PATH" +fi + +# Unset -e so our shell doesn't close the next time something exits with +# non-zero status. +if [[ ! "${orig_flags}" =~ e ]]; then + set +e +fi diff --git a/scripts/util/env_sync.sh b/scripts/util/env_sync.sh new file mode 100644 index 00000000..4c6fb60d --- /dev/null +++ b/scripts/util/env_sync.sh @@ -0,0 +1,22 @@ +# This should be sourced and hence does not specify an interpreter. + +REPO_ROOT=$(git rev-parse --show-toplevel) + +. 
"${REPO_ROOT}/scripts/util/common.sh" + +set_strict_mode + +# Path to the virtual environment, relative to the repository root. +ENV_PATH="qaihm-dev" + +# command flag options +# Parse command line configure flags ------------------------------------------ +while [ $# -gt 0 ] + do case $1 in + --venv=*) ENV_PATH=${1##--venv=} ;; + *) echo "Bad opt $1." && exit 1;; + esac + shift +done + +python3 "${REPO_ROOT}/scripts/build_and_test.py" --venv="${ENV_PATH}" install_deps diff --git a/scripts/util/github.sh b/scripts/util/github.sh new file mode 100644 index 00000000..619c2baa --- /dev/null +++ b/scripts/util/github.sh @@ -0,0 +1,43 @@ +REPO_ROOT=$(git rev-parse --show-toplevel) + +. "${REPO_ROOT}/scripts/util/common.sh" + +GITHUB_ACTION=${GITHUB_ACTION:-} + +on_ci() { + if [ -n "${GITHUB_ACTION}" ]; then + echo "1" + fi +} + +start_group() { + group_name=$1 + + if [ -n "$GITHUB_ACTION" ]; then + echo "::group::$group_name" + else + echo -e "${COLOR_GREEN}$group_name${COLOR_OFF}" + fi +} + +end_group() { + if [ -n "$GITHUB_ACTION" ]; then + echo "::endgroup::" + fi +} + +set_github_output() { + if [ -n "$GITHUB_ACTION" ]; then + echo "$1=$2" >> "$GITHUB_OUTPUT" + fi +} + +warn() { + message=$1 + + if [ -n "$GITHUB_ACTION" ]; then + echo "::warning::$message" + else + echo -e "${COLOR_RED}$message${COLOR_OFF}" + fi +} diff --git a/scripts/util/make_coverage_config.py b/scripts/util/make_coverage_config.py new file mode 100644 index 00000000..1c789089 --- /dev/null +++ b/scripts/util/make_coverage_config.py @@ -0,0 +1,39 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import argparse +import configparser + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--base", type=str, help="Use this coveragerc as a base and add to it." + ) + parser.add_argument("--omit", type=str, help="Comma-separate omit directories") + parser.add_argument("--data_file", type=str, help="Output coverage data file") + parser.add_argument( + "-o", "--output", type=str, help="Save new coveragerc to this folder." + ) + + args = parser.parse_args() + + orig_coveragerc = args.base + new_coveragerc = args.output + omit = args.omit.split(",") + data_file = args.data_file + + config = configparser.ConfigParser() + config.read(orig_coveragerc) + cur_omit = config.get("run", "omit").split(",") + if data_file is not None: + config.set("run", "data_file", data_file) + if omit is not None: + config.set("run", "omit", ",".join(cur_omit + omit)) + with open(new_coveragerc, "w") as f: + config.write(f) + + +if __name__ == "__main__": + main() diff --git a/scripts/util/pytest_with_coverage.sh b/scripts/util/pytest_with_coverage.sh new file mode 100755 index 00000000..7863f986 --- /dev/null +++ b/scripts/util/pytest_with_coverage.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash + +REPO_ROOT=$(git rev-parse --show-toplevel) + +# Load helpers +. "${REPO_ROOT}/scripts/util/common.sh" +. "${REPO_ROOT}/scripts/util/github.sh" + +set_strict_mode + + +print_help() { + echo "pytest_with_coverage.sh --name=[...] --omit=[...] PYTEST_ARGS" + echo "" + echo "--name=[...] Test report name." + echo "--omit=[...] Comma-seprated directories." 
+} + +NAME="unnamed" +OMIT="" + +for i in "$@"; do + case $i in + --name=*) + NAME="${i#*=}" + shift + ;; + --omit=*) + OMIT="${i#*=}" + shift + ;; + -h|--help) + print_help + shift + exit 0 + ;; + *) + ;; + esac +done + + +COV_CONFIG="$(mktemp).coveragerc" +COVERAGE_DIR="${REPO_ROOT}/build/test-coverage" +RESULTS_DIR="${REPO_ROOT}/build/test-results" + +mkdir -p "$COVERAGE_DIR" "$RESULTS_DIR" + +DATA_FILE="${COVERAGE_DIR}/.coverage.${NAME}" +JUNIT_REPORT="${RESULTS_DIR}/${NAME}.xml" + +python "${REPO_ROOT}/scripts/util/make_coverage_config.py" \ + --base "${REPO_ROOT}/.coveragerc" \ + --data_file "${DATA_FILE}" \ + --omit "${OMIT}" \ + --output "${COV_CONFIG}" + +# Coverage can be turned off by passing `--no-cov` as part of $@ +pytest \ + -rxXs \ + -p no:warnings \ + --junitxml="${JUNIT_REPORT}" \ + --durations=20 \ + --durations-min=0.5 \ + --cov \ + --cov-report= \ + --cov-config="${COV_CONFIG}" \ + "$@" diff --git a/scripts/util/run_mypy.sh b/scripts/util/run_mypy.sh new file mode 100755 index 00000000..5c4d8f98 --- /dev/null +++ b/scripts/util/run_mypy.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# shellcheck source=/dev/null + +REPO_ROOT=$(git rev-parse --show-toplevel) + +. "${REPO_ROOT}/scripts/util/common.sh" + +set_strict_mode + + +cd "$(dirname "$0")/../.." + +venv="${VENV_PATH:-qaihm-dev}" +echo "Activating venv in ${venv}" +source "${venv}/bin/activate" + +paths=(qai_hub_models) +for path in "${paths[@]}"; do + pathToCheck="${path}" + echo "Running mypy on ${pathToCheck}" + mypy --warn-unused-configs --config-file="${REPO_ROOT}/mypy.ini" "${pathToCheck}" +done diff --git a/scripts/util/write_changed_files.py b/scripts/util/write_changed_files.py new file mode 100755 index 00000000..f4d2fafb --- /dev/null +++ b/scripts/util/write_changed_files.py @@ -0,0 +1,49 @@ +# --------------------------------------------------------------------- +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# --------------------------------------------------------------------- +import argparse +import os + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--files", + type=str, + help="Files that were changed in the commits of the PR.", + required=True, + ) + parser.add_argument( + "--path", type=str, help="Path for the file to be created.", required=True + ) + + args = parser.parse_args() + list2d_filenames = args.files + + # We get back a two-dimensional array, with a list of + # changed files for each commit that has been traced back. + # For this usecase, we need changed files in the commit so + # flattening and deduplicating it. + list2d_filenames = [ + "".join(unsanitized_filenames) + for unsanitized_filenames in list2d_filenames.split(",") + if unsanitized_filenames != "" + ] + flattened_filenames = [ + sanitized_filenames.replace("[", "").replace("]", "") + for sanitized_filenames in list2d_filenames + ] + flattened_filenames = list(set(flattened_filenames)) + filenames = [] + for filename in flattened_filenames: + _, ext = os.path.splitext(filename) + # Avoid running for yaml and md files. + if ext not in {".yaml", ".md"}: + filenames.append(filename) + + filenames = "\n".join(filenames) + + # Make the directory if not present. 
+    os.makedirs(os.path.dirname(args.path), exist_ok=True)
+    with open(args.path, mode="wt", encoding="utf-8") as file:
+        file.write(filenames)
diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..989a9ca0
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# ---------------------------------------------------------------------
+# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# ---------------------------------------------------------------------
+
+import glob
+import pathlib
+from typing import Dict, List
+
+from setuptools import find_packages, setup
+
+r_file = "requirements.txt"
+
+qaihm_path = pathlib.Path(__file__).parent / "qai_hub_models"
+qaihm_dir = qaihm_path / "models"
+requirements_path = qaihm_path / r_file
+
+version_path = qaihm_path / "_version.py"
+version_locals: Dict[str, str] = {}
+exec(open(version_path).read(), version_locals)
+
+
+# Non-Python Files to Add to the Wheel
+data_file_extensions = ["yaml", "txt", "json", "diff"]
+
+
+def get_data_files() -> List[str]:
+    data_files = []
+    for ext in data_file_extensions:
+        data_files.extend(
+            glob.glob(f"{str(qaihm_path.absolute())}/**/*.{ext}", recursive=True)
+        )
+    for i in range(0, len(data_files)):
+        data_files[i] = data_files[i].split("/qai_hub_models/")[1]
+    return data_files
+
+
+# Extras dictionary definition.
+extras_require = {
+    "dev": [
+        line.strip() for line in open(qaihm_path / "requirements-dev.txt").readlines()
+    ]
+}
+
+# Create extra for every model that requires one.
+for model_dir in qaihm_dir.iterdir():
+    if not model_dir.is_file() and (model_dir / r_file).exists():
+        extra_with_dash = model_dir.name.replace("_", "-")
+        reqs = [line.strip() for line in open(model_dir / r_file).readlines()]
+        extras_require[model_dir.name] = reqs
+        extras_require[extra_with_dash] = reqs
+
+
+description = "Models optimized for export to run on device."
+long_description = (pathlib.Path(__file__).parent / "README.md").read_text()
+setup(
+    name="qai_hub_models",
+    version=version_locals["__version__"],
+    description=description,
+    long_description=long_description,
+    long_description_content_type="text/markdown",
+    author="Qualcomm® Technologies, Inc.",
+    url="https://github.com/quic/ai-hub-models",
+    packages=find_packages(),
+    python_requires=">=3.8, <3.11",
+    package_data={"qai_hub_models": get_data_files()},
+    include_package_data=True,
+    install_requires=[line.strip() for line in open(requirements_path).readlines()],
+    extras_require=extras_require,
+    license="BSD-3-Clause",
+)
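+
+# Note: every model folder that ships a requirements.txt gets a matching pip
+# extra (in both underscore and dash spellings), so one model's extra
+# dependencies can be installed with, e.g.:
+#   pip install "qai_hub_models[<model_name>]"
+# (<model_name> is an illustrative placeholder for a model directory name.)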