diff --git a/.github/workflows/quic-organization-repolinter.yml b/.github/workflows/quic-organization-repolinter.yml
deleted file mode 100644
index a9dd91c4..00000000
--- a/.github/workflows/quic-organization-repolinter.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-name: QuIC Organization Repolinter
-
-on:
- push:
- branches: [ "main" ]
- pull_request:
- branches: [ "main" ]
-
-jobs:
- repolinter:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout Repo
- uses: actions/checkout@v2
- - name: Verify repolinter config file is present
- id: check_files
- uses: andstor/file-existence-action@v1
- with:
- files: "repolint.json"
- - name: Run Repolinter with local repolint.json
- if: steps.check_files.outputs.files_exists == 'true'
- uses: todogroup/repolinter-action@v1
- with:
- config_file: "repolint.json"
- - name: Run Repolinter with default ruleset
- if: steps.check_files.outputs.files_exists == 'false'
- uses: todogroup/repolinter-action@v1
- with:
- config_url: "https://raw.githubusercontent.com/quic/.github/main/repolint.json"
-
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..318eb6a6
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,11 @@
+Copyright 2024 Qualcomm Innovation Center, Inc.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/LICENSE.pdf b/LICENSE.pdf
deleted file mode 100644
index 705dfc9c..00000000
Binary files a/LICENSE.pdf and /dev/null differ
diff --git a/README.md b/README.md
index 4b1699a7..04135cb6 100644
--- a/README.md
+++ b/README.md
@@ -1,30 +1,373 @@
-# Qualcomm AI Stack Models
-
-## Introduction
-
-
-
-
-Qualcomm AI Stack Models contains following parts.
-
-1. models-for-accuracy - Models verified for accuracy on target. Developers can directly use these models.
-2. models-for-solutions - Models verified for functionality - will be merged with models-accuracy in future.
-3. AI-Solutions - End-to-End AI solutions using (2) above, across Qualcomm Platforms.
-
-## Workflow for AI Solutions
-
-1. Use notebooks in 'models-for-solutions' to prepare models in compatible format
-2. Use source code in 'ai-solutions' and models from step-1 to create end-to-end solutions
-
-## Report Issues
-
-Please report issues by raising an _issue_ in the GitHub respository.
-
-## Team
-
-Qualcomm AI Stack Model is a project maintained by Qualcomm Innovation Center, Inc.
-
-## License
-
-Please see the [LICENSE](LICENSE.pdf) for more details.
+[![Qualcomm® AI Hub Models](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/quic-logo.jpg)](https://aihub.qualcomm.com)
+
+# Qualcomm® AI Hub Models
+
+The [Qualcomm® AI Hub Models](https://aihub.qualcomm.com/) are a collection of
+state-of-the-art machine learning models optimized for performance (latency,
+memory, etc.) and ready to deploy on Qualcomm® devices.
+
+* Explore models optimized for on-device deployment of vision, speech, text, and generative AI.
+* View open-source recipes to quantize, optimize, and deploy these models on-device.
+* Browse through [performance metrics](https://aihub.qualcomm.com/models) captured for these models on several devices.
+* Access the models through [Hugging Face](https://huggingface.co/qualcomm).
+* [Sign up](https://aihub.qualcomm.com/) to run these models on hosted Qualcomm® devices.
+
+Supported runtimes:
+* [TensorFlow Lite](https://www.tensorflow.org/lite)
+* [Qualcomm AI Engine Direct](https://www.qualcomm.com/developer/artificial-intelligence#overview)
+
+Supported operating systems:
+* Android 11+
+
+Supported compute units:
+* CPU, GPU, NPU (includes [Hexagon DSP](https://developer.qualcomm.com/software/hexagon-dsp-sdk/dsp-processor), [HTP](https://developer.qualcomm.com/hardware/qualcomm-innovators-development-kit/ai-resources-overview/ai-hardware-cores-accelerators))
+
+Supported precision:
+* Floating point: FP16
+* Integer: INT8 (8-bit weight and activation on select models), INT4 (4-bit weight, 16-bit activation on select models)
+
+Supported chipsets:
+* [Snapdragon 845](https://www.qualcomm.com/products/mobile/snapdragon/smartphones/snapdragon-8-series-mobile-platforms/snapdragon-845-mobile-platform), [Snapdragon 855/855+](https://www.qualcomm.com/products/mobile/snapdragon/smartphones/snapdragon-8-series-mobile-platforms/snapdragon-855-mobile-platform), [Snapdragon 865/865+](https://www.qualcomm.com/products/mobile/snapdragon/smartphones/snapdragon-8-series-mobile-platforms/snapdragon-865-plus-5g-mobile-platform), [Snapdragon 888/888+](https://www.qualcomm.com/products/mobile/snapdragon/smartphones/snapdragon-8-series-mobile-platforms/snapdragon-888-5g-mobile-platform)
+* [Snapdragon 8 Gen 1](https://www.qualcomm.com/products/mobile/snapdragon/smartphones/snapdragon-8-series-mobile-platforms/snapdragon-8-gen-1-mobile-platform), [Snapdragon 8 Gen 2](https://www.qualcomm.com/products/mobile/snapdragon/smartphones/snapdragon-8-series-mobile-platforms/snapdragon-8-gen-2-mobile-platform), [Snapdragon 8 Gen 3](https://www.qualcomm.com/products/mobile/snapdragon/smartphones/snapdragon-8-series-mobile-platforms/snapdragon-8-gen-3-mobile-platform)
+
+Select supported devices:
+* Samsung Galaxy S21 Series, Galaxy S22 Series, Galaxy S23 Series, Galaxy S24 Series
+* Xiaomi 12, 13
+* Google Pixel 3, 4, 5
+
+and many more.
+
+## Installation
+
+We currently support **Python >= 3.8 and <= 3.10**. We recommend using a Python
+virtual environment
+([miniconda](https://docs.anaconda.com/free/miniconda/miniconda-install/) or
+[virtualenv](https://virtualenv.pypa.io/en/latest/)).
+
+You can set up a virtualenv using:
+```shell
+python -m venv qai_hub_models_env && source qai_hub_models_env/bin/activate
+```
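+
+If you prefer conda, a rough equivalent is (the environment name is arbitrary, and Python 3.9 is just one version in the supported range):
+
+```shell
+conda create -n qai_hub_models_env python=3.9 && conda activate qai_hub_models_env
+```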
+
+Once the environment is set up, you can install the base package using:
+
+```shell
+pip install qai_hub_models
+```
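+
+To sanity-check the installation, a bare import should succeed without errors:
+
+```shell
+python -c "import qai_hub_models"
+```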
+
+Some models (e.g. [YOLOv7](https://github.com/WongKinYiu/yolov7)) require
+additional dependencies. You can install those dependencies automatically
+using:
+
+```shell
+pip install "qai_hub_models[yolov7]"
+```
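+
+The extra's name generally matches the model id used in the [model directory](#model-directory) below; for example (assuming the same pattern holds for other models):
+
+```shell
+pip install "qai_hub_models[whisper_asr]"
+```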
+
+## Getting Started
+
+Each model comes with the following set of CLI demos:
+* A locally runnable, PyTorch-based CLI demo to validate the model off device.
+* An on-device CLI demo that produces a model ready for on-device deployment and runs it on a hosted Qualcomm® device (requires [sign up](https://aihub.qualcomm.com/)).
+
+All the models produced by these demos are freely available on [Hugging
+Face](https://huggingface.co/qualcomm) or through our
+[website](https://aihub.qualcomm.com/models). See the individual model README
+files (e.g. [YOLOv7](qai_hub_models/models/yolov7/README.md)) for more
+details.
+
+### Local CLI Demo with PyTorch
+
+[All models](#model-directory) contain CLI demos that run the model in
+**PyTorch** locally with sample input. Demos are optimized for code clarity
+rather than latency, and run exclusively in PyTorch. Optimal model latency can
+be achieved with model export via [Qualcomm® AI
+Hub](https://www.aihub.qualcomm.com).
+
+```shell
+python -m qai_hub_models.models.yolov7.demo
+```
+
+For additional details on how to use the demo CLI, use the `--help` option:
+```shell
+python -m qai_hub_models.models.yolov7.demo --help
+```
+
+See the [model directory](#model-directory) below to explore all other models.
+
+---
+
+Note that most ML use cases require some pre- and post-processing that is not
+part of the model itself. A Python reference implementation of this is provided
+for each model in `app.py`. Apps load and pre-process the model input, run model
+inference, and post-process the model output before returning it to you.
+
+Here is an example of how the PyTorch CLI works for [YOLOv7](https://github.com/WongKinYiu/yolov7):
+
+```python
+from PIL import Image
+from qai_hub_models.models.yolov7 import Model as YOLOv7Model
+from qai_hub_models.models.yolov7 import App as YOLOv7App
+from qai_hub_models.utils.asset_loaders import load_image
+from qai_hub_models.models.yolov7.demo import IMAGE_ADDRESS
+
+# Load pre-trained model
+torch_model = YOLOv7Model.from_pretrained()
+
+# Load a simple PyTorch based application
+app = YOLOv7App(torch_model)
+image = load_image(IMAGE_ADDRESS, "yolov7")
+
+# Perform prediction on a sample image
+pred_image = app.predict(image)[0]
+Image.fromarray(pred_image).show()
+```
+
+### CLI demo to run on hosted Qualcomm® devices
+
+[Some models](#model-directory) contain CLI demos that run the model on a hosted
+Qualcomm® device using [Qualcomm® AI Hub](https://aihub.qualcomm.com).
+
+To run the model on a hosted device, [sign up for access to Qualcomm® AI
+Hub](https://aihub.qualcomm.com). Sign in to Qualcomm® AI Hub with your
+Qualcomm® ID. Once signed in, navigate to Account -> Settings -> API Token.
+
+With this API token, you can configure your client to run models on the
+cloud-hosted devices.
+
+```shell
+qai-hub configure --api_token API_TOKEN
+```
+Navigate to [docs](https://app.aihub.qualcomm.com/docs/) for more information.
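+
+As a quick check that the token is configured correctly, you can list a few of the hosted devices (a minimal sketch using the `qai_hub` client):
+
+```python
+import qai_hub as hub
+
+# Print a few of the cloud-hosted devices visible to this account.
+for device in hub.get_devices()[:5]:
+    print(device.name)
+```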
+
+The on-device CLI demo performs the following:
+* Exports the model for on-device execution.
+* Profiles the model on a cloud-hosted Qualcomm® device.
+* Runs the model on a cloud-hosted Qualcomm® device and compares accuracy between a local CPU-based PyTorch run and the on-device run.
+* Downloads models (and other required assets) that can be deployed on-device in an Android application.
+
+```shell
+python -m qai_hub_models.models.yolov7.export
+```
+
+Many models may have initialization parameters that allow loading custom
+weights and checkpoints. See `--help` for more details:
+
+```shell
+python -m qai_hub_models.models.yolov7.export --help
+```
+
+#### How does this export script work?
+
+As described above, the export script compiles, optimizes, and runs the model on
+a cloud-hosted Qualcomm® device. The demo uses [Qualcomm® AI Hub's Python
+APIs](https://app.aihub.qualcomm.com/docs/).
+
+
+Here is a simplified example of code that can be used to run the entire model
+on a cloud-hosted device:
+
+```python
+from typing import Tuple
+import torch
+import qai_hub as hub
+from qai_hub_models.models.yolov7 import Model as YOLOv7Model
+
+# Load YOLOv7 in PyTorch
+torch_model = YOLOv7Model.from_pretrained()
+torch_model.eval()
+
+# Trace the PyTorch model using one data point from the provided sample inputs.
+example_input = [torch.tensor(data[0]) for name, data in torch_model.sample_inputs().items()]
+pt_model = torch.jit.trace(torch_model, example_input)
+
+# Select a device
+device = hub.Device("Samsung Galaxy S23")
+
+# Compile model on a specific device
+compile_job = hub.submit_compile_job(
+ model=pt_model,
+ device=device,
+ input_specs=torch_model.get_input_spec(),
+)
+
+# Get the compiled target model to run on a cloud-hosted device
+target_model = compile_job.get_target_model()
+
+# Profile the previously compiled model
+profile_job = hub.submit_profile_job(
+ model=target_model,
+ device=device,
+)
+
+# Perform on-device inference on the cloud-hosted device
+input_data = torch_model.sample_inputs()
+inference_job = hub.submit_inference_job(
+ model=target_model,
+ device=device,
+ inputs=input_data,
+)
+
+# Returns the output as a dict of {output name: numpy array}
+on_device_output = inference_job.download_output_data()
+```
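+
+From here you can, for example, compare the on-device output against the local
+PyTorch run. Below is a rough sketch; output names and nesting vary by model,
+so the indexing may need adjusting:
+
+```python
+import numpy as np
+
+# Reference run of the traced model on the same sample inputs.
+local_outputs = pt_model(*example_input)
+
+# download_output_data() maps each output name to its data; take the first.
+name, data = next(iter(on_device_output.items()))
+device_array = np.asarray(data).squeeze()
+
+# Compare against the first local output (adjust indexing for your model).
+local_array = local_outputs[0].detach().numpy().squeeze()
+if local_array.shape == device_array.shape:
+    print(name, "max abs diff:", np.abs(local_array - device_array).max())
+```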
+
+---
+
+### Working with source code
+
+You can clone the repository using:
+
+```shell
+git clone https://github.com/quic/ai-hub-models.git
+cd ai-hub-models
+pip install -e .
+```
+
+To install the additional dependencies required by a specific model (e.g. YOLOv7), run the following from the repository root:
+```shell
+cd ai-hub-models
+pip install -e ".[yolov7]"
+```
+
+All models have accuracy and end-to-end tests when applicable. These tests are
+designed to be run locally and verify that the PyTorch code produces correct
+results. To run the tests for a model:
+```shell
+python -m pytest --pyargs qai_hub_models.models.yolov7.test
+```
+---
+
+For any issues, please contact us at ai-hub-support@qti.qualcomm.com.
+
+---
+
+## Model Directory
+
+### Computer Vision
+
+| Model | README | Torch App | Device Export | CLI Demo
+| -- | -- | -- | -- | --
+| | | | |
+| **Image Classification**
+| [ResNet50](https://aihub.qualcomm.com/models/resnet50) | [qai_hub_models.models.resnet50](qai_hub_models/models/resnet50/README.md) | ✔️ | ✔️ | ✔️
+| [SqueezeNet-1_1Quantized](https://aihub.qualcomm.com/models/squeezenet1_1_quantized) | [qai_hub_models.models.squeezenet1_1_quantized](qai_hub_models/models/squeezenet1_1_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [MNASNet05](https://aihub.qualcomm.com/models/mnasnet05) | [qai_hub_models.models.mnasnet05](qai_hub_models/models/mnasnet05/README.md) | ✔️ | ✔️ | ✔️
+| [Swin-Small](https://aihub.qualcomm.com/models/swin_small) | [qai_hub_models.models.swin_small](qai_hub_models/models/swin_small/README.md) | ✔️ | ✔️ | ✔️
+| [Swin-Base](https://aihub.qualcomm.com/models/swin_base) | [qai_hub_models.models.swin_base](qai_hub_models/models/swin_base/README.md) | ✔️ | ✔️ | ✔️
+| [MobileNet-v3-Small](https://aihub.qualcomm.com/models/mobilenet_v3_small) | [qai_hub_models.models.mobilenet_v3_small](qai_hub_models/models/mobilenet_v3_small/README.md) | ✔️ | ✔️ | ✔️
+| [RegNet](https://aihub.qualcomm.com/models/regnet) | [qai_hub_models.models.regnet](qai_hub_models/models/regnet/README.md) | ✔️ | ✔️ | ✔️
+| [GoogLeNetQuantized](https://aihub.qualcomm.com/models/googlenet_quantized) | [qai_hub_models.models.googlenet_quantized](qai_hub_models/models/googlenet_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [WideResNet50-Quantized](https://aihub.qualcomm.com/models/wideresnet50_quantized) | [qai_hub_models.models.wideresnet50_quantized](qai_hub_models/models/wideresnet50_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [MobileNet-v3-Large](https://aihub.qualcomm.com/models/mobilenet_v3_large) | [qai_hub_models.models.mobilenet_v3_large](qai_hub_models/models/mobilenet_v3_large/README.md) | ✔️ | ✔️ | ✔️
+| [MobileNet-v2-Quantized](https://aihub.qualcomm.com/models/mobilenet_v2_quantized) | [qai_hub_models.models.mobilenet_v2_quantized](qai_hub_models/models/mobilenet_v2_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [ResNeXt101Quantized](https://aihub.qualcomm.com/models/resnext101_quantized) | [qai_hub_models.models.resnext101_quantized](qai_hub_models/models/resnext101_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [DenseNet-121](https://aihub.qualcomm.com/models/densenet121) | [qai_hub_models.models.densenet121](qai_hub_models/models/densenet121/README.md) | ✔️ | ✔️ | ✔️
+| [ResNet101Quantized](https://aihub.qualcomm.com/models/resnet101_quantized) | [qai_hub_models.models.resnet101_quantized](qai_hub_models/models/resnet101_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [ResNet18](https://aihub.qualcomm.com/models/resnet18) | [qai_hub_models.models.resnet18](qai_hub_models/models/resnet18/README.md) | ✔️ | ✔️ | ✔️
+| [ResNet101](https://aihub.qualcomm.com/models/resnet101) | [qai_hub_models.models.resnet101](qai_hub_models/models/resnet101/README.md) | ✔️ | ✔️ | ✔️
+| [Swin-Tiny](https://aihub.qualcomm.com/models/swin_tiny) | [qai_hub_models.models.swin_tiny](qai_hub_models/models/swin_tiny/README.md) | ✔️ | ✔️ | ✔️
+| [WideResNet50](https://aihub.qualcomm.com/models/wideresnet50) | [qai_hub_models.models.wideresnet50](qai_hub_models/models/wideresnet50/README.md) | ✔️ | ✔️ | ✔️
+| [ResNet18Quantized](https://aihub.qualcomm.com/models/resnet18_quantized) | [qai_hub_models.models.resnet18_quantized](qai_hub_models/models/resnet18_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [MobileNet-v2](https://aihub.qualcomm.com/models/mobilenet_v2) | [qai_hub_models.models.mobilenet_v2](qai_hub_models/models/mobilenet_v2/README.md) | ✔️ | ✔️ | ✔️
+| [VIT](https://aihub.qualcomm.com/models/vit) | [qai_hub_models.models.vit](qai_hub_models/models/vit/README.md) | ✔️ | ✔️ | ✔️
+| [ResNeXt50](https://aihub.qualcomm.com/models/resnext50) | [qai_hub_models.models.resnext50](qai_hub_models/models/resnext50/README.md) | ✔️ | ✔️ | ✔️
+| [EfficientNet-B0](https://aihub.qualcomm.com/models/efficientnet_b0) | [qai_hub_models.models.efficientnet_b0](qai_hub_models/models/efficientnet_b0/README.md) | ✔️ | ✔️ | ✔️
+| [Inception-v3Quantized](https://aihub.qualcomm.com/models/inception_v3_quantized) | [qai_hub_models.models.inception_v3_quantized](qai_hub_models/models/inception_v3_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [ConvNext-Tiny](https://aihub.qualcomm.com/models/convnext_tiny) | [qai_hub_models.models.convnext_tiny](qai_hub_models/models/convnext_tiny/README.md) | ✔️ | ✔️ | ✔️
+| [ResNeXt101](https://aihub.qualcomm.com/models/resnext101) | [qai_hub_models.models.resnext101](qai_hub_models/models/resnext101/README.md) | ✔️ | ✔️ | ✔️
+| [Shufflenet-v2](https://aihub.qualcomm.com/models/shufflenet_v2) | [qai_hub_models.models.shufflenet_v2](qai_hub_models/models/shufflenet_v2/README.md) | ✔️ | ✔️ | ✔️
+| [Shufflenet-v2Quantized](https://aihub.qualcomm.com/models/shufflenet_v2_quantized) | [qai_hub_models.models.shufflenet_v2_quantized](qai_hub_models/models/shufflenet_v2_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [SqueezeNet-1_1](https://aihub.qualcomm.com/models/squeezenet1_1) | [qai_hub_models.models.squeezenet1_1](qai_hub_models/models/squeezenet1_1/README.md) | ✔️ | ✔️ | ✔️
+| [GoogLeNet](https://aihub.qualcomm.com/models/googlenet) | [qai_hub_models.models.googlenet](qai_hub_models/models/googlenet/README.md) | ✔️ | ✔️ | ✔️
+| [Inception-v3](https://aihub.qualcomm.com/models/inception_v3) | [qai_hub_models.models.inception_v3](qai_hub_models/models/inception_v3/README.md) | ✔️ | ✔️ | ✔️
+| | | | |
+| **Image Editing**
+| [LaMa-Dilated](https://aihub.qualcomm.com/models/lama_dilated) | [qai_hub_models.models.lama_dilated](qai_hub_models/models/lama_dilated/README.md) | ✔️ | ✔️ | ✔️
+| | | | |
+| **Image Generation**
+| [StyleGAN2](https://aihub.qualcomm.com/models/stylegan2) | [qai_hub_models.models.stylegan2](qai_hub_models/models/stylegan2/README.md) | ✔️ | ✔️ | ✔️
+| | | | |
+| **Super Resolution**
+| [QuickSRNetLarge](https://aihub.qualcomm.com/models/quicksrnetlarge) | [qai_hub_models.models.quicksrnetlarge](qai_hub_models/models/quicksrnetlarge/README.md) | ✔️ | ✔️ | ✔️
+| [XLSR-Quantized](https://aihub.qualcomm.com/models/xlsr_quantized) | [qai_hub_models.models.xlsr_quantized](qai_hub_models/models/xlsr_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [QuickSRNetMedium](https://aihub.qualcomm.com/models/quicksrnetmedium) | [qai_hub_models.models.quicksrnetmedium](qai_hub_models/models/quicksrnetmedium/README.md) | ✔️ | ✔️ | ✔️
+| [SESR-M5](https://aihub.qualcomm.com/models/sesr_m5) | [qai_hub_models.models.sesr_m5](qai_hub_models/models/sesr_m5/README.md) | ✔️ | ✔️ | ✔️
+| [XLSR](https://aihub.qualcomm.com/models/xlsr) | [qai_hub_models.models.xlsr](qai_hub_models/models/xlsr/README.md) | ✔️ | ✔️ | ✔️
+| [Real-ESRGAN-General-x4v3](https://aihub.qualcomm.com/models/real_esrgan_general_x4v3) | [qai_hub_models.models.real_esrgan_general_x4v3](qai_hub_models/models/real_esrgan_general_x4v3/README.md) | ✔️ | ✔️ | ✔️
+| [QuickSRNetSmall](https://aihub.qualcomm.com/models/quicksrnetsmall) | [qai_hub_models.models.quicksrnetsmall](qai_hub_models/models/quicksrnetsmall/README.md) | ✔️ | ✔️ | ✔️
+| [SESR-M5-Quantized](https://aihub.qualcomm.com/models/sesr_m5_quantized) | [qai_hub_models.models.sesr_m5_quantized](qai_hub_models/models/sesr_m5_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [Real-ESRGAN-x4plus](https://aihub.qualcomm.com/models/real_esrgan_x4plus) | [qai_hub_models.models.real_esrgan_x4plus](qai_hub_models/models/real_esrgan_x4plus/README.md) | ✔️ | ✔️ | ✔️
+| [ESRGAN](https://aihub.qualcomm.com/models/esrgan) | [qai_hub_models.models.esrgan](qai_hub_models/models/esrgan/README.md) | ✔️ | ✔️ | ✔️
+| | | | |
+| **Semantic Segmentation**
+| [FFNet-40S-Quantized](https://aihub.qualcomm.com/models/ffnet_40s_quantized) | [qai_hub_models.models.ffnet_40s_quantized](qai_hub_models/models/ffnet_40s_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [FFNet-54S](https://aihub.qualcomm.com/models/ffnet_54s) | [qai_hub_models.models.ffnet_54s](qai_hub_models/models/ffnet_54s/README.md) | ✔️ | ✔️ | ✔️
+| [DDRNet23-Slim](https://aihub.qualcomm.com/models/ddrnet23_slim) | [qai_hub_models.models.ddrnet23_slim](qai_hub_models/models/ddrnet23_slim/README.md) | ✔️ | ✔️ | ✔️
+| [Yolo-v8-Segmentation](https://aihub.qualcomm.com/models/yolov8_seg) | [qai_hub_models.models.yolov8_seg](qai_hub_models/models/yolov8_seg/README.md) | ✔️ | ✔️ | ✔️
+| [FFNet-54S-Quantized](https://aihub.qualcomm.com/models/ffnet_54s_quantized) | [qai_hub_models.models.ffnet_54s_quantized](qai_hub_models/models/ffnet_54s_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [SINet](https://aihub.qualcomm.com/models/sinet) | [qai_hub_models.models.sinet](qai_hub_models/models/sinet/README.md) | ✔️ | ✔️ | ✔️
+| [FFNet-40S](https://aihub.qualcomm.com/models/ffnet_40s) | [qai_hub_models.models.ffnet_40s](qai_hub_models/models/ffnet_40s/README.md) | ✔️ | ✔️ | ✔️
+| [FFNet-78S](https://aihub.qualcomm.com/models/ffnet_78s) | [qai_hub_models.models.ffnet_78s](qai_hub_models/models/ffnet_78s/README.md) | ✔️ | ✔️ | ✔️
+| [FFNet-78S-LowRes](https://aihub.qualcomm.com/models/ffnet_78s_lowres) | [qai_hub_models.models.ffnet_78s_lowres](qai_hub_models/models/ffnet_78s_lowres/README.md) | ✔️ | ✔️ | ✔️
+| [DeepLabV3-ResNet50](https://aihub.qualcomm.com/models/deeplabv3_resnet50) | [qai_hub_models.models.deeplabv3_resnet50](qai_hub_models/models/deeplabv3_resnet50/README.md) | ✔️ | ✔️ | ✔️
+| [FFNet-78S-Quantized](https://aihub.qualcomm.com/models/ffnet_78s_quantized) | [qai_hub_models.models.ffnet_78s_quantized](qai_hub_models/models/ffnet_78s_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [Unet-Segmentation](https://aihub.qualcomm.com/models/unet_segmentation) | [qai_hub_models.models.unet_segmentation](qai_hub_models/models/unet_segmentation/README.md) | ✔️ | ✔️ | ✔️
+| [Segment-Anything-Model](https://aihub.qualcomm.com/models/sam) | [qai_hub_models.models.sam](qai_hub_models/models/sam/README.md) | ✔️ | ✔️ | ✔️
+| [FFNet-122NS-LowRes](https://aihub.qualcomm.com/models/ffnet_122ns_lowres) | [qai_hub_models.models.ffnet_122ns_lowres](qai_hub_models/models/ffnet_122ns_lowres/README.md) | ✔️ | ✔️ | ✔️
+| [FastSam-S](https://aihub.qualcomm.com/models/fastsam_s) | [qai_hub_models.models.fastsam_s](qai_hub_models/models/fastsam_s/README.md) | ✔️ | ✔️ | ✔️
+| [FCN_ResNet50](https://aihub.qualcomm.com/models/fcn_resnet50) | [qai_hub_models.models.fcn_resnet50](qai_hub_models/models/fcn_resnet50/README.md) | ✔️ | ✔️ | ✔️
+| [MediaPipe-Selfie-Segmentation](https://aihub.qualcomm.com/models/mediapipe_selfie) | [qai_hub_models.models.mediapipe_selfie](qai_hub_models/models/mediapipe_selfie/README.md) | ✔️ | ✔️ | ✔️
+| [FastSam-X](https://aihub.qualcomm.com/models/fastsam_x) | [qai_hub_models.models.fastsam_x](qai_hub_models/models/fastsam_x/README.md) | ✔️ | ✔️ | ✔️
+| | | | |
+| **Object Detection**
+| [MediaPipe-Hand-Detection](https://aihub.qualcomm.com/models/mediapipe_hand) | [qai_hub_models.models.mediapipe_hand](qai_hub_models/models/mediapipe_hand/README.md) | ✔️ | ✔️ | ✔️
+| [Yolo-v8-Detection](https://aihub.qualcomm.com/models/yolov8_det) | [qai_hub_models.models.yolov8_det](qai_hub_models/models/yolov8_det/README.md) | ✔️ | ✔️ | ✔️
+| [DETR-ResNet50-DC5](https://aihub.qualcomm.com/models/detr_resnet50_dc5) | [qai_hub_models.models.detr_resnet50_dc5](qai_hub_models/models/detr_resnet50_dc5/README.md) | ✔️ | ✔️ | ✔️
+| [DETR-ResNet101-DC5](https://aihub.qualcomm.com/models/detr_resnet101_dc5) | [qai_hub_models.models.detr_resnet101_dc5](qai_hub_models/models/detr_resnet101_dc5/README.md) | ✔️ | ✔️ | ✔️
+| [DETR-ResNet50](https://aihub.qualcomm.com/models/detr_resnet50) | [qai_hub_models.models.detr_resnet50](qai_hub_models/models/detr_resnet50/README.md) | ✔️ | ✔️ | ✔️
+| [Yolo-v7](https://aihub.qualcomm.com/models/yolov7) | [qai_hub_models.models.yolov7](qai_hub_models/models/yolov7/README.md) | ✔️ | ✔️ | ✔️
+| [Yolo-v6](https://aihub.qualcomm.com/models/yolov6) | [qai_hub_models.models.yolov6](qai_hub_models/models/yolov6/README.md) | ✔️ | ✔️ | ✔️
+| [MediaPipe-Face-Detection](https://aihub.qualcomm.com/models/mediapipe_face) | [qai_hub_models.models.mediapipe_face](qai_hub_models/models/mediapipe_face/README.md) | ✔️ | ✔️ | ✔️
+| [DETR-ResNet101](https://aihub.qualcomm.com/models/detr_resnet101) | [qai_hub_models.models.detr_resnet101](qai_hub_models/models/detr_resnet101/README.md) | ✔️ | ✔️ | ✔️
+| | | | |
+| **Pose Estimation**
+| [OpenPose](https://aihub.qualcomm.com/models/openpose) | [qai_hub_models.models.openpose](qai_hub_models/models/openpose/README.md) | ✔️ | ✔️ | ✔️
+| [MediaPipe-Pose-Estimation](https://aihub.qualcomm.com/models/mediapipe_pose) | [qai_hub_models.models.mediapipe_pose](qai_hub_models/models/mediapipe_pose/README.md) | ✔️ | ✔️ | ✔️
+| [HRNetPoseQuantized](https://aihub.qualcomm.com/models/hrnet_pose_quantized) | [qai_hub_models.models.hrnet_pose_quantized](qai_hub_models/models/hrnet_pose_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [LiteHRNet](https://aihub.qualcomm.com/models/litehrnet) | [qai_hub_models.models.litehrnet](qai_hub_models/models/litehrnet/README.md) | ✔️ | ✔️ | ✔️
+| [HRNetPose](https://aihub.qualcomm.com/models/hrnet_pose) | [qai_hub_models.models.hrnet_pose](qai_hub_models/models/hrnet_pose/README.md) | ✔️ | ✔️ | ✔️
+
+### Audio
+
+| Model | README | Torch App | Device Export | CLI Demo
+| -- | -- | -- | -- | --
+| | | | |
+| **Speech Recognition**
+| [HuggingFace-WavLM-Base-Plus](https://aihub.qualcomm.com/models/huggingface_wavlm_base_plus) | [qai_hub_models.models.huggingface_wavlm_base_plus](qai_hub_models/models/huggingface_wavlm_base_plus/README.md) | ✔️ | ✔️ | ✔️
+| [Whisper-Base](https://aihub.qualcomm.com/models/whisper_asr) | [qai_hub_models.models.whisper_asr](qai_hub_models/models/whisper_asr/README.md) | ✔️ | ✔️ | ✔️
+| | | | |
+| **Audio Enhancement**
+| [Facebook-Denoiser](https://aihub.qualcomm.com/models/facebook_denoiser) | [qai_hub_models.models.facebook_denoiser](qai_hub_models/models/facebook_denoiser/README.md) | ✔️ | ✔️ | ✔️
+
+### Multimodal
+
+| Model | README | Torch App | Device Export | CLI Demo
+| -- | -- | -- | -- | --
+| | | | |
+| [OpenAI-Clip](https://aihub.qualcomm.com/models/openai_clip) | [qai_hub_models.models.openai_clip](qai_hub_models/models/openai_clip/README.md) | ✔️ | ✔️ | ✔️
+| [TrOCR](https://aihub.qualcomm.com/models/trocr) | [qai_hub_models.models.trocr](qai_hub_models/models/trocr/README.md) | ✔️ | ✔️ | ✔️
+
+### Generative AI
+
+| Model | README | Torch App | Device Export | CLI Demo
+| -- | -- | -- | -- | --
+| | | | |
+| **Image Generation**
+| [ControlNet](https://aihub.qualcomm.com/models/controlnet_quantized) | [qai_hub_models.models.controlnet_quantized](qai_hub_models/models/controlnet_quantized/README.md) | ✔️ | ✔️ | ✔️
+| [Stable-Diffusion](https://aihub.qualcomm.com/models/stable_diffusion_quantized) | [qai_hub_models.models.stable_diffusion_quantized](qai_hub_models/models/stable_diffusion_quantized/README.md) | ✔️ | ✔️ | ✔️
+| | | | |
+| **Text Generation**
+| [Baichuan-7B](https://aihub.qualcomm.com/models/baichuan_7b_quantized) | [qai_hub_models.models.baichuan_7b_quantized](qai_hub_models/models/baichuan_7b_quantized/README.md) | ✔️ | ✔️ | ✔️
diff --git a/ai-solutions/QCS8550-embedded-linux/README.md b/ai-solutions/QCS8550-embedded-linux/README.md
deleted file mode 100644
index 1a304a5d..00000000
--- a/ai-solutions/QCS8550-embedded-linux/README.md
+++ /dev/null
@@ -1,142 +0,0 @@
-## Table of Contents
-
-- [Table of Contents](#table-of-contents)
-- [LE Build setup](#le-build-setup)
-- [Generating ai-solutions binary](#generating-ai-solutions-binary)
-- [Running ai-solutions application](#running-ai-solutions-application)
-
-## LE Build setup
-
-1. Follow "00067.1 Release Note for QCS8550.LE.1.0" to Setup "qti-distro-rb-debug" LE.1.0 build server for QCS8550
-2. Make sure "bitbake qti-robotics-image" is successful
-3. Verify the "qti-distro-rb-debug" build by flashing on target using "fastboot". Commands to flash:
-
- ```
- cd build-qti-distro-rb-debug/tmp-glibc/deploy/images/kalama/qti-robotics-image/
- adb root
- adb reboot bootloader
-
- fastboot flash abl_a abl.elf
- fastboot flash abl_b abl.elf
- fastboot flash dtbo_a dtbo.img
- fastboot flash dtbo_b dtbo.img
- fastboot flash boot_a boot.img
- fastboot flash boot_b boot.img
- fastboot flash system_a system.img
- fastboot flash system_b system.img
- fastboot flash userdata userdata.img
- fastboot flash persist persist.img
-
- fastboot reboot
- ```
-
-## Generating ai-solutions binary
-
-1. Copy snpe-2.x folder to "/poky/meta-qti-ml-prop/recipes/snpe-sdk/files/snpe/".
- ```
- cp -r /* /poky/meta-qti-ml-prop/recipes/snpe-sdk/files/snpe/
- ```
-2. Copy "meta-qti-ai-solutions" into "/poky/" folder
- ```
- cp -r meta-qti-ai-solutions /poky/
- ```
-3. Copy SNPE,DiagLog,DlContainer,DlSystem and Wrapper.hpp
- ```
- cp -r /include/SNPE/Dl* /poky/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/
- cp -r /include/SNPE/DiagLog/ /poky/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/
- cp -r /include/SNPE/Wrapper.hpp /poky/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/
- cp -r /include/SNPE/SNPE/ /poky/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/
- ```
-4. Update "snpe.bb" in "poky/meta-qti-ml-prop/recipes/snpe-sdk" folder
- 1. Make sure platform "aarch64-oe-linux-gcc11.2" is selected
- 2. Update DSP lib path
- ```
- -- install -m 0755 ${S}/lib/dsp/* ${D}/${libdir}/rfsa/adsp
- ++ install -m 0755 ${S}/lib/hexagon-v73/unsigned/lib* ${D}/${libdir}/rfsa/adsp
- ```
-5. Run the following commands
- ```
- cd /LE.PRODUCT.2.1.r1/apps_proc/poky
- export MACHINE=kalama DISTRO=qti-distro-rb-debug
- source qti-conf/set_bb_env.sh
- export PREBUILT_SRC_DIR="/prebuilt_HY11"
- bitbake qti-robotics-image
- ```
-6. Flash the latest build on target. (Note: Check if "ai-solutions" binary is generated in the "build-qti-distro-fullstack-debug/tmp-glibc/work/qrb5165_rb5-oe-linux/qti-robotics-image/1.0-r0/rootfs/usr/bin/" path)
-
-## Running ai-solutions application
-1. Execute the following commands to remount the target
- ```
- adb root
- adb disable-verity
- adb reboot
- adb root
- adb shell "mount -o remount,rw /"
- ```
-2. Push "meta-qti-ai-solutions/recipes/ai-solutions/files/app/" and "SNPE-2.14" onto the device
- ```
- adb push
- ```
-3. Execute the following commands to setup snpe on target
- ```
- adb shell
- cd
- cp -r lib/aarch64-oe-linux-gcc11.2/lib* /usr/lib/
- cp bin/aarch64-oe-linux-gcc11.2/snpe-net-run /usr/bin/
- cp -r lib/hexagon-v73/unsigned/lib* /usr/lib/rfsa/adsp/
- chmod +x /usr/bin/snpe-net-run
- snpe-net-run --version
- ```
- Expected output: SNPE v2.14.2.230905160328_61726
-4. Run ai-solutions application
- ```
- adb shell
- cd
- ai-solutions -c -i -o
- ```
- Example:
-
- ```
- ai-solutions -c data/config.json -i Sample1.jpg -o output.jpg
- ```
-
- ### Details on Input arguments:
-
- #### Sample config.json
-
- ```json
- "model-configs":[
-
- "model-name":"QSrnet-medium", -> model name which is used while enabling solution
- "model-type":"superresolution", -> To specify the use case such superresolution or detection or segmentation etc..
- "model-path":"models/quicksrnet_medium_quantized.dlc", -> Path at which model is located on target
- "runtime":"DSP", -> Select Runtime either CPU or DSP
- "input-layers":[ -> Input layer of the model
- "t.1"
- ],
- "output-layers":[
- "depth_to_space#1" -> Output layer of the model
- ],
- "output-tensors":[
- "65" -> Output node for post processing
- ]
- ]
- ```
-
- solution-config:
- ```json
- "solution-configs":[
- {
- "solution-name":"AI-Solutions", -> To identify usecase
- "model-name":"SESR", -> Specify model name to be executed
- "input-config-name":"image", -> To read input from image
- "Enable":0 -> Enable specific solution
- },
- {
- "solution-name":"AI-Solutions",
- "model-name":"SRGAN",
- "input-config-name":"image",
- "Enable":1
- }
- ]
- ```
diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/conf/layer.conf b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/conf/layer.conf
deleted file mode 100644
index 4fa44c6f..00000000
--- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/conf/layer.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-BBFILES += "${LAYERDIR}/recipes/*/*.bb ${LAYERDIR}/recipes/*/*.bbappend ${LAYERDIR}/recipes-*/*/*.bb ${LAYERDIR}/recipes-*/*/*.bbappend"
-BBPATH .= ":${LAYERDIR}"
-BBFILE_COLLECTIONS += "ai-solutions"
-BBFILE_PRIORITY_ai-solutions = "17"
-BBFILE_PATTERN_ai-solutions := "^${LAYERDIR}/"
-LAYERSERIES_COMPAT_ai-solutions = " dunfell kirkstone "
-IMAGE_INSTALL:append = " ai-solutions "
diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/ai-solutions.bb b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/ai-solutions.bb
deleted file mode 100644
index 2a4e4989..00000000
--- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/ai-solutions.bb
+++ /dev/null
@@ -1,25 +0,0 @@
-inherit cmake pkgconfig
-
-HOMEPAGE = "http://support.cdmatech.com"
-LICENSE = "Qualcomm-Technologies-Inc.-Proprietary"
-LIC_FILES_CHKSUM = "file://${COREBASE}/meta-qti-bsp-prop/files/qcom-licenses/\
-${LICENSE};md5=92b1d0ceea78229551577d4284669bb8"
-
-SUMMARY = "AI-Solutions on QCS8550"
-DESCRIPTION = "AI-Solutions"
-
-LICENSE = "Qualcomm-Technologies-Inc.-Proprietary"
-
-SRC_URI = "file://app"
-S = "${WORKDIR}/app"
-
-DEPENDS += " jsoncpp json-glib gflags gstreamer1.0 gstreamer1.0-plugins-base opencv snpe"
-
-do_install(){
- install -d ${D}/${bindir}
- install -m 0777 ${WORKDIR}/build/out/ai-solutions ${D}/${bindir}
-}
-
-INSANE_SKIP_${PN} += "arch"
-
-FILES_${PN} += "${bindir}/*"
diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMake/FindGStreamer.cmake b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMake/FindGStreamer.cmake
deleted file mode 100644
index 3e4148c0..00000000
--- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMake/FindGStreamer.cmake
+++ /dev/null
@@ -1,5 +0,0 @@
-find_package(PkgConfig)
-pkg_search_module(GLIB REQUIRED glib-2.0)
-pkg_check_modules(GSTREAMER REQUIRED gstreamer-1.0)
-pkg_check_modules(GST_APP REQUIRED gstreamer-app-1.0)
-pkg_check_modules(GST_VIDEO REQUIRED gstreamer-video-1.0)
\ No newline at end of file
diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMakeLists.txt b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMakeLists.txt
deleted file mode 100644
index 62db1972..00000000
--- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/CMakeLists.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-# CMake lowest version requirement
-cmake_minimum_required(VERSION 3.5.1)
-
-# project information
-project(AI-SOLUTIONS)
-
-include(FindPkgConfig)
-pkg_check_modules(JSONCPP REQUIRED jsoncpp)
-pkg_check_modules(JSON REQUIRED json-glib-1.0)
-pkg_check_modules(GFLAGS REQUIRED gflags)
-
-set(PROJECT_ROOT ${CMAKE_CURRENT_LIST_DIR})
-set(CMAKE_MODULE_PATH ${PROJECT_ROOT}/CMake)
-set(CMAKE_CXX_STANDARD 17)
-
-find_package(GStreamer REQUIRED)
-find_package(OpenCV REQUIRED )
-
-add_subdirectory("./src")
-
-link_directories(
- ${JSONCPP_LIBRARY_DIRS}
- ${JSON_LIBRARY_DIRS}
- ${GFLAGS_LIBRARY_DIRS}
-)
diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/data/config.json b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/data/config.json
deleted file mode 100644
index 5e887618..00000000
--- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/data/config.json
+++ /dev/null
@@ -1,345 +0,0 @@
-{
- "input-configs":[
- {
- "input-config-name":"image",
- "stream-type":"image"
- }
- ],
- "model-configs":[
- {
- "model-name":"QSrnet-small",
- "model-type":"superresolution",
- "model-path":"models/quicksrnet_small_quantized.dlc",
- "runtime":"DSP",
- "input-layers":[
- "t.1"
- ],
- "output-layers":[
- "depth_to_space#1"
- ],
- "output-tensors":[
- "41"
- ],
- "global-threshold":0.2
- },
- {
- "model-name":"QSrnet-medium",
- "model-type":"superresolution",
- "model-path":"models/quicksrnet_medium_quantized.dlc",
- "runtime":"DSP",
- "input-layers":[
- "t.1"
- ],
- "output-layers":[
- "depth_to_space#1"
- ],
- "output-tensors":[
- "65"
- ],
- "global-threshold":0.2
- },
- {
- "model-name":"QSrnet-large",
- "model-type":"superresolution",
- "model-path":"models/quicksrnet_large_quantized.dlc",
- "runtime":"DSP",
- "input-layers":[
- "t.1"
- ],
- "output-layers":[
- "depth_to_space#1"
- ],
- "output-tensors":[
- "124"
- ],
- "global-threshold":0.2
- },
- {
- "model-name":"XLSR",
- "model-type":"superresolution",
- "model-path":"models/xlsr_quantized.dlc",
- "runtime": "DSP",
- "input-layers":[
- "t.1"
- ],
- "output-layers":[
- "clipped_relu"
- ],
- "output-tensors":[
- "100"
- ],
- "global-threshold":0.2
- },
- {
- "model-name":"SESR",
- "model-type":"superresolution",
- "model-path":"models/sesr_quantized.dlc",
- "runtime":"DSP",
- "input-layers":[
- "lr"
- ],
- "output-layers":[
- "DepthToSpace_52"
- ],
- "output-tensors":[
- "sr"
- ],
- "global-threshold":0.2
- },
- {
- "model-name":"ESRGAN",
- "model-type":"superresolution",
- "model-path":"models/esrgan_quantized.dlc",
- "runtime":"DSP",
- "input-layers":[
- "keras_layer_input"
- ],
- "output-layers":[
- "convolution_168"
- ],
- "output-tensors":[
- "Identity"
- ],
- "global-threshold":0.2
- },
-
- {
- "model-name":"ssd-mobilenet-v2",
- "model-type":"detection",
- "model-path":"models/ssd_mobilenetV2_quantized.dlc",
- "runtime":"DSP",
- "nms-threshold":0.3,
- "conf-threshold":0.7,
- "input-layers":[
- "input.1"
- ],
- "output-layers":[
- "Softmax_350",
- "Concat_397"
- ],
- "output-tensors":[
- "935",
- "986"
- ],
- "global-threshold":0.2
- },
- {
- "model-name":"yolo-nas",
- "model-type":"detection",
- "model-path":"models/yolo_nas_s_quantized.dlc",
- "runtime":"DSP",
- "nms-threshold":0.4,
- "conf-threshold":0.4,
- "input-layers":[
- "input.1"
- ],
- "output-layers":[
- "/heads/Sigmoid",
- "/heads/Mul"
- ],
- "output-tensors":[
- "877",
- "885"
- ],
- "global-threshold":0.2
- },
- {
- "model-name":"yolo-x",
- "model-type":"detection",
- "model-path":"models/yolox_quantized.dlc",
- "runtime":"DSP",
- "nms-threshold":0.4,
- "conf-threshold":0.3,
- "input-layers":[
- "images"
- ],
- "output-layers":[
- "Transpose_570"
- ],
- "output-tensors":[
- "output"
- ],
- "global-threshold":0.2
- },
-
- {
- "model-name":"mbllen",
- "model-type":"lowlight",
- "model-path":"models/mbllen_quantized.dlc",
- "runtime":"DSP",
- "input-layers":[
- "input.1"
- ],
- "output-layers":[
- "/model/Clip"
- ],
- "output-tensors":[
- "352"
- ],
- "global-threshold":0.2
- },
- {
- "model-name":"ruas",
- "model-type":"lowlight",
- "model-path":"models/ruas_quantized.dlc",
- "runtime":"DSP",
- "input-layers":[
- "onnx::Pad_0"
- ],
- "output-layers":[
- "/denoise_net/Sub"
- ],
- "output-tensors":[
- "403"
- ],
- "global-threshold":0.2
- },
- {
- "model-name":"SCI",
- "model-type":"lowlight",
- "model-path":"models/sci_quantized.dlc",
- "runtime":"DSP",
- "input-layers":[
- "input.1"
- ],
- "output-layers":[
- "/Clip"
- ],
- "output-tensors":[
- "30"
- ],
- "global-threshold":0.2
- },
- {
- "model-name":"StableLLve",
- "model-type":"lowlight",
- "model-path":"models/StableLLVE_quantized.dlc",
- "runtime":"DSP",
- "input-layers":[
- "input.1"
- ],
- "output-layers":[
- "/outc/conv/Conv"
- ],
- "output-tensors":[
- "248"
- ],
- "global-threshold":0.2
- },
- {
- "model-name":"zero_dce",
- "model-type":"lowlight",
- "model-path":"models/zero_dce_quantized.dlc",
- "runtime":"DSP",
- "input-layers":[
- "input.1"
- ],
- "output-layers":[
- "/Add_7"
- ],
- "output-tensors":[
- "80"
- ],
- "global-threshold":0.2
- },
-
- {
- "model-name":"DeepLabv3Plus-resnet++",
- "model-type":"segmentation",
- "model-path":"models/DeepLabv3Plus_resnet101_quantized.dlc",
- "runtime":"DSP",
- "nms-threshold":0.4,
- "conf-threshold":0.4,
- "input-layers":[
- "input.1"
- ],
- "output-layers":[
- "Resize_284"
- ],
- "output-tensors":[
- "1089"
- ],
- "global-threshold":0.2
- },
- {
- "model-name":"DeepLabv3-resnet101",
- "model-type":"segmentation",
- "model-path":"models/deeplabv3_resnet101_quantized.dlc",
- "runtime":"DSP",
- "nms-threshold":0.4,
- "conf-threshold":0.4,
- "input-layers":[
- "input.1"
- ],
- "output-layers":[
- "/Resize_1"
- ],
- "output-tensors":[
- "1089"
- ],
- "global-threshold":0.2
- },
- {
- "model-name":"DeepLabv3-resnet50",
- "model-type":"segmentation",
- "model-path":"models/deeplabv3_resnet50_quantized.dlc",
- "runtime":"DSP",
- "nms-threshold":0.4,
- "conf-threshold":0.4,
- "input-layers":[
- "input.1"
- ],
- "output-layers":[
- "/Resize_1"
- ],
- "output-tensors":[
- "613"
- ],
- "global-threshold":0.2
- },
- {
- "model-name":"FCN_resnet101",
- "model-type":"segmentation",
- "model-path":"models/fcn_resnet101_quantized.dlc",
- "runtime":"DSP",
- "nms-threshold":0.4,
- "conf-threshold":0.4,
- "input-layers":[
- "input.1"
- ],
- "output-layers":[
- "/Resize_1"
- ],
- "output-tensors":[
- "1018"
- ],
- "global-threshold":0.2
- },
- {
- "model-name":"FCN_resnet50",
- "model-type":"segmentation",
- "model-path":"models/fcn_resnet50_quantized.dlc",
- "runtime":"DSP",
- "nms-threshold":0.4,
- "conf-threshold":0.4,
- "input-layers":[
- "input.1"
- ],
- "output-layers":[
- "/Resize_1"
- ],
- "output-tensors":[
- "542"
- ],
- "global-threshold":0.2
- }
- ],
- "solution-configs":[
- {
- "solution-name":"AI-Solutions",
- "model-name":"yolo-nas",
- "input-config-name":"image",
- "Enable":1
- }
- ]
-}
diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Configuration.h b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Configuration.h
deleted file mode 100644
index 6cce6ab6..00000000
--- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Configuration.h
+++ /dev/null
@@ -1,118 +0,0 @@
-// -*- mode: cpp -*-
-// =============================================================================
-// @@-COPYRIGHT-START-@@
-//
-// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved.
-// SPDX-License-Identifier: BSD-3-Clause
-//
-// @@-COPYRIGHT-END-@@
-// =============================================================================
-#ifndef CONFIGURATION_H_
-#define CONFIGURATION_H_
-
-#include
-#include
-#include
-#include "Utils.h"
-
-using namespace cv;
-using namespace std;
-
-const string input_configs = "input-configs";
-const string model_configs = "model-configs";
-const string solution_configs = "solution-configs";
-
-// Input Configs;
-const string pipeline_input_config = "input-config-name";
-const string stream_type = "stream-type";
-const string camera_url = "camera-url";
-const string skipframe = "SkipFrame";
-
-// Model Configs
-const string model_config_name = "model-name";
-const string model_type = "model-type";
-const string model_path = "model-path";
-const string runtime = "runtime";
-const string nms_threshold = "nms-threshold";
-const string conf_threshold = "conf-threshold";
-const string input_layers = "input-layers";
-const string output_layers = "output-layers";
-const string output_tensors = "output-tensors";
-
-// Solution Configs
-const string solution_name = "solution-name";
-const string model_name = "model-name";
-const string Enable = "Enable";
-const string solution_input_config = "input-config-name";
-const string output_type = "output-type";
-
-class ObjectDetectionSnpeConfig {
- public:
- string model_name;
- string model_type;
- std::string model_path;
- runtime_t runtime;
- float nmsThresh;
- float confThresh;
- std::vector<std::string> labels;
- std::vector<std::string> inputLayers;
- std::vector<std::string> outputLayers;
- std::vector<std::string> outputTensors;
-};
-
-class InputConfiguration{
- public:
- int SkipFrame;
- int StreamNumber=0;
- string StreamType;
- string Url;
- string ConfigName;
-};
-
-class SolutionConfiguration {
- public:
- string solution_name;
- string model_name;
- string input_config_name;
- bool Enable;
- string output_type;
- std::shared_ptr<InputConfiguration> input_config;
- std::shared_ptr<ObjectDetectionSnpeConfig> model_config;
-};
-
-class DebugConfiguration
-{
- public:
- bool DumpData=false;
- string Directory;
-};
-
-class Configuration
-{
-public:
- static Configuration &getInstance()
- {
- static Configuration instance;
- return instance;
- }
-
-private:
- Configuration() {}
-public:
- Configuration(Configuration const &) = delete;
- void operator=(Configuration const &) = delete;
-
- DebugConfiguration Debug;
- ObjectDetectionSnpeConfig Config;
- SolutionConfiguration Sol_Config;
- std::unordered_map<std::string, std::shared_ptr<InputConfiguration>> inputconfigs;
- std::unordered_map<std::string, std::shared_ptr<ObjectDetectionSnpeConfig>> modelsconfig;
- std::unordered_map<std::string, std::shared_ptr<SolutionConfiguration>> solutionsconfig;
-
- void LoadConfiguration(string file);
- int LoadInputConfig(Json::Value& input);
- int LoadModelsConfig(Json::Value& models);
- int LoadSolutionsConfig(Json::Value& solutions);
-};
-
-#endif
diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Detection.h b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Detection.h
deleted file mode 100644
index ffc2ad3a..00000000
--- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/Detection.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// -*- mode: cpp -*-
-// =============================================================================
-// @@-COPYRIGHT-START-@@
-//
-// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved.
-// SPDX-License-Identifier: BSD-3-Clause
-//
-// @@-COPYRIGHT-END-@@
-// =============================================================================
-#ifndef DETECTION_H
-#define DETECTION_H
-
-#include
-#include
-#include
-#include
-
-using namespace std;
-using namespace cv;
-
-struct ObjectData {
- // Bounding box information: top-left coordinate and width, height
- cv::Rect bbox;
- // Confidence of this bounding box
- float confidence = -1.0f;
- // The label of this Bounding box
- int label = -1;
- // Time cost of detecting this frame
- size_t time_cost = 0;
- uint32_t Width=512;
- uint32_t Height=512;
- cv::Mat *output=NULL;
-
-};
-
-struct Detection
-{
- cv::Rect bbox;
- float score;
- int label;
-};
-
-struct DetectionDetail
-{
- vector<Detection> Result;
- string ModelName;
-};
-
-struct DetectionItem
-{
- uint32_t Width;
- uint32_t Height;
- uint32_t FrameId;
- size_t Size;
- string StreamName;
- int StreamId;
- shared_ptr ImageBuffer;
- ObjectData Results;
-};
-
-#endif
diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/DetectionSnpe.h b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/DetectionSnpe.h
deleted file mode 100644
index 0aa944ab..00000000
--- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/DetectionSnpe.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// -*- mode: cpp -*-
-// =============================================================================
-// @@-COPYRIGHT-START-@@
-//
-// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved.
-// SPDX-License-Identifier: BSD-3-Clause
-//
-// @@-COPYRIGHT-END-@@
-// =============================================================================
-#ifndef __Detection_IMPL_H__
-#define __Detection_IMPL_H__
-
-#include
-#include
-#include
-#include
-
-#include "SNPERuntime.h"
-#include "ModelInference.h"
-#include "Configuration.h"
-#include "Detection.h"
-namespace detectionsnpe
-{
- class DETECTIONSnpe
- {
- public:
- DETECTIONSnpe();
- ~DETECTIONSnpe();
- bool Initialize(const ObjectDetectionSnpeConfig& config);
- bool DeInitialize();
- bool Detect(cv::Mat input,cv::Mat& output_image,string model_name);
- bool SetScoreThresh(const float& conf_thresh, const float& nms_thresh);
- bool IsInitialized() const;
-
- private:
- bool m_isInit;
- float m_nmsThresh;
- float m_confThresh;
- std::unique_ptr<snperuntime::SNPERuntime> m_snperuntime;
- std::vector<std::string> m_inputLayers;
- std::vector<std::string> m_outputLayers;
- std::vector<std::string> m_outputTensors;
-
- bool PreProcessInput(const cv::Mat& frame,string model_name);
- bool PostProcess( cv::Mat image,cv::Mat& output_image,string model_name);
- float computeIoU(const cv::Rect& a, const cv::Rect& b);
- std::vector doNMS(std::vector winList, const float& nms_thresh);
- };
-
-} // namespace detection
-
-#endif // __DETECTION_IMPL_H__
diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/LowlightSnpe.h b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/LowlightSnpe.h
deleted file mode 100644
index e6ee6b75..00000000
--- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/LowlightSnpe.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// -*- mode: cpp -*-
-// =============================================================================
-// @@-COPYRIGHT-START-@@
-//
-// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved.
-// SPDX-License-Identifier: BSD-3-Clause
-//
-// @@-COPYRIGHT-END-@@
-// =============================================================================
-#ifndef __LOWLIGHT_IMPL_H__
-#define __LOWLIGHT_IMPL_H__
-
-#include
-#include
-#include
-#include
-
-#include "SNPERuntime.h"
-#include "ModelInference.h"
-#include "Configuration.h"
-
-namespace lowlightsnpe
-{
- class LOWLIGHTSnpe
- {
- public:
- LOWLIGHTSnpe();
- ~LOWLIGHTSnpe();
- bool Initialize(const ObjectDetectionSnpeConfig& config);
- bool DeInitialize();
- bool Detect(cv::Mat input,cv::Mat& output_image,string model_name);
- bool IsInitialized() const;
-
- private:
- bool m_isInit;
- std::unique_ptr<snperuntime::SNPERuntime> m_snperuntime;
- std::vector<std::string> m_inputLayers;
- std::vector<std::string> m_outputLayers;
- std::vector<std::string> m_outputTensors;
- bool PreProcessInput(const cv::Mat& frame,string model_name);
- bool PostProcess(cv::Mat& output_image,string model_name);
- };
-
-} // namespace lowlightsnpe
-
-#endif // __LOWLIGHT_IMPL_H__
diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ModelInference.h b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ModelInference.h
deleted file mode 100644
index 7223e7e0..00000000
--- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/ModelInference.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// -*- mode: cpp -*-
-// =============================================================================
-// @@-COPYRIGHT-START-@@
-//
-// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved.
-// SPDX-License-Identifier: BSD-3-Clause
-//
-// @@-COPYRIGHT-END-@@
-// =============================================================================
-#ifndef MODEL_INFERENCE_H_
-#define MODEL_INFERENCE_H_
-#include
-#include
-#include
-#include
-#include
-#include "Configuration.h"
-
-class ModelInference{
-public:
- ModelInference();
- ModelInference(const string model_name);
- int Initialization(const ObjectDetectionSnpeConfig& config);
- bool IsInitialized();
- bool UnInitialization();
- ~ModelInference();
- int Inference(cv::Mat input,cv::Mat& output_image,string model_name);
-private:
- void *Impl = nullptr;
- enum Models{SUPERRESOLUTION, DETECTION,LOWLIGHT,SEGMENTATION};
- int Model;
-};
-
-#endif
\ No newline at end of file
diff --git a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SNPERuntime.h b/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SNPERuntime.h
deleted file mode 100644
index 854ae9bb..00000000
--- a/ai-solutions/QCS8550-embedded-linux/meta-qti-ai-solutions/recipes/ai-solutions/files/app/inc/SNPERuntime.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// -*- mode: cpp -*-
-// =============================================================================
-// @@-COPYRIGHT-START-@@
-//
-// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved.
-// SPDX-License-Identifier: BSD-3-Clause
-//
-// @@-COPYRIGHT-END-@@
-// =============================================================================
-#ifndef _SNPERUNTIME_H_
-#define _SNPERUNTIME_H_
-
-#include
-#include
-#include