video processing demo with ocr
* added README for LLM preprocess demo
* Fix pip problem in gpu ci
* Add aes module in LLM demo
* param parser for the LLM preprocessing main
* rename the LLM preprocessing demo path
* video processing demo with ocr

See merge request: !840
HuHeng committed Nov 6, 2024
1 parent 19b9ae2 commit 0ffe782
Showing 12 changed files with 926 additions and 2 deletions.
4 changes: 2 additions & 2 deletions .codebase/pipelines/ci.yaml
@@ -238,7 +238,7 @@ jobs:
- (cd output/demo/video_enhance && pip install basicsr==1.4.2 realesrgan && (sed -i '8s/from torchvision.transforms.functional_tensor import rgb_to_grayscale/from torchvision.transforms.functional import rgb_to_grayscale/' /usr/local/lib/python3.8/dist-packages/basicsr/data/degradations.py) && python3 enhance_demo.py)
- (cd output/demo/aesthetic_assessment && pip install onnxruntime && python3 aesthetic_assessment_demo.py)
- (cd output/demo/face_detect && cp ../../models/version-RFB-640.onnx . && trtexec --onnx=version-RFB-640.onnx --buildOnly --saveEngine=version-RFB-640.engine && cp version-RFB-640.engine ../../models && python3 detect_trt_sample.py)
-- (cd output/demo/colorization_python && git clone https://github.com/eefengwei/DeOldify.git DeOldify && pip3 install -r ./DeOldify/requirements-colab.txt && pip3 install Ipython && mkdir -p ./DeOldify/models && wget -c https://huggingface.co/spensercai/DeOldify/resolve/main/ColorizeVideo_gen.pth -O ./DeOldify/models/ColorizeVideo_gen.pth && python3 deoldify_demo.py)
+- (cd output/demo/colorization_python && git clone https://github.com/eefengwei/DeOldify.git DeOldify && pip install "numpy>=2.0.0,<3.0.0" && pip3 install -r ./DeOldify/requirements-colab.txt && pip3 install Ipython && mkdir -p ./DeOldify/models && wget -c https://huggingface.co/spensercai/DeOldify/resolve/main/ColorizeVideo_gen.pth -O ./DeOldify/models/ColorizeVideo_gen.pth && python3 deoldify_demo.py)

clang_build_test:
name: clang_build_test
@@ -330,4 +330,4 @@ jobs:
- go mod init
- commit=$(cat ../../../gosdk_version.txt) && go get github.com/babitmf/bmf-gosdk@${commit}
- go build main.go
-- python3 test_go.py
\ No newline at end of file
+- python3 test_go.py
18 changes: 18 additions & 0 deletions bmf/demo/LLM_video_preprocessing/README.md
@@ -0,0 +1,18 @@
This is a demo of video/image preprocessing for LLM generation training.

Based on BMF, it is easy to build algorithms and integrate them into the whole preprocessing pipeline.

Two parts are included:
1. Clip processing
The input video is split according to scene changes, subtitles in the video are detected and cropped out by the OCR module, and video quality is assessed by the aesthetic module provided by BMF.
After that, the finalized video clips are encoded as output. (A minimal pipeline sketch appears at the end of this README.)
## Prerequisites
Please pip install all the dependencies in `requirement.txt`.
## Run
```
python main.py --input_file <your test video>
```
The output info and clips will be stored in `clip_output` under the current path.

2. Caption
Please refer to the README in `bmf/bmf/demo/fast_caption_module`.
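## Pipeline sketch
A minimal sketch of how the clip-processing stages could be chained with BMF's Python graph API. The module name `ocr_crop_module` and the option keys below are illustrative assumptions, not the exact names used by `main.py`; see the demo sources for the real wiring.
```
import bmf

graph = bmf.graph()
video = graph.decode({"input_path": "input.mp4"})
# Hypothetical chaining: OCR-based subtitle crop, then aesthetic scoring.
processed = (video["video"]
             .module("ocr_crop_module", {})
             .module("aesmod_module", {"result_path": "clip_output/aesthetic.json"}))
bmf.encode(processed, None, {"output_path": "clip_output/clip_0.mp4"}).run()
```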
153 changes: 153 additions & 0 deletions bmf/demo/LLM_video_preprocessing/aesmod_module.py
@@ -0,0 +1,153 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from module_utils import SyncModule
import os
import time
import json
import os.path as osp
import numpy as np

# Limit OpenMP threads; must be set before onnxruntime is imported.
os.environ["OMP_NUM_THREADS"] = "8"
import onnxruntime as ort
import torch
import logging
import cv2


def get_logger():
return logging.getLogger("main")


LOGGER = get_logger()


def flex_resize_aesv2(img, desired_size=[448, 672], pad_color=[0, 0, 0]):
old_h, old_w = img.shape[:2] # old_size is in (height, width) format
if desired_size[0] >= desired_size[1]:
        if old_h < old_w:  # rotate the horizontal video
img = np.rot90(img, k=1, axes=(1, 0))
else:
if old_h > old_w: # rotate the vertical video
img = np.rot90(img, k=1, axes=(1, 0))
old_h, old_w = img.shape[:2]

if old_w / old_h > (desired_size[1] / desired_size[0]):
ratio = desired_size[0] / old_h
else:
ratio = desired_size[1] / old_w
img = cv2.resize(img, None, fx=ratio, fy=ratio)
h, w, _ = img.shape
h_crop = (h - desired_size[0]) // 2
w_crop = (w - desired_size[1]) // 2
img = img[h_crop:h_crop + desired_size[0],
w_crop:w_crop + desired_size[1], :]
return img


class Aesmod:

def __init__(self, model_path, model_version, output_path):
self._frm_idx = 0
self._frm_scores = []
self._model_version = model_version
self._output_path = output_path

# model_dir = osp.join(osp.abspath(osp.dirname("__file__")), "models")
# aesmod_ort_model_path = osp.realpath(
# osp.join(model_dir, "aes_transonnx_update3.onnx")
# )
self.use_gpu = False
aesmod_ort_model_path = model_path
        LOGGER.info(
            f"loading aesthetic ort inference session from {aesmod_ort_model_path}")
self.ort_session = ort.InferenceSession(aesmod_ort_model_path)

self.resize_reso = [672, 448]

def preprocess(self, frame):
frame = flex_resize_aesv2(frame)
# print('using flex_resize_aesv2', frame.shape)
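        # Scale to [0, 1], then normalize with the standard ImageNet mean/std.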
frame = (frame.astype(np.float32) / 255.0 -
np.array([0.485, 0.456, 0.406], dtype="float32")) / (np.array(
[0.229, 0.224, 0.225], dtype="float32"))
frame = np.transpose(frame, (2, 0, 1))
frame = np.expand_dims(frame, 0)
return frame

@staticmethod
def tensor_to_list(tensor):
if tensor.requires_grad:
return tensor.detach().cpu().flatten().tolist()
else:
return tensor.cpu().flatten().tolist()

@staticmethod
def score_pred_mapping(raw_scores, raw_min=2.60, raw_max=7.42):
pred_score = np.clip(
np.sum([x * (i + 1) for i, x in enumerate(raw_scores)]), raw_min,
raw_max)
pred_score = np.sqrt(
(pred_score - raw_min) / (raw_max - raw_min)) * 100
return float(np.clip(pred_score, 0, 100.0))
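    # Worked example (hypothetical input): a one-hot raw_scores at bin 5
    # yields a weighted sum of 5.0, so the mapped score is
    # sqrt((5.0 - 2.60) / (7.42 - 2.60)) * 100 ≈ 70.6.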

def process(self, frames):
frames = [
frame
if frame.flags["C_CONTIGUOUS"] else np.ascontiguousarray(frame)
for frame in frames
]
frame = self.preprocess(frames[0])
print("after preprocess shape", frame.shape)
if not frame.flags["C_CONTIGUOUS"]:
frame = np.ascontiguousarray(frame, dtype=np.float32)

t1 = time.time()
        if self.use_gpu:
            # The GPU path assumes a TensorRT model was loaded as
            # self.trt_model; this demo only initializes the ONNX Runtime
            # session, so use_gpu stays False.
            with torch.no_grad():
                input_batch = torch.from_numpy(frame).contiguous().cuda()
                preds, _ = self.trt_model(input_batch)
                raw_score = self.tensor_to_list(preds)
raw_score = self.tensor_to_list(preds)
        else:
            raw_score = self.ort_session.run(None, {"input": frame})
raw_score = raw_score[0][0]
score = self.score_pred_mapping(raw_score)
self._frm_scores.append(score)
self._frm_idx += 1
t2 = time.time()
LOGGER.info(f"[Aesmod] inference time: {(t2 - t1) * 1000:0.1f} ms")
return frames[0]

def clean(self):
nr_score = round(np.mean(self._frm_scores), 2)
results = {
"aesthetic": nr_score,
"aesthetic_version": self._model_version
}
LOGGER.info(f"overall prediction {json.dumps(results)}")
with open(self._output_path, "w") as outfile:
json.dump(results, outfile, indent=4, ensure_ascii=False)


class aesmod_module(SyncModule):

def __init__(self, node=None, option=None):
output_path = option.get("result_path", 0)
params = option.get("params", {})
model_version = params.get("model_version", "v1.0")
model_path = params.get("model_path",
"../../models/aes_transonnx_update3.onnx")
self._nrp = Aesmod(model_path, model_version, output_path)
SyncModule.__init__(self,
node,
nb_in=1,
in_fmt="rgb24",
out_fmt="rgb24")

def core_process(self, frames):
return self._nrp.process(frames)

def clean(self):
self._nrp.clean()
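For a quick smoke test, the `Aesmod` scorer above can also be driven directly, without a BMF graph. A minimal sketch, assuming the default model path from `aesmod_module.__init__` and a placeholder output file; the dummy frame stands in for any RGB uint8 frame:
```
import numpy as np

aes = Aesmod("../../models/aes_transonnx_update3.onnx", "v1.0", "aesthetic.json")
frame = np.random.randint(0, 255, (720, 1280, 3), dtype=np.uint8)  # dummy frame
aes.process([frame])   # scores one frame and logs the inference time
aes.clean()            # writes the averaged score to aesthetic.json
```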