Renamed imagemanipv2_mod into imagemanipv2_multi_ops. Added imagemanipv2_all_ops example
Erol444 committed Jan 23, 2025
1 parent 139bef8 commit b9cf2ae
Showing 3 changed files with 76 additions and 11 deletions.
63 changes: 63 additions & 0 deletions examples/python/ImageManip/image_manip_v2_all_ops.py
@@ -0,0 +1,63 @@
import depthai as dai
import cv2

# pipeline = dai.Pipeline(dai.Device(dai.DeviceInfo('10.12.103.165')))
pipeline = dai.Pipeline()

manip_input = pipeline.create(dai.node.ImageManipV2)
manip_input.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p)
inputQueue = manip_input.inputImage.createInputQueue()

manip_ops = [
    # Resize operations. If the aspect ratio isn't the same, the image will be stretched/cropped/letterboxed (depending on the resize mode)
    # Docs here: https://docs.luxonis.com/software/depthai/resolution-techniques/
    ('resize_stretch', lambda conf: conf.setOutputSize(256, 200, dai.ImageManipConfigV2.ResizeMode.STRETCH)),
    ('resize_letterbox', lambda conf: conf.setOutputSize(256, 200, dai.ImageManipConfigV2.ResizeMode.LETTERBOX)),
    ('resize_center_crop', lambda conf: conf.setOutputSize(256, 200, dai.ImageManipConfigV2.ResizeMode.CENTER_CROP)),
    # Crop a 150x200 region whose top-left corner is at (50, 50)
    ('crop', lambda conf: conf.addCrop(x=50, y=50, w=150, h=200)),
    # Flip the frame vertically/horizontally
    ('flip_vertical', lambda conf: conf.addFlipVertical()),
    ('flip_horizontal', lambda conf: conf.addFlipHorizontal()),
    # Scale the image by 0.7x in x and 0.5x in y
    ('scale', lambda conf: conf.addScale(0.7, 0.5)),
    # Rotate. If center isn't specified, it rotates around the image center (0.5, 0.5)
    ('rotate_90_deg', lambda conf: conf.addRotateDeg(90)),
    ('rotate_90_deg_center', lambda conf: conf.addRotateDeg(90, center=dai.Point2f(0.2, 0.3)).setOutputCenter(False)),
    # Shear with a 2x2 affine matrix (row-major)
    ('transform_affine', lambda conf: conf.addTransformAffine(
        [1, 0.5,
         0.2, 1])),
    # Warp with a 3x3 perspective matrix (row-major)
    ('transform_perspective', lambda conf: conf.addTransformPerspective(
        [1.0, 0.2, 0.0,  # First row
         0.1, 1.0, 0.0,  # Second row
         0.001, 0.002, 1.0])),  # Third row
    ('frame_type', lambda conf: conf.setFrameType(dai.ImgFrame.Type.RAW8)),  # to grayscale
]

# Dynamically create ImageManipV2 nodes, apply configurations, and set up queues
queues = {}
for name, config in manip_ops:
    print(name, config)
    manip = pipeline.create(dai.node.ImageManipV2)
    config(manip.initialConfig)
    manip_input.out.link(manip.inputImage)
    queues[name] = manip.out.createOutputQueue(maxSize=4, blocking=False)


imgFrame = dai.ImgFrame()

input_frame = cv2.imread('../models/lenna.png') # 512x512
# Send 256x256 image to the device
imgFrame.setCvFrame(cv2.pyrDown(input_frame), dai.ImgFrame.Type.BGR888i)
inputQueue.send(imgFrame)

cv2.imshow('input_image', input_frame)


pipeline.start()

for name, queue in queues.items():
    inFrame = queue.get()
    cv2.imshow(name, inFrame.getCvFrame())

key = cv2.waitKey(0)
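Side note (not part of this commit): below is a minimal host-side sketch of roughly what the shear and perspective matrices used in the transform_affine / transform_perspective entries above do, assuming OpenCV and NumPy are installed on the host and the same lenna.png test image is available. The on-device ImageManipV2 result may differ in output sizing and border handling.

import cv2
import numpy as np

img = cv2.imread('../models/lenna.png')  # same 512x512 test image as in the example
h, w = img.shape[:2]

# The example's 2x2 shear matrix, padded with a zero translation column for cv2.warpAffine
affine = np.float32([[1.0, 0.5, 0.0],
                     [0.2, 1.0, 0.0]])
sheared = cv2.warpAffine(img, affine, (w, h))

# The example's 3x3 perspective matrix
perspective = np.float32([[1.0,   0.2,   0.0],
                          [0.1,   1.0,   0.0],
                          [0.001, 0.002, 1.0]])
warped = cv2.warpPerspective(img, perspective, (w, h))

cv2.imshow('host_shear', sheared)
cv2.imshow('host_perspective', warped)
cv2.waitKey(0)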
@@ -13,21 +13,23 @@
manip.initialConfig.setFrameType(dai.ImgFrame.Type.NV12)
manip.setMaxOutputFrameSize(2709360)

-camOut = camRgb.requestOutput((1920, 1080))
-camOut.link(manip.inputImage)
+camRgb.requestOutput((1920, 1080)).link(manip.inputImage)

+out = manip.out.createOutputQueue()
+
+import json
+with open('pipeline.json', 'w') as f:
+    f.write(json.dumps(pipeline.serializeToJson()))

-manipQ = manip.out.createOutputQueue()
-camQ = camOut.createOutputQueue()

pipeline.start()

print(manip.initialConfig)

while True:
-    if manipQ.has():
-        cv2.imshow("Manip frame", manipQ.get().getCvFrame())
-    if camQ.has():
-        cv2.imshow("Camera frame", camQ.get().getCvFrame())
-    key = cv2.waitKey(1)
-    if key == ord('q'):
-        break
+    inFrame = out.get()
+    if inFrame is not None:
+        cv2.imshow("Show frame", inFrame.getCvFrame())
+    key = cv2.waitKey(1)
+    if key == ord('q'):
+        break

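Side note (not part of this commit): since the pipeline.json written above is produced with json.dumps, it can be reloaded on the host for a quick sanity check of the serialized pipeline. A minimal sketch (the exact structure of the serialized graph isn't shown here):

import json

with open('pipeline.json') as f:
    pipeline_def = json.load(f)

# Pretty-print the beginning of the serialized pipeline definition
print(json.dumps(pipeline_def, indent=2)[:1000])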