
Commit

fix
karaposu committed May 15, 2024
1 parent b5d9070 commit 83e7036
Showing 4 changed files with 67 additions and 16 deletions.
37 changes: 33 additions & 4 deletions demo.py
@@ -1,16 +1,45 @@

from image_input_handler import UniversalImageInputHandler
# from facexformer_pipeline import facexformer_pipeline

from facexformer_pipeline.facexformer_pipeline import facexformer_pipeline
from facexformer_pipeline.facexformer_pipeline import FacexformerPipeline
from visual_debugger import VisualDebugger, Annotation, AnnotationType
import numpy as np

def main():

    vd = VisualDebugger(tag="facexformer", debug_folder_path="./", active=True)
    image_path = "sample_image.jpg"
    uih = UniversalImageInputHandler(image_path, debug=False)
    COMPATIBLE, img = uih.COMPATIBLE, uih.img
    print('COMPATIBLE:', COMPATIBLE)

    pipeline = FacexformerPipeline(tasks=['landmark', 'headpose'], debug=True)
    results = pipeline.run_model(uih.img)

    transformed_image = results['transformed_image'].numpy()
    transformed_image = np.transpose(transformed_image, (1, 2, 0))
    unnormalized_image = results['image']

    # print(results['headpose'])

    landmarks_annotation = [Annotation(type=AnnotationType.POINTS, coordinates=results["landmark_list"], color=(0, 255, 0))]
    scaled_landmarks_annotation = [Annotation(type=AnnotationType.POINTS, coordinates=results["scaled_landmarks"], color=(0, 255, 0))]

    vd.visual_debug(img, landmarks_annotation, process_step="landmarks")
    vd.visual_debug(img, scaled_landmarks_annotation, process_step="scaled_landmarks")

    vd.visual_debug(transformed_image, landmarks_annotation, process_step="landmarks_on_transformed")
    vd.visual_debug(unnormalized_image, landmarks_annotation, process_step="landmarks_on_unnormalized_image")

    vd.visual_debug(transformed_image, scaled_landmarks_annotation, process_step="scaled_landmarks_on_transformed")
    vd.visual_debug(unnormalized_image, scaled_landmarks_annotation, process_step="scaled_landmarks_on_unnormalized_image")

    facexformer_pipeline(img)

    # annotations = [Annotation(type=AnnotationType.PITCH_YAW_ROLL,
    #                           coordinates=None,
    #                           orientation=(
    #                               results["headpose"]["pitch"], results["headpose"]["yaw"], results["headpose"]["roll"]),
    #                           color=(0, 255, 0))]
    # vd.visual_debug(img, annotations, process_step="head_orientation")

if __name__ == "__main__":
    main()
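For reference, the new result keys introduced in this commit (results['scaled_landmarks'] and results['transformed_image']) can also be consumed without the visual_debugger dependency. The snippet below is a minimal sketch, not part of the commit, that overlays the scaled landmarks on the full-resolution input with OpenCV; it assumes the package from this commit is installed and that scaled_landmarks holds integer (x, y) tuples in the original image's coordinate system.

import cv2
from image_input_handler import UniversalImageInputHandler
from facexformer_pipeline.facexformer_pipeline import FacexformerPipeline

# Minimal sketch (not part of this commit): draw the scaled landmarks
# directly on the original image instead of using VisualDebugger.
uih = UniversalImageInputHandler("sample_image.jpg", debug=False)
pipeline = FacexformerPipeline(tasks=['landmark', 'headpose'], debug=False)
results = pipeline.run_model(uih.img)

overlay = uih.img.copy()
for (x, y) in results['scaled_landmarks']:
    cv2.circle(overlay, (x, y), 2, (0, 255, 0), -1)  # small green dot per landmark
cv2.imwrite("landmarks_overlay.png", overlay)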
30 changes: 28 additions & 2 deletions facexformer_pipeline/facexformer_pipeline.py
@@ -11,7 +11,7 @@
from network import FaceXFormer
from facenet_pytorch import MTCNN
from image_input_handler import UniversalImageInputHandler
from utils import denorm_points, unnormalize, adjust_bbox, visualize_head_pose, visualize_landmarks, visualize_mask
from facexformer_pipeline.utils import denorm_points, unnormalize, adjust_bbox, visualize_head_pose, visualize_landmarks, visualize_mask
from task_postprocesser import task_faceparsing, process_landmarks, task_headpose , task_attributes, task_gender, process_visibility
import torchvision.transforms as transforms
from huggingface_hub import hf_hub_download
@@ -124,9 +124,32 @@ def process_task_output(self, task_id, output, results, images):
            results['age_gender_race_dict'] = task_gender(output[4], output[5], output[6])
        elif task_id == 5:
            results['visibility'] = process_visibility(output[3])

    # from PIL import Image, ImageDraw
    # import numpy as np

    def scale_landmarks_to_original_image(self, original_image, landmarks, resized_image_size=(224, 224)):
        # print("landmarks[0]:", landmarks[0])
        original_width, original_height = original_image.shape[1], original_image.shape[0]
        # print("original_width:", original_width, "original_height:", original_height)
        resized_width, resized_height = resized_image_size
        scale_x = original_width / resized_width
        scale_y = original_height / resized_height

        # Scale landmarks back to original image dimensions
        scaled_landmarks = [(x * scale_x, y * scale_y) for (x, y) in landmarks]
        scaled_landmarks_int = [(int(round(x)), int(round(y))) for (x, y) in scaled_landmarks]
        # print("scaled_landmarks[0]:", scaled_landmarks[0])

        return scaled_landmarks_int


    def run_model(self, image, image_is_cropped=True):
        results = {}

        original_image = image.copy()
        image = Image.fromarray(image)
        if not image_is_cropped:
            image = self.crop_face_area_from_image(image)
@@ -143,6 +166,9 @@ def run_model(self, image, image_is_cropped=True):
        image = (image * 255).astype(np.uint8)
        image = image[:, :, ::-1]
        results['image'] = image
        results['transformed_image'] = model_ready_image[0]

        results['scaled_landmarks'] = self.scale_landmarks_to_original_image(original_image, results['landmark_list'])
        return results

def main():
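The new scale_landmarks_to_original_image step maps landmarks predicted on the 224x224 model input back to the original resolution by multiplying each coordinate by the per-axis ratio of original size to resized size. The standalone sketch below (hypothetical function and numbers, for illustration only) shows the same arithmetic:

# Illustration of the scaling arithmetic used above; scale_landmarks and the
# 640x480 example are hypothetical, not part of the package.
def scale_landmarks(landmarks, original_size, resized_size=(224, 224)):
    orig_w, orig_h = original_size
    res_w, res_h = resized_size
    scale_x = orig_w / res_w   # e.g. 640 / 224 ≈ 2.857
    scale_y = orig_h / res_h   # e.g. 480 / 224 ≈ 2.143
    return [(int(round(x * scale_x)), int(round(y * scale_y))) for (x, y) in landmarks]

# A landmark at the centre of the 224x224 crop maps to the centre of a 640x480 image:
print(scale_landmarks([(112, 112)], (640, 480)))  # [(320, 240)]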
14 changes: 5 additions & 9 deletions facexformer_pipeline/task_postprocesser.py
@@ -12,7 +12,7 @@
from facenet_pytorch import MTCNN
import os
# from argparse import args
from utils import denorm_points, unnormalize, adjust_bbox, visualize_head_pose, visualize_landmarks, visualize_mask
from facexformer_pipeline.utils import denorm_points, unnormalize, adjust_bbox, visualize_head_pose, visualize_landmarks, visualize_mask

def process_visibility(visibility_output):
    probs = torch.sigmoid(visibility_output[0])
@@ -81,15 +81,11 @@ def process_landmarks(landmark_output,images ):
    landmarks_list = []
    for landmark in denorm_landmarks[0]:
        x, y = landmark[0], landmark[1]
        landmarks_list.append((int(x.item()), int(y.item())))
        # landmarks_list.append((int(x.item()), int(y.item())))
        landmarks_list.append((int(round(x.item())), int(round(y.item()))))

    # landmarks_dict = {}
    # for index, landmark in enumerate(denorm_landmarks[0]):
    #     x, y = landmark[0], landmark[1]
    #     landmarks_dict[f"landmark_{index}"] = (x.item(), y.item())

    im = visualize_landmarks(image, denorm_landmarks, (255, 255, 0))
    # im = visualize_landmarks(image, denorm_landmarks, (255, 255, 0))
    # save_path_viz = os.path.join(args.results_path, "landmarks.png")
    cv2.imwrite("./landmarks.png", im[:, :, ::-1])
    # cv2.imwrite("./landmarks.png", im[:, :, ::-1])

    return landmarks_list
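The change in process_landmarks swaps plain truncation for nearest-integer rounding when converting the denormalized landmark coordinates to pixel indices, which removes a systematic bias toward the origin for positive coordinates. A short sketch of the difference (illustrative values only):

# int() truncates toward zero; int(round()) snaps to the nearest pixel.
x = 111.7
print(int(x))         # 111  (old behaviour: positive values always round down)
print(int(round(x)))  # 112  (new behaviour: nearest integer)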
2 changes: 1 addition & 1 deletion setup.py
@@ -6,7 +6,7 @@

setup(
    name='facexformer_pipeline', # Package name
    version='0.2.1', # Version of your package
    version='0.2.2', # Version of your package
    author='Enes Kuzucu', # Your name

    description='A module to run facexformer model as pipeline', # Short description

