-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathemotions.py
142 lines (111 loc) · 4.63 KB
/
emotions.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
import cv2
import numpy as np
from keras.models import load_model
from statistics import mode
from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input
from gaze_tracking import GazeTracking
# 아이트래커 준비
gaze = GazeTracking()
# 표정 감성 인식
USE_WEBCAM = True # If false, loads video file source
# parameters for loading data and images
emotion_model_path = './models/emotion_model.hdf5'
emotion_labels = get_labels('fer2013')
# hyper-parameters for bounding boxes shape
frame_window = 10
emotion_offsets = (20, 40)
# loading models
face_cascade = cv2.CascadeClassifier('./models/haarcascade_frontalface_default.xml')
emotion_classifier = load_model(emotion_model_path)
# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]
# starting lists for calculating modes
emotion_window = []
# starting video streaming
cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(0)
# Select video or webcam feed
cap = None
if (USE_WEBCAM == True):
cap = cv2.VideoCapture(0) # Webcam source
else:
cap = cv2.VideoCapture('./demo/dinner.mp4') # Video file source
while cap.isOpened(): # True:
"""
ret, bgr_image = cap.read()
#bgr_image = video_capture.read()[1]
gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5,
minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
for face_coordinates in faces:
x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
gray_face = gray_image[y1:y2, x1:x2]
try:
gray_face = cv2.resize(gray_face, (emotion_target_size))
except:
continue
gray_face = preprocess_input(gray_face, True)
gray_face = np.expand_dims(gray_face, 0)
gray_face = np.expand_dims(gray_face, -1)
emotion_prediction = emotion_classifier.predict(gray_face)
emotion_probability = np.max(emotion_prediction)
emotion_label_arg = np.argmax(emotion_prediction)
emotion_text = emotion_labels[emotion_label_arg]
if emotion_text == 'angry':
color = emotion_probability * np.asarray((255, 0, 0))
emotion_window.append("표정이 좋지 않습니다. 긴장을 풀어주세요.")
elif emotion_text == 'sad':
color = emotion_probability * np.asarray((0, 0, 255))
emotion_window.append("표정이 좋지 않습니다. 긴장을 풀어주세요.")
elif emotion_text == 'happy':
color = emotion_probability * np.asarray((255, 255, 0))
emotion_window.append("")
elif emotion_text == 'surprise':
color = emotion_probability * np.asarray((0, 255, 255))
emotion_window.append("")
else:
color = emotion_probability * np.asarray((0, 255, 0))
emotion_window.append("표정이 좋지 않습니다. 긴장을 풀어주세요.")
tcolor = []
for i in range(3):
tcolor.append(color[i])
color = tuple(tcolor)
if len(emotion_window) > frame_window:
emotion_window.pop(0)
try:
emotion_mode = mode(emotion_window)
except:
continue
draw_bounding_box(face_coordinates, rgb_image, color)
rgb_image = draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45, font_scale=2)
bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
# 아이트래킹 코드
gaze.refresh(bgr_image)
text = ""
bgr_image = gaze.annotated_frame()
if gaze.is_blinking():
text = "Blinking"
elif gaze.is_right():
text = "Looking right"
elif gaze.is_left():
text = "Looking left"
elif gaze.is_center():
text = "Looking center"
cv2.putText(bgr_image, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)
left_pupil = gaze.pupil_left_coords()
right_pupil = gaze.pupil_right_coords()
cv2.putText(bgr_image, "Left pupil: " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
cv2.putText(bgr_image, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
cv2.imshow('window_frame', bgr_image)
"""
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()