# server_test.py

import json
import torch
from torchvision import transforms
import time
import datetime as dt
from threading import Thread
# other libraries
import os
import requests  # modified part
# socket
import socket
import cv2
import numpy as np
import sys
sys.path.insert(0, "yolov5_face")
from models.experimental import attempt_load
from utils.datasets import letterbox
from utils.general import check_img_size, non_max_suppression_face, scale_coords

# Check device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Detection model (yolov5-face)
model = attempt_load("yolov5_face/yolov5n-0.5.pt", map_location=device)

# Recognition model (insightface iresnet100)
from insightface.insight_face import iresnet100
weight = torch.load("insightface/resnet100_backbone.pth", map_location=device)
model_emb = iresnet100()
model_emb.load_state_dict(weight)
model_emb.to(device)
model_emb.eval()

face_preprocess = transforms.Compose([
    transforms.ToTensor(),  # HWC uint8 image => CHW float tensor in [0, 1]
    transforms.Resize((112, 112)),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
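
# Note: transforms.Resize here runs after ToTensor, i.e. directly on the CHW
# tensor (supported in torchvision >= 0.8), so the numpy frame coming from
# OpenCV never needs a PIL round-trip.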

# Resize image for the detector
def resize_image(img0, img_size):
    h0, w0 = img0.shape[:2]  # original hw
    r = img_size / max(h0, w0)  # resize ratio down to img_size
    if r != 1:  # always resize down; only resize up when training with augmentation
        interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR
        img0 = cv2.resize(img0, (int(w0 * r), int(h0 * r)), interpolation=interp)

    imgsz = check_img_size(img_size, s=model.stride.max())  # check img_size is a multiple of the model stride
    img = letterbox(img0, new_shape=imgsz)[0]

    # Convert BGR HWC => RGB CHW
    img = img[:, :, ::-1].transpose(2, 0, 1).copy()
    img = torch.from_numpy(img).to(device)
    img = img.float()  # uint8 to fp32
    img /= 255.0  # 0-255 to 0.0-1.0
    return img

def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale landmark coords (five x, y pairs) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2, 4, 6, 8]] -= pad[0]  # x padding
    coords[:, [1, 3, 5, 7, 9]] -= pad[1]  # y padding
    coords[:, :10] /= gain
    coords[:, 0::2].clamp_(0, img0_shape[1])  # clip x1..x5 to image width
    coords[:, 1::2].clamp_(0, img0_shape[0])  # clip y1..y5 to image height
    return coords

def get_face(input_image):
    # Detection parameters
    size_convert = 128
    conf_thres = 0.4
    iou_thres = 0.5

    # Resize image
    img = resize_image(input_image.copy(), size_convert)

    # Detect faces with yolov5-face
    with torch.no_grad():
        pred = model(img[None, :])[0]

    # Apply NMS and map boxes/landmarks back to the original image
    det = non_max_suppression_face(pred, conf_thres, iou_thres)[0]
    bboxs = np.int32(scale_coords(img.shape[1:], det[:, :4], input_image.shape).round().cpu().numpy())
    landmarks = np.int32(scale_coords_landmarks(img.shape[1:], det[:, 5:15], input_image.shape).round().cpu().numpy())
    return bboxs, landmarks
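
# A usage sketch for get_face (assumes a BGR frame as produced by OpenCV,
# e.g. frame = cv2.imread("some_image.jpg"); the file name is hypothetical):
#
#   bboxs, landmarks = get_face(frame)
#   # bboxs: (N, 4) int32 xyxy boxes; landmarks: (N, 10) int32, five (x, y) points per face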

def get_feature(face_image, training=True):
    # Convert BGR (OpenCV) to RGB
    face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)

    # Preprocess: to tensor, resize to 112x112, normalize
    face_image = face_preprocess(face_image).to(device)

    # Run the recognition model to get the embedding
    with torch.no_grad():
        if training:
            emb_img_face = model_emb(face_image[None, :])[0].cpu().numpy()
        else:
            emb_img_face = model_emb(face_image[None, :]).cpu().numpy()

    # L2-normalize the embedding
    images_emb = emb_img_face / np.linalg.norm(emb_img_face)
    return images_emb
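
# Because the embeddings are L2-normalized, a plain dot product between two of
# them is their cosine similarity; recognition() below relies on this. A small
# sketch (img_a and img_b are hypothetical BGR face crops):
#
#   a = get_feature(img_a, training=False)  # shape (1, D)
#   b = get_feature(img_b, training=False)
#   cos_sim = (a @ b.T).item()  # in [-1, 1]; higher means more similar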

def read_features(root_feature_path="feature/face_features.npz"):
    data = np.load(root_feature_path, allow_pickle=True)
    images_name = data["arr1"]
    images_emb = data["arr2"]
    return images_name, images_emb
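
# The .npz file is expected to hold known identities under key "arr1" and their
# L2-normalized embeddings under "arr2". A minimal enrollment sketch under that
# assumption (the directory layout and the "0.jpg" file name are hypothetical):
#
#   names, embs = [], []
#   root = "./dataset/face-datasets"
#   for person in make_dir_list(root):
#       img = cv2.imread(os.path.join(root, person, "0.jpg"))
#       names.append(person)
#       embs.append(get_feature(img, training=False)[0])
#   np.savez("feature/face_features.npz", arr1=np.array(names), arr2=np.stack(embs))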

def make_dir_list(path):
    # List the subdirectories of path
    files = os.listdir(path)
    files_dir = [f for f in files if os.path.isdir(os.path.join(path, f))]
    return files_dir

def recognition(face_image):
    global isThread, score, name

    # Get the embedding for the query face
    query_emb = get_feature(face_image, training=False)

    # Compare against the stored embeddings (cosine similarity) and keep the best match
    images_names, images_embs = read_features()
    scores = (query_emb @ images_embs.T)[0]
    best_id = np.argmax(scores)
    score = scores[best_id]
    name = images_names[best_id]
    isThread = True

# Return exactly `count` bytes received from the socket (None if the peer closes early)
def recvall(sock, count):
    buf = b''  # byte string
    while count:
        newbuf = sock.recv(count)
        if not newbuf:
            return None
        buf += newbuf
        count -= len(newbuf)
    return buf
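
# The wire format read by server() below is a 16-byte ASCII length header
# followed by a JPEG payload. A minimal client-side sketch under that
# assumption (the host is hypothetical; port 13000 matches the server below,
# and the length must be padded to 16 bytes to match recvall(conn, 16)):
#
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   sock.connect(("127.0.0.1", 13000))
#   ok, encoded = cv2.imencode(".jpg", frame)
#   payload = encoded.tobytes()
#   sock.sendall(str(len(payload)).ljust(16).encode())
#   sock.sendall(payload)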

def send_result_to_flask_server(name, score, timestamp):
    url = 'http://localhost:13330/image/result'
    data = {
        'userName': name,
        'score': float(score),  # cast to a plain float so it is JSON-serializable
        'today': timestamp
    }
    headers = {
        'Content-Type': 'application/json'
    }
    try:
        response = requests.post(url, data=json.dumps(data), headers=headers)
        response.raise_for_status()
        print("Result sent to Flask server")
    except requests.exceptions.HTTPError as err:
        print(f"HTTP error occurred: {err}")
    except Exception as err:
        print(f"Error occurred: {err}")

# Server IP and port
HOST = ''
PORT = 13000

# Use TCP
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Socket created')
s.bind((HOST, PORT))
print('Socket bind complete')

# Wait for client connections (queue up to 10 pending connections)
s.listen(10)
print('Socket now listening')

isThread = True
score = 0
name = None

def server():
    # Accept a connection; conn is the client socket, addr its bound address
    conn, addr = s.accept()

    global isThread, score, name, now_time, recognition_res
    start = time.time_ns()
    frame_count = 0
    fps = -1
    unknown_cnt = 0
    recognition_res = {}
    last_recognition_times = {}

    while True:
        # Receive the 16-byte length header, then the JPEG payload
        length = recvall(conn, 16)
        if length is None:
            break  # client disconnected
        stringData = recvall(conn, int(length))
        if stringData is None:
            break
        data = np.frombuffer(stringData, dtype='uint8')
        frame = cv2.imdecode(data, cv2.IMREAD_COLOR)

        bboxs, landmarks = get_face(frame)
        h, w, c = frame.shape
        tl = 1  # line/font thickness
        clors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (0, 255, 255)]

        for i in range(len(bboxs)):
            # Draw the face box and its five landmarks
            x1, y1, x2, y2 = bboxs[i]
            cv2.rectangle(frame, (x1, y1), (x2, y2), (204, 204, 255), 2)
            for x in range(5):
                point_x = int(landmarks[i][2 * x])
                point_y = int(landmarks[i][2 * x + 1])
                cv2.circle(frame, (point_x, point_y), tl + 1, clors[x], -1)

            # Run recognition on one face at a time in a background thread
            if isThread:
                isThread = False
                face_image = frame[y1:y2, x1:x2]
                thread = Thread(target=recognition, args=(face_image,))
                thread.start()

            current_time = time.time()
            if name is not None:
                if score < 0.51:
                    caption = "UNKNOWN"
                    unknown_cnt += 1
                else:
                    # Report each identity at most once every 7 seconds
                    if name not in last_recognition_times or current_time - last_recognition_times[name] >= 7:
                        now_time = dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
                        recognition_res[name] = score, now_time
                        caption = f"{name.split('_')[0]}:{score:.2f}:{now_time}"

                        # Verify the recognized name matches one of the dataset folders
                        path = './dataset/face-datasets'
                        for d in make_dir_list(path):
                            if d in name:
                                break

                        # Print the result
                        if name in recognition_res:
                            print(recognition_res)
                        last_recognition_times[name] = current_time
                        send_result_to_flask_server(name, score, now_time)

            # Remove records older than 60 seconds
            for key in list(last_recognition_times.keys()):
                if current_time - last_recognition_times[key] >= 60:
                    del last_recognition_times[key]
                    if key in recognition_res:
                        del recognition_res[key]

        # Update the FPS estimate every 30 frames
        frame_count += 1
        if frame_count >= 30:
            end = time.time_ns()
            fps = 1e9 * frame_count / (end - start)
            frame_count = 0
            start = time.time_ns()
        if fps > 0:
            fps_label = "FPS: %.2f" % fps
            cv2.putText(frame, fps_label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        #cv2.imshow("Face Recognition", frame)
        if cv2.waitKey(25) != -1:
            break
if __name__=="__main__":
server()