Skip to content

Commit

Permalink
Reduced the impact of eye blinks and opening the mouth on head pose and added new models to benchmark.
Browse files Browse the repository at this point in the history
  • Loading branch information
emilianavt committed Dec 7, 2020
1 parent 8459f8b commit a0ccd9a
Show file tree
Hide file tree
Showing 2 changed files with 10 additions and 6 deletions.
6 changes: 3 additions & 3 deletions facetracker.py
Original file line number Diff line number Diff line change
Expand Up @@ -134,8 +134,8 @@ def flush(self):
model_base_path = get_model_base_path(args.model_dir)
im = cv2.imread(os.path.join(model_base_path, "benchmark.bin"), cv2.IMREAD_COLOR)
results = []
for model_type in [3, 2, 1, 0, -1, -2]:
tracker = Tracker(224, 224, threshold=0.1, max_threads=args.max_threads, max_faces=1, discard_after=0, scan_every=0, silent=True, model_type=model_type, model_dir=args.model_dir, no_gaze=(model_type < 0), detection_threshold=0.1, use_retinaface=0, max_feature_updates=900, static_model=True if args.no_3d_adapt == 1 else False)
for model_type in [3, 2, 1, 0, -1, -2, -3]:
tracker = Tracker(224, 224, threshold=0.1, max_threads=args.max_threads, max_faces=1, discard_after=0, scan_every=0, silent=True, model_type=model_type, model_dir=args.model_dir, no_gaze=(model_type == -1), detection_threshold=0.1, use_retinaface=0, max_feature_updates=900, static_model=True if args.no_3d_adapt == 1 else False)
tracker.detected = 1
tracker.faces = [(0, 0, 224, 224)]
total = 0.0
Expand Down Expand Up @@ -245,7 +245,7 @@ def flush(self):
first = False
height, width, channels = frame.shape
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
tracker = Tracker(width, height, threshold=args.threshold, max_threads=args.max_threads, max_faces=args.faces, discard_after=args.discard_after, scan_every=args.scan_every, silent=False if args.silent == 0 else True, model_type=args.model, model_dir=args.model_dir, no_gaze=False if args.gaze_tracking != 0 and args.model >= 0 else True, detection_threshold=args.detection_threshold, use_retinaface=args.scan_retinaface, max_feature_updates=args.max_feature_updates, static_model=True if args.no_3d_adapt == 1 else False, try_hard=args.try_hard == 1)
tracker = Tracker(width, height, threshold=args.threshold, max_threads=args.max_threads, max_faces=args.faces, discard_after=args.discard_after, scan_every=args.scan_every, silent=False if args.silent == 0 else True, model_type=args.model, model_dir=args.model_dir, no_gaze=False if args.gaze_tracking != 0 and args.model != -1 else True, detection_threshold=args.detection_threshold, use_retinaface=args.scan_retinaface, max_feature_updates=args.max_feature_updates, static_model=True if args.no_3d_adapt == 1 else False, try_hard=args.try_hard == 1)
if not args.video_out is None:
out = cv2.VideoWriter(args.video_out, cv2.VideoWriter_fourcc('F','F','V','1'), args.video_fps, (width * args.video_scale, height * args.video_scale))

Expand Down
10 changes: 7 additions & 3 deletions tracker.py
Original file line number Diff line number Diff line change
Expand Up @@ -328,10 +328,10 @@ def __init__(self, id, tracker):
self.id = id
self.frame_count = -1
self.tracker = tracker
self.contour_pts = [0,1,8,15,16,27,28,29,30,31,32,33,34,35,36,39,42,45]
self.contour_pts = [0,1,15,16,27,28,29,30,31,32,33,34,35]
self.face_3d = copy.copy(self.tracker.face_3d)
if self.tracker.model_type == -1:
self.contour_pts = [0,2,8,14,16,27,30,33]
self.contour_pts = [0,2,14,16,27,30,33]
self.reset()
self.alive = False
self.coord = None
Expand Down Expand Up @@ -767,9 +767,13 @@ def estimate_depth(self, face_info):
if not face_info.rotation is None:
success, face_info.rotation, face_info.translation = cv2.solvePnP(face_info.contour, image_pts, self.camera, self.dist_coeffs, useExtrinsicGuess=True, rvec=np.transpose(face_info.rotation), tvec=np.transpose(face_info.translation), flags=cv2.SOLVEPNP_ITERATIVE)
else:
# Include jaw point for initial estimate to increase stability
image_pts = np.array(lms)[face_info.contour_pts + [8], 0:2]
contour = np.array(face_info.face_3d[face_info.contour_pts + [8]])

rvec = np.array([0, 0, 0], np.float32)
tvec = np.array([0, 0, 0], np.float32)
success, face_info.rotation, face_info.translation = cv2.solvePnP(face_info.contour, image_pts, self.camera, self.dist_coeffs, useExtrinsicGuess=True, rvec=rvec, tvec=tvec, flags=cv2.SOLVEPNP_ITERATIVE)
success, face_info.rotation, face_info.translation = cv2.solvePnP(contour, image_pts, self.camera, self.dist_coeffs, useExtrinsicGuess=True, rvec=rvec, tvec=tvec, flags=cv2.SOLVEPNP_ITERATIVE)

rotation = face_info.rotation
translation = face_info.translation
Expand Down

0 comments on commit a0ccd9a

Please sign in to comment.