Skip to content

Commit

Permalink
Added cleaned up experimental and data processing scripts.
Browse files Browse the repository at this point in the history
  • Loading branch information
tursmanor committed Jul 15, 2020
1 parent 364ff29 commit e94a3ad
Show file tree
Hide file tree
Showing 47 changed files with 51,221 additions and 1 deletion.
96 changes: 96 additions & 0 deletions Data-Processing/cnn_face_detector.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This example shows how to run a CNN based face detector using dlib. The
# example loads a pretrained model and uses it to find faces in images. The
# CNN model is much more accurate than the HOG based model shown in the
# face_detector.py example, but takes much more computational power to
# run, and is meant to be executed on a GPU to attain reasonable speed.
#
# You can download the pre-trained model from:
# http://dlib.net/files/mmod_human_face_detector.dat.bz2
#
# The examples/faces folder contains some jpg images of people. You can run
# this program on them and see the detections by executing the
# following command:
# ./cnn_face_detector.py mmod_human_face_detector.dat ../examples/faces/*.jpg
#
#
# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
# You can install dlib using the command:
# pip install dlib
#
# Alternatively, if you want to compile dlib yourself then go into the dlib
# root folder and run:
# python setup.py install
#
# Compiling dlib should work on any operating system so long as you have
# CMake installed. On Ubuntu, this can be done easily by running the
# command:
# sudo apt-get install cmake
#
# Also note that this example requires Numpy which can be installed
# via the command:
# pip install numpy

'''
This detector returns a mmod_rectangles object. This object contains a list of mmod_rectangle objects.
These objects can be accessed by simply iterating over the mmod_rectangles object
The mmod_rectangle object has two member variables, a dlib.rectangle object, and a confidence score.
It is also possible to pass a list of images to the detector.
- like this: dets = cnn_face_detector([image list], upsample_num, batch_size = 128)
In this case it will return a mmod_rectangless object.
This object behaves just like a list of lists and can be iterated over.
'''

import sys
import dlib
import os
import cv2

# ---------------------------------------------------------------------------
# Detect the single most-confident face in every frame of a directory of
# images named frames0001.jpg, frames0002.jpg, ... and append one bounding
# box per frame (in frame order) to <argv[4]>bounding-boxes.txt.
#
# argv[1]: path to mmod_human_face_detector.dat
# argv[2]: directory containing the input frames
# argv[3]: directory to write (optional) cropped face images to
# argv[4]: prefix for the bounding-box output file
# ---------------------------------------------------------------------------
print("Dlib using cuda?")
print(dlib.DLIB_USE_CUDA)

cnn_face_detector = dlib.cnn_face_detection_model_v1(sys.argv[1])
frameDir = sys.argv[2]
outDir = sys.argv[3]
numImg = len([name for name in os.listdir(frameDir)
              if os.path.isfile(os.path.join(frameDir, name))])
padding = 100  # pixels of context kept around the detected face when cropping
saveCrop = False

# Exactly one bounding box is written per frame, in frame order, so that
# downstream consumers (detectFeatures.py) can read line N for frame N.
with open(sys.argv[4] + "bounding-boxes.txt", "w+") as boundingFile:
    d = None  # most recent detection; reused when a frame has no face

    # BUGFIX: was range(5000, numImg + 1), which silently skipped the first
    # 4999 frames and broke the frame <-> bbox-line alignment downstream.
    for f in range(1, numImg + 1):
        filename = frameDir + "frames" + '{0:04d}'.format(f) + ".jpg"

        print("Processing file: {}".format(f))
        img = dlib.load_rgb_image(filename)
        dets = cnn_face_detector(img, 1)
        h, w = img.shape[:2]

        # Only keep the most confident face -- we only expect one face per frame
        if len(dets) == 0:
            # BUGFIX: the original referenced `d` even when no detection had
            # ever been made, raising NameError on the first frame.
            if d is None:
                raise RuntimeError('No face detected in frame %d and no '
                                   'previous detection to fall back on.' % f)
            print('No faces detected. Using last detection result.')
        else:
            d = max(dets, key=lambda a: a.confidence)

        if saveCrop:
            # Clamp the padded crop window to the image bounds.
            y1 = max(d.rect.top() - padding, 0)
            y2 = min(d.rect.bottom() + padding, h)
            x1 = max(d.rect.left() - padding, 0)
            x2 = min(d.rect.right() + padding, w)
            cropImg = img[y1:y2, x1:x2]

            # dlib loads RGB; cv2.imwrite expects BGR, so swap channels first.
            # (Was COLOR_BGR2RGB -- the same channel permutation, but the
            # misleading flag name hid the actual intent.)
            cropImg = cv2.cvtColor(cropImg, cv2.COLOR_RGB2BGR)
            cv2.imwrite(outDir + "%04d.jpg" % f, cropImg)

        # Save detection box coordinates (left, top, right, bottom)
        boundingFile.write('%d, %d, %d, %d\n' %
                           (d.rect.left(), d.rect.top(),
                            d.rect.right(), d.rect.bottom()))
231 changes: 231 additions & 0 deletions Data-Processing/data-pipeline.bash
Original file line number Diff line number Diff line change
@@ -0,0 +1,231 @@
#!/usr/bin/env bash
# Run format: bash -i data-pipeline <Video folder> <Script folder> <Audio filename for lipgan>
# Script will:
# 1. Split video into frames
# 2. Crop around the faces in these frames, and save the bounding box coordinates to a txt file
# 3. Run 2D landmark detection on these cropped images
# 4. Save a visual of the results
# 5. Deepfake creation, with steps 1-3 repeated
#
# Assumes 6 cameras with 29.9fps mp4 video, named camera1.MP4, ..., camera6.MP4
#
# Written by Eleanor Tursman
# Last updated 7/2020
#
# BUGFIXES in this revision (the per-camera commands are now loops, which
# removes the copy-paste errors the duplicated blocks had accumulated):
#   - camera4's downsampled video was written into cam3-lipgan/ instead of
#     cam4-lipgan/, so the cam4 LipGAN pass could never find its input.
#   - cam5-lipgan/cropped was never created, so the cam5 post-lipgan face
#     detection pass failed when writing its crops.

VIDEO_LOCATION=${1:-""}
SCRIPT_LOCATION=${2:-""}
AUDIO_FILENAME=${3:-""}

# Exit the moment a command fails
set -euo pipefail

cd "${VIDEO_LOCATION}"

CAMERAS="1 2 3 4 5 6"

################## Convert video to frames ##################
echo "Converting video to frames..."

for i in ${CAMERAS}; do
    mkdir -p "${VIDEO_LOCATION}cam${i}-frames"
done

for i in ${CAMERAS}; do
    ffmpeg -i "camera${i}.MP4" -loglevel quiet -q:v 2 "${VIDEO_LOCATION}cam${i}-frames/frames%04d.jpg"
    echo "Video ${i} done."
done

################## Get face bounding boxes ##################
echo "Cropping faces..."

for i in ${CAMERAS}; do
    mkdir -p "${VIDEO_LOCATION}cam${i}-cropped"
done
mkdir -p "${VIDEO_LOCATION}bounding-boxes"

for i in ${CAMERAS}; do
    python "${SCRIPT_LOCATION}cnn_face_detector.py" "${SCRIPT_LOCATION}mmod_human_face_detector.dat" "${VIDEO_LOCATION}cam${i}-frames/" "${VIDEO_LOCATION}cam${i}-cropped/" "${VIDEO_LOCATION}bounding-boxes/cam${i}-"
    echo "Video ${i} done."
done

################## Get 2D landmarks ##################
echo "Running 2D landmark detection..."

for i in ${CAMERAS}; do
    mkdir -p "${VIDEO_LOCATION}cam${i}-landmarks"
done

for i in ${CAMERAS}; do
    python3 "${SCRIPT_LOCATION}detectFeatures.py" "${VIDEO_LOCATION}cam${i}-frames/" "${VIDEO_LOCATION}cam${i}-landmarks/" "${VIDEO_LOCATION}bounding-boxes/cam${i}-bounding-boxes.txt"
    echo "Video ${i} done."
done

################## Visual of 2D landmarks on original input ##################
echo "Creating visual of results..."

mkdir -p "${VIDEO_LOCATION}visualization"

python3 "${SCRIPT_LOCATION}visualize-landmarks.py" "${VIDEO_LOCATION}"
ffmpeg -i "${VIDEO_LOCATION}visualization/%04d.jpg" -loglevel quiet -vf scale=1366:768 -q:v 2 "${VIDEO_LOCATION}landmarks-visual.mp4"

################## Create deepfake ##################
echo "Creating deepfake with LipGan..."

for i in ${CAMERAS}; do
    mkdir -p "${VIDEO_LOCATION}cam${i}-lipgan"
done

# To run lipgan on our machine, we needed to downsample & restrict the video length to the first 4000 frames
# BUGFIX: camera4's output previously landed in cam3-lipgan/ (copy-paste typo).
for i in ${CAMERAS}; do
    ffmpeg -i "${VIDEO_LOCATION}camera${i}.MP4" -ss 0.0 -frames:v 4000 -framerate 29.97 -vf scale=1280:720 -crf 0 "${VIDEO_LOCATION}cam${i}-lipgan/cam${i}-first-4k.mp4"
    echo "Video ${i} done"
done

# Create new bboxes based on landmark fit
for i in ${CAMERAS}; do
    python "${SCRIPT_LOCATION}get-new-bboxes.py" "${i}" "${VIDEO_LOCATION}"
done

source ~/anaconda3/etc/profile.d/conda.sh
conda activate lipgan

cd "${SCRIPT_LOCATION}../LipGAN/"

# LipGAN writes a fixed results/result_voice.mp4; move it into place after
# each camera's run before the next run overwrites it.
for i in ${CAMERAS}; do
    python batch_inference.py --checkpoint_path logs/lipgan_best_residual.h5 --face "${VIDEO_LOCATION}cam${i}-lipgan/cam${i}-first-4k.mp4" --fps 29.97 --audio "audio/${AUDIO_FILENAME}.wav" --mat "audio/${AUDIO_FILENAME}.mat" --results_dir results/ --bboxes "${VIDEO_LOCATION}bounding-boxes/cam${i}-lipgan-bounding-boxes.txt"
    mv results/result_voice.mp4 "${VIDEO_LOCATION}cam${i}-lipgan/cam${i}-lipgan.mp4"
done

conda deactivate

################## Process deepfake ##################
echo "Converting deepfake video to frames..."

for i in ${CAMERAS}; do
    mkdir -p "${VIDEO_LOCATION}cam${i}-lipgan/frames"
done

for i in ${CAMERAS}; do
    ffmpeg -i "${VIDEO_LOCATION}cam${i}-lipgan/cam${i}-lipgan.mp4" -loglevel quiet -q:v 2 "${VIDEO_LOCATION}cam${i}-lipgan/frames/frames%04d.jpg"
    echo "Video ${i} done."
done

echo "Cropping faces..."

# BUGFIX: cam5-lipgan/cropped was previously missing from this mkdir block.
for i in ${CAMERAS}; do
    mkdir -p "${VIDEO_LOCATION}cam${i}-lipgan/cropped"
done

for i in ${CAMERAS}; do
    python "${SCRIPT_LOCATION}cnn_face_detector.py" "${SCRIPT_LOCATION}mmod_human_face_detector.dat" "${VIDEO_LOCATION}cam${i}-lipgan/frames/" "${VIDEO_LOCATION}cam${i}-lipgan/cropped/" "${VIDEO_LOCATION}bounding-boxes/cam${i}-post-lipgan-"
    echo "Video ${i} done."
done

echo "Running 2D landmark detection..."

for i in ${CAMERAS}; do
    mkdir -p "${VIDEO_LOCATION}cam${i}-lipgan/landmarks"
done

for i in ${CAMERAS}; do
    python3 "${SCRIPT_LOCATION}detectFeatures.py" "${VIDEO_LOCATION}cam${i}-lipgan/frames/" "${VIDEO_LOCATION}cam${i}-lipgan/landmarks/" "${VIDEO_LOCATION}bounding-boxes/cam${i}-post-lipgan-bounding-boxes.txt"
    echo "Video ${i} done."
done
38 changes: 38 additions & 0 deletions Data-Processing/detectFeatures.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
# From: https://github.com/1adrianb/face-alignment

import face_alignment
from skimage import io
import numpy as np
import sys
import os

# Command-line arguments:
#   argv[1]: directory of input frames named frames0001.jpg, frames0002.jpg, ...
#   argv[2]: directory to write per-frame landmark .npy files into
#   argv[3]: bounding-box txt file (one "left, top, right, bottom" line per
#            frame, produced by cnn_face_detector.py)
inDir = sys.argv[1]
outDir = sys.argv[2]
bounding = sys.argv[3]

# Uncropped image at a time, dlib detector
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D)

numImg = len([name for name in os.listdir(inDir) if os.path.isfile(os.path.join(inDir, name))])

# BUGFIX: the bounding-box file is now managed by a context manager so it is
# closed even if an exception is raised mid-loop (the explicit close() at the
# end of the script was skipped in that case).
with open(bounding, "r") as boundingFile:
    for f in range(1, numImg + 1):
        frameId = '{0:04d}'.format(f)
        filename = inDir + "frames" + frameId + ".jpg"

        # One box per line, in the same frame order the detector wrote them.
        box = boundingFile.readline().split(',')
        boxInt = [[int(box[0]), int(box[1]), int(box[2]), int(box[3])]]

        print("Processing file: {}".format(f))
        img = io.imread(filename)
        pred = fa.get_landmarks_from_image(img, detected_faces=boxInt)

        # BUGFIX: the "no face" case is detected explicitly via `is None`
        # instead of the fragile np.array(None).size == 1 check.
        if pred is None:
            print("No face detected")
        else:
            # Reshape the prediction to 68 rows, one per landmark.
            curData = np.reshape(np.array(pred), (68, -1))
            np.save(outDir + 'landmarks2D-' + frameId + '.npy', curData)
Loading

0 comments on commit e94a3ad

Please sign in to comment.