-
Notifications
You must be signed in to change notification settings - Fork 1
/
faceTracking.py
executable file
·85 lines (68 loc) · 2.14 KB
/
faceTracking.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
'''
File name: faceTracking.py
Author:
Date created:
'''
'''
File clarification:
Generate a video with tracked features and bounding boxes drawn around detected face regions.
- Input rawVideo: a video containing one or more faces
- Output tracked_video: the generated video with tracked features and a bounding box for each face region
'''
import numpy as np
import cv2
import matplotlib.pyplot as plt
from detectFace import detectFace
from getFeatures import getFeatures
from estimateAllTranslation import estimateAllTranslation
from applyGeometricTransformation import applyGeometricTransformation
from helper import *
def faceTracking(rawVideo):
    """Track faces through a video and write an annotated output video.

    Detects faces in the first frame, extracts trackable features inside
    each face bounding box, then follows those features frame-to-frame,
    redrawing the (transformed) bounding boxes on every frame.

    Parameters
    ----------
    rawVideo : str
        Path to the input video containing one or more faces.

    Returns
    -------
    str
        Path of the generated tracked video file.

    Raises
    ------
    IOError
        If the first frame cannot be read from the input video.
    """
    cap = cv2.VideoCapture(rawVideo)

    # First frame: detect faces and extract features inside each box.
    ret, cur_img = cap.read()
    if not ret:
        # BUGFIX: original passed an unread (None) frame straight to
        # detectFace when the video could not be opened.
        cap.release()
        raise IOError('Cannot read first frame from {}'.format(rawVideo))
    bbox = detectFace(cur_img)
    startXs, startYs = getFeatures(cur_img, bbox)

    # Initialize the video writer using the source frame size.
    h, w, _ = cur_img.shape
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    # change tracked_video name for each run
    tracked_video = './Output_Video/tracked_video.avi'
    output = cv2.VideoWriter(tracked_video, fourcc, 20, (w, h), True)

    # Draw boxes on the first frame and write it out.
    imgwbox = drawBox(cur_img, bbox)
    output.write(imgwbox)

    pre_img = cur_img
    count = 0
    while cap.isOpened():
        ret, cur_img = cap.read()
        if not ret:
            break

        # Estimate how the tracked features moved between consecutive
        # frames, then move the bounding boxes accordingly.
        newXs, newYs = estimateAllTranslation(startXs, startYs, pre_img, cur_img)
        Xs, Ys, newbbox = applyGeometricTransformation(startXs, startYs, newXs, newYs, bbox)

        # Count surviving features across all boxes. When too few remain,
        # tracking has degraded: re-detect faces and re-extract features.
        # (BUGFIX: original used a Python 2 print statement here, a
        # SyntaxError under Python 3.)
        total_features = sum(len(xs) for xs in Xs)
        print(total_features)
        if total_features < 10:
            newbbox = detectFace(cur_img)
            Xs, Ys = getFeatures(cur_img, newbbox)

        # Roll state forward for the next iteration.
        pre_img = cur_img
        startXs = Xs
        startYs = Ys
        bbox = newbbox

        imgwbox = drawBox(cur_img, bbox)
        output.write(imgwbox)

        # Progress report. (BUGFIX: original was
        # `print ('...').format(count)`, which under Python 3 calls
        # .format on print's None return value -> AttributeError.)
        print('{} frame finished'.format(count))
        count += 1

    # Release video resources.
    cv2.destroyAllWindows()
    cap.release()
    output.release()
    return tracked_video
if __name__ == '__main__':
    # When run as a script, track faces in the sample clip below.
    input_path = "./Datasets/Difficult/StrangerThings.mp4"
    faceTracking(input_path)