import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
from pandas.io.parsers import read_csv
from sklearn.utils import shuffle


def load_data(test=False):
    """
    Load data from FTEST if *test* is True, otherwise from FTRAIN.

    The CSV files must be located in a `data` directory.
    """
    FTRAIN = 'data/training.csv'
    FTEST = 'data/test.csv'

    fname = FTEST if test else FTRAIN
    df = read_csv(os.path.expanduser(fname))  # load the CSV into a dataframe

    # The Image column has pixel values separated by spaces; convert
    # each value string to a numpy array:
    df['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=' '))

    df = df.dropna()  # drop all rows that have missing values

    X = np.vstack(df['Image'].values) / 255.  # scale pixel values to [0, 1]
    X = X.astype(np.float32)
    X = X.reshape(-1, 96, 96, 1)  # reshape each image to 96 x 96 x 1

    if not test:  # only FTRAIN has target columns
        y = df[df.columns[:-1]].values
        y = (y - 48) / 48  # scale target coordinates to [-1, 1]
        X, y = shuffle(X, y, random_state=42)  # shuffle training data
        y = y.astype(np.float32)
    else:
        y = None

    return X, y
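
# Usage sketch: assumes the Kaggle Facial Keypoints Detection CSVs are
# present under data/ as training.csv and test.csv.
#
#   X_train, y_train = load_data()      # X_train: (N, 96, 96, 1), y_train: (N, 30)
#   X_test, _ = load_data(test=True)    # test.csv has no keypoint columns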


def plot_data(img, landmarks, axis):
    """
    Plot the image (img) along with its normalized facial keypoints (landmarks).
    """
    axis.imshow(np.squeeze(img), cmap='gray')  # plot the image
    landmarks = landmarks * 48 + 48  # undo the normalization

    # Plot the keypoints
    axis.scatter(landmarks[0::2],
                 landmarks[1::2],
                 marker='o',
                 c='c',
                 s=40)
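
# Example usage (sketch, assumes X_train and y_train came from load_data()):
#
#   fig, ax = plt.subplots(figsize=(3, 3))
#   plot_data(X_train[0], y_train[0], ax)
#   plt.show()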


def plot_keypoints(img_path,
                   face_cascade=cv2.CascadeClassifier('haarcascade_frontalface_alt.xml'),
                   model_path='my_model.h5'):
    """
    Detect faces in the image at img_path and, if exactly one face is found,
    plot the facial keypoints predicted by the trained model on top of it.
    """
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)

    fig = plt.figure(figsize=(5, 5))
    ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[])
    ax.imshow(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB))

    if len(faces) == 0:
        plt.title('no faces detected')
    elif len(faces) > 1:
        plt.title('too many faces detected')
        for (x, y, w, h) in faces:
            rectangle = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)
            ax.imshow(cv2.cvtColor(rectangle, cv2.COLOR_BGR2RGB))
    elif len(faces) == 1:
        plt.title('one face detected')
        x, y, w, h = faces[0]

        # Crop the detected face, convert it to grayscale, and resize it to the
        # 96 x 96 input the model expects, with pixel values scaled to [0, 1].
        bgr_crop = img[y:y + h, x:x + w]
        orig_shape_crop = bgr_crop.shape
        gray_crop = cv2.cvtColor(bgr_crop, cv2.COLOR_BGR2GRAY)
        resize_gray_crop = cv2.resize(gray_crop, (96, 96)) / 255.

        # Predict keypoints on the 96 x 96 crop, then map them back onto the
        # original image: undo the [-1, 1] normalization, rescale from 96 x 96
        # to the crop's size (width for x, height for y), and offset by the
        # crop's position in the image.
        model = load_model(model_path)
        landmarks = np.squeeze(model.predict(
            np.expand_dims(np.expand_dims(resize_gray_crop, axis=-1), axis=0)))
        ax.scatter(((landmarks[0::2] * 48 + 48) * orig_shape_crop[1] / 96) + x,
                   ((landmarks[1::2] * 48 + 48) * orig_shape_crop[0] / 96) + y,
                   marker='o', c='c', s=40)

    plt.show()
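
# Minimal end-to-end sketch: the image path below is a placeholder, and the
# Haar cascade XML and trained Keras model (my_model.h5) are assumed to sit
# next to this script, as in the project layout above.
if __name__ == '__main__':
    plot_keypoints('images/example_face.jpg')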