face_id_from_webcam.py
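"""
Webcam face identification demo.

Detects faces with the bundled Haar cascade, optionally trains an LBPH face
recognizer (from the opencv-contrib `cv2.face` module) on a batch of captured
frames of a single person, saves the model to `individual_face.xml`, and then
tries to identify that face in the live video stream.
"""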
import os
import logging
import numpy
import cv2
log = logging.getLogger()
log.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
log.addHandler(sh)
VIDEO_DEVICE_INT = 0  # index of the webcam handed to cv2.VideoCapture
LBPH_LABEL_NUM = 0    # single label used for the one trained face
def _get_color_and_gray_frame_helper(capture_device):
    # read() also returns a success flag, which this demo ignores
    ret, frame = capture_device.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return frame, gray
def _detect_faces_helper(face_classifier, gray_frame):
    """
    Shameful helper so universal changes to the detect method
    only have to happen in one place
    """
    height, width = gray_frame.shape
    # detectMultiScale expects minSize as a (width, height) pair
    min_size = (int(width / 4), int(height / 4))
    return face_classifier.detectMultiScale(gray_frame,
                                            scaleFactor=1.1,
                                            minNeighbors=2,
                                            flags=cv2.CASCADE_SCALE_IMAGE,
                                            minSize=min_size)
def main():
    haarcascade_filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                        'haarcascade_frontalface_default.xml')
    face_classifier = cv2.CascadeClassifier(haarcascade_filepath)
    # should really test here and see if this device number works
    print('Using device number: {}'.format(VIDEO_DEVICE_INT))
    capture_device = cv2.VideoCapture(VIDEO_DEVICE_INT)
    train_face = False
    print('If you want to train a face, press `p`, else press `q`')
    # This is the best loop ever! So much control
    while True:
        frame, gray = _get_color_and_gray_frame_helper(capture_device)
        faces = _detect_faces_helper(face_classifier, gray)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.imshow('YOUR FACE', frame)
        c = cv2.waitKey(1) & 0xFF
        if c == ord('q'):
            break
        elif c == ord('p'):
            train_face = True
            break
    cv2.destroyAllWindows()
    # LBPH face recognizer lives in the opencv-contrib `cv2.face` module.
    # Newer OpenCV builds name this cv2.face.LBPHFaceRecognizer_create().
    face_recognizer = cv2.face.createLBPHFaceRecognizer()
    # If training for face
    if train_face:
        print('Training for Yo Face!')
        images = []
        number_of_images = 35
        for _ in range(number_of_images):
            # don't really need the color frame here, just get gray
            _, gray = _get_color_and_gray_frame_helper(capture_device)
            # check for faces
            faces = face_classifier.detectMultiScale(gray, 1.3, 5)
            if len(faces) == 0:
                # nothing detected in this frame, grab another one
                continue
            # since we're assuming one face, if more assume bad data
            if len(faces) > 1:
                log.debug('Faces in image: %s', faces)
                log.debug('More than one face shown!')
                break
            (x, y, w, h) = faces[0]
            # This gets the region of interest out of the gray scale frame
            # (numpy indexes rows (y) first, then columns (x))
            face_region_of_interest = gray[y:y + h, x:x + w]
            images.append(face_region_of_interest)
        # One label per collected image; every sample is the same person
        labels = numpy.full(len(images), LBPH_LABEL_NUM, dtype=numpy.int32)
        face_recognizer.train(images, labels)
        face_recognizer.save('individual_face.xml')
    if not train_face:
        train_face_filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                           'individual_face.xml')
        # log.debug('face filepath: %s', train_face_filepath)
        """
        if os.getenv("TRAINED_FACE"):
            print('Loading trained face from environmental variable')
            train_face_filepath = os.getenv("TRAINED_FACE")
        """
        face_recognizer.load(train_face_filepath)
    print("Identifying your face!")
    while True:
        color, gray = _get_color_and_gray_frame_helper(capture_device)
        faces = _detect_faces_helper(face_classifier, gray)
        log.debug('detected faces in Identify face: %s', faces)
        try:
            for (x, y, w, h) in faces:
                cv2.rectangle(color, (x, y), (x + w, y + h), (255, 0, 0), 2)
                # numpy indexes rows (y) first, then columns (x)
                face_region_of_interest = gray[y:y + h, x:x + w]
                # predict() returns the label first, then the confidence (distance)
                label, confidence = face_recognizer.predict(face_region_of_interest)
                print('Estimated Trained Face : {}, {}, {}'.format(label, confidence, w))
                cv2.imshow('FACE', face_region_of_interest)
        except cv2.error:
            pass
        if cv2.waitKey(3) & 0xFF == ord('q'):
            break
    capture_device.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
main()