-
Notifications
You must be signed in to change notification settings - Fork 3
/
Copy pathface_recognition_adapter.py
122 lines (89 loc) · 4.8 KB
/
face_recognition_adapter.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
#!/usr/bin/python3
import argparse
import numpy as np
import ctypes as C
import cv2
import os
import sys
def get_parser():
    """Build and return the command-line parser for this script.

    The parser accepts a single positional argument: the path of the
    image to run face recognition on.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('path', help='Path to the image for Face Recognition')
    return arg_parser
# Resolve the directory containing this script so the shared library can be
# found relative to it, regardless of the current working directory.
current_path=os.path.dirname(os.path.abspath(sys.argv[0]))
path_library=current_path+'/libface_recognition.so'
# Load the native face-recognition library via ctypes; all detection,
# alignment and recognition work is delegated to this shared object.
face_recognition = C.cdll.LoadLibrary(path_library)
# Fixed class order shared with the native library: each class occupies four
# consecutive int slots (x, y, width, height) in the ground-truth array passed
# to recognizeFaces, so this ordering must not change independently.
classesSequence = ['Asyok', 'daryafret', 'Nastya', 'Malinka', 'Ion', 'unknown']
def recognize_faces(image, path_for_calculate_map, path_for_result_detection_net, ground_truth):
    """Run the native face detection/recognition pipeline on `image`.

    Parameters:
        image: uint8 numpy array of shape (rows, cols, depth) — e.g. the BGR
            array produced by cv2.imread.
        path_for_calculate_map: file path the native code receives; presumably
            where it writes detection results for mAP evaluation — TODO confirm
            against the native recognizeFaces implementation.
        path_for_result_detection_net: second file path handed to the native
            code; presumably the raw detection-net output file — TODO confirm.
        ground_truth: optional dict mapping class name -> box dict with keys
            'x', 'y', 'width', 'height'. Class names must appear in
            classesSequence ('Unknown' is normalized to 'unknown'). Pass a
            falsy value to skip ground truth entirely.

    Returns:
        (detection_results, recognition_results, align_results, recognition_time):
        two uint8 images the same shape as `image`, filled in by the native
        library; a list of per-face aligned image arrays; and the recognition
        time reported by the native library (a C double).
    """
    (rows, cols, depth) = (image.shape[0], image.shape[1], image.shape[2])
    # Output buffers the native code fills in place via raw pointers.
    detection_results = np.zeros(dtype=np.uint8, shape=(rows, cols, depth))
    recognition_results = np.zeros(dtype=np.uint8, shape=(rows, cols, depth))
    path_map = path_for_calculate_map.encode('utf-8')
    path_res = path_for_result_detection_net.encode('utf-8')
    ground_truth_data = None
    if ground_truth:
        # Flat int32 array of 6 classes x 4 box fields = 24 slots, laid out in
        # classesSequence order as expected by the native side.
        ground_truth_data = np.zeros(dtype=np.int32, shape=(24))
        for key, value in ground_truth.items():
            if key == 'Unknown':
                key = 'unknown'
            index = classesSequence.index(key)
            index *= 4
            ground_truth_data[index] = value['x']
            ground_truth_data[index + 1] = value['y']
            ground_truth_data[index + 2] = value['width']
            ground_truth_data[index + 3] = value['height']
        face_recognition.recognizeFaces(image.ctypes.data_as(C.POINTER(C.c_ubyte)), rows, cols,
                                        detection_results.ctypes.data_as(C.POINTER(C.c_ubyte)),
                                        recognition_results.ctypes.data_as(C.POINTER(C.c_ubyte)),
                                        C.c_char_p(path_map),
                                        C.c_char_p(path_res),
                                        ground_truth_data.ctypes.data_as(C.POINTER(C.c_int)))
    else:
        # ground_truth_data is None here; ctypes converts None to a NULL pointer.
        face_recognition.recognizeFaces(image.ctypes.data_as(C.POINTER(C.c_ubyte)), rows, cols,
                                        detection_results.ctypes.data_as(C.POINTER(C.c_ubyte)),
                                        recognition_results.ctypes.data_as(C.POINTER(C.c_ubyte)),
                                        C.c_char_p(path_map),
                                        C.c_char_p(path_res),
                                        ground_truth_data)
        # NOTE(review): looks like leftover debug output (always prints None on
        # this branch) — consider removing.
        print(ground_truth_data)
    # Fetch per-face aligned crops: first the count, then each crop's size,
    # then one packed buffer holding all crops back to back.
    aligned_faces_count = face_recognition.getAlignedFacesCount()
    align_width = np.zeros(dtype=np.uint32, shape=(1, aligned_faces_count))
    align_height = np.zeros(dtype=np.uint32, shape=(1, aligned_faces_count))
    face_recognition.getAlignedFacesSizes(align_width.ctypes.data_as(C.POINTER(C.c_uint)),
                                          align_height.ctypes.data_as(C.POINTER(C.c_uint)))
    align_cols = 0
    align_rows = 0
    for i in range(aligned_faces_count):
        align_cols += align_width[0][i]
        align_rows += align_height[0][i]
    # Buffer sized rows_total * cols_total * depth; assumes this equals the sum
    # of the individual crop sizes consumed below — TODO confirm this matches
    # what getAlignedFaces writes (overallocates when crop sizes differ).
    align_data = np.zeros(dtype=np.uint8, shape=(1, align_rows * align_cols * depth))
    face_recognition.getAlignedFaces(align_data.ctypes.data_as(C.POINTER(C.c_ubyte)))
    align_results = []
    for i in range(aligned_faces_count):
        width = align_width[0][i]
        height = align_height[0][i]
        size = width * height * depth
        # Peel one crop off the front of the packed buffer, then advance.
        face = align_data[:, :size]
        align_data = align_data[:, size:]
        align_results.append(face.reshape(height, width, depth))
    # getFaceRecognitionTime returns a C double; declare restype so ctypes
    # does not truncate it to int.
    face_recognition.getFaceRecognitionTime.restype = C.c_double
    recognition_time = face_recognition.getFaceRecognitionTime()
    # Release per-call native state before returning.
    face_recognition.clear()
    return detection_results, recognition_results, align_results, recognition_time
def set_class_for_image(label):
    """Tell the native library which class label the next image belongs to."""
    encoded_label = label.encode('utf-8')
    face_recognition.setCurrentClass(C.c_char_p(encoded_label))
def clear_class():
    """Clear the class label previously set in the native library."""
    face_recognition.clearCurrentClass()
def dump_feature_vectors_to_json(json_path):
    """Ask the native library to write its feature vectors to `json_path`."""
    encoded_path = json_path.encode('utf-8')
    face_recognition.dumpFeatureVectorsToJson(C.c_char_p(encoded_path))
if __name__ == '__main__':
    # Parse the single positional argument: the image to run recognition on.
    image_path = get_parser().parse_args().path
    image = cv2.imread(image_path)
    if image is None:
        # cv2.imread returns None (no exception) for a missing or unreadable
        # file; fail fast with a clear message instead of crashing later
        # inside recognize_faces with an opaque AttributeError.
        sys.exit("Error: could not read image '" + image_path + "'")
    fname = os.path.basename(image_path)
    fnameWithoutExt = os.path.splitext(fname)[0]
    # Per-image detection text files are derived from the input file name and
    # placed inside the mAP evaluation directory tree next to this script.
    path_for_calculate_map = os.path.join(
        current_path, 'mAP-master', 'input', 'detection-results', fnameWithoutExt + '.txt')
    path_for_result_detection_net = os.path.join(
        current_path, 'mAP-master', 'data', 'predicted', fnameWithoutExt + '.txt')
    # No ground truth in standalone mode; the native side receives NULL.
    detects, recogns, aligns, time = recognize_faces(
        image, path_for_calculate_map, path_for_result_detection_net, None)
    cv2.imshow('Recognized faces', recogns)
    cv2.waitKey()
    print("Total time in ms: " + str(time))