Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add files via upload -- Improvements #3

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
64 changes: 64 additions & 0 deletions faces-train.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
import cv2
import os
import numpy as np
from PIL import Image
import pickle

# Train an LBPH face recognizer from labeled images stored one-folder-per-person
# under ./images.  Writes the label map (pickles/face-labels.pickle) and the
# trained model (recognizers/face-trainner.yml) to disk.

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
image_dir = os.path.join(BASE_DIR, "images")

face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()

current_id = 0
label_ids = {}          # label name -> numeric id
y_labels = []           # numeric id per training sample
x_train = []            # grayscale face ROIs used for training
files_to_remove = []    # images in which no face was detected
dirs_to_remove = []     # directories containing no files at all

for root, dirs, files in os.walk(image_dir):
    # Empty directory -> no training data; remember it for cleanup.
    # (The original performed this check inside the file loop, where it
    # could never fire: the loop body does not run when `files` is empty.)
    if not files and root != image_dir:
        dirs_to_remove.append(root)
    for file in files:
        if file.lower().endswith(("png", "jpg", "gif")):
            path = os.path.join(root, file)
            # The folder name doubles as the person's label.
            label = os.path.basename(root).replace(" ", "-").lower()
            print(label, path)
            if label not in label_ids:
                label_ids[label] = current_id
                current_id += 1
            id_ = label_ids[label]
            pil_image = Image.open(path).convert("L")  # grayscale
            size = (320, 180)
            # Image.LANCZOS replaces Image.ANTIALIAS (removed in Pillow 10).
            final_image = pil_image.resize(size, Image.LANCZOS)
            image_array = np.array(final_image, "uint8")
            # Histogram equalization normalizes lighting across samples.
            equalization = cv2.equalizeHist(image_array)
            # Bilateral filter smooths noise while preserving edges.
            filtered = cv2.bilateralFilter(equalization, 9, 10, 10)
            faces = face_cascade.detectMultiScale(filtered, scaleFactor=1.05, minNeighbors=6)

            if len(faces) != 0:
                for (x, y, w, h) in faces:
                    roi = filtered[y:y + h, x:x + w]
                    x_train.append(roi)
                    y_labels.append(id_)
                    print("adding face for: " + label)
            else:
                # No detectable face -> useless as a training image.
                files_to_remove.append(path)

with open("pickles/face-labels.pickle", 'wb') as f:
    pickle.dump(label_ids, f)

recognizer.train(x_train, np.array(y_labels))
recognizer.save("recognizers/face-trainner.yml")

# Cleanup unusable images (actual deletion deliberately left commented out).
for fileLoc in files_to_remove:
    print("deleting file: " + fileLoc)
    # os.remove(fileLoc)
for dirsLoc in dirs_to_remove:
    print("deleting dir: " + dirsLoc)
    # os.removedirs(dirsLoc)
print("removing " + str(len(files_to_remove)) + " files")
104 changes: 104 additions & 0 deletions faces.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
import numpy as np
import cv2
import pickle
from collections import Counter

# Live webcam face recognition using a pre-trained LBPH model.  Draws a
# labeled, color-coded box around each recognized face and, on exit,
# prints the most frequently seen name with its average sampled confidence.

face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt.xml')
# NOTE(review): the eye/smile cascades are loaded but never used below.
eye_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_eye.xml')
smile_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_smile.xml')

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("recognizers/face-trainner.yml")

# Invert the label map produced by training: numeric id -> person name.
labels = {"person_name": 1}
with open("pickles/face-labels.pickle", 'rb') as f:
    og_labels = pickle.load(f)
    labels = {v: k for k, v in og_labels.items()}

font = cv2.FONT_HERSHEY_SIMPLEX
colorWhite = (255, 255, 255)
colorRed = (0, 0, 255)
colorYellow = (0, 200, 200)
colorGreen = (0, 255, 0)
thisColor = colorWhite
stroke = 1
result_dict = {}        # name -> list of int confidences sampled over time
result_interval = 15    # frame counter; confidences sampled every 10th frame

cap = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame.
    ret, frame = cap.read()
    if not ret:
        # Camera disconnected or stream ended; stop instead of crashing
        # on cvtColor(None).
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Same preprocessing as training: histogram equalization + bilateral filter.
    equalization = cv2.equalizeHist(gray)
    filtered = cv2.bilateralFilter(equalization, 9, 10, 10)
    faces = face_cascade.detectMultiScale(filtered, scaleFactor=1.05, minNeighbors=6)

    for (x, y, w, h) in faces:
        roi_gray = filtered[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]

        # NOTE(review): for LBPH, `conf` is a distance (lower is better);
        # this script treats the 60-100 range as an acceptable match — confirm.
        id_, conf = recognizer.predict(roi_gray)
        if 60 <= conf <= 100:
            name = labels[id_]
            # Pick a text color from the confidence band (bands overlap at 80;
            # the first matching branch wins).
            if name == 'brandon' and conf >= 95:
                thisColor = colorRed
            elif 60 <= conf <= 80:
                thisColor = colorYellow
            elif 80 <= conf <= 99:
                thisColor = colorGreen
            else:
                thisColor = colorWhite

            # Sample confidences every 10th frame for the end-of-run summary.
            if result_interval % 10 == 0:
                result_dict.setdefault(name, []).append(int(conf))

            stroke = 2
            combined_name = name + " confidence: " + str(int(conf)) + "%"
            cv2.putText(frame, combined_name, (x, y), font, .7, thisColor, stroke, cv2.LINE_AA)

            # Box color: red only for a high-confidence 'brandon' hit.
            if name == 'brandon' and conf >= 95:
                thisColor = colorRed
            else:
                thisColor = colorWhite

            cv2.rectangle(frame, (x, y), (x + w, y + h), thisColor, stroke)
        else:
            cv2.rectangle(frame, (x, y), (x + w, y + h), colorWhite, stroke)
            cv2.putText(frame, "unknown", (x, y), font, .7, colorWhite, stroke, cv2.LINE_AA)

    # Display the resulting frame.
    cv2.imshow('frame', frame)
    result_interval += 1
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

# When everything done, release the capture.
cap.release()
cv2.destroyAllWindows()

# Summary: most frequently seen name and its average sampled confidence.
# Guarded so a run with no recognized faces doesn't raise IndexError.
if result_dict:
    data = Counter(result_dict)
    conf_level_dict = dict(data.most_common(1))
    print(conf_level_dict)

    best_confs = list(conf_level_dict.values())[0]
    print(list(conf_level_dict.keys())[0])
    print(sum(best_confs) / len(best_confs))
else:
    print("no faces recognized")


25 changes: 25 additions & 0 deletions image_capture.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
import cv2
import os

# Capture training images from the default webcam: saves every 15th frame
# as a PNG into `directory` until the user presses 'q'.

cap = cv2.VideoCapture(0)
# Update folder location for each user.
directory = "images/this.guy/"
# Create the target folder once, up front; exist_ok avoids the per-frame
# existence check the original performed inside the loop.
os.makedirs(directory, exist_ok=True)

i = 0
while True:
    # Capture frame-by-frame.
    ret, frame = cap.read()
    if not ret:
        # Camera failure: stop rather than passing None to imwrite/imshow.
        break
    if i % 15 == 0:
        cv2.imwrite(os.path.join(directory, 'this.guy' + str(i) + '.png'), frame)

    # Display the resulting frame.
    cv2.imshow('frame', frame)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break
    i += 1

# When everything done, release the capture.
cap.release()
cv2.destroyAllWindows()