<p></p>"""
Face-Recognition module
Finds and recognizes faces based on 128-dimensional face encodings.
Uses a folder with all the known face images to compute the encodings.
Each file name is used as the output name for that person.
Based on the face-recognition package developed by Adam Geitgey
Github: https://github.com/ageitgey/face_recognition
-Install
1:sudo apt-get update
2:sudo apt-get install python3-pip cmake libopenblas-dev liblapack-dev libjpeg-dev
3:git clone https://github.com/JetsonHacksNano/installSwapfile
4:./installSwapfile/installSwapfile.sh
Restart the Jetson Nano before continuing
5:pip3 install Cython
6:wget http://dlib.net/files/dlib-19.17.tar.bz2
7:tar jxvf dlib-19.17.tar.bz2
8:cd dlib-19.17
9:gedit dlib/cuda/cudnn_dlibapi.cpp
Comment out the line 'forward_algo = forward_best_algo;' so it reads: //forward_algo = forward_best_algo;
10:sudo python3 setup.py install
11:sudo pip3 install face_recognition
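To verify the install (optional sanity check, assuming the steps above succeeded),
run the following in python3:
    import dlib, face_recognition
    print(dlib.DLIB_USE_CUDA)   # should print True when dlib was built with CUDA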
Source: https://medium.com/@ageitgey/build-a-hardware-based-face-recognition-system-for-150-with-the-nvidia-jetson-nano-and-python-a25cb8c891fd
by: Murtaza Hassan
Website: www.murtazahassan.com
Youtube: Murtaza's Workshop - Robotics and AI
"""
import cv2
import numpy as np
import face_recognition
import os

def findEncodings(path):
    # Build one 128-d encoding per known face image in the given folder
    classNames = []
    encodeList = []
    myList = os.listdir(path)
    print(myList)
    for cl in myList:
        curImg = cv2.imread(f'{path}/{cl}')
        # face_recognition expects RGB, but OpenCV loads images as BGR
        curImg = cv2.cvtColor(curImg, cv2.COLOR_BGR2RGB)
        encodings = face_recognition.face_encodings(curImg)
        if not encodings:
            # Skip images in which no face could be detected
            print(f'No face found in {cl}, skipping')
            continue
        classNames.append(os.path.splitext(cl)[0])
        encodeList.append(encodings[0])
    print("Encoding Completed")
    return encodeList, classNames

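# Example usage (a minimal sketch; assumes a folder such as "ImagesAttendance"
# containing one clear face image per person, e.g. "Elon.jpg" -> class name "Elon"):
#   encodeList, classNames = findEncodings("ImagesAttendance")
#   print(classNames)
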
def recognizeFaces(img, encodeList, classNames, scaleFactor=0.25):
    imgFaces = img.copy()
    # Downscale for faster detection, then convert BGR -> RGB for face_recognition
    imgS = cv2.resize(img, (0, 0), None, scaleFactor, scaleFactor)
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
    facesCurrentImage = face_recognition.face_locations(imgS)
    encodeCurImage = face_recognition.face_encodings(imgS, facesCurrentImage)
    names = []
    for encodeFace, faceLoc in zip(encodeCurImage, facesCurrentImage):
        result = face_recognition.compare_faces(encodeList, encodeFace)
        faceDis = face_recognition.face_distance(encodeList, encodeFace)
        matchIndex = np.argmin(faceDis)
        if result[matchIndex]:
            name = classNames[matchIndex].upper()
            color = (0, 255, 0)
        else:
            color = (0, 0, 255)
            name = 'unknown'
        names.append(name)
        # Scale the face location back up to the original image size
        y1, x2, y2, x1 = faceLoc
        y1, x2, y2, x1 = int(y1/scaleFactor), int(x2/scaleFactor), int(y2/scaleFactor), int(x1/scaleFactor)
        cv2.rectangle(imgFaces, (x1, y1), (x2, y2), color, 2)
        cv2.rectangle(imgFaces, (x1, y2-35), (x2, y2), color, cv2.FILLED)
        cv2.putText(imgFaces, name, (x1+6, y2-6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
    return imgFaces, names

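# Example on a single image instead of the camera stream (a minimal sketch;
# "Test.jpg" is a hypothetical image that contains one of the known faces):
#   encodeList, classNames = findEncodings("ImagesAttendance")
#   imgFaces, names = recognizeFaces(cv2.imread("Test.jpg"), encodeList, classNames)
#   cv2.imshow("Result", imgFaces)
#   cv2.waitKey(0)
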
def main():
    encodeList, classNames = findEncodings("ImagesAttendance")
    #print(classNames)
    frameWidth = 640
    frameHeight = 480
    ## Flip setting for the Raspberry Pi V2 (CSI) camera
    flip = 0
    ## The whole GStreamer pipeline must be a single Python string
    camSet = ('nvarguscamerasrc ! video/x-raw(memory:NVMM), width=3264, height=2464, '
              'format=NV12, framerate=21/1 ! nvvidconv flip-method=' + str(flip) + ' '
              '! video/x-raw, width=' + str(frameWidth) + ', height=' + str(frameHeight) + ', '
              'format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink')
    cap = cv2.VideoCapture(camSet)
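    # If a USB webcam is used instead of the CSI camera, the GStreamer pipeline
    # can be replaced with a plain device index (an alternative, not part of the
    # original Jetson Nano setup):
    # cap = cv2.VideoCapture(0)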
    while True:
        success, img = cap.read()
        if not success:
            break
        imgFaces, names = recognizeFaces(img, encodeList, classNames)
        cv2.imshow("Image", imgFaces)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()