Face-Recognition using OpenCV

This Python program will allow you to detect human faces and also recognize them, labelling each detected face with the person's name.

Requirements:
  1. Python 3
  2. OpenCV (the opencv-contrib-python build, which includes the cv2.face module; see the quick check below)
  3. NumPy (library)
  4. Pillow (library)
  5. Pickle (part of the Python standard library, no separate install needed)
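The cv2.face recognizer used by the training script ships only with the contrib build of OpenCV, so a quick sanity check of the environment can save debugging later. This is only a sketch, assuming the packages above are already installed:

# quick check that the required packages and the contrib face module are available
import cv2
import numpy as np
from PIL import Image
import pickle

print("OpenCV version:", cv2.__version__)
print("cv2.face available:", hasattr(cv2, "face"))   # True only with opencv-contrib-python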
Main Logic:

Face detection is a computer vision technique that locates human faces in digital images. It is a specific use case of object detection, which deals with finding instances of semantic objects of a certain class (such as humans, buildings, or cars) in digital images and videos. Face detection has gained a lot of importance, especially in fields like photography, security, and marketing.
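As a concrete illustration of the detection step used by both scripts below, the following sketch loads a single image, runs OpenCV's Haar cascade detector, and draws a box around every face it finds. The file names sample.jpg and sample_detected.png are placeholders; the cascade path matches the one the scripts below expect:

# minimal face-detection sketch on a single image (file names are placeholders)
import cv2

detector = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt.xml')

img = cv2.imread('sample.jpg')                    # any test photo
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)      # Haar cascades work on grayscale images
faces = detector.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)

for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

cv2.imwrite('sample_detected.png', img)
print("faces found:", len(faces))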


Code:

* face_train.py * - Training File

# import useful packages
import os
import cv2
from PIL import Image
import numpy as np
import pickle

# set the directory path that holds the training images
# layout: current folder --> images --> one sub-folder per person (named after that person) containing their images
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
img_dir = os.path.join(BASE_DIR, "images")

# load OpenCV face detector
detector = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt.xml')

# create LBPH face recognizer
recognizer = cv2.face.LBPHFaceRecognizer_create()

# create variables to store training images and labels
current_id = 0
label_ids = {}
y_labels = []
x_train = []

# walk the image directory and its sub-directories
for root, dirs, files in os.walk(img_dir):
    for file in files:

        # only process image files with extensions like .jpg or .png (any case)
        if file.lower().endswith(("jpg", "png")):

            # derive the label from the sub-folder (person) name
            path = os.path.join(root, file)
            label = os.path.basename(root).replace(" ", "_").lower()

            # if the label is not in the dictionary yet, assign it a new id
            if label not in label_ids:
                label_ids[label] = current_id
                current_id += 1
            id_ = label_ids[label]

            # open the image and convert it to grayscale
            pil_img = Image.open(path).convert("L")
            size = (480, 480)

            # resize to a fixed size (Image.ANTIALIAS was removed in newer Pillow; LANCZOS is the same filter)
            final_img = pil_img.resize(size, Image.LANCZOS)

            # convert the image to a numpy array
            img_array = np.array(final_img, "uint8")

            # detect faces in the image
            faces = detector.detectMultiScale(img_array, scaleFactor=1.5, minNeighbors=5)

            # store each detected face region and its label as training data
            for (x, y, w, h) in faces:
                roi = img_array[y:y+h, x:x+w]
                x_train.append(roi)
                y_labels.append(id_)


# pickle the label-to-id mapping
with open("labels.pickle", 'wb') as f:
    pickle.dump(label_ids, f)

# train the recognizer on the collected faces and labels
recognizer.train(x_train, np.array(y_labels))

# save the trained model
recognizer.save("trainner.yml")
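The script expects one sub-folder per person under images/, for example images/john_doe/1.jpg and images/jane_doe/1.jpg (the folder names here are just examples); each folder name becomes a label. After training, you can verify which labels were written with a small check like this:

# inspect the label mapping written by face_train.py
import pickle

with open("labels.pickle", "rb") as f:
    label_ids = pickle.load(f)

# prints something like {'john_doe': 0, 'jane_doe': 1}
print(label_ids)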


* face.py * - Output File

# import useful packages
import numpy as np
import cv2
import pickle

# load OpenCV face detector, create the face recognizer and read the training data
detector = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainner.yml")

# load the pickled label dictionary and invert it to map id --> person name
labels = {"person_name": 1}
with open("labels.pickle", 'rb') as f:
    orig_labels = pickle.load(f)
    labels = {v: k for k, v in orig_labels.items()}

# capture video from the default webcam
cap = cv2.VideoCapture(0)

while True:
    # read a frame (image) from the video stream
    ret, frame = cap.read()

    # convert the frame to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect multiple faces in the frame
    faces = detector.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)

    # for each face found in the frame
    for (x, y, w, h) in faces:
        # select the region of interest in the grayscale and colour frames
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]

        # predict the face id with the recognizer (for LBPH, a lower confidence value means a closer match)
        id_, conf = recognizer.predict(roi_gray)

        # if the confidence falls in the accepted range, put the predicted name above the face
        if conf >= 40 and conf <= 90:
            font = cv2.FONT_HERSHEY_SIMPLEX
            name = labels[id_]
            color = (255, 255, 255)
            stroke = 2
            cv2.putText(frame, str(name), (x, y-10), font, 1, color, stroke, cv2.LINE_AA)

        # save the detected face region to disk
        img_item = "img.png"
        cv2.imwrite(img_item, roi_color)

        # draw a rectangle around the face
        color = (255, 0, 0)       # BGR
        stroke = 2
        width = x + w             # ending x coordinate
        height = y + h            # ending y coordinate
        cv2.rectangle(frame, (x, y), (width, height), color, stroke)

    # display the frame with the rectangle and the predicted name of the person
    cv2.imshow('frame', frame)

    # quit when 'q' is pressed
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

# release the capture object and destroy all windows
cap.release()
cv2.destroyAllWindows()
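Both scripts load the Haar cascade from a local cascades/data/ folder. If you have not copied the XML file into your project, the opencv-python / opencv-contrib-python wheels ship the same cascade files, and you can point the classifier at the bundled copy instead (a small sketch, not part of the original scripts):

# load the cascade bundled with the OpenCV wheel instead of a local copy
import cv2

cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_alt.xml'
detector = cv2.CascadeClassifier(cascade_path)
print("cascade loaded:", not detector.empty())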


