人臉活體檢測人臉識別:眨眼+張口

專注的阿熊發表於2022-09-27

1 :原理

當偵測到 1 次眨眼或 1 次張嘴時即判斷為活體,並記錄下一幀的人臉圖片,與待判定的人員圖片進行比對,取得比對後的相似度,據以判斷是否為同一個人;為了加快判斷速度,採用每 2 幀進行一次活體檢測判斷。

2 :程式碼實現

import face_recognition

from imutils import face_utils

import numpy as np

import dlib

import cv2

import sys

# Running count of detected blink events (incremented in liveness_detection)
blink_total = 0

# Running count of detected mouth-open events
mouth_total = 0

# Path where the first good face frame is saved for later comparison
# NOTE(review): 'viode' looks like a typo for 'video', but it is a runtime
# path — changing it would change behavior, so it is kept as-is.
pic_path = r'images\viode_face.jpg'

# Number of face snapshots written so far (0 or 1; only the first frame is saved)
pic_total = 0

# Consecutive frames with eyes closed (EAR below threshold); reset on eye-open
blink_counter = 0

# Mouth state from the previous processed frame: 1 = open, 0 = closed
mouth_status_open = 0

def getFaceEncoding(src):
    """Return the 128-d face encoding of the first face found in an image file.

    Parameters:
        src: path to an image containing at least one detectable face.

    Returns:
        A 128-element numpy array encoding the first detected face.

    Raises:
        IndexError: if no face is detected (the [0] lookup fails).
    """
    image = face_recognition.load_image_file(src)  # load image as RGB ndarray
    # Face locations come back as [(top, right, bottom, left), ...]
    face_locations = face_recognition.face_locations(image)
    # NOTE: the original also cropped the face region and color-converted it
    # for a (commented-out) display call; that dead code has been removed.
    face_encoding = face_recognition.face_encodings(image, face_locations)[0]
    return face_encoding

def simcos(a, b):
    """Map the Euclidean distance between two encodings to a similarity score.

    Identical inputs give 1.0; the score decays toward 0 as the distance
    (L2 norm of the difference) grows.
    """
    diff = np.asarray(a) - np.asarray(b)
    distance = np.linalg.norm(diff)  # L2 (Euclidean) norm
    return 1.0 / (1.0 + distance)

# Public comparison entry point: compares two face images.
def comparison(face_src1, face_src2):
    """Compare two face images and return their similarity score.

    Bug fix: the original printed the score but returned None, even though
    its comment promised to return the similarity. The print is kept for
    backward compatibility and the value is now also returned.

    Parameters:
        face_src1, face_src2: paths to the two images to compare.

    Returns:
        float similarity in (0, 1], higher means more similar.
    """
    enc1 = getFaceEncoding(face_src1)
    enc2 = getFaceEncoding(face_src2)
    value = simcos(enc1, enc2)
    print(value)
    return value

# Eye aspect ratio (EAR)
def eye_aspect_ratio(eye):
    """Compute the eye aspect ratio from 6 landmark points.

    EAR = (|p1 - p5| + |p2 - p4|) / (2 * |p0 - p3|)
    Small values indicate a closed eye.
    """
    vertical = np.linalg.norm(eye[1] - eye[5]) + np.linalg.norm(eye[2] - eye[4])
    horizontal = np.linalg.norm(eye[0] - eye[3])
    return vertical / (2.0 * horizontal)

# Mouth aspect ratio (MAR)
def mouth_aspect_ratio(mouth):
    """Compute the mouth aspect ratio from 8 inner-mouth landmark points.

    MAR = (|m1 - m7| + |m3 - m5|) / (2 * |m0 - m4|)
    (dlib 68-point indices 61/67, 63/65 and 60/64 respectively.)
    Large values indicate an open mouth.
    """
    opening = np.linalg.norm(mouth[1] - mouth[7]) + np.linalg.norm(mouth[3] - mouth[5])
    width = np.linalg.norm(mouth[0] - mouth[4])
    return opening / (2.0 * width)

# Liveness detection via blinking and mouth opening.
def liveness_detection(path=None):
    """Scan a video for a live face (one blink OR one mouth-open event).

    Saves the first frame containing exactly one face to ``pic_path`` for
    later comparison, then checks every second frame for blink/mouth events.
    Stops as soon as liveness is established. Results are accumulated in the
    module-level counters ``blink_total`` and ``mouth_total``.

    Parameters:
        path: video file path. Defaults to the module-level ``video_path``
              (backward compatible with the original, which read the global).

    Fixes vs. the original:
      * removed injected spam text that made the ``mar = ...`` line a
        syntax error;
      * ``vs.read()`` no longer receives the loop index — VideoCapture.read's
        optional argument is an output buffer, not a frame number.
    """
    global blink_total
    global mouth_total
    global pic_total
    global blink_counter
    global mouth_status_open

    if path is None:
        path = video_path  # fall back to the module-level default

    # Eye-aspect-ratio threshold: below this the eye is considered closed.
    EAR_THRESH = 0.15
    # A blink counts only if the eye stayed closed for 1..5 consecutive
    # processed frames (filters noise and long deliberate eye closure).
    EAR_CONSEC_FRAMES_MIN = 1
    EAR_CONSEC_FRAMES_MAX = 5
    # Mouth-aspect-ratio threshold: above this the mouth is considered open.
    MAR_THRESH = 0.2

    # Face detector and 68-point landmark predictor.
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("model/shape_predictor_68_face_landmarks.dat")

    # Landmark index ranges for both eyes and the inner mouth.
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["inner_mouth"]

    vs = cv2.VideoCapture(path)
    # Total frame count of the video.
    frames_total = int(vs.get(cv2.CAP_PROP_FRAME_COUNT))
    for i in range(frames_total):
        ok, frame = vs.read()  # grab the next frame
        if not ok:
            break
        # Process every 2nd frame only, to speed up detection.
        if frame is not None and i % 2 == 0:
            # Grayscale removes color noise for the detector.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            rects = detector(gray, 0)  # detect faces
            # Only handle the single-face case.
            if len(rects) == 1:
                if pic_total == 0:
                    # Save the first single-face frame for later comparison.
                    cv2.imwrite(pic_path, frame)
                    cv2.waitKey(1)
                    pic_total += 1
                # 68 landmark coordinates as a numpy array.
                shape = predictor(gray, rects[0])
                shape = face_utils.shape_to_np(shape)
                left_eye = shape[lStart:lEnd]
                right_eye = shape[rStart:rEnd]
                left_ear = eye_aspect_ratio(left_eye)
                right_ear = eye_aspect_ratio(right_eye)
                # Average EAR over both eyes.
                ear = (left_ear + right_ear) / 2.0
                mouth = shape[mStart:mEnd]
                mar = mouth_aspect_ratio(mouth)
                # EAR below threshold: eye closed this frame — may be blinking.
                if ear < EAR_THRESH:
                    blink_counter += 1
                # Eye open again: if the closed streak was in range, count a blink.
                else:
                    if EAR_CONSEC_FRAMES_MIN <= blink_counter <= EAR_CONSEC_FRAMES_MAX:
                        blink_total += 1
                    blink_counter = 0
                # An open->closed transition counts as one mouth-open event.
                if mar > MAR_THRESH:
                    mouth_status_open = 1
                else:
                    if mouth_status_open:
                        mouth_total += 1
                    mouth_status_open = 0
            # NOTE(review): gives up only if frame 90 exactly has no face —
            # presumably a crude timeout; confirm the intended semantics.
            elif len(rects) == 0 and i == 90:
                print("No face!")
                break
            elif len(rects) > 1:
                print("More than one face!")
        # One blink OR one mouth-open event is enough to declare liveness.
        if blink_total >= 1 or mouth_total >= 1:
            break
    cv2.destroyAllWindows()
    vs.release()

# Alternative: take video path and reference image from the command line.
# video_path, src = sys.argv[1], sys.argv[2]

video_path = r'video\face13.mp4'      # input video file location

# Reference image for identity comparison (disabled in this listing).
# src = r'C:\Users\666\Desktop\zz5.jpg'

liveness_detection()

# Report the accumulated blink / mouth-open counts.
print(" 眨眼次數》》 ", blink_total)

print(" 張嘴次數》》 ", mouth_total)

# Compare the captured frame against the reference image (disabled).
# comparison(pic_path, src)

來自 “ ITPUB部落格 ” ,連結:http://blog.itpub.net/69946337/viewspace-2916364/,如需轉載,請註明出處,否則將追究法律責任。

相關文章