import cv2
import numpy as np
import face_recognition
import os
import gradio as gr
def faceEncodings(images):
    """Return one 128-d face encoding per image, or None where no face is found."""
    encodeList = []
    for img in images:
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # face_recognition expects RGB
        encodings = face_recognition.face_encodings(rgb)
        encodeList.append(encodings[0] if encodings else None)
    return encodeList
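# A quick sanity check for the helper above (the file name is hypothetical):
#
#     img = cv2.imread("known_faces/alice.jpg")
#     encs = faceEncodings([img])
#     assert len(encs) == 1 and (encs[0] is None or encs[0].shape == (128,))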
def Attendance(text, video, image):
    """Mark attendance from a video or a single image.

    `text` is the path to a directory of reference photos, one per person,
    where the file name (without extension) is the person's name.
    """
    names = []
    path = text
    images = []
    personNames = []
    myList = os.listdir(path)
    print(myList)
    for cu_img in myList:
        current_Img = cv2.imread(f'{path}/{cu_img}')
        if current_Img is None:  # skip files cv2 cannot read as images
            continue
        images.append(current_Img)
        personNames.append(os.path.splitext(cu_img)[0])
    print(personNames)
    # Encode the reference images, dropping any where no face was detected
    # so that encodings and names stay aligned.
    encodings = faceEncodings(images)
    encodeListKnown = [e for e in encodings if e is not None]
    personNames = [n for n, e in zip(personNames, encodings) if e is not None]
    print('All Encodings Complete!!!')
    if not encodeListKnown:
        return ''
    if video is not None:
        cap = cv2.VideoCapture(video)
        framecount = 0
        while True:
            ret, frame = cap.read()
            if not ret:  # end of video
                break
            if framecount % 15 == 0:  # sample every 15th frame to save time
                rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                facesCurrentFrame = face_recognition.face_locations(rgb)
                encodesCurrentFrame = face_recognition.face_encodings(rgb, facesCurrentFrame)
                for encodeFace, faceLoc in zip(encodesCurrentFrame, facesCurrentFrame):
                    matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
                    faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
                    matchIndex = np.argmin(faceDis)
                    if matches[matchIndex]:
                        name = personNames[matchIndex].upper()
                        if name not in names:
                            names.append(name)
            framecount += 1
        cap.release()
        return ' '.join(names)
    else:
        if image is None:
            return ''
        # Gradio delivers the image as an RGB numpy array, which is already
        # the colour order face_recognition expects, so no conversion is needed.
        facesCurrentFrame = face_recognition.face_locations(image)
        encodesCurrentFrame = face_recognition.face_encodings(image, facesCurrentFrame)
        for encodeFace, faceLoc in zip(encodesCurrentFrame, facesCurrentFrame):
            matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
            faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
            matchIndex = np.argmin(faceDis)
            if matches[matchIndex]:
                name = personNames[matchIndex].upper()
                if name not in names:
                    names.append(name)
        return ' '.join(names)
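# Note: face_recognition.compare_faces accepts a `tolerance` keyword
# (default 0.6); lowering it makes matching stricter, e.g.
#
#     face_recognition.compare_faces(encodeListKnown, encodeFace, tolerance=0.5)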
demo = gr.Interface(fn=Attendance,
                    inputs=["text", "video", "image"],
                    outputs="text",
                    title="Face Attendance",
                    )
demo.launch(debug=True)
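# Minimal local smoke test, bypassing the Gradio UI (directory and file names
# are hypothetical; the channel reversal mimics the RGB array Gradio passes in):
#
#     rgb = cv2.imread("group_photo.jpg")[:, :, ::-1]
#     print(Attendance("known_faces", None, rgb))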