NyanSwanAung23 committed · Commit 32e999f
Parent(s): 5774c1e

change model loading methods

Changed files:
- app.py (+9 -10)
- classifier.py (+31 -23)
- haarcascade_frontalface_default.xml (deleted)
- model.h5 (deleted, -3)
app.py
CHANGED
@@ -14,6 +14,7 @@ img_examples = [["examples/group_test.png"],
                 ["examples/sad-1.png"],
                 ["examples/sad-2.png"]]
 
+ai_models = Classifier()
 
 def predict(video_in, image_in_video, image_in_img):
 
@@ -25,26 +26,25 @@ def predict(video_in, image_in_video, image_in_img):
         fname, fext = os.path.splitext(img_path)
 
         if fext == ".mp4":
-
+            ai_models.check_file_type(is_image=False, input_path=img_path)
         else:
-
+            ai_models.check_file_type(is_image=True, input_path=img_path)
 
-        output_path, pred_time =
+        output_path, pred_time = ai_models.run()
         gr.Markdown(f"Total prediction time: ", pred_time)
         return output_path, pred_time
 
     if video_in:
         fname, fext = os.path.splitext(video_in)
         if fext == ".mp4":
-
+            ai_models.check_file_type(is_image=False, input_path=video_in)
         else:
-
+            ai_models.check_file_type(is_image=True, input_path=video_in)
 
-        output_path, pred_time =
+        output_path, pred_time = ai_models.run()
        gr.Markdown(f"Total prediction time: ", pred_time)
         return output_path, pred_time
 
-
 def toggle(choice):
     if choice == "webcam":
         return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
@@ -67,10 +67,9 @@ with gr.Blocks() as demo:
             video_out = gr.Video(format='mp4')
 
             run_btn = gr.Button("Predict")
-
-            run_btn.click(fn=predict, inputs=[video_in], outputs=[video_out, pred_tbox])
+            run_btn.click(fn=predict, inputs=[video_in], outputs=[video_out])
             gr.Examples(fn=predict, examples=video_examples, inputs=[
-                        video_in], outputs=[video_out
+                        video_in], outputs=[video_out])
 
         with gr.Tab("Image"):
             with gr.Row():
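In summary, app.py now builds a single module-level Classifier and funnels both the image and video paths through it: check_file_type() records the input kind and path, and run() does the work and returns the annotated output plus the prediction time. A condensed sketch of that flow (the classify helper is hypothetical; Classifier comes from classifier.py below):

import os

from classifier import Classifier

# One shared instance, created at import time so the models load once per boot.
ai_models = Classifier()

def classify(input_path):
    # Hypothetical condensation of predict(): route on extension, then run.
    _, fext = os.path.splitext(input_path)
    ai_models.check_file_type(is_image=(fext != ".mp4"), input_path=input_path)
    return ai_models.run()  # -> (output_path, pred_time)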
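Two Gradio details are worth flagging while reading the new wiring (general API notes, not behavior this commit changes). First, gr.Markdown(f"Total prediction time: ", pred_time) creates a new component inside the callback instead of returning a value to one, so it never renders; the f-string also has no placeholder, so pred_time is passed as a stray positional argument. Second, in Blocks a callback's return values map positionally onto the components in outputs, and predict still returns two values while run_btn.click() now lists only video_out. A self-contained sketch of matched wiring, with a stub pipeline and hypothetical labels:

import gradio as gr

def fake_pipeline(video_path):
    # Stub standing in for check_file_type() + run(): returns the (unmodified)
    # video and a placeholder timing string.
    return video_path, "0.00 s"

with gr.Blocks() as demo:
    video_in = gr.Video()
    video_out = gr.Video(format="mp4")
    pred_tbox = gr.Textbox(label="Total prediction time")
    run_btn = gr.Button("Predict")
    # Two return values -> two output components, matched by position.
    run_btn.click(fn=fake_pipeline, inputs=[video_in], outputs=[video_out, pred_tbox])

demo.launch()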
classifier.py
CHANGED
@@ -3,33 +3,38 @@ import os
 import numpy as np
 from tensorflow.keras.optimizers import Adam
 from tensorflow.keras.models import load_model
+from tensorflow.keras.models import model_from_json
 import time
 
 
 class Classifier:
-    def __init__(self
-        self.is_image =
-        self.
-
-        self.classifier = load_model('model.h5')
-        self.face_detector = cv2.CascadeClassifier(
-            'haarcascade_frontalface_default.xml')
-        cv2.ocl.setUseOpenCL(False)
-
+    def __init__(self):
+        self.is_image = None
+        self.classifier = None
+        self.face_detector = None
         self.labels = {0: "Angry", 1: "Disgusted", 2: "Fearful",
                        3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
-
+        self.load_models()
+
+    def check_file_type(self, is_image, input_path):
+        self.is_image = is_image
         self.input_path = input_path
-        self.output_path = "
-        os.makedirs("
-
-
-
-
-
-
-
-
+        self.output_path = "result/" + self.input_path.split('/')[-1]
+        os.makedirs("result", exist_ok=True)
+
+    def load_models(self):
+        # Load VGG
+        yaml_file = open('models/VGG19.yaml', 'r')
+        self.classifier = model_from_json(yaml_file.read())
+        self.classifier.load_weights("models/VGG19.h5")
+        yaml_file.close()
+        print("VGG19 has been loaded")
+
+        # Load Face Detector
+        self.face_detector = cv2.CascadeClassifier(
+            'models/haarcascade_frontalface_default.xml')
+        cv2.ocl.setUseOpenCL(False)
+        print("Face Detector has been loaded")
 
     def predict(self, frame):
 
@@ -40,12 +45,15 @@ class Classifier:
         for (x, y, w, h) in faces:
             cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)
             roi_gray = gray[y:y + h, x:x + w]
+            roi_color = frame[y:y + h, x:x + w]
+            # Convert grayscale to RGB
+            roi_color_rgb = cv2.cvtColor(roi_color, cv2.COLOR_BGR2RGB)
             cropped_img = np.expand_dims(np.expand_dims(
-                cv2.resize(
+                cv2.resize(roi_color_rgb, (48, 48)), 0), -1)
             prediction = self.classifier.predict(cropped_img)
             maxindex = int(np.argmax(prediction))
             cv2.putText(frame, self.labels[maxindex], (x+20, y-60),
-                        cv2.FONT_HERSHEY_SIMPLEX,
+                        cv2.FONT_HERSHEY_SIMPLEX, 1.4, (0, 0, 255), 2, cv2.LINE_AA)
 
         return frame
 
@@ -64,7 +72,7 @@ class Classifier:
         fps = int(cap.get(5))
 
         # Define the codec and create a VideoWriter object to save the output video as .MOV
-        fourcc = cv2.VideoWriter_fourcc(
+        fourcc = cv2.VideoWriter_fourcc('a', 'v', 'c', '1')
         out = cv2.VideoWriter(self.output_path, fourcc, fps,
                               (frame_width, frame_height))
 
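This is the loading change the commit message refers to: instead of load_model('model.h5') reading one full HDF5 model, model_from_json() rebuilds the architecture from models/VGG19.yaml and load_weights() fills in models/VGG19.h5. Despite the .yaml extension, the file must contain JSON for model_from_json() to parse it (Keras dropped model_from_yaml() in TensorFlow 2.6). A round-trip sketch under that assumption, using a stand-in architecture:

import os

from tensorflow.keras.applications import VGG19  # stand-in for the Space's model
from tensorflow.keras.models import model_from_json

os.makedirs("models", exist_ok=True)
model = VGG19(weights=None, classes=7)  # seven classes, matching self.labels

# Save: architecture as JSON text, weights as HDF5 (the pair load_models() reads).
with open("models/VGG19.yaml", "w") as f:  # .yaml extension, JSON content
    f.write(model.to_json())
model.save_weights("models/VGG19.h5")

# Load: mirrors Classifier.load_models().
with open("models/VGG19.yaml") as f:
    restored = model_from_json(f.read())
restored.load_weights("models/VGG19.h5")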
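A shape note on the new preprocessing in predict(): in the grayscale pipeline, cv2.resize() returned (48, 48) and the two expand_dims() calls produced the (1, 48, 48, 1) batch the old model expected. An RGB crop already carries a channel axis, so the same pair of calls now yields a 5-D (1, 48, 48, 3, 1) array; if the new VGG19 expects 48x48 RGB input, dropping the trailing expand_dims(..., -1) would give the conventional (1, 48, 48, 3) batch. A self-contained check of both shapes:

import cv2
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in BGR frame
x, y, w, h = 100, 100, 96, 96                    # stand-in face box

roi_color = frame[y:y + h, x:x + w]
roi_color_rgb = cv2.cvtColor(roi_color, cv2.COLOR_BGR2RGB)

# As committed: the second expand_dims adds a stray trailing axis.
cropped_img = np.expand_dims(np.expand_dims(
    cv2.resize(roi_color_rgb, (48, 48)), 0), -1)
print(cropped_img.shape)  # (1, 48, 48, 3, 1)

# 4-D batch a conv net over 48x48 RGB would conventionally take:
print(np.expand_dims(cv2.resize(roi_color_rgb, (48, 48)), 0).shape)  # (1, 48, 48, 3)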
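On the codec change: cv2.VideoWriter_fourcc('a', 'v', 'c', '1') selects H.264/AVC, which matches the .mp4 files that gr.Video(format='mp4') serves (the surrounding comment still mentions .MOV from an earlier revision). Whether the avc1 encoder is actually available depends on the local OpenCV/FFmpeg build. A minimal writer sketch with assumed fps and frame size:

import os

import cv2
import numpy as np

os.makedirs("result", exist_ok=True)                 # output dir, as in check_file_type()
fourcc = cv2.VideoWriter_fourcc('a', 'v', 'c', '1')  # H.264/AVC
out = cv2.VideoWriter("result/demo.mp4", fourcc, 30, (640, 480))
for _ in range(60):                                  # two seconds of black video at 30 fps
    out.write(np.zeros((480, 640, 3), dtype=np.uint8))
out.release()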
haarcascade_frontalface_default.xml
DELETED
The diff for this file is too large to render. See raw diff.
model.h5
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8fb504950f1185882c2997ba0d9be2b62bbb85d05153827094a0f631819728cc
-size 12439824
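model.h5 was tracked with Git LFS, so the deleted "content" is just its three-line pointer file; the actual 12,439,824-byte weights blob lives in LFS storage, addressed by the sha256 oid. A tiny sketch of parsing such a pointer:

pointer_text = """version https://git-lfs.github.com/spec/v1
oid sha256:8fb504950f1185882c2997ba0d9be2b62bbb85d05153827094a0f631819728cc
size 12439824"""

# Each pointer line is "key value"; split on the first space.
fields = dict(line.split(" ", 1) for line in pointer_text.splitlines())
print(fields["oid"])        # content address of the real file
print(int(fields["size"]))  # 12439824 bytes (~12.4 MB)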