nsa committed
Commit 33688d4 · 1 Parent(s): 4e9f4dd

initial commit

Files changed (4)
  1. app.py +102 -0
  2. classifier.py +84 -0
  3. main.py +13 -0
  4. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,102 @@
+ import gradio as gr
+ import warnings
+ import os
+ from classifier import Classifier
+ warnings.filterwarnings('ignore')
+
+ video_examples = [os.path.join(os.path.dirname(__file__),
+                                "examples/group_test.mp4")]
+
+ img_examples = [["examples/group_test.png"],
+                 ["examples/smile-1.png"],
+                 ["examples/smile-2.png"],
+                 ["examples/angry-1.png"],
+                 ["examples/sad-1.png"],
+                 ["examples/sad-2.png"]]
+
+
+ def predict(video_in=None, image_in_video=None, image_in_img=None):
+     if video_in is None and image_in_video is None and image_in_img is None:
+         raise gr.Error("Please upload a video or image.")
+
+     if image_in_video or image_in_img:
+         img_path = image_in_video or image_in_img
+         fname, fext = os.path.splitext(img_path)
+         # Treat .mp4 inputs as video, anything else as a still image
+         if fext.lower() == ".mp4":
+             classifier = Classifier(is_image=False, input_path=img_path)
+         else:
+             classifier = Classifier(is_image=True, input_path=img_path)
+
+         output_path, pred_time = classifier.run()
+         return output_path, pred_time
+
+     if video_in:
+         fname, fext = os.path.splitext(video_in)
+         if fext.lower() == ".mp4":
+             classifier = Classifier(is_image=False, input_path=video_in)
+         else:
+             classifier = Classifier(is_image=True, input_path=video_in)
+
+         output_path, pred_time = classifier.run()
+         return output_path, pred_time
+
+
+ def toggle(choice):
+     # Show the webcam component or the upload component, never both
+     if choice == "webcam":
+         return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
+     else:
+         return gr.update(visible=False, value=None), gr.update(visible=True, value=None)
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown("### Video or Image? WebCam or Upload?")
+     with gr.Tab("Video"):
+         with gr.Row():
+             with gr.Column():
+                 video_or_file_opt = gr.Radio(["webcam", "upload"], value="webcam",
+                                              label="How would you like to upload your video?")
+                 video_in = gr.Video(
+                     format='mp4', source="webcam", include_audio=False)
+                 video_or_file_opt.change(fn=lambda s: gr.update(source=s, value=None), inputs=video_or_file_opt,
+                                          outputs=video_in, queue=False, show_progress=False)
+             with gr.Column():
+                 video_out = gr.Video(format='mp4')
+
+         run_btn = gr.Button("Predict")
+         video_pred_tbox = gr.Textbox(label="Prediction Time")
+         run_btn.click(fn=predict, inputs=[video_in],
+                       outputs=[video_out, video_pred_tbox])
+         gr.Examples(fn=predict, examples=video_examples,
+                     inputs=[video_in], outputs=[video_out, video_pred_tbox])
+
+     with gr.Tab("Image"):
+         with gr.Row():
+             with gr.Column():
+                 image_or_file_opt = gr.Radio(["webcam", "upload"], value="webcam",
+                                              label="How would you like to upload your image?")
+                 image_in_video = gr.Image(source="webcam", type="filepath")
+                 image_in_img = gr.Image(
+                     source="upload", visible=False, type="filepath")
+
+                 image_or_file_opt.change(fn=toggle, inputs=[image_or_file_opt],
+                                          outputs=[image_in_video, image_in_img], queue=False, show_progress=False)
+             with gr.Column():
+                 image_out = gr.Image()
+
+         run_btn = gr.Button("Predict")
+
+         pred_tbox = gr.Textbox(label="Prediction Time")
+         run_btn.click(fn=predict, inputs=[image_in_img, image_in_video],
+                       outputs=[image_out, pred_tbox])
+
+         gr.Examples(fn=predict,
+                     examples=img_examples,
+                     inputs=[image_in_img, image_in_video],
+                     outputs=[image_out, pred_tbox])
+
+
+ demo.queue()
+ if __name__ == "__main__":
+     demo.launch()
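With the `__main__` guard above, `python app.py` still launches the demo directly, while main.py can import `demo` without starting a second server. For a quick smoke test without the UI, `predict` can also be called in-process; a minimal sketch, assuming the model files under models/ and the bundled example image exist (smoke_test.py is hypothetical, not part of this commit):

# smoke_test.py — hypothetical, not part of this commit
from app import predict

# Only the image slot is filled; the other inputs default to None.
output_path, pred_time = predict(image_in_img="examples/smile-1.png")
print(output_path, pred_time)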
classifier.py ADDED
@@ -0,0 +1,84 @@
+ import cv2
+ import os
+ import numpy as np
+ from tensorflow.keras.optimizers import Adam
+ from tensorflow.keras.models import load_model
+ import time
+
+
+ class Classifier:
+     def __init__(self, is_image, input_path):
+         self.is_image = is_image
+         self.counter = 0
+
+         self.classifier = load_model('models/model.h5')
+         self.face_detector = cv2.CascadeClassifier(
+             'models/haarcascade_frontalface_default.xml')
+         cv2.ocl.setUseOpenCL(False)
+
+         self.labels = {0: "Angry", 1: "Disgusted", 2: "Fearful",
+                        3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
+
+         self.input_path = input_path
+         self.output_path = "result/" + self.input_path.split('/')[-1]
+         os.makedirs("result", exist_ok=True)
+
+         print("Models have been loaded")
+
+     # def get_model(self):
+     #     model = load_model(model_path, compile=False)
+     #     model.compile(optimizer=Adam(learning_rate=1e-3),
+     #                   loss='categorical_crossentropy', metrics=['accuracy'])
+
+     def predict(self, frame):
+         # Haar cascades operate on single-channel images, so convert first
+         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+         faces = self.face_detector.detectMultiScale(
+             gray, scaleFactor=1.3, minNeighbors=5)
+
+         for (x, y, w, h) in faces:
+             cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)
+             # The model expects a 48x48 grayscale crop with batch and channel dims
+             roi_gray = gray[y:y + h, x:x + w]
+             cropped_img = np.expand_dims(np.expand_dims(
+                 cv2.resize(roi_gray, (48, 48)), -1), 0)
+             prediction = self.classifier.predict(cropped_img)
+             maxindex = int(np.argmax(prediction))
+             cv2.putText(frame, self.labels[maxindex], (x+20, y-60),
+                         cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2, cv2.LINE_AA)
+
+         return frame
+
+     def run(self):
+         start_time = time.time()
+         if self.is_image:
+             image_arr = cv2.imread(self.input_path)
+             predicted_frame = self.predict(frame=image_arr)
+             cv2.imwrite(self.output_path, predicted_frame)
+         else:
+             cap = cv2.VideoCapture(self.input_path)
+
+             # Get video properties
+             frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+             frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+             fps = int(cap.get(cv2.CAP_PROP_FPS))
+
+             # H.264 ('avc1') codec so the output .mp4 plays in the browser
+             fourcc = cv2.VideoWriter_fourcc('a', 'v', 'c', '1')
+             out = cv2.VideoWriter(self.output_path, fourcc, fps,
+                                   (frame_width, frame_height))
+
+             while True:
+                 ret, frame = cap.read()
+                 if not ret:
+                     break
+
+                 predicted_frame = self.predict(frame=frame)
+                 out.write(predicted_frame)
+
+             # Release handles so the output file is fully flushed to disk
+             cap.release()
+             out.release()
+
+         total_time = str(round(time.time() - start_time, 3)) + "s"
+         return self.output_path, total_time
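classifier.py has no dependency on the Gradio layer, so it can be exercised on its own. A minimal sketch, assuming models/model.h5 and models/haarcascade_frontalface_default.xml are present relative to the working directory:

# Annotated copy is written to result/<input filename>
from classifier import Classifier

clf = Classifier(is_image=True, input_path="examples/smile-1.png")
output_path, pred_time = clf.run()
print(f"Wrote {output_path} in {pred_time}")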
main.py ADDED
@@ -0,0 +1,13 @@
+ from fastapi import FastAPI
+ import gradio as gr
+ from app import demo
+
+
+ app = FastAPI()
+
+
+ @app.get('/')
+ async def root():
+     return {"message": "Gradio app is running at /gradio"}
+
+ app = gr.mount_gradio_app(app, demo, path='/gradio')
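main.py exposes the demo under /gradio on an ordinary ASGI server, e.g. `uvicorn main:app`. The equivalent launch from Python, as a sketch (serve.py is a hypothetical entry point, not part of this commit):

# serve.py — hypothetical launcher, equivalent to `uvicorn main:app --port 7860`
import uvicorn

uvicorn.run("main:app", host="0.0.0.0", port=7860)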
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ gradio<4  # app.py uses the Gradio 3.x component API (source=, gr.update)
+ numpy
+ tensorflow
+ opencv-python
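Note that `opencv` is not a PyPI package name; the Python bindings ship as `opencv-python` (or `opencv-python-headless` for containers without a display). A throwaway check that the pinned environment imports cleanly (not part of the commit):

# check_env.py — throwaway sanity check
import cv2
import gradio
import tensorflow
print(gradio.__version__, cv2.__version__, tensorflow.__version__)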