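# Gradio demo for facial emotion recognition: a "Video" tab (webcam or uploaded
# .mp4) and an "Image" tab (webcam snapshot or uploaded picture). Inference is
# delegated to the Classifier class defined in classifier.py.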
import gradio as gr
import warnings
import os

from classifier import Classifier

warnings.filterwarnings('ignore')

video_examples = [os.path.join(os.path.dirname(__file__),
                               "examples/group_test.mp4")]
img_examples = [["examples/group_test.png"],
                ["examples/happy.png"],
                ["examples/fearful.png"],
                ["examples/angry.jpeg"],
                ["examples/surprised.jpeg"]]
ai_models = Classifier()


def predict(video_in=None, image_in_video=None, image_in_img=None):
    # Only one input is populated per call, depending on which tab and
    # source (webcam or upload) the request came from.
    input_path = video_in or image_in_video or image_in_img
    if input_path is None:
        raise gr.Error("Please upload a video or image.")
    _, fext = os.path.splitext(input_path)
    # .mp4 inputs are handled as video; anything else as a single image.
    ai_models.check_file_type(is_image=(fext.lower() != ".mp4"),
                              input_path=input_path)
    output_path, pred_time = ai_models.run()
    return output_path, pred_time
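

# Callback for the Image tab's radio button: show the selected source
# (webcam or upload) and hide and clear the other.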
def toggle(choice):
    if choice == "webcam":
        return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
    else:
        return gr.update(visible=False, value=None), gr.update(visible=True, value=None)
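

# UI layout: two tabs ("Video" and "Image"), each with a webcam/upload source
# selector on the left and the processed result plus prediction time on the right.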
with gr.Blocks() as demo:
    gr.Markdown("## Facial Emotion Recognition using CNN 🤖⭐️")
    with gr.Tab("Video") as tab:
        with gr.Row():
            with gr.Column():
                video_or_file_opt = gr.Radio(["webcam", "upload"], value="webcam",
                                             label="How would you like to upload your video?")
                video_in = gr.Video(format='mp4', source="webcam", include_audio=False)
                video_or_file_opt.change(fn=lambda s: gr.update(source=s, value=None),
                                         inputs=video_or_file_opt, outputs=video_in,
                                         queue=False, show_progress=False)
            with gr.Column():
                video_out = gr.Video(format='mp4')
                # Mirrors the Image tab: show the prediction time returned by predict().
                video_pred_tbox = gr.Textbox(label="Prediction Time")
                run_btn = gr.Button("Predict")
                run_btn.click(fn=predict, inputs=[video_in],
                              outputs=[video_out, video_pred_tbox])
                gr.Examples(fn=predict, examples=video_examples,
                            inputs=[video_in],
                            outputs=[video_out, video_pred_tbox])
with gr.Tab("Image"): | |
with gr.Row(): | |
with gr.Column(): | |
image_or_file_opt = gr.Radio(["webcam", "upload"], value="webcam", | |
label="How would you like to upload your image?") | |
image_in_video = gr.Image(source="webcam", type="filepath") | |
image_in_img = gr.Image( | |
source="upload", visible=False, type="filepath") | |
image_or_file_opt.change(fn=toggle, inputs=[image_or_file_opt], | |
outputs=[image_in_video, image_in_img], queue=False, show_progress=False) | |
            with gr.Column():
                image_out = gr.Image()
                run_btn = gr.Button("Predict")
                pred_tbox = gr.Textbox(label="Prediction Time")
                run_btn.click(fn=predict,
                              inputs=[image_in_img, image_in_video],
                              outputs=[image_out, pred_tbox])
                gr.Examples(fn=predict,
                            examples=img_examples,
                            inputs=[image_in_img, image_in_video],
                            outputs=[image_out, pred_tbox])
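
# Queue requests (helpful for long-running model inference) and start the app.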
demo.queue()
demo.launch()