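# Gradio demo for facial emotion recognition with a CNN.
# Relies on the local classifier.py module, whose Classifier class (as used
# below) exposes check_file_type(is_image, input_path) and
# run() -> (output_path, prediction_time).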
import os
import warnings

import gradio as gr

from classifier import Classifier

# Silence library warnings so they don't clutter the console output.
warnings.filterwarnings('ignore')

# Bundled example media, resolved relative to this file so the demo works
# regardless of the current working directory.
BASE_DIR = os.path.dirname(__file__)

video_examples = [os.path.join(BASE_DIR, "examples/group_test.mp4")]

img_examples = [[os.path.join(BASE_DIR, "examples/group_test.png")],
                [os.path.join(BASE_DIR, "examples/happy.png")],
                [os.path.join(BASE_DIR, "examples/fearful.png")],
                [os.path.join(BASE_DIR, "examples/angry.jpeg")],
                [os.path.join(BASE_DIR, "examples/surprised.jpeg")]]

# Instantiate the classifier once at startup and reuse it across requests.
ai_models = Classifier()

def predict(video_in=None, image_in_video=None, image_in_img=None):
    """Run emotion recognition on whichever input was provided.

    Defaults of None let the video tab call this with a single input while
    the image tab passes both image components; prediction time is returned
    so the UI can show it in a textbox.
    """
    if video_in is None and image_in_video is None and image_in_img is None:
        raise gr.Error("Please upload a video or image.")

    if image_in_video or image_in_img:
        img_path = image_in_video or image_in_img
        _, fext = os.path.splitext(img_path)

        # Dispatch on the extension: .mp4 is treated as video, anything
        # else as a still image.
        ai_models.check_file_type(is_image=(fext != ".mp4"), input_path=img_path)

        output_path, pred_time = ai_models.run()
        return output_path, pred_time

    # Video input: the video tab wires a single output component, so return
    # only the annotated video path.
    _, fext = os.path.splitext(video_in)
    ai_models.check_file_type(is_image=(fext != ".mp4"), input_path=video_in)

    output_path, _ = ai_models.run()
    return output_path

def toggle(choice):
    """Show the selected image source (webcam or upload) and clear both."""
    if choice == "webcam":
        return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
    return gr.update(visible=False, value=None), gr.update(visible=True, value=None)


# Two-tab UI: each tab pairs a webcam/upload source selector with the
# annotated output.
with gr.Blocks() as demo:
    gr.Markdown("## Facial Emotion Recognition using CNN 🤖⭐️")
    with gr.Tab("Video") as tab:
        with gr.Row():
            with gr.Column():
                video_or_file_opt = gr.Radio(["webcam", "upload"], value="webcam",
                                             label="How would you like to upload your video?")
                video_in = gr.Video(format='mp4', source="webcam",
                                    include_audio=False)
                # Switching the radio swaps the Video component's source and
                # clears any previous recording or upload.
                video_or_file_opt.change(fn=lambda s: gr.update(source=s, value=None),
                                         inputs=video_or_file_opt, outputs=video_in,
                                         queue=False, show_progress=False)
            with gr.Column():
                video_out = gr.Video(format='mp4')

        run_btn = gr.Button("Predict")
        run_btn.click(fn=predict, inputs=[video_in], outputs=[video_out])
        gr.Examples(fn=predict, examples=video_examples,
                    inputs=[video_in], outputs=[video_out])

    with gr.Tab("Image"):
        with gr.Row():
            with gr.Column():
                image_or_file_opt = gr.Radio(["webcam", "upload"], value="webcam",
                                             label="How would you like to upload your image?")
                # Two Image components share this slot: one backed by the
                # webcam, one by file upload; toggle() shows one at a time.
                image_in_video = gr.Image(source="webcam", type="filepath")
                image_in_img = gr.Image(source="upload", visible=False,
                                        type="filepath")

                image_or_file_opt.change(fn=toggle, inputs=[image_or_file_opt],
                                         outputs=[image_in_video, image_in_img],
                                         queue=False, show_progress=False)
            with gr.Column():
                image_out = gr.Image()

        run_btn = gr.Button("Predict")
        pred_tbox = gr.Textbox(label="Prediction Time")

        # Map the two image components onto predict's keyword arguments so
        # neither is mistaken for the video input.
        run_btn.click(fn=lambda cam, img: predict(image_in_video=cam, image_in_img=img),
                      inputs=[image_in_video, image_in_img],
                      outputs=[image_out, pred_tbox])

        # Each example row holds a single image path, so it is wired to the
        # upload component only.
        gr.Examples(fn=lambda img: predict(image_in_img=img),
                    examples=img_examples,
                    inputs=[image_in_img],
                    outputs=[image_out, pred_tbox])


# queue() enables request queuing so long-running predictions don't block
# concurrent users.
demo.queue()
demo.launch()
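
# To try the demo locally (assuming classifier.py and the examples/ folder
# sit next to this script): run the script with Python, then open the local
# URL Gradio prints (http://127.0.0.1:7860 by default).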