import glob

import gradio as gr

from utils.predict import predict_action

# Collect the example videos shipped with the Space; Gradio expects one list per example row.
example_list = glob.glob("examples/*")
example_list = [[el] for el in example_list]
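# The Interface below assumes `predict_action` takes a video file path and returns two
# things: a {label: probability} dict for the Label component and a path to a preview
# GIF for the Image component. `utils/predict.py` is not shown here, so the outline
# below is only a hypothetical sketch of that contract (all helper names are assumptions),
# not the actual implementation:
#
# def predict_action(video_path):
#     frames = load_video(video_path)            # assumed helper: sample frames from the clip
#     features = cnn_feature_extractor(frames)   # assumed CNN backbone producing per-frame features
#     probs = transformer_classifier(features)   # assumed Transformer head over the feature sequence
#     gif_path = make_gif(frames)                # assumed helper: write a preview GIF
#     return {label: float(p) for label, p in zip(CLASS_LABELS, probs)}, gif_path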
# Legacy Blocks-based UI (continued in the commented block below), kept for reference:
# def load_example(video):
#     return video[0]
# demo = gr.Blocks()
input_video = gr.Video(label="Input Video", show_label=True)
output_label = gr.Label(label="Model Output", show_label=True)
output_gif = gr.Image(label="Video Gif", show_label=True)
title = "Video Classification with Transformers"
description = "This Space demonstrates a hybrid CNN-Transformer model for video classification. \n The model can classify videos belonging to the following action categories: CricketShot, Punch, ShavingBeard, TennisSwing, PlayingCello. \n Upload a video and try it out 🤗"
article = '\n Demo created by: <a href="https://www.linkedin.com/in/shivalika-singh/">Shivalika Singh</a> <br> Based on this <a href="https://keras.io/examples/vision/video_transformers/">Keras example</a> by <a href="https://twitter.com/RisingSayak">Sayak Paul</a> <br> Demo powered by this <a href="https://huggingface.co/shivi/video-transformers/">Video Classification</a> model'
# Wire everything into a single Interface; cache_examples pre-computes outputs for the example videos at startup.
demo = gr.Interface(
    fn=predict_action,
    inputs=input_video,
    outputs=[output_label, output_gif],
    examples=example_list,
    cache_examples=True,
    allow_flagging="never",  # Gradio expects "never"/"auto"/"manual" here, not a boolean
    analytics_enabled=False,
    title=title,
    description=description,
    article=article,
)
demo.launch(enable_queue=True, share=True)  # share=True is ignored on Spaces; newer Gradio versions prefer demo.queue()
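# Optional local smoke test (hypothetical, relying on the assumed predict_action contract
# sketched above); kept commented out so it does not run on Space startup:
#
# if example_list:
#     confidences, gif_path = predict_action(example_list[0][0])
#     print(confidences, gif_path)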
# with demo:
#     gr.Markdown("# **<p align='center'>Video Classification with Transformers</p>**")
#     gr.Markdown("This space demonstrates the use of hybrid Transformer-based models for video classification that operate on CNN feature maps.")
#     with gr.Tabs():
#         with gr.TabItem("Upload & Predict"):
#             with gr.Box():
#                 with gr.Row():
#                     input_video = gr.Video(label="Input Video", show_label=True)
#                     output_label = gr.Label(label="Model Output", show_label=True)
#                     output_gif = gr.Image(label="Video Gif", show_label=True)
#             gr.Markdown("**Predict**")
#             with gr.Box():
#                 with gr.Row():
#                     submit_button = gr.Button("Submit")
#             gr.Markdown("**Examples:**")
#             gr.Markdown("The model is trained to classify videos belonging to the following classes:")
#             gr.Markdown("CricketShot, PlayingCello, Punch, ShavingBeard, TennisSwing")
#             with gr.Column():
#                 # gr.Examples("examples", [input_video], [output_label, output_gif], predict_action, cache_examples=True)
#                 examples = gr.components.Dataset(components=[input_video], samples=example_list, type='values')
#                 examples.click(load_example, examples, input_video)
#             submit_button.click(predict_action, inputs=input_video, outputs=[output_label, output_gif])
#     gr.Markdown('\n Author: <a href="https://www.linkedin.com/in/shivalika-singh/">Shivalika Singh</a> <br> Based on this <a href="https://keras.io/examples/vision/video_transformers/">Keras example</a> by <a href="https://twitter.com/RisingSayak">Sayak Paul</a> <br> Demo Powered by this <a href="https://huggingface.co/shivi/video-transformers/"> Video Classification</a> model')
# demo.launch()