import gradio as gr
import audiospeechsentimentanalysis_jrmdiouf as assaj


def find_sentiment(audio_path):
    # Run the custom Wav2Vec2 + BERT sentiment pipeline on the uploaded/recorded audio file.
    return assaj.get_audio_sentiment(audio_path)
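

# Example usage (hypothetical file name; the return value is whatever the
# packaged pipeline produces, e.g. a sentiment label string):
#   find_sentiment("sample_clip.wav")
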
with gr.Blocks() as demo:
    gr.Markdown(
        "CUSTOM MODEL BASED ON WAV2VEC2 AND BERT BASE TO ANALYZE SPEECH SENTIMENT"
    )
    # Audio in, predicted sentiment out as text.
    gr.Interface(
        fn=find_sentiment,
        inputs=[gr.Audio(type="filepath")],
        outputs=["text"],
        live=False,
    )
    gr.Markdown(
        "Speech sentiment analysis model loss during training and evaluation"
    )
    with gr.Row():
        gr.Image(value="wandb_chart_train.png", label="Training Loss", width=300)
        gr.Image(value="wandb_chart_eval.png", label="Pipeline Eval Loss", width=300)
    gr.Markdown(
        "Confusion matrix obtained from model evaluation on the VoxCeleb dataset"
    )
    with gr.Row():
        gr.Image(
            value="SpeechSentimentModelConfusionMatrix.png",
            label="Confusion Matrix from model evaluation",
        )
    with gr.Row():
        gr.Markdown("Pipeline accuracy: 0.758")
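
# share=True serves the demo locally and also exposes it through a temporary public Gradio link.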
demo.launch(share=True)