# Source: Hugging Face Space by j-tobias — "Results visualisation"
# (commit 4c0593e, ~4.73 kB); page-chrome lines removed so the file parses.
import gradio as gr
from dataset import Dataset
from model import Model
from utils import compute_wer
import plotly.graph_objs as go
# from utils import hf_login
# hf_login()
from huggingface_hub import login
import os
# Authenticate with the Hugging Face Hub so gated models/datasets can load.
# NOTE(review): the env var is "HF_Token" (mixed case) — confirm it matches
# the Space secret's exact name; if unset, login(None) will fail at startup.
hf_token = os.getenv("HF_Token")
login(hf_token)
# Module-level singletons used only to populate the UI choice lists below
# (dataset.get_options() / models.get_options()); evaluation builds its own.
dataset = Dataset()
models = Model()
def run_tests(dataset_choice: str, model: str):
    """Evaluate one model on one dataset subset and return its WER.

    Args:
        dataset_choice: Name of the dataset subset to load (one of
            ``Dataset.get_options()``).
        model: Identifier of the model to evaluate.

    Returns:
        The word error rate computed by ``compute_wer`` over the subset.
    """
    # snake_case locals replace the original MoDeL / DaTaSeT names.
    runner = Model()
    runner.select(model)
    runner.load()
    # Cap the evaluation at 100 samples to keep a UI-triggered run fast.
    subset = Dataset(100)
    subset.load(dataset_choice)
    references, predictions = runner.process(subset)
    return compute_wer(references=references, predictions=predictions)
def eval(data_subset: str, model_1: str, model_2: str) -> tuple:
    """Evaluate two models on *data_subset* and build the results widgets.

    NOTE(review): shadows the builtin ``eval``; left unrenamed because the
    button wiring elsewhere in this file registers the function by this name.

    Args:
        data_subset: Dataset subset selected in the radio control.
        model_1: First model identifier.
        model_2: Second model identifier.

    Returns:
        A pair ``(results_md, fig)`` — a markdown summary of both WER scores
        and a grouped plotly bar chart comparing them.  (The original
        annotation claimed ``-> str``; the function returns a 2-tuple.)
    """
    wer_result_1 = run_tests(data_subset, model_1)
    wer_result_2 = run_tests(data_subset, model_2)

    results_md = f"""#### {model_1}
- WER Score: {wer_result_1}
#### {model_2}
- WER Score: {wer_result_2}"""

    # One bar trace per model so each gets its own legend entry/colour.
    fig = go.Figure(
        data=[
            go.Bar(x=[f"{model_1}"], y=[wer_result_1]),
            go.Bar(x=[f"{model_2}"], y=[wer_result_2]),
        ]
    )
    # Grouped layout places the two bars side by side for comparison.
    fig.update_layout(
        title="Comparison of Two Models",
        xaxis_title="Models",
        yaxis_title="Value",
        barmode="group",
    )
    return results_md, fig
def get_card(selected_model: str) -> str:
    """Look up the markdown card for *selected_model* in ``cards.txt``.

    Cards in the file are separated by ``@@`` markers and are identified by
    containing an ``ID: <model>`` line.

    Args:
        selected_model: Model identifier chosen in a dropdown.

    Returns:
        The matching card's text, or ``"Unknown Model"`` if none matches.
    """
    # Leftover debug print() calls from the original were removed.
    with open("cards.txt", "r") as f:
        cards = f.read()
    for card in cards.split("@@"):
        if "ID: " + selected_model in card:
            return card
    return "Unknown Model"
def is_own(data_subset: str):
    """Toggle the own-sample inputs based on the dataset radio choice.

    Returns a pair of updates for ``(own_audio, own_transcription)``:
    fresh, visible components when the user picked "own"; hidden ones
    otherwise.
    """
    if data_subset == "own":
        own_audio = gr.Audio(sources=['microphone'], streaming=False)
        own_transcription = gr.TextArea(lines=2)
        return own_audio, own_transcription
    # Bug fix: the original returned (None, None), which clears the values
    # but never re-hides the components once shown; explicitly hide them.
    return gr.update(visible=False), gr.update(visible=False)
# ---- Gradio UI layout ------------------------------------------------------
with gr.Blocks() as demo:
    # Page header and introduction.
    gr.Markdown('# <p style="text-align: center;">ASR Model Comparison 💬</p>')
    gr.Markdown("-------")
    gr.Markdown("""### Welcome to ASR Model Comparison Hub! 🎉
Hey there, and welcome to an app designed just for developers like you, who are passionate about pushing the boundaries of Automatic Speech Recognition (ASR) technology!
Here, you can easily compare different ASR models by selecting a dataset and choosing two models from the dropdown to see how they stack up against each other. If you're feeling creative, go ahead and select 'OWN' as your dataset option to upload your own audio file or record something new right in the app. Don’t forget to provide a transcription, and the app will handle the rest!
ASR Model Comparison Hub uses the Word Error Rate (WER) ⬇️ (the lower the better) metric to give you a clear picture of each model's performance. And hey, don't miss out on checking the **Amazing Leaderboard** where you can see how a wide range of models have been evaluated—[Check it out here](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard).
Happy experimenting and comparing! 🚀""")

    # Dataset picker, centred via empty 1:5:1 spacer columns.
    with gr.Row():
        with gr.Column(scale=1):
            pass
        with gr.Column(scale=5):
            data_subset = gr.Radio(
                value="LibriSpeech Clean",
                choices=dataset.get_options(),
                label="Data subset / Own Sample",
            )
        with gr.Column(scale=1):
            pass

    # Own-sample inputs start hidden; is_own() toggles them when the radio
    # value changes.
    with gr.Row():
        own_audio = gr.Audio(sources=['microphone'],streaming=False,visible=False)
        own_transcription = gr.TextArea(lines=2, visible=False)
    data_subset.change(is_own, inputs=[data_subset], outputs=[own_audio, own_transcription])

    # Two model pickers side by side, each with a markdown card beneath it.
    with gr.Row():
        with gr.Column(scale=1):
            model_1 = gr.Dropdown(
                choices=models.get_options(),
                label="Select Model"
            )
            model_1_card = gr.Markdown("")
        with gr.Column(scale=1):
            model_2 = gr.Dropdown(
                choices=models.get_options(),
                label="Select Model"
            )
            model_2_card = gr.Markdown("")
    # Selecting a model loads its card text from cards.txt.
    model_1.change(get_card, inputs=model_1, outputs=model_1_card)
    model_2.change(get_card, inputs=model_2, outputs=model_2_card)

    eval_btn = gr.Button(
        value="Evaluate",
        variant="primary",
        size="sm")

    # Results area: markdown WER summary + grouped bar chart, both filled by
    # eval() when the button is clicked.
    gr.Markdown('## <p style="text-align: center;">Results</p>')
    results_md = gr.Markdown("")
    results_plot = gr.Plot(show_label=False)
    eval_btn.click(eval, [data_subset, model_1, model_2], [results_md, results_plot])

demo.launch(debug=True)