import gradio as gr
from transformers import pipeline

# Speech-to-text pipeline: transcribes English audio with wav2vec2.
transcription = pipeline(
    "automatic-speech-recognition", model="facebook/wav2vec2-base-100h"
)

# Audio-classification pipeline: an XLS-R model fine-tuned on MINDS-14,
# which predicts the intent of the spoken audio directly from the waveform.
classification = pipeline(
    "audio-classification",
    model="anton-l/xtreme_s_xlsr_300m_minds14",
)


def audio_to_text(audio):
    """Transcribe the uploaded audio file to text."""
    text = transcription(audio)["text"]
    return text


def audio_to_sentiment(audio):
    """Classify the uploaded audio file (no transcription step needed)."""
    return classification(audio)


demo = gr.Blocks()

with demo:
    gr.Markdown("Speech analyzer")
    audio = gr.Audio(type="filepath", label="Upload a file")
    text = gr.Textbox()

    b1 = gr.Button("Convert to text")
    b1.click(audio_to_text, inputs=audio, outputs=text)

    # The classification result (a list of label/score dicts) is shown
    # as text in the same Textbox.
    b2 = gr.Button("Classify speech")
    b2.click(audio_to_sentiment, inputs=audio, outputs=text)

demo.launch()