import gradio as gr
import torch
import whisper
from transformers import pipeline

### ————————————————————————————————————————
title = "Whisper to Emotion"
### ————————————————————————————————————————

# Whisper handles speech-to-text; the DistilBERT pipeline classifies the
# emotion of the resulting English text.
whisper_model = whisper.load_model("small")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

emotion_classifier = pipeline(
    "text-classification",
    model="bhadresh-savani/distilbert-base-uncased-emotion",
    device=device,
)


def translate_and_classify(audio):
    print("Sending audio to Whisper ...")

    # Load the recording, pad/trim it to 30 seconds, and compute the
    # log-Mel spectrogram that Whisper decodes from.
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)
    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)

    _, probs = whisper_model.detect_language(mel)

    # Decode twice: once in the spoken language, once translated to English
    # (fp16=False so decoding also works on CPU).
    transcript_options = whisper.DecodingOptions(task="transcribe", fp16=False)
    translate_options = whisper.DecodingOptions(task="translate", fp16=False)

    transcription = whisper.decode(whisper_model, mel, transcript_options)
    translation = whisper.decode(whisper_model, mel, translate_options)

    print("Language Spoken: " + transcription.language)
    print("Transcript: " + transcription.text)
    print("Translated: " + translation.text)

    # Classify the emotion of the English translation and keep the top label.
    emotion = emotion_classifier(translation.text)[0]["label"]

    return transcription.text, emotion


with gr.Blocks(title=title) as demo:
    gr.Markdown("# Emotion Detection From Speech Using Whisper")

    with gr.Row():
        with gr.Column():
            audio_input = gr.Audio(label="Record Audio Input", source="microphone", type="filepath")
            with gr.Row():
                transcribe_audio = gr.Button("Transcribe")
        with gr.Column():
            transcript_output = gr.Textbox(label="Transcription in the language you spoke")
            emotion_output = gr.Textbox(label="Detected Emotion")

    transcribe_audio.click(
        translate_and_classify,
        inputs=audio_input,
        outputs=[transcript_output, emotion_output],
    )

demo.launch()
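
# ————————————————————————————————————————
# Environment note: this script uses the Gradio 3.x Audio API
# (`source="microphone"`; Gradio 4 renamed it to `sources=["microphone"]`),
# so a matching setup would be something like:
#
#   pip install "gradio<4" openai-whisper transformers torch
#
# Because decoding passes fp16=False, the demo also runs (slowly) without a GPU.
# ————————————————————————————————————————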