import gradio as gr
import torch
import whisper

### ————————————————————————————————————————
title = "Whisper to Emotion"
### ————————————————————————————————————————

# Pick a device and load the Whisper model onto it once at startup.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
whisper_model = whisper.load_model("small", device=device)


def translate(audio):
    print("""
    —
    Sending audio to Whisper ...
    —
    """)
    # Load the recording, pad/trim it to Whisper's 30-second window,
    # and compute the log-Mel spectrogram on the model's device.
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)
    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)

    # Detect the spoken language from the spectrogram.
    _, probs = whisper_model.detect_language(mel)

    # Decode twice: a transcript in the original language and an English translation.
    transcript_options = whisper.DecodingOptions(task="transcribe", fp16=False)
    translate_options = whisper.DecodingOptions(task="translate", fp16=False)
    transcription = whisper.decode(whisper_model, mel, transcript_options)
    translation = whisper.decode(whisper_model, mel, translate_options)

    print("Language Spoken: " + transcription.language)
    print("Transcript: " + transcription.text)
    print("Translated: " + translation.text)

    return transcription.text


with gr.Blocks(title=title) as demo:
    gr.Markdown("""
    # Emotion Detection From Speech Using Whisper
    """)
    audio_input = gr.Audio(label="Record Audio Input", source="microphone", type="filepath")
    transcript_output = gr.Textbox(label="Transcription in the language you spoke")
    # Wire the components with a Button click event (replaces the original
    # gr.Interface nested inside Blocks, which re-renders the same components).
    transcribe_button = gr.Button("Transcribe")
    transcribe_button.click(fn=translate, inputs=audio_input, outputs=transcript_output)

demo.launch()
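
# A minimal smoke test for translate() without launching the UI — a sketch
# assuming a local recording exists at "sample.wav" (hypothetical path).
# Uncomment to run it instead of (or before) demo.launch():
#
# print(translate("sample.wav"))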