import os

# Install openai-whisper from source at startup (common Hugging Face Spaces pattern).
os.system("pip install git+https://github.com/openai/whisper.git")
import gradio as gr
import whisper
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import pipeline
# Tokenizer and RoBERTa model for sentiment classification.
tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment-latest")
model_nlp = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment-latest")

# Whisper model for speech-to-text transcription.
model = whisper.load_model("small")
def inference_audio(audio):
    # Load the recording and pad/trim it to Whisper's 30-second context window.
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    # Compute the log-Mel spectrogram on the model's device and detect the spoken language.
    mel = whisper.log_mel_spectrogram(audio).to(model.device)
    _, probs = model.detect_language(mel)

    # Decode to text; fp16=False keeps inference in full precision (CPU-friendly).
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)
    print(result.text)

    # Return the transcript plus visibility updates for downstream Gradio components.
    return result.text, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
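
# Note (not in the original app): probs maps language codes to probabilities, so a
# hedged extension could surface the detected language alongside the transcript, e.g.
# detected_lang = max(probs, key=probs.get)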
def inference_text(audio):
    # Transcribe the audio, then run sentiment analysis on the transcript.
    text, _, _, _ = inference_audio(audio)
    sentiment_task = pipeline("sentiment-analysis", model=model_nlp, tokenizer=tokenizer)
    res = sentiment_task(text)[0]
    return res["label"], res["score"]
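
# A minimal local sanity check (hypothetical file path, bypassing the Gradio UI):
#     label, score = inference_text("sample.wav")
#     print(label, score)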
audio = gr.Audio(
    label="Input Audio",
    show_label=False,
    source="microphone",
    type="filepath",
)
app = gr.Interface(title="Sentiment Audio Analysis", fn=inference_text, inputs=[audio], outputs=["text", "text"])
app.launch()