import os

import gradio as gr
import whisper
from transformers import pipeline

from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# The OpenAI LLM picks this key up from the environment.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

def predict(text):
    # Prompt that picks one of the four hearing aid products for the customer.
    prompt_template = """Ignore all previous instructions. You are the marketing agent for the world's best hearing aid company.
I am going to give you a customer's message. Analyze it and suggest one of the 4 products in this list:
ampli-mini, mainly for maximum comfort and discretion; ampli-connect, mainly for staying connected to the things you love;
ampli-energy, mainly for feeling full of energy, like you; ampli-easy, mainly for allowing yourself to hear well.
You can also be creative, funny, or show emotion at times.
Also share your company's appointment-booking link: https://www.amplifon.com/uk/book-an-appointment

Question: {question}
Product details:"""

    # Prompt that translates the product suggestion into four languages.
    prompt_template_lang = """You are the world's best language translator. I will give you some text or a paragraph which you have to translate into Tamil, Hindi, Kannada
and French.
Input Text: {text}
Tamil:
Hindi:
Kannada:
French:
"""

    PROMPT = PromptTemplate(template=prompt_template, input_variables=["question"])
    PROMPT_lang = PromptTemplate(template=prompt_template_lang, input_variables=["text"])

    llm = OpenAI()

    chain = LLMChain(llm=llm, prompt=PROMPT)
    chain_lang = LLMChain(llm=llm, prompt=PROMPT_lang)

    # Generate the product suggestion first, then translate it.
    resp = chain.run(question=text)
    resp_lang = chain_lang.run(text=resp)

    return [resp, resp_lang]
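
# A minimal sketch of calling predict() directly (assumes OPENAI_API_KEY is set);
# the example customer message below is hypothetical:
#
#   suggestion, translation = predict("I want a discreet hearing aid I barely notice")
#   print(suggestion)   # product pitch with the booking link
#   print(translation)  # the pitch rendered in Tamil, Hindi, Kannada and French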

# Whisper handles transcription and language identification; a go_emotions
# classifier handles sentiment.
model = whisper.load_model("base")
sentiment_analysis = pipeline("sentiment-analysis", framework="pt", model="SamLowe/roberta-base-go_emotions")
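# Illustrative shape of the pipeline's output (the score is made up):
#   [{'label': 'joy', 'score': 0.92}]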

def analyze_sentiment(text):
    # Map each predicted emotion label to its confidence score.
    results = sentiment_analysis(text)
    sentiment_results = {result['label']: result['score'] for result in results}
    return sentiment_results

def get_sentiment_emoji(sentiment):
    # Emoji for each go_emotions label; unknown labels fall back to "".
    emoji_mapping = {
        "disappointment": "😞",
        "sadness": "😢",
        "annoyance": "😠",
        "neutral": "😐",
        "disapproval": "👎",
        "realization": "😮",
        "nervousness": "😬",
        "approval": "👍",
        "joy": "😄",
        "anger": "😡",
        "embarrassment": "😳",
        "caring": "🤗",
        "remorse": "😔",
        "disgust": "🤢",
        "grief": "😥",
        "confusion": "😕",
        "relief": "😌",
        "desire": "😍",
        "admiration": "😌",
        "optimism": "😊",
        "fear": "😨",
        "love": "❤️",
        "excitement": "🎉",
        "curiosity": "🤔",
        "amusement": "😄",
        "surprise": "😲",
        "gratitude": "🙏",
        "pride": "🦁"
    }
    return emoji_mapping.get(sentiment, "")

def display_sentiment_results(sentiment_results, option):
    sentiment_text = ""
    for sentiment, score in sentiment_results.items():
        emoji = get_sentiment_emoji(sentiment)
        if option == "Sentiment Only":
            sentiment_text += f"{sentiment} {emoji}\n"
        elif option == "Sentiment + Score":
            sentiment_text += f"{sentiment} {emoji}: {score}\n"
    return sentiment_text
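# For example (the score is made up):
#   display_sentiment_results({"joy": 0.92}, "Sentiment + Score") -> "joy 😄: 0.92\n"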

def inference(audio, sentiment_option):
    # Load the recording and fit it to Whisper's 30-second window.
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    mel = whisper.log_mel_spectrogram(audio).to(model.device)

    # Pick the most probable language.
    _, probs = model.detect_language(mel)
    lang = max(probs, key=probs.get)

    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)

    sentiment_results = analyze_sentiment(result.text)
    print(result.text)
    prediction = predict(result.text)
    sentiment_output = display_sentiment_results(sentiment_results, sentiment_option)

    return lang.upper(), result.text, sentiment_output, prediction[0], prediction[1]
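# Note: inference() returns values in the same order as the Gradio outputs wired
# to btn.click below: language, transcription, sentiment, suggestion, translation.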

title = """<h1 align="center">🎤 Multilingual ASR 💬</h1>"""
image_path = "thmbnail.jpg"
description = """
💻 This demo showcases a general-purpose speech recognition model called Whisper. It is trained on a large dataset of diverse audio and supports multilingual speech recognition, speech translation, and language identification.<br><br>

⚙️ Components of the tool:<br>
- Real-time multilingual speech recognition<br>
- Language identification<br>
- Sentiment analysis of the transcriptions<br><br>

🎯 The sentiment analysis results are provided as a dictionary with different emotions and their corresponding scores.<br><br>

😃 The results are displayed with emojis representing the corresponding sentiment.<br><br>

✅ The higher the score for a specific emotion, the stronger its presence in the transcribed text.<br><br>

❓ Use the microphone for real-time speech recognition.<br><br>

⚡️ The model will transcribe the audio and perform sentiment analysis on the transcribed text.
"""

custom_css = """
#banner-image {
    display: block;
    margin-left: auto;
    margin-right: auto;
}
#chat-message {
    font-size: 14px;
    min-height: 300px;
}
"""

block = gr.Blocks(css=custom_css)

with block:
    gr.HTML(title)

    with gr.Row():
        with gr.Column():
            gr.Image(image_path, elem_id="banner-image", show_label=False)
        with gr.Column():
            gr.HTML(description)

    with gr.Group():
        with gr.Box():
            audio = gr.Audio(
                label="Input Audio",
                show_label=False,
                source="microphone",
                type="filepath"
            )

            sentiment_option = gr.Radio(
                choices=["Sentiment Only", "Sentiment + Score"],
                label="Select an option",
                value="Sentiment Only"  # gr.Radio takes `value`, not `default`, for the initial choice
            )

            btn = gr.Button("Transcribe")

            lang_str = gr.Textbox(label="Language")
            text = gr.Textbox(label="Transcription")
            sentiment_output = gr.Textbox(label="Sentiment Analysis Results")
            prediction = gr.Textbox(label="Prediction")
            language_translation = gr.Textbox(label="Language Translation")

    btn.click(
        inference,
        inputs=[audio, sentiment_option],
        outputs=[lang_str, text, sentiment_output, prediction, language_translation],
    )

block.launch()