# --- Hugging Face Spaces page residue (kept for provenance, commented out) ---
# Vishaltiwari2019's picture
# Update app.py
# 9618452 verified
# raw
# history blame
# 2.28 kB
import gradio as gr
from transformers import pipeline
# Load text-to-speech model
# Build a demo Interface directly from a hosted FastSpeech2 model on the
# Hugging Face Hub.
tts_title = "Text to Speech Translation"
tts_examples = ["I love learning machine learning", "How do you do?"]
# gr.Interface.load() is deprecated (and removed in Gradio 4.x);
# gr.load() is the supported replacement with the same signature.
tts_demo = gr.load(
    "huggingface/facebook/fastspeech2-en-ljspeech",
    title=tts_title,
    examples=tts_examples,
    description="Give me something to say!",
)
# Load emotion classification model
# Downloads (on first use) and caches a RoBERTa-base checkpoint fine-tuned
# for emotion classification, wrapped in a transformers text-classification
# pipeline.  Calling emotion_model(text) returns a list of
# {"label": ..., "score": ...} dicts (see usage in classify_emotion_and_speech).
emotion_model_checkpoint = "MuntasirHossain/RoBERTa-base-finetuned-emotion"
emotion_model = pipeline("text-classification", model=emotion_model_checkpoint)
def classify_emotion_and_speech(text=""):
    """Classify the emotion expressed in *text* and build a speech placeholder.

    Parameters
    ----------
    text : str
        Sentence typed into the interface's textbox.

    Returns
    -------
    tuple[str, str]
        ``(emotion_label, speech_output)`` — one value per declared output.
        The Gradio Interface wired to this function declares two positional
        outputs (``["text", "audio"]``); the previous dict return did not
        match that contract, so the values are now returned as a tuple.
    """
    # pipeline(...) returns [{"label": ..., "score": ...}]; take the top label.
    emotion_label = emotion_model(text)[0]["label"]
    # Placeholder for real speech synthesis conditioned on the detected
    # emotion.  NOTE(review): the "audio" output component cannot render a
    # plain string — wire in an actual text-to-speech model here.
    speech_output = f"Emotion: {emotion_label}, Text: {text}"
    return emotion_label, speech_output
# UI copy for the emotion-classification tab.
emotion_title = "Texts Expressing Emotion with Speech"
emotion_description = "This AI model classifies texts expressing human emotion and converts them into speech."
# Each inner list is ONE example row.  The interface has a single textbox
# input, so each row must hold exactly one value; the original packed both
# strings into a single row, feeding two values to a one-input function.
emotion_examples = [["He is very happy today"], ["Free Palestine"]]
# Style map for the Gradio interface: widget role -> CSS property/value pairs.
# NOTE(review): gr.Interface's `theme` parameter does not document accepting
# a raw CSS dict — confirm how this is consumed.
_BRAND_BLUE = "#007bff"
_WHITE = "#fff"

theme = {
    "container": {
        "background-color": _BRAND_BLUE,
        "color": _WHITE,
        "padding": "20px",
    },
    "textbox": {
        "background-color": _WHITE,
        "border-radius": "5px",
        "padding": "10px",
        "margin-bottom": "10px",
    },
    "button": {
        "background-color": _BRAND_BLUE,
        "color": _WHITE,
        "padding": "10px",
        "border-radius": "5px",
        "cursor": "pointer",
    },
    "label": {
        "color": _WHITE,
    },
}
# Emotion-classification demo.
# NOTE(review): gr.Interface's `theme` parameter expects a theme name (str)
# or a gr.themes object, not a CSS-property dict, so the dict previously
# passed here was never a valid theme.  It has been dropped rather than
# mis-passed; to restore custom styling, use the `css=` parameter or build
# a gr.themes.* object.
combined_demo = gr.Interface(
    fn=classify_emotion_and_speech,
    inputs="textbox",
    # Two positional outputs: the emotion label (text) and a speech
    # placeholder routed to an audio component.
    outputs=["text", "audio"],
    title=emotion_title,
    description=emotion_description,
    examples=emotion_examples,
)
# Expose both demos as tabs of a single app.
tab_interfaces = [tts_demo, combined_demo]
tab_titles = ["Text to Speech", "Texts Expressing Emotion with Speech"]
combined_demo_tabbed = gr.TabbedInterface(tab_interfaces, tab_titles)

# Start the Gradio server only when executed as a script, not on import.
if __name__ == "__main__":
    combined_demo_tabbed.launch()