# ChatBot_Temp / app.py
# Author: Sujithanumala — "Update app.py", commit b76903d (verified)
import os
import whisper
import gradio as gr
from groq import Groq
from gtts import gTTS
# ⚠️ SECURITY: an API key is hard-coded below and has been committed to the
# repository — it should be considered leaked, rotated, and supplied via an
# environment variable / secrets store instead of source code.
# setdefault (instead of direct assignment) keeps an externally provided
# GROQ_API_KEY from being silently clobbered by this fallback value.
os.environ.setdefault("GROQ_API_KEY", "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")
# Load the Whisper speech-to-text model once at import time ("small" checkpoint);
# the module-level `model` is reused by speech_to_text() for every request.
# NOTE(review): this downloads weights on first run — startup may be slow.
model = whisper.load_model("small")
def speech_to_text(audio_path):
    """Transcribe the audio file at *audio_path* to text.

    Uses the module-level Whisper ``model`` loaded at startup and returns
    the transcription string.
    """
    return model.transcribe(audio_path)["text"]
def generate_text(prompt):
    """Send *prompt* to the Groq chat API and return the assistant's reply text."""
    groq_client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
    completion = groq_client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[{"role": "user", "content": prompt}],
    )
    # Single-turn request: only the first (and only) choice is relevant.
    return completion.choices[0].message.content
def text_to_speech(text):
    """Synthesize *text* to spoken English with Google TTS.

    Writes the audio to ``output.mp3`` (overwritten on every call) and
    returns that path for Gradio to play.
    """
    output_path = "output.mp3"
    gTTS(text=text, lang="en").save(output_path)
    return output_path
def chat_process(history, text_input, audio_path):
    """Run one chat turn from either typed text or a recorded audio clip.

    Voice input takes priority over the textbox. Appends both sides of the
    exchange to *history* (mutated in place) and returns ``(history,
    tts_audio_path)`` — or ``(history, None)`` when no input was given.
    """
    # Bail out early when neither input channel carries anything.
    if not (audio_path or text_input):
        return history, None

    # Microphone recording wins over typed text when both are present.
    user_input = speech_to_text(audio_path) if audio_path else text_input

    response_text = generate_text(user_input)
    tts_output = text_to_speech(response_text)

    # Record the turn as OpenAI-style message dicts for the Chatbot widget.
    history.append({"role": "user", "content": user_input})
    history.append({"role": "assistant", "content": response_text})
    return history, tts_output
def reset_chat():
    """Return a fresh, empty message history (wired to the Clear Chat button)."""
    fresh_history = []
    return fresh_history
# Gradio chat UI: text + microphone input, chat transcript, spoken reply.
with gr.Blocks() as chat_ui:
    gr.Markdown("## πŸ“πŸŽ™οΈ Sujith's AI Assistant")
    chatbot = gr.Chatbot(label="Conversation", type="messages")  # transcript display
    with gr.Row():
        text_input = gr.Textbox(placeholder="Type your message here...", interactive=True)
        audio_input = gr.Audio(type="filepath", format="wav")  # mic / file upload input
    # FIX: the output Audio component used to be created inline inside
    # .click(...), so it was never placed in the Blocks layout and the
    # synthesized voice reply was never rendered. Define it in the layout
    # and reference it as the click handler's second output instead.
    audio_output = gr.Audio(type="filepath", label="AI Voice Response")
    submit_button = gr.Button("Submit")
    clear_button = gr.Button("Clear Chat")
    submit_button.click(chat_process, [chatbot, text_input, audio_input], [chatbot, audio_output])
    clear_button.click(reset_chat, [], chatbot)
# Start the Gradio server (blocking call).
# NOTE(review): share=True opens a temporary public tunnel URL — anyone with
# the link can use the app (and therefore the Groq API key behind it).
chat_ui.launch(share=True)