import gradio as gr
import asyncio
import atexit
import os
from duckduckgo_search import DDGS
import edge_tts
import hashlib
from datetime import datetime

# Hardcoded values
MODEL = "gpt-4o-mini"
VOICE = "en-US-AvaMultilingualNeural"


async def text_to_speech(text, output_file):
    """Convert text to speech using edge-tts"""
    communicate = edge_tts.Communicate(text, VOICE)
    await communicate.save(output_file)
    return output_file


def get_chat_response(query):
    """Get response from DuckDuckGo Chat"""
    try:
        # System prompt for natural, concise, speech-friendly responses
        system_prompt = (
            "Your name is Vani. Give short, natural responses under 100 words "
            "that sound like casual human speech. Avoid lists, technical jargon, "
            "or complex sentences. Keep it simple and friendly for easy "
            "text-to-speech conversion."
        )
        enhanced_query = f"{system_prompt}\n\n{query}"
        response = DDGS().chat(enhanced_query, model=MODEL, timeout=30)
        return response
    except Exception as e:
        return f"Error: {str(e)}"


async def process_chat(query):
    """Process chat query and generate audio"""
    if not query:
        return "Please enter a query.", None, None

    # Get chat response
    response_text = get_chat_response(query)

    # Generate unique output filename
    hash_object = hashlib.md5(query.encode())
    query_hash = hash_object.hexdigest()[:8]
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    output_file = f"response_{timestamp}_{query_hash}.mp3"

    # Convert to speech
    audio_file = await text_to_speech(response_text, output_file)

    return response_text, audio_file, audio_file


def gradio_interface(query):
    """Gradio interface function"""
    # Run the async pipeline synchronously for Gradio
    response_text, audio_file, _ = asyncio.run(process_chat(query))
    return response_text, audio_file


# Clean up generated audio files on exit (optional for Spaces);
# registered before launch() so it is in place while the app runs
def cleanup():
    for file in os.listdir():
        if file.startswith("response_") and file.endswith(".mp3"):
            try:
                os.remove(file)
            except OSError:
                pass


atexit.register(cleanup)


# Create Gradio interface
with gr.Blocks(title="Chat to Speech Demo") as demo:
    gr.Markdown("# Chat to Speech Demo")
    gr.Markdown("Enter a query and get a short, natural text and audio response!")

    with gr.Row():
        with gr.Column():
            query_input = gr.Textbox(label="Your Query", placeholder="Ask anything...")
            submit_btn = gr.Button("Generate")
        with gr.Column():
            text_output = gr.Textbox(label="Response Text")
            audio_output = gr.Audio(label="Response Audio")

    # Connect inputs to processing function
    submit_btn.click(
        fn=gradio_interface,
        inputs=[query_input],
        outputs=[text_output, audio_output],
    )

# Launch optimized for Hugging Face Spaces
demo.launch(
    server_name="0.0.0.0",
    server_port=7860,
    share=False,  # Set to True if you want public sharing
    show_api=False,
)