Echo-ai
committed on
Update app.py
app.py CHANGED
@@ -1,133 +1,20 @@
-import gradio as gr
-import asyncio
 import os
-from duckduckgo_search import DDGS
-import edge_tts
-import hashlib
-from datetime import datetime
-import re
 
-
-
-
-
-
-async def text_to_speech(text, output_file):
-    """Convert text to speech using edge-tts"""
-    communicate = edge_tts.Communicate(text, VOICE)
-    await communicate.save(output_file)
-    return output_file
-
-def get_chat_response(query):
-    """Get response from DuckDuckGo Chat"""
-    try:
-        system_prompt = "<system_prompt>Your name is Vani. Give short, natural responses under 100 words that sound like casual human speech. Avoid lists, technical jargon, or complex sentences. Keep it simple and friendly for easy text-to-speech conversion.</system_prompt>"
-        enhanced_query = f"{system_prompt}\n\n{query}"
-        response = DDGS().chat(enhanced_query, model=MODEL, timeout=30)
-        return response
-    except Exception as e:
-        return f"Error: {str(e)}"
-
-def split_response(response_text):
-    """Split response into smaller parts for faster TTS"""
-    # Split into sentences first
-    sentences = re.split(r'(?<=[.!?])\s+', response_text.strip())
-    parts = []
-    current_part = []
-    word_count = 0
-
-    for sentence in sentences:
-        words = sentence.split()
-        if word_count + len(words) > MAX_WORDS_PER_PART and current_part:
-            # Start a new part if we exceed the word limit
-            parts.append(" ".join(current_part))
-            current_part = words
-            word_count = len(words)
-        else:
-            current_part.extend(words)
-            word_count += len(words)
-
-    if current_part:  # Add the last part
-        parts.append(" ".join(current_part))
-
-    return parts
-
-async def process_chat(query):
-    """Process chat query, split response, and generate audio parts"""
-    if not query:
-        return "Please enter a query.", None
-
-    # Get chat response
-    response_text = get_chat_response(query)
-
-    # Split response into parts
-    response_parts = split_response(response_text)
 
-
-
-
-    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
-    base_filename = f"response_{timestamp}_{query_hash}"
 
-
-    audio_files = []
-    full_text = ""
-
-    for i, part in enumerate(response_parts):
-        output_file = f"{base_filename}_part{i}.mp3"
-        audio_file = await text_to_speech(part, output_file)
-        audio_files.append(audio_file)
-        full_text += part + " "  # Rebuild full text for display
-
-    # Return full text and list of audio files
-    return full_text.strip(), audio_files
-
-def gradio_interface(query):
-    """Gradio interface function"""
-    # Run async function in Gradio
-    response_text, audio_files = asyncio.run(process_chat(query))
-
-    # Return full text and first audio file (Gradio will display one at a time)
-    # Subsequent parts will be available as separate files
-    return response_text, audio_files[0] if audio_files else None
-
-# Create Gradio interface
-with gr.Blocks(title="Chat to Speech Demo") as demo:
-    gr.Markdown("# Chat to Speech Demo")
-    gr.Markdown("Enter a query and get a fast, natural text and audio response split into parts!")
-
-    with gr.Row():
-        with gr.Column():
-            query_input = gr.Textbox(label="Your Query", placeholder="Ask anything...")
-            submit_btn = gr.Button("Generate")
-
-        with gr.Column():
-            text_output = gr.Textbox(label="Response Text")
-            audio_output = gr.Audio(label="Response Audio (First Part)")
-            gr.Markdown("Note: If the response is long, it’s split into parts. Check the Spaces file system for all parts.")
-
-    # Connect inputs to processing function
-    submit_btn.click(
-        fn=gradio_interface,
-        inputs=[query_input],
-        outputs=[text_output, audio_output]
-    )
-
-# Launch optimized for Hugging Face Spaces
-demo.launch(
-    server_name="0.0.0.0",
-    server_port=7860,
-    share=False  # Set to True if you want public sharing
-)
-
-# Clean up audio files
-def cleanup():
-    for file in os.listdir():
-        if file.startswith("response_") and file.endswith(".mp3"):
-            try:
-                os.remove(file)
-            except:
-                pass
 
-
-
 import os
+import tempfile
+import importlib.util
 
+def load_app():
+    code = os.getenv("OPENAI_API_KEY")
+    if not code:
+        raise RuntimeError("No application code found")
 
+    with tempfile.NamedTemporaryFile(suffix=".py", delete=False) as f:
+        f.write(code.encode('utf-8'))
+        tmp_name = f.name
 
+    spec = importlib.util.spec_from_file_location("hidden_app", tmp_name)
+    module = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(module)
+    module.launch()
 
+if __name__ == "__main__":
+    load_app()
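
For reference, a minimal local smoke test of the new loader could look like the sketch below. It assumes the new file above is saved as app.py and that the source injected through the OPENAI_API_KEY environment variable defines a launch() callable, since load_app() ends by calling module.launch(); the tiny injected app and the test script are only illustrative.

# Hypothetical local check of load_app(): inject a minimal app through the
# same environment variable the loader reads, then run it.
import os
import app  # assumes the new file above is saved as app.py

# The injected source must define launch(), because load_app() calls module.launch().
os.environ["OPENAI_API_KEY"] = (
    "def launch():\n"
    "    print('hidden app started')\n"
)

app.load_app()  # writes the source to a temp .py file, imports it, calls launch()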