Upload app.py

app.py CHANGED
@@ -1,27 +1,28 @@
 import os
 import uuid
+import json
 import logging
-import streamlit as st
 import requests
+import streamlit as st
 from dotenv import load_dotenv
 from utils import voice_map, get_voice_prompt_style, AUDIO_DIR
 from generate_audio import generate_audio
 
-# Load
+# Load API keys from .env
 load_dotenv()
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
 ELEVENLABS_API_KEY = os.getenv("ELEVENLABS_API_KEY")
 
-#
+# Ensure audio output folder exists
 os.makedirs(AUDIO_DIR, exist_ok=True)
 logging.basicConfig(filename="frontend.log", level=logging.INFO)
 
-#
+# Streamlit UI config
 st.set_page_config(page_title="Voice Agent Pro", page_icon="🎤")
 st.title("🎙️ Voice Agent Pro")
 st.markdown("Summarized answers with expressive AI voices.")
 
-# Sidebar voice
+# Sidebar: select voice
 st.sidebar.header("🎙️ Voice Settings")
 voice_label = st.sidebar.selectbox("Choose a voice:", list(voice_map.keys()))
 voice_id = voice_map[voice_label]
@@ -33,18 +34,12 @@ if "audio_key" not in st.session_state: st.session_state.audio_key = None
 if "file_text" not in st.session_state: st.session_state.file_text = ""
 if "key_points" not in st.session_state: st.session_state.key_points = []
 
-#
-query = st.text_area(
-    "🗨️ Ask or refine something based on the bullets:",
-    value="",
-    placeholder="e.g., What makes you so cool, Grandma?",
-    key="query"
-)
-
+# Input fields
+query = st.text_area("🗨️ Ask or refine something based on the bullets:", value="", placeholder="e.g., What makes you so cool, Grandma?", key="query")
 url = st.text_input("🔗 Optional URL to summarize:")
 uploaded_file = st.file_uploader("📄 Or upload a file (PDF, TXT, DOCX)", type=["pdf", "txt", "docx"])
 
-#
+# Reset app state
 if st.button("🧹 Clear All"):
     st.session_state.query = ""
     st.session_state.file_text = ""
@@ -52,17 +47,25 @@ if st.button("🧹 Clear All"):
     st.session_state.audio_key = None
     st.session_state.key_points = []
 
-#
+# Helper: OpenAI response streaming
+def stream_openai_response(payload, headers):
+    with requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload, stream=True) as r:
+        for line in r.iter_lines():
+            if line and line.startswith(b"data: "):
+                yield line[len(b"data: "):].decode()
+
+# Main button: summarize and speak
 if st.button("🔍 Summarize"):
     if not query and not url and not uploaded_file:
         st.warning("Please enter a question, a URL, or upload a file.")
     else:
         with st.spinner("Talking to GPT..."):
            try:
+                # Load file content if present
                 if uploaded_file:
                     st.session_state.file_text = uploaded_file.read().decode("utf-8")
 
-                #
+                # Build GPT prompt
                 context = ""
                 if st.session_state.file_text:
                     context += st.session_state.file_text + "\n\n"
@@ -70,29 +73,34 @@ if st.button("🔍 Summarize"):
                 context += f"Summarize this page: {url}\n\n"
                 context += f"{tone_prompt}\n\nNow answer: {query}"
 
-                #
+                # OpenAI request setup
                 headers = {"Authorization": f"Bearer {OPENAI_API_KEY}"}
-
-
-
-
-
-
-
-
-
-
-
-
-
+                payload = {
+                    "model": "gpt-4o",
+                    "messages": [{"role": "user", "content": context}],
+                    "temperature": 0.7,
+                    "stream": True
+                }
+
+                # Streaming UI update
+                st.session_state.answer = ""
+                answer_box = st.empty()
+
+                for chunk in stream_openai_response(payload, headers):
+                    parsed = json.loads(chunk)
+                    delta = parsed['choices'][0]['delta'].get('content', '')
+                    st.session_state.answer += delta
+                    answer_box.markdown(st.session_state.answer)
+
+                # Generate audio from final answer
                 audio_key = str(uuid.uuid4())
-                generate_audio(answer, voice_id, audio_key)
+                generate_audio(st.session_state.answer, voice_id, audio_key)
                 st.session_state.audio_key = audio_key
 
             except Exception as e:
                 st.error(f"💥 Error: {e}")
 
-#
+# Final UI display
 if st.session_state.answer:
     st.subheader("📝 Answer")
     st.success(st.session_state.answer)
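One caveat when consuming this stream: the chat completions endpoint terminates its server-sent-events stream with a literal "data: [DONE]" line, which is not JSON, so passing every chunk yielded by stream_openai_response straight to json.loads will raise on the final event. A minimal sketch of a guarded consumer, assuming the same stream_openai_response, payload, and headers defined in app.py above (the iter_deltas name is hypothetical, not part of the commit):

import json

def iter_deltas(payload, headers):
    # Yield only the text deltas; the final "[DONE]" sentinel is not JSON, so skip it.
    for chunk in stream_openai_response(payload, headers):
        if chunk.strip() == "[DONE]":
            break
        parsed = json.loads(chunk)
        delta = parsed["choices"][0]["delta"].get("content", "")
        if delta:
            yield delta

The Summarize handler could then accumulate st.session_state.answer from iter_deltas(payload, headers) instead of parsing each raw chunk inline.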