Update app.py
app.py CHANGED
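This commit fixes the download link in `play_and_download_audio` (the `<a>` tag was never closed), relocates `perform_ai_lookup` earlier in the file while adding a `full_audio` option that speaks the complete response, and adds two checkboxes to `main()` (AutoRun on input change, Generate Complete Audio) that are wired through both the autorun and manual Process Input paths.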
@@ -142,9 +142,57 @@ def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0):
 def play_and_download_audio(file_path):
     if file_path and os.path.exists(file_path):
         st.audio(file_path)
-        dl_link = f'<a href="data:audio/mpeg;base64,{base64.b64encode(open(file_path,"rb").read()).decode()}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}
+        dl_link = f'<a href="data:audio/mpeg;base64,{base64.b64encode(open(file_path,"rb").read()).decode()}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}</a>'
         st.markdown(dl_link, unsafe_allow_html=True)
 
+def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False):
+    start = time.time()
+    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+    r = client.predict(q,20,"Semantic Search","mistralai/Mixtral-8x7B-Instruct-v0.1",api_name="/update_with_rag_md")
+    refs = r[0]
+    r2 = client.predict(q,"mistralai/Mixtral-8x7B-Instruct-v0.1",True,api_name="/ask_llm")
+    result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"
+
+    st.markdown(result)
+
+    # Generate full audio version if requested
+    if full_audio:
+        complete_text = f"Complete response for query: {q}. {clean_for_speech(r2)} {clean_for_speech(refs)}"
+        audio_file_full = speak_with_edge_tts(complete_text)
+        st.write("### 📚 Complete Audio Response")
+        play_and_download_audio(audio_file_full)
+
+    if vocal_summary:
+        main_text = clean_for_speech(r2)
+        audio_file_main = speak_with_edge_tts(main_text)
+        st.write("### 🎙️ Vocal Summary (Short Answer)")
+        play_and_download_audio(audio_file_main)
+
+    if extended_refs:
+        summaries_text = "Here are the summaries from the references: " + refs.replace('"','')
+        summaries_text = clean_for_speech(summaries_text)
+        audio_file_refs = speak_with_edge_tts(summaries_text)
+        st.write("### 📜 Extended References & Summaries")
+        play_and_download_audio(audio_file_refs)
+
+    if titles_summary:
+        titles = []
+        for line in refs.split('\n'):
+            m = re.search(r"\[([^\]]+)\]", line)
+            if m:
+                titles.append(m.group(1))
+        if titles:
+            titles_text = "Here are the titles of the papers: " + ", ".join(titles)
+            titles_text = clean_for_speech(titles_text)
+            audio_file_titles = speak_with_edge_tts(titles_text)
+            st.write("### 🔖 Paper Titles")
+            play_and_download_audio(audio_file_titles)
+
+    elapsed = time.time()-start
+    st.write(f"**Total Elapsed:** {elapsed:.2f} s")
+    create_file(q, result, "md")
+    return result
+
 def process_image(image_path, user_prompt):
     with open(image_path, "rb") as imgf:
         image_data = imgf.read()
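Note: the `dl_link` line above embeds the whole MP3 as a base64 `data:` URI and opens the file without closing it. A context-managed equivalent of the same link builder (a sketch for reference, not part of this commit; the helper name is made up):

```python
import base64
import os

def make_audio_download_link(file_path: str) -> str:
    # Read the audio file and base64-encode it into a data: URI,
    # closing the file handle deterministically.
    with open(file_path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    name = os.path.basename(file_path)
    return f'<a href="data:audio/mpeg;base64,{b64}" download="{name}">Download {name}</a>'
```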
@@ -197,47 +245,6 @@ def process_video_with_gpt(video_path, prompt):
     )
     return resp.choices[0].message.content
 
-def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True):
-    start = time.time()
-    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
-    r = client.predict(q,20,"Semantic Search","mistralai/Mixtral-8x7B-Instruct-v0.1",api_name="/update_with_rag_md")
-    refs = r[0]
-    r2 = client.predict(q,"mistralai/Mixtral-8x7B-Instruct-v0.1",True,api_name="/ask_llm")
-    result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"
-
-    st.markdown(result)
-
-    if vocal_summary:
-        main_text = clean_for_speech(r2)
-        audio_file_main = speak_with_edge_tts(main_text)
-        st.write("### 🎙️ Vocal Summary (Short Answer)")
-        play_and_download_audio(audio_file_main)
-
-    if extended_refs:
-        summaries_text = "Here are the summaries from the references: " + refs.replace('"','')
-        summaries_text = clean_for_speech(summaries_text)
-        audio_file_refs = speak_with_edge_tts(summaries_text)
-        st.write("### 📜 Extended References & Summaries")
-        play_and_download_audio(audio_file_refs)
-
-    if titles_summary:
-        titles = []
-        for line in refs.split('\n'):
-            m = re.search(r"\[([^\]]+)\]", line)
-            if m:
-                titles.append(m.group(1))
-        if titles:
-            titles_text = "Here are the titles of the papers: " + ", ".join(titles)
-            titles_text = clean_for_speech(titles_text)
-            audio_file_titles = speak_with_edge_tts(titles_text)
-            st.write("### 🔖 Paper Titles")
-            play_and_download_audio(audio_file_titles)
-
-    elapsed = time.time()-start
-    st.write(f"**Total Elapsed:** {elapsed:.2f} s")
-    create_file(q, result, "md")
-    return result
-
 def process_with_gpt(text):
     if not text: return
     st.session_state.messages.append({"role":"user","content":text})
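The block above removes the old `perform_ai_lookup`; the commit re-adds it at new lines 148-195 with the extra `full_audio` parameter rather than editing it in place. A typical call under the new signature (hypothetical query string, and it assumes the Gradio Space behind `Client(...)` is reachable):

```python
result_md = perform_ai_lookup(
    "mixture of experts routing",   # hypothetical query
    vocal_summary=True,             # short spoken answer
    extended_refs=False,            # skip the long reference audio
    titles_summary=True,            # read out the paper titles
    full_audio=True,                # new in this commit: speak the whole response
)
```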
@@ -385,9 +392,6 @@ def main():
     st.sidebar.markdown("### 🚲BikeAI🏆 Multi-Agent Research AI")
     tab_main = st.radio("Action:",["🎤 Voice Input","📸 Media Gallery","🔍 Search ArXiv","📝 File Editor"],horizontal=True)
 
-    # Removed the old model_choice radio
-    # Instead, we rely on the dropdown in the Process Input section.
-
     mycomponent = components.declare_component("mycomponent", path="mycomponent")
     val = mycomponent(my_input_value="Hello")
 
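`main()` compares `val` against `st.session_state.old_val` in the next hunk. The initialization is not shown in this diff; a minimal guard like the following sketch, placed before the comparison, would keep a fresh session from raising an error:

```python
# Sketch: ensure the key exists before main() reads it (assumes the app
# does something equivalent elsewhere, outside the lines shown here).
if "old_val" not in st.session_state:
    st.session_state.old_val = None
```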
@@ -396,39 +400,51 @@ def main():
         val_stripped = val.replace('\n', ' ')
         edited_input = st.text_area("Edit your detected input:", value=val_stripped, height=100)
         run_option = st.selectbox("Select AI Model:", ["Arxiv", "GPT-4o", "Claude-3.5"])
-
+        col1, col2 = st.columns(2)
+        with col1:
+            autorun = st.checkbox("AutoRun on input change", value=False)
+        with col2:
+            full_audio = st.checkbox("Generate Complete Audio", value=False,
+                                     help="Generate audio for the complete response including all papers and summaries")
 
         input_changed = (val != st.session_state.old_val)
 
         if autorun and input_changed:
-            # Automatically run the selected model if input changed
             st.session_state.old_val = val
-
+            if run_option == "Arxiv":
+                perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
+                                  titles_summary=True, full_audio=full_audio)
+            else:
+                run_selected_model(run_option, edited_input)
         else:
-            # If not autorun, show a button to run manually
             if st.button("Process Input"):
                 st.session_state.old_val = val
-
-
+                if run_option == "Arxiv":
+                    perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
+                                      titles_summary=True, full_audio=full_audio)
+                else:
+                    run_selected_model(run_option, edited_input)
 
     if tab_main == "🔍 Search ArXiv":
         st.subheader("🔍 Search ArXiv")
-        q=st.text_input("Research query:")
+        q = st.text_input("Research query:")
 
         st.markdown("### 🎙️ Audio Generation Options")
         vocal_summary = st.checkbox("🎙️ Vocal Summary (Short Answer)", value=True)
         extended_refs = st.checkbox("📜 Extended References & Summaries (Long)", value=False)
         titles_summary = st.checkbox("🔖 Paper Titles Only", value=True)
+        full_audio = st.checkbox("📚 Generate Complete Audio Response", value=False,
+                                 help="Generate audio for the complete response including all papers and summaries")
 
         if q and st.button("Run ArXiv Query"):
-            perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs, titles_summary=titles_summary)
+            perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs,
+                              titles_summary=titles_summary, full_audio=full_audio)
 
     elif tab_main == "🎤 Voice Input":
         st.subheader("🎤 Voice Recognition")
         user_text = st.text_area("Message:", height=100)
         user_text = user_text.strip().replace('\n', ' ')
         if st.button("Send 📨"):
-            # Default to GPT-4o here, or you could similarly provide options.
             process_with_gpt(user_text)
         st.subheader("📜 Chat History")
         t1,t2=st.tabs(["Claude History","GPT-4o History"])
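`run_selected_model` is called for the non-Arxiv choices but is not defined anywhere in this diff. Presumably it dispatches to the per-model handlers; a sketch of what it would have to do, assuming a `process_with_claude` counterpart to `process_with_gpt` exists (the Claude History tab suggests one does):

```python
def run_selected_model(run_option: str, text: str):
    # Dispatch the edited input to the handler for the selected model.
    if run_option == "GPT-4o":
        process_with_gpt(text)
    elif run_option == "Claude-3.5":
        process_with_claude(text)  # hypothetical helper mirroring process_with_gpt
```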
@@ -506,4 +522,4 @@ def main():
     st.rerun()
 
 if __name__=="__main__":
-    main()
+    main()
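`speak_with_edge_tts` itself is outside the diff; only its signature appears in the first hunk header. A minimal sketch of such a helper built on the `edge-tts` package, assuming it writes an MP3 and returns the path (the rate/pitch formatting matches what `edge_tts.Communicate` expects):

```python
import asyncio

import edge_tts

def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0,
                        out_path="speech.mp3"):
    # edge-tts takes signed-percent and signed-Hz strings, e.g. "+0%" / "-10Hz".
    communicate = edge_tts.Communicate(
        text, voice, rate=f"{rate:+d}%", pitch=f"{pitch:+d}Hz")
    asyncio.run(communicate.save(out_path))
    return out_path
```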