awacke1 committed on
Commit bd2c640 · verified · 1 Parent(s): 53efd63

Create backup-03252025.app.py

Files changed (1)
  1. backup-03252025.app.py +2140 -0
backup-03252025.app.py ADDED
@@ -0,0 +1,2140 @@
+ import base64
+ import cv2
+ import glob
+ import json
+ import math
+ import os
+ import pytz
+ import random
+ import re
+ import requests
+ import streamlit as st
+ import streamlit.components.v1 as components
+ import textract
+ import time
+ import zipfile
+
+ from audio_recorder_streamlit import audio_recorder
+ from bs4 import BeautifulSoup
+ from collections import deque
+ from datetime import datetime
+ from dotenv import load_dotenv
+ from gradio_client import Client, handle_file
+ from huggingface_hub import InferenceClient
+ from io import BytesIO
+ #from moviepy.editor import VideoFileClip
+ from moviepy import VideoFileClip
+ from PIL import Image
+ from PyPDF2 import PdfReader
+ from templates import bot_template, css, user_template
+ from urllib.parse import quote # Ensure this import is included
+ from xml.etree import ElementTree as ET
+
+ import openai
+ from openai import OpenAI
+
+ # 1. Configuration
+ Site_Name = 'Scholarly-Article-Document-Search-With-Memory'
+ title="🔬🧠ScienceBrain.AI"
+ helpURL='https://huggingface.co/awacke1'
+ bugURL='https://huggingface.co/spaces/awacke1'
+ icons='🔬'
+ icons = Image.open("icons.ico")
+ st.set_page_config(
+     page_title=title,
+     page_icon=icons,
+     layout="wide",
+     #initial_sidebar_state="expanded",
+     initial_sidebar_state="auto",
+     menu_items={
+         'Get Help': helpURL,
+         'Report a bug': bugURL,
+         'About': title
+     }
+ )
+
+ # My Inference API Copy
+ API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud' # Dr Llama
+ API_KEY = os.getenv('API_KEY')
+ MODEL1="meta-llama/Llama-2-7b-chat-hf"
+ MODEL1URL="https://huggingface.co/meta-llama/Llama-2-7b-chat-hf"
+ HF_KEY = os.getenv('HF_KEY')
+ headers = {
+     "Authorization": f"Bearer {HF_KEY}",
+     "Content-Type": "application/json"
+ }
+ key = os.getenv('OPENAI_API_KEY')
+ prompt = "...."
+ should_save = st.sidebar.checkbox("💾 Save", value=True, help="Save your session data.")
+
+
+
+ client = OpenAI(api_key= os.getenv('OPENAI_API_KEY'), organization=os.getenv('OPENAI_ORG_ID'))
+ MODEL = "gpt-4o-2024-05-13"
+ if "openai_model" not in st.session_state:
+     st.session_state["openai_model"] = MODEL
+ if "messages" not in st.session_state:
+     st.session_state.messages = []
+ if st.button("Clear Session"):
+     st.session_state.messages = []
+
81
+ # HTML5 based Speech Synthesis (Text to Speech in Browser)
82
+ @st.cache_resource
83
+ def SpeechSynthesis(result):
84
+ documentHTML5='''
85
+ <!DOCTYPE html>
86
+ <html>
87
+ <head>
88
+ <title>Read It Aloud</title>
89
+ <script type="text/javascript">
90
+ function readAloud() {
91
+ const text = document.getElementById("textArea").value;
92
+ const speech = new SpeechSynthesisUtterance(text);
93
+ window.speechSynthesis.speak(speech);
94
+ }
95
+ </script>
96
+ </head>
97
+ <body>
98
+ <h1>🔊 Read It Aloud</h1>
99
+ <textarea id="textArea" rows="10" cols="80">
100
+ '''
101
+ documentHTML5 = documentHTML5 + result
102
+ documentHTML5 = documentHTML5 + '''
103
+ </textarea>
104
+ <br>
105
+ <button onclick="readAloud()">🔊 Read Aloud</button>
106
+ </body>
107
+ </html>
108
+ '''
109
+ components.html(documentHTML5, width=1280, height=300)
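+ # Illustrative usage (not part of the original code): SpeechSynthesis("Hello world") renders the text
+ # area plus a "🔊 Read Aloud" button; playback runs client-side through the browser's
+ # SpeechSynthesisUtterance API, so no server-side text-to-speech service is assumed or required.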
110
+
111
+
112
+
113
+ # GPT4o documentation
114
+ # 1. Cookbook: https://cookbook.openai.com/examples/gpt4o/introduction_to_gpt4o
115
+ # 2. Configure your Project and Orgs to limit/allow Models: https://platform.openai.com/settings/organization/general
116
+ # 3. Watch your Billing! https://platform.openai.com/settings/organization/billing/overview
117
+
118
+
119
+ # Set API key and organization ID from environment variables
120
+
121
+ openai.api_key = os.getenv('OPENAI_API_KEY')
122
+ openai.organization = os.getenv('OPENAI_ORG_ID')
123
+ client = OpenAI(api_key= os.getenv('OPENAI_API_KEY'), organization=os.getenv('OPENAI_ORG_ID'))
124
+
125
+ # Define the model to be used
126
+ #MODEL = "gpt-4o"
127
+ MODEL = "gpt-4o-2024-05-13"
128
+
129
+
130
+
131
+ # 5. Auto name generated output files from time and content
132
+ def generate_filename(prompt, file_type):
133
+ """
134
+ Generates a safe filename using the prompt and file type.
135
+ It allows Unicode characters, including emojis, and replaces unsafe characters with spaces.
136
+ """
137
+ # Get current time in the US/Central timezone
138
+ central = pytz.timezone('US/Central')
139
+ safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
140
+
141
+ # Replace any unsafe characters with spaces, allow emojis and Unicode characters
142
+ replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
143
+
144
+ # Strip extra spaces from the start and end, and collapse multiple spaces
145
+ safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:240] # Limit length for filename safety
146
+
147
+ return f"{safe_date_time}_{safe_prompt}.{file_type}"
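+ # Illustrative example (hypothetical prompt and clock time): generate_filename("What is MoE?", "md")
+ # at 2:30 PM US/Central on March 25 would return "0325_1430_What is MoE.md": the MMDD_HHMM stamp,
+ # an underscore, then the prompt with unsafe characters replaced by spaces and trimmed to 240 characters.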
148
+
149
+
150
+ def create_and_save_file(content, file_type="md", prompt=None, is_image=False, should_save=True):
151
+ """
152
+ Combines file name generation and file creation into one function.
153
+ If the file is a markdown file, extracts the title from the content (if available) and uses it for the filename.
154
+ """
155
+ if not should_save:
156
+ return None
157
+
158
+ # Step 1: Generate filename based on the prompt or content
159
+ filename = generate_filename(prompt if prompt else content, file_type)
160
+
161
+ # Step 2: If it's a markdown file, check if it has a title (e.g., # Heading in markdown)
162
+ if file_type == "md":
163
+ title_from_content = extract_markdown_title(content)
164
+ if title_from_content:
165
+ filename = generate_filename(title_from_content, file_type)
166
+
167
+ # Step 3: Save the file
168
+ with open(filename, "w", encoding="utf-8") as f:
169
+ if is_image:
170
+ f.write(content)
171
+ else:
172
+ f.write(prompt + "\n\n" + content)
173
+
174
+ return filename
175
+
176
+
177
+ def extract_markdown_title(content):
178
+ """
179
+ Extracts the first markdown title (line starting with '#') from the content.
180
+ """
181
+ # Use regex to find the first line that starts with '#'
182
+ title_match = re.search(r'^\s*#\s*(.+)', content, re.MULTILINE)
183
+ if title_match:
184
+ return title_match.group(1).strip()
185
+ return None
186
+
187
+
188
+ # 5. Auto name generated output files from time and content
189
+ def generate_filename_old2(prompt, file_type):
190
+ central = pytz.timezone('US/Central')
191
+ safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
192
+ replaced_prompt = prompt.replace(" ", "_").replace("\n", "_")
193
+ safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:240] # 255 is linux max, 260 is windows max
194
+ #safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:45]
195
+ return f"{safe_date_time}_{safe_prompt}.{file_type}"
196
+
197
+
198
+ def create_and_save_file_old2(content, file_type="md", prompt=None, is_image=False, should_save=True):
199
+ """
200
+ Combines file name generation and file creation into one function.
201
+ If the file is a markdown file, extracts the title from the content (if available) and uses it for the filename.
202
+ """
203
+ if not should_save:
204
+ return None
205
+
206
+ # Step 1: Generate filename
207
+ filename = generate_filename(prompt if prompt else content, file_type)
208
+
209
+ # Step 2: If it's a markdown file, check if it has a title (e.g., # Heading in markdown)
210
+ if file_type == "md":
211
+ title_from_content = extract_markdown_title(content)
212
+ if title_from_content:
213
+ filename = generate_filename(title_from_content, file_type)
214
+
215
+ # Step 3: Save file
216
+ with open(filename, "w", encoding="utf-8") as f:
217
+ if is_image:
218
+ f.write(content)
219
+ else:
220
+ f.write(prompt + "\n\n" + content)
221
+
222
+ return filename
223
+
224
+
225
+ def extract_markdown_title(content):
226
+ """
227
+ Extract the first markdown title (line starting with '#') from the content.
228
+ """
229
+ # Use regex to find the first line that starts with '#'
230
+ title_match = re.search(r'^\s*#\s*(.+)', content, re.MULTILINE)
231
+ if title_match:
232
+ return title_match.group(1).strip()
233
+ return None
234
+
235
+ def process_text(text_input):
236
+ if text_input:
237
+
238
+ st.session_state.messages.append({"role": "user", "content": text_input})
239
+
240
+ with st.chat_message("user"):
241
+ st.markdown(text_input)
242
+
243
+ with st.chat_message("assistant"):
244
+ completion = client.chat.completions.create(
245
+ model=MODEL,
246
+ messages=[
247
+ {"role": m["role"], "content": m["content"]}
248
+ for m in st.session_state.messages
249
+ ],
250
+ stream=False
251
+ )
252
+ return_text = completion.choices[0].message.content
253
+ st.write("Assistant: " + return_text)
254
+ filename = generate_filename(text_input, "md")
255
+
256
+ create_and_save_file(return_text, file_type="md", prompt=text_input, is_image=False, should_save=True)
257
+ #create_file(filename, text_input, return_text, should_save)
258
+ st.session_state.messages.append({"role": "assistant", "content": return_text})
259
+
260
+ #st.write("Assistant: " + completion.choices[0].message.content)
261
+
262
+ def create_file(filename, prompt, response, is_image=False):
263
+ with open(filename, "w", encoding="utf-8") as f:
264
+ f.write(prompt + "\n\n" + response)
265
+
266
+ def sanitize_filename(filename):
267
+ import string
268
+ # Characters not allowed in Windows filenames
269
+ windows_disallowed_chars = '<>:"\\|?*'
270
+
271
+ # Characters not allowed in Unix/Linux filenames
272
+ linux_disallowed_chars = '/'
273
+
274
+ # Additional disallowed characters (non-printable ASCII characters)
275
+ additional_disallowed_chars = ''.join(chr(i) for i in range(32))
276
+
277
+ # Combined set of disallowed characters
278
+ disallowed_chars = windows_disallowed_chars + linux_disallowed_chars + additional_disallowed_chars
279
+
280
+ # Remove disallowed characters
281
+ sanitized_filename = ''.join(c for c in filename if c not in disallowed_chars and c in string.printable)
282
+
283
+ return sanitized_filename
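+ # Illustrative example (hypothetical name): sanitize_filename('notes: draft?.md') returns 'notes draft.md';
+ # the ':' and '?' are dropped rather than replaced, and non-printable characters are removed as well.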
284
+
285
+
286
+ # Now filename length protected for linux and windows filename lengths
287
+ def save_image(image, filename):
288
+ max_filename_length = 250
289
+ filename_stem, extension = os.path.splitext(filename)
290
+ truncated_stem = filename_stem[:max_filename_length - len(extension)] if len(filename) > max_filename_length else filename_stem
291
+ filename = f"{truncated_stem}{extension}"
292
+ filename = sanitize_filename(filename)
293
+ try:
294
+ with open(filename, "wb") as f:
295
+ f.write(image.getbuffer())
296
+ except:
297
+ errored=True
298
+ return filename
299
+
300
+ def extract_boldface_terms(text):
301
+ return re.findall(r'\*\*(.*?)\*\*', text)
302
+
303
+ def extract_title(text):
304
+ boldface_terms = re.findall(r'\*\*(.*?)\*\*', text)
305
+ if boldface_terms:
306
+ title = ' '.join(boldface_terms)
307
+ else:
308
+ title = re.sub(r'[^a-zA-Z0-9_\-]', ' ', text[-200:])
309
+ return title[-200:]
310
+
311
+ def process_audio(audio_input, text_input=''):
312
+ if audio_input:
313
+
314
+ # Check type - if it is a file we need bytes
315
+ #st.write(audio_input)
316
+ #if isinstance(audio_input, str):
317
+ with open(audio_input, "rb") as file:
318
+ audio_input = file.read()
319
+ #SaveNewFile=False # file is there and this is just prompt inference
320
+ #st.write(audio_input)
321
+
322
+ transcription = client.audio.transcriptions.create(
323
+ model="whisper-1",
324
+ file=audio_input,
325
+ )
326
+ st.session_state.messages.append({"role": "user", "content": transcription.text})
327
+ with st.chat_message("assistant"):
328
+ st.markdown(transcription.text)
329
+
330
+ SpeechSynthesis(transcription.text)
331
+ filename = generate_filename(transcription.text, "wav")
332
+
333
+ create_audio_file(filename, audio_input, should_save)
334
+
335
+ #SpeechSynthesis(transcription.text)
336
+
337
+ filename = generate_filename(transcription.text, "md")
338
+ create_file(filename, transcription.text, transcription.text, should_save)
339
+ #st.markdown(response.choices[0].message.content)
340
+
341
+ def process_audio_for_video(video_input):
342
+ if video_input:
343
+ try:
344
+ transcription = client.audio.transcriptions.create(
345
+ model="whisper-1",
346
+ file=video_input,
347
+ )
348
+ response = client.chat.completions.create(
349
+ model=MODEL,
350
+ messages=[
351
+ {"role": "system", "content":"""You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."""},
352
+ {"role": "user", "content": [{"type": "text", "text": f"The audio transcription is: {transcription}"}],}
353
+ ],
354
+ temperature=0,
355
+ )
356
+ st.markdown(response.choices[0].message.content)
357
+ return response.choices[0].message.content
358
+ except:
359
+ st.write('No transcript')
360
+
361
+ #@st.cache_resource
362
+ def process_image(image_input, user_prompt):
363
+ SaveNewFile=True
364
+ image_file_name=''
365
+ if isinstance(image_input, str):
366
+ image_file_name = image_input
367
+ with open(image_input, "rb") as image_file:
368
+ image_input = image_file.read()
369
+ SaveNewFile=False # file is there and this is just prompt inference
370
+ else:
371
+ if image_input is None:
372
+ data=False
373
+ else:
374
+ #image_file_name = image_input.name
375
+ image_bytes = image_input.read()
376
+ SaveNewFile=True
377
+ try:
378
+ if (image_input.filename is not None):
379
+ image_file_name = image_input.filename
380
+ except:
381
+ image_file_name = image_input.name
382
+ image_input = image_bytes # this should allow new posts to save and to flow through bytes
383
+
384
+ st.markdown('Processing image: ' + image_file_name)
385
+ base64_image = base64.b64encode(image_input).decode("utf-8")
386
+ response = client.chat.completions.create(
387
+ model=MODEL,
388
+ messages=[
389
+ {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
390
+ {"role": "user", "content": [
391
+ {"type": "text", "text": user_prompt},
392
+ {"type": "image_url", "image_url": {
393
+ "url": f"data:image/png;base64,{base64_image}"}
394
+ }
395
+ ]}
396
+ ],
397
+ temperature=0.0,
398
+ )
399
+ image_response = response.choices[0].message.content
400
+ st.markdown(image_response)
401
+
402
+ # Save markdown on image AI output from gpt4o
403
+ filename_md = generate_filename(image_file_name + '- ' + image_response, "md")
404
+ # Save markdown on image AI output from gpt4o
405
+ filename_png = filename_md.replace('.md', '.' + image_file_name.split('.')[-1])
406
+
407
+ create_file(filename_md, image_response, '', True)
408
+
409
+ with open(filename_md, "w", encoding="utf-8") as f:
410
+ f.write(image_response)
411
+
412
+ # Extract boldface terms from image_response then autoname save file
413
+ boldface_terms = extract_title(image_response).replace(':','')
414
+ filename_stem, extension = os.path.splitext(image_file_name)
415
+ filename_img = f"{filename_stem} {''.join(boldface_terms)}{extension}"
416
+ if SaveNewFile:
417
+ newfilename = save_image(image_input, filename_img)
418
+ filename_md = newfilename.replace('.png', '.md')
419
+ create_file(filename_md, '', image_response, True)
420
+ else:
421
+
422
+ filename = generate_filename(filename_md, "md")
423
+ create_file(filename, image_file_name, image_response, should_save)
424
+
425
+ #filename_md = image_file_name.replace('.png', '.md')
426
+ #create_file(filename_md, '', image_response, True)
427
+
428
+
429
+ return image_response
430
+
431
+
432
+ def create_audio_file(filename, audio_data, should_save):
433
+ if should_save:
434
+ with open(filename, "wb") as file:
435
+ file.write(audio_data.getvalue())
436
+ st.success(f"Audio file saved as {filename}")
437
+ else:
438
+ st.warning("Audio file not saved.")
439
+
440
+ def save_video(video_file):
441
+ # Save the uploaded video file
442
+ with open(video_file.name, "wb") as f:
443
+ f.write(video_file.getbuffer())
444
+ return video_file.name
445
+
446
+ def process_video_broke(video_input, user_prompt):
447
+ SaveNewFile=True
448
+ video_file_name=''
449
+ if isinstance(video_input, str):
450
+ video_file_name = video_input
451
+ with open(video_input, "rb") as video_file:
452
+ video_input = video_file.read()
453
+ SaveNewFile=False # file is there and this is just prompt inference
454
+ else:
455
+ video_file_name = video_input.name
456
+ video_input = video_input.read()
457
+ SaveNewFile=True
458
+
459
+ st.markdown('Processing video: ' + video_file_name)
460
+
461
+ base64Frames, audio_path = process_video(video_file_name, seconds_per_frame=1)
462
+
463
+ # Get the transcript for the video model call
464
+ transcript = process_audio_for_video(video_input)
465
+
466
+ # Generate a summary with visual and audio
467
+ response = client.chat.completions.create(
468
+ model=MODEL,
469
+ messages=[
470
+ {"role": "system", "content": """You are generating a video summary. Create a summary of the provided video and its transcript. Respond in Markdown"""},
471
+ {"role": "user", "content": [
472
+ "These are the frames from the video.",
473
+ *map(lambda x: {"type": "image_url",
474
+ "image_url": {"url": f'data:image/jpg;base64,{x}', "detail": "low"}}, base64Frames),
475
+ {"type": "text", "text": f"The audio transcription is: {transcript}"},
476
+ {"type": "text", "text": user_prompt}
477
+ ]},
478
+ ],
479
+ temperature=0,
480
+ )
481
+ video_response = response.choices[0].message.content
482
+ st.markdown(video_response)
483
+
484
+ # Save markdown on video AI output from gpt4o
485
+ filename_md = generate_filename(video_file_name + '- ' + video_response, "md")
486
+ # Save markdown on video AI output from gpt4o
487
+ filename_mp4 = filename_md.replace('.md', '.' + video_file_name.split('.')[-1])
488
+
489
+ create_file(filename_md, video_response, '', True)
490
+
491
+ with open(filename_md, "w", encoding="utf-8") as f:
492
+ f.write(video_response)
493
+
494
+ # Extract boldface terms from video_response then autoname save file
495
+ boldface_terms = extract_title(video_response).replace(':','')
496
+ filename_stem, extension = os.path.splitext(video_file_name)
497
+ filename_video = f"{filename_stem} {''.join(boldface_terms)}{extension}"
498
+ if SaveNewFile:
499
+ newfilename = save_video(video_input, filename_video)
500
+ #filename_md = newfilename.replace('.mp4', '.md')
501
+ filename_md = newfilename.replace('.mp4', '.md')
502
+ create_file(filename_md, '', video_response, True)
503
+ else:
504
+ filename = generate_filename(filename_md, "md")
505
+ create_file(filename, video_file_name, video_response, should_save)
506
+
507
+ return video_response
508
+
509
+ def process_video(video_path, seconds_per_frame=2):
510
+ base64Frames = []
511
+ base_video_path, _ = os.path.splitext(video_path)
512
+ video = cv2.VideoCapture(video_path)
513
+ total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
514
+ fps = video.get(cv2.CAP_PROP_FPS)
515
+ frames_to_skip = int(fps * seconds_per_frame)
516
+ curr_frame = 0
517
+
518
+ # Loop through the video and extract frames at specified sampling rate
519
+ while curr_frame < total_frames - 1:
520
+ video.set(cv2.CAP_PROP_POS_FRAMES, curr_frame)
521
+ success, frame = video.read()
522
+ if not success:
523
+ break
524
+ _, buffer = cv2.imencode(".jpg", frame)
525
+ base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
526
+ curr_frame += frames_to_skip
527
+
528
+ video.release()
529
+
530
+ # Extract audio from video
531
+ audio_path = f"{base_video_path}.mp3"
532
+ try:
533
+ clip = VideoFileClip(video_path)
534
+ clip.audio.write_audiofile(audio_path, bitrate="32k")
535
+ clip.audio.close()
536
+ clip.close()
537
+ except:
538
+ st.write('No audio track found, moving on..')
539
+
540
+
541
+ print(f"Extracted {len(base64Frames)} frames")
542
+ print(f"Extracted audio to {audio_path}")
543
+
544
+ return base64Frames, audio_path
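+ # Sampling arithmetic (illustrative figures, not from the original): with fps=30 and the default
+ # seconds_per_frame=2, frames_to_skip=60, so a 10-second clip (300 frames) yields about 5 base64
+ # JPEG frames, plus a 32k-bitrate MP3 extracted alongside whenever an audio track is present.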
545
+
546
+ def process_audio_and_video(video_input):
547
+ if video_input is not None:
548
+ # Save the uploaded video file
549
+ video_path = save_video(video_input )
550
+
551
+ # Process the saved video
552
+ base64Frames, audio_path = process_video(video_path)
553
+
554
+ # Get the transcript for the video model call
555
+ transcript = process_audio_for_video(video_input)
556
+
557
+ # Generate a summary with visual and audio
558
+ response = client.chat.completions.create(
559
+ model=MODEL,
560
+ messages=[
561
+ {"role": "system", "content": """You are generating a video summary. Create a summary of the provided video and its transcript. Respond in Markdown"""},
562
+ {"role": "user", "content": [
563
+ "These are the frames from the video.",
564
+ *map(lambda x: {"type": "image_url",
565
+ "image_url": {"url": f'data:image/jpg;base64,{x}', "detail": "low"}}, base64Frames),
566
+ {"type": "text", "text": f"The audio transcription is: {transcript}"}
567
+ ]},
568
+ ],
569
+ temperature=0,
570
+ )
571
+ results = response.choices[0].message.content
572
+ st.markdown(results)
573
+
574
+ if transcript:
575
+ filename = generate_filename(transcript, "md")
576
+ create_file(filename, transcript, results, should_save)
577
+
578
+
579
+
580
+
581
+
582
+
583
+
584
+ # 🔍Search Glossary
585
+ # @st.cache_resource
586
+ def search_glossary(query):
587
+ all=""
588
+ st.markdown(f"- {query}")
589
+
590
+ # 🔍Run 1 - ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM
591
+ client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
592
+ response2 = client.predict(
593
+ query, # str in 'parameter_13' Textbox component
594
+ #"mistralai/Mixtral-8x7B-Instruct-v0.1", # Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] in 'LLM Model' Dropdown component
595
+ #"mistralai/Mistral-7B-Instruct-v0.2", # Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] in 'LLM Model' Dropdown component
596
+ "google/gemma-7b-it", # Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] in 'LLM Model' Dropdown component
597
+ True, # bool in 'Stream output' Checkbox component
598
+ api_name="/ask_llm"
599
+ )
600
+ st.write('🔍Run of Multi-Agent System Paper Summary Spec is Complete')
601
+ st.markdown(response2)
602
+
603
+ # ArXiv searcher ~-<>-~ Paper References - Update with RAG
604
+ client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
605
+ response1 = client.predict(
606
+ query,
607
+ 10,
608
+ "Semantic Search - up to 10 Mar 2024", # Literal['Semantic Search - up to 10 Mar 2024', 'Arxiv Search - Latest - (EXPERIMENTAL)'] in 'Search Source' Dropdown component
609
+ "mistralai/Mixtral-8x7B-Instruct-v0.1", # Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] in 'LLM Model' Dropdown component
610
+ api_name="/update_with_rag_md"
611
+ )
612
+ st.write('🔍Run of Multi-Agent System Paper References is Complete')
613
+ responseall = response2 + response1[0] + response1[1]
614
+ st.markdown(responseall)
615
+ return responseall
616
+
617
+
618
+
619
+
620
+ def parse_to_markdown(text):
621
+ return text
622
+
623
+ def load_file(file_name):
624
+ with open(file_name, "r", encoding='utf-8') as file:
625
+ #with open(file_name, "r") as file:
626
+ content = file.read()
627
+ return content
628
+
629
+ def extract_urls(text):
630
+ try:
631
+ date_pattern = re.compile(r'### (\d{2} \w{3} \d{4})')
632
+ abs_link_pattern = re.compile(r'\[(.*?)\]\((https://arxiv\.org/abs/\d+\.\d+)\)')
633
+ pdf_link_pattern = re.compile(r'\[⬇️\]\((https://arxiv\.org/pdf/\d+\.\d+)\)')
634
+ title_pattern = re.compile(r'### \d{2} \w{3} \d{4} \| \[(.*?)\]')
635
+ date_matches = date_pattern.findall(text)
636
+ abs_link_matches = abs_link_pattern.findall(text)
637
+ pdf_link_matches = pdf_link_pattern.findall(text)
638
+ title_matches = title_pattern.findall(text)
639
+
640
+ # markdown with the extracted fields
641
+ markdown_text = ""
642
+ for i in range(len(date_matches)):
643
+ date = date_matches[i]
644
+ title = title_matches[i]
645
+ abs_link = abs_link_matches[i][1]
646
+ pdf_link = pdf_link_matches[i]
647
+ markdown_text += f"**Date:** {date}\n\n"
648
+ markdown_text += f"**Title:** {title}\n\n"
649
+ markdown_text += f"**Abstract Link:** [{abs_link}]({abs_link})\n\n"
650
+ markdown_text += f"**PDF Link:** [{pdf_link}]({pdf_link})\n\n"
651
+ markdown_text += "---\n\n"
652
+ return markdown_text
653
+
654
+ except:
655
+ st.write('.')
656
+ return ''
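+ # The regexes above expect arXiv listing lines shaped like this hypothetical example:
+ # ### 10 Mar 2024 | [Paper Title](https://arxiv.org/abs/2403.01234) | [⬇️](https://arxiv.org/pdf/2403.01234)
+ # i.e. a '### DD Mon YYYY' date, a bracketed title linking to an /abs/ page, and a ⬇️ link to the PDF.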
657
+
658
+ def download_pdfs(urls):
659
+ local_files = []
660
+ for url in urls:
661
+ if url.endswith('.pdf'):
662
+ local_filename = url.split('/')[-1]
663
+ response = requests.get(url)
664
+ with open(local_filename, 'wb') as f:
665
+ f.write(response.content)
666
+ local_files.append(local_filename)
667
+ return local_files
668
+
669
+ def generate_html(local_files):
670
+ html = "<ul>"
671
+ for file in local_files:
672
+ link = f'<li><a href="{file}">{file}</a></li>'
673
+ html += link
674
+ html += "</ul>"
675
+ return html
676
+
677
+
678
+
679
+
680
+
681
+ #@st.cache_resource
682
+ def search_arxiv(query):
683
+ start_time = time.strftime("%Y-%m-%d %H:%M:%S")
684
+ client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
685
+ response1 = client.predict(
686
+ message="Hello!!",
687
+ llm_results_use=5,
688
+ database_choice="Semantic Search",
689
+ llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
690
+ api_name="/update_with_rag_md"
691
+ )
692
+
693
+ Question = '### 🔎 ' + query + '\r\n' # Format for markdown display with links
694
+ References = response1[0]
695
+ References2 = response1[1]
696
+
697
+ st.code(References, language="markdown")
698
+ st.code(References2, language="markdown")
699
+
700
+ ReferenceLinks = extract_urls(References)
701
+
702
+ filename = generate_filename(query, "md")
703
+ create_file(filename, query, References + ReferenceLinks, should_save)
704
+ st.session_state.messages.append({"role": "assistant", "content": References + ReferenceLinks})
705
+
706
+ RunSecondQuery = True
707
+ results=''
708
+ if RunSecondQuery:
709
+ # Search 2 - Retrieve the Summary with Papers Context and Original Query
710
+ response2 = client.predict(
711
+ query,
712
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
713
+ True,
714
+ api_name="/ask_llm"
715
+ )
716
+ if len(response2) > 10:
717
+ Answer = response2
718
+ SpeechSynthesis(Answer)
719
+ # Restructure results to follow format of Question, Answer, References, ReferenceLinks
720
+ results = Question + '\r\n' + Answer + '\r\n' + References + '\r\n' + ReferenceLinks
721
+ st.markdown(results)
722
+
723
+ st.write('🔍Run of Multi-Agent System Paper Summary Spec is Complete')
724
+ end_time = time.strftime("%Y-%m-%d %H:%M:%S")
725
+ start_timestamp = time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))
726
+ end_timestamp = time.mktime(time.strptime(end_time, "%Y-%m-%d %H:%M:%S"))
727
+ elapsed_seconds = end_timestamp - start_timestamp
728
+ st.write(f"Start time: {start_time}")
729
+ st.write(f"Finish time: {end_time}")
730
+ st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")
731
+
732
+ return results
733
+
734
+ def download_pdfs_and_generate_html(urls):
735
+ pdf_links = []
736
+ for url in urls:
737
+ if url.endswith('.pdf'):
738
+ pdf_filename = os.path.basename(url)
739
+ download_pdf(url, pdf_filename)
740
+ pdf_links.append(pdf_filename)
741
+ local_links_html = '<ul>'
742
+ for link in pdf_links:
743
+ local_links_html += f'<li><a href="{link}">{link}</a></li>'
744
+ local_links_html += '</ul>'
745
+ return local_links_html
746
+
747
+ def download_pdf(url, filename):
748
+ response = requests.get(url)
749
+ with open(filename, 'wb') as file:
750
+ file.write(response.content)
751
+
752
+ # Prompts for App, for App Product, and App Product Code
753
+ PromptPrefix = 'Create a specification with streamlit functions creating markdown outlines and tables rich with appropriate emojis for methodical step by step rules defining the concepts at play. Use story structure architect rules to plan, structure and write three dramatic situations to include in the rules and how to play by matching the theme for topic of '
754
+ PromptPrefix2 = 'Create a streamlit python user app with full code listing to create a UI implementing it using streamlit, gradio, huggingface to create user interface elements like emoji buttons, sliders, drop downs, and data interfaces like dataframes to show tables and session_state, matching this ruleset and thematic story plot line: '
755
+ PromptPrefix3 = 'Create a HTML5 aframe and javascript app using appropriate libraries to create a word game simulation with advanced libraries like aframe to render 3d scenes creating moving entities that stay within a bounding box but show text and animation in 3d for inventory, components and story entities. Show full code listing. Add a list of new random entities say 3 of a few different types to any list appropriately and use emojis to make things easier and fun to read. Use appropriate emojis in labels. Create the UI to implement storytelling in the style of a dungeon master, with features using three emoji appropriate text plot twists and recurring interesting funny fascinating and complex almost poetic named characters with genius traits and file IO, randomness, ten point choice lists, math distribution tradeoffs, witty humorous dilemnas with emoji , rewards, variables, reusable functions with parameters, and data driven app with python libraries and streamlit components for Javascript and HTML5. Use appropriate emojis for labels to summarize and list parts, function, conditions for topic:'
756
+
757
+ # MoE Roleplaying Technique for Context Experts
758
+ roleplaying_glossary = {
759
+ "🤖 AI Concepts": {
760
+ "MoE (Mixture of Experts) 🧠": [
761
+ "As a leading AI health researcher, provide an overview of MoE, MAS, memory, and mirroring in healthcare applications.",
762
+ "Explain how MoE and MAS can be leveraged to create AGI and AMI systems for healthcare, as an AI architect.",
763
+ "Discuss the key concepts, benefits, and challenges of self-rewarding AI in healthcare, as an expert.",
764
+ "Identify the top 3 pain points that MoE addresses in AI and healthcare, such as complexity and resource allocation.",
765
+ "Describe the top 3 joys of the MoE solution, including improved performance and adaptability in healthcare AI.",
766
+ "Highlight the top 3 superpowers MoE gives users, like tackling complex problems and personalizing interventions.",
767
+ "Identify the top 3 problems MoE solves in AI and healthcare, such as model complexity, lack of specialization, and inefficient resource allocation, and explain how it addresses each problem effectively.",
768
+ "Outline the 3 essential method steps required for implementing MoE in AI systems, highlighting the novelty and significance of each step in advancing healthcare applications.",
769
+ "Discuss the innovative aspects of the MoE method steps and how they differ from traditional approaches, contributing to advancements in AI and healthcare.",
770
+ "Propose 3 creative ways to structure MoE-based projects and collaborations to optimize performance, efficiency, and impact in healthcare AI applications."
771
+ ],
772
+ "Multi Agent Systems (MAS) 🤝": [
773
+ "As a renowned MAS researcher, describe the key characteristics of distributed, autonomous, and cooperative MAS.",
774
+ "Discuss how MAS is applied in robotics, simulations, and decentralized problem-solving, as an AI engineer.",
775
+ "Provide insights into future trends and breakthroughs in MAS research and applications, as a thought leader.",
776
+ "Identify the top 3 pain points MAS addresses in complex environments, such as coordination and adaptability.",
777
+ "Describe the top 3 joys of the MAS solution, including enhanced collaboration and emergent behaviors in AI.",
778
+ "Highlight the top 3 superpowers MAS gives users, like modeling complex systems and building resilient applications.",
779
+ "Identify the top 3 problems MAS solves in complex, distributed environments, such as lack of coordination, limited adaptability, and centralized control, and explain how it addresses each problem effectively.",
780
+ "Outline the 3 essential method steps required for designing and implementing MAS, highlighting the novelty and significance of each step in advancing AI applications.",
781
+ "Discuss the innovative aspects of the MAS method steps and how they differ from traditional approaches, contributing to advancements in distributed AI systems.",
782
+ "Propose 3 creative ways to structure MAS-based projects and collaborations to optimize performance, efficiency, and impact in various AI domains."
783
+ ],
784
+ "Self Rewarding AI 🎁": [
785
+ "As a leading expert, discuss the main research areas in developing AI with intrinsic motivation and goal-setting.",
786
+ "Explain how self-rewarding AI enables open-ended development and adaptability, as a curiosity-driven researcher.",
787
+ "Share your vision for the future of AI systems that autonomously set goals, learn, and adapt, as a pioneer.",
788
+ "Identify the top 3 pain points self-rewarding AI addresses, such as lack of motivation and limited adaptability.",
789
+ "Describe the top 3 joys of the self-rewarding AI solution, including autonomous learning and novel solutions.",
790
+ "Highlight the top 3 superpowers self-rewarding AI gives users, like creating continuously improving AI systems.",
791
+ "Identify the top 3 problems self-rewarding AI solves in current AI systems, such as lack of intrinsic motivation, limited adaptability, and reliance on external rewards, and explain how it addresses each problem effectively.",
792
+ "Outline the 3 essential method steps required for developing self-rewarding AI systems, highlighting the novelty and significance of each step in advancing autonomous AI.",
793
+ "Discuss the innovative aspects of the self-rewarding AI method steps and how they differ from traditional approaches, contributing to advancements in open-ended AI development.",
794
+ "Propose 3 creative ways to structure self-rewarding AI projects and collaborations to optimize performance, efficiency, and impact in creating adaptive and self-motivated AI systems."
795
+ ]
796
+ },
797
+ "🛠️ AI Tools & Platforms": {
798
+ "ChatDev 💬": [
799
+ "As a chatbot developer, ask about the features and capabilities ChatDev offers for building conversational AI.",
800
+ "Inquire about the pre-built assets, integrations, and multi-platform support in ChatDev, as a product manager.",
801
+ "Ask how ChatDev facilitates chatbot development, deployment, and analytics across channels, as a business owner.",
802
+ "Identify the top 3 challenges ChatDev helps overcome in chatbot development, such as customization and management.",
803
+ "Outline the top 3 essential method steps in building chatbots with ChatDev, emphasizing novelty and efficiency.",
804
+ "Propose 3 innovative ways to structure chatbot projects using ChatDev for optimizing speed, engagement, and deployment.",
805
+ "Identify the top 3 problems ChatDev solves in chatbot development, such as limited customization, lack of multi-platform support, and difficulty in managing conversational flows, and explain how it addresses each problem effectively.",
806
+ "Outline the 3 essential method steps required for building chatbots using ChatDev, highlighting the novelty and significance of each step in streamlining the development process.",
807
+ "Discuss the innovative aspects of the ChatDev method steps and how they differ from traditional approaches, contributing to advancements in conversational AI development.",
808
+ "Propose 3 creative ways to structure chatbot projects using ChatDev to optimize performance, efficiency, and impact in creating engaging and multi-platform conversational experiences."
809
+ ],
810
+ "Online Multiplayer Experiences 🌐": [
811
+ "As a game developer, explore the potential of online multiplayer experiences, including games, AR, and VR.",
812
+ "Discuss the future of image and video models in enhancing online multiplayer experiences, as a researcher.",
813
+ "Inquire about the challenges and opportunities in creating immersive and interactive online multiplayer environments.",
814
+ "Identify the top 3 problems online multiplayer experiences solve, such as limited social interaction, lack of realism, and difficulty in creating engaging content, and explain how they address each problem effectively.",
815
+ "Outline the 3 essential method steps required for developing cutting-edge online multiplayer experiences, highlighting the novelty and significance of each step in advancing gaming, AR, and VR.",
816
+ "Discuss the innovative aspects of online multiplayer experience development and how they differ from traditional approaches, contributing to advancements in immersive technologies.",
817
+ "Propose 3 creative ways to structure online multiplayer projects and collaborations to optimize performance, efficiency, and impact in creating captivating and socially engaging experiences.",
818
+ "Explore the potential of integrating AI and machine learning techniques in online multiplayer experiences to enhance player interactions, generate dynamic content, and personalize experiences.",
819
+ "Discuss the ethical considerations and challenges in developing online multiplayer experiences, such as ensuring fair play, protecting user privacy, and moderating user-generated content.",
820
+ "Identify the key trends and future directions in online multiplayer experiences, considering advancements in AI, AR, VR, and cloud computing technologies."
821
+ ]
822
+ },
823
+ "🔬 Science Topics": {
824
+ "Physics 🔭": [
825
+ "As a Physics student, ask about the main branches and research areas in Physics and their interconnections.",
826
+ "Discuss the current state and future directions of Astrophysics research, as a researcher in the field.",
827
+ "Explain how General Relativity, Quantum Cosmology, and Mathematical Physics interrelate, as a theorist.",
828
+ "Identify the top 3 fundamental questions in Physics that recent research aims to answer and their implications.",
829
+ "Outline the top 3 essential method steps in conducting cutting-edge Physics research, emphasizing novelty.",
830
+ "Propose 3 innovative ways to structure research collaborations in Physics for interdisciplinary breakthroughs.",
831
+ "Identify the top 3 problems physics research solves, such as understanding fundamental laws, resolving theory inconsistencies, and exploring the universe's origins, and explain how it addresses each problem effectively.",
832
+ "Outline the 3 essential method steps required for conducting cutting-edge physics research, highlighting the novelty and significance of each step in advancing our understanding of the universe.",
833
+ "Discuss the innovative aspects of the physics research method steps and how they differ from traditional approaches, contributing to advancements in the field.",
834
+ "Propose 3 creative ways to structure physics research projects and collaborations to optimize performance, efficiency, and impact in making groundbreaking discoveries."
835
+ ],
836
+ "Mathematics ➗": [
837
+ "As a Mathematics enthusiast, inquire about the main branches of Mathematics and their key research areas.",
838
+ "Ask about the main branches of pure Mathematics, like Algebra and Geometry, and their fundamental concepts.",
839
+ "Discuss how Probability, Statistics, and Applied Math relate to other Mathematical fields, as an applied mathematician.",
840
+ "Identify the top 3 unsolved problems in Mathematics that researchers are actively working on and their significance.",
841
+ "Describe the top 3 core method steps in advancing mathematical research, highlighting novelty and creativity.",
842
+ "Suggest 3 innovative ways to structure mathematical research and collaborations for discoveries and applications.",
843
+ "Identify the top 3 problems mathematics research solves, such as proving theorems, developing new tools, and finding real-world applications, and explain how it addresses each problem effectively.",
844
+ "Outline the 3 essential method steps required for advancing mathematical research, highlighting the novelty and significance of each step in expanding mathematical knowledge.",
845
+ "Discuss the innovative aspects of the mathematical research method steps and how they differ from traditional approaches, contributing to advancements in the field.",
846
+ "Propose 3 creative ways to structure mathematical research projects and collaborations to optimize performance, efficiency, and impact in making novel discoveries and finding interdisciplinary applications."
847
+ ],
848
+ "Computer Science 💻": [
849
+ "As a Computer Science student, ask about the main research areas shaping the future of computing.",
850
+ "Discuss the major research topics in AI, ML, NLP, Vision, Graphics, and Robotics, as an AI researcher.",
851
+ "Inquire about the interconnections between Algorithms, Data Structures, Databases, and Programming Languages.",
852
+ "Identify the top 3 critical challenges in Computer Science that current research aims to address and approaches.",
853
+ "Outline the top 3 essential method steps in conducting groundbreaking Computer Science research, emphasizing novelty.",
854
+ "Propose 3 creative ways to structure research projects in Computer Science for innovation and real-world applications.",
855
+ "Identify the top 3 problems computer science research solves, such as developing efficient algorithms, building secure systems, and advancing AI and machine learning, and explain how it addresses each problem effectively.",
856
+ "Outline the 3 essential method steps required for conducting groundbreaking computer science research, highlighting the novelty and significance of each step in pushing the boundaries of computing.",
857
+ "Discuss the innovative aspects of the computer science research method steps and how they differ from traditional approaches, contributing to advancements in the field.",
858
+ "Propose 3 creative ways to structure computer science research projects and collaborations to optimize performance, efficiency, and impact in driving innovation and solving real-world problems."
859
+ ]
860
+ }
861
+ }
862
+ # This displays per video and per image.
863
+ @st.cache_resource
864
+ def display_glossary_entity(k):
865
+ search_urls = {
866
+ "🚀🌌ArXiv": lambda k: f"/?q={quote(k)}", # this url plus query!
867
+ "🃏Analyst": lambda k: f"/?q={quote(k)}-{quote(PromptPrefix)}", # this url plus query!
868
+ "📚PyCoder": lambda k: f"/?q={quote(k)}-{quote(PromptPrefix2)}", # this url plus query!
869
+ "🔬JSCoder": lambda k: f"/?q={quote(k)}-{quote(PromptPrefix3)}", # this url plus query!
870
+ "📖": lambda k: f"https://en.wikipedia.org/wiki/{quote(k)}",
871
+ "🔍": lambda k: f"https://www.google.com/search?q={quote(k)}",
872
+ "🔎": lambda k: f"https://www.bing.com/search?q={quote(k)}",
873
+ "🎥": lambda k: f"https://www.youtube.com/results?search_query={quote(k)}",
874
+ "🐦": lambda k: f"https://twitter.com/search?q={quote(k)}",
875
+ }
876
+ links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
877
+ #st.markdown(f"{k} {links_md}", unsafe_allow_html=True)
878
+ st.markdown(f"**{k}** <small>{links_md}</small>", unsafe_allow_html=True)
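+ # Illustrative call (term is hypothetical): display_glossary_entity("Mixture of Experts") prints the term
+ # in bold followed by small emoji links (ArXiv/Analyst/PyCoder/JSCoder routes back into this app, plus
+ # Wikipedia, Google, Bing, YouTube, and Twitter searches), each URL built with urllib.parse.quote.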
879
+
880
+ # Function to display the entire glossary in a grid format with links
881
+ @st.cache_resource
882
+ def display_glossary_grid(roleplaying_glossary):
883
+ search_urls = {
884
+ "🚀🌌ArXiv": lambda k: f"/?q={quote(k)}", # this url plus query!
885
+ "🃏Analyst": lambda k: f"/?q={quote(k)}-{quote(PromptPrefix)}", # this url plus query!
886
+ "📚PyCoder": lambda k: f"/?q={quote(k)}-{quote(PromptPrefix2)}", # this url plus query!
887
+ "🔬JSCoder": lambda k: f"/?q={quote(k)}-{quote(PromptPrefix3)}", # this url plus query!
888
+ "📖": lambda k: f"https://en.wikipedia.org/wiki/{quote(k)}",
889
+ "🔍": lambda k: f"https://www.google.com/search?q={quote(k)}",
890
+ "▶️": lambda k: f"https://www.youtube.com/results?search_query={quote(k)}",
891
+ "🔎": lambda k: f"https://www.bing.com/search?q={quote(k)}",
892
+ "🎥": lambda k: f"https://www.youtube.com/results?search_query={quote(k)}",
893
+ "🐦": lambda k: f"https://twitter.com/search?q={quote(k)}",
894
+ }
895
+
896
+ for category, details in roleplaying_glossary.items():
897
+ st.write(f"### {category}")
898
+ cols = st.columns(len(details)) # Create dynamic columns based on the number of games
899
+ #cols = st.columns(num_columns_text) # Create dynamic columns based on the number of games
900
+ for idx, (game, terms) in enumerate(details.items()):
901
+ with cols[idx]:
902
+ st.markdown(f"#### {game}")
903
+ for term in terms:
904
+ links_md = ' '.join([f"[{emoji}]({url(term)})" for emoji, url in search_urls.items()])
905
+ st.markdown(f"**{term}** <small>{links_md}</small>", unsafe_allow_html=True)
906
+
907
+
908
+ # ChatBot client chat completions ------------------------- !!
909
+ def process_text2(MODEL='gpt-4o-2024-05-13', text_input='What is 2+2 and what is an imaginary number'):
910
+ if text_input:
911
+ completion = client.chat.completions.create(
912
+ model=MODEL,
913
+ messages=st.session_state.messages
914
+ )
915
+ return_text = completion.choices[0].message.content
916
+ st.write("Assistant: " + return_text)
917
+ filename = generate_filename(text_input, "md")
918
+
919
+ create_and_save_file(return_text, file_type="md", prompt=text_input, is_image=False, should_save=True) # the new
920
+
921
+ #create_file(filename, text_input, return_text, should_save)
922
+ return return_text
923
+
924
+ @st.cache_resource
925
+ def get_table_download_link(file_path):
926
+
927
+ try:
928
+ #with open(file_path, 'r') as file:
929
+ #with open(file_path, 'r', encoding="unicode", errors="surrogateescape") as file:
930
+ with open(file_path, 'r', encoding='utf-8') as file:
931
+ data = file.read()
932
+
933
+ b64 = base64.b64encode(data.encode()).decode()
934
+ file_name = os.path.basename(file_path)
935
+ ext = os.path.splitext(file_name)[1] # get the file extension
936
+ if ext == '.txt':
937
+ mime_type = 'text/plain'
938
+ elif ext == '.py':
939
+ mime_type = 'text/plain'
940
+ elif ext == '.xlsx':
941
+ mime_type = 'text/plain'
942
+ elif ext == '.csv':
943
+ mime_type = 'text/plain'
944
+ elif ext == '.htm':
945
+ mime_type = 'text/html'
946
+ elif ext == '.md':
947
+ mime_type = 'text/markdown'
948
+ elif ext == '.wav':
949
+ mime_type = 'audio/wav'
950
+ else:
951
+ mime_type = 'application/octet-stream' # general binary data type
952
+ href = f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
953
+ return href
954
+ except:
955
+ return ''
956
+
957
+
958
+ @st.cache_resource
959
+ def create_zip_of_files(files): # ----------------------------------
960
+ zip_name = "Arxiv-Paper-Search-QA-RAG-Streamlit-Gradio-AP.zip"
961
+ with zipfile.ZipFile(zip_name, 'w') as zipf:
962
+ for file in files:
963
+ zipf.write(file)
964
+ return zip_name
965
+
966
+ @st.cache_resource
967
+ def get_zip_download_link(zip_file):
968
+ with open(zip_file, 'rb') as f:
969
+ data = f.read()
970
+ b64 = base64.b64encode(data).decode()
971
+ href = f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
972
+ return href # ----------------------------------
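+ # Note (illustrative): the returned anchor inlines the entire archive as a base64 data URI, whose
+ # payload begins with 'UEsD' (the base64 encoding of a ZIP's 'PK' signature), so very large
+ # archives can noticeably bloat the rendered sidebar HTML.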
973
+
974
+ def get_file():
975
+ st.write(st.session_state['file'])
976
+
977
+ def SaveFileTextClicked():
978
+ fileText = st.session_state.file_content_area
979
+ fileName = st.session_state.file_name_input
980
+ with open(fileName, 'w', encoding='utf-8') as file:
981
+ file.write(fileText)
982
+ st.markdown('Saved ' + fileName + '.')
983
+
984
+ def SaveFileNameClicked():
985
+ newFileName = st.session_state.file_name_input
986
+ oldFileName = st.session_state.filename
987
+ if (newFileName!=oldFileName):
988
+ os.rename(oldFileName, newFileName)
989
+ st.markdown('Renamed file ' + oldFileName + ' to ' + newFileName + '.')
990
+ newFileText = st.session_state.file_content_area
991
+ oldFileText = st.session_state.filetext
992
+
993
+
994
+ # Function to compare file sizes and delete duplicates
995
+ def compare_and_delete_files(files):
996
+ if not files:
997
+ st.warning("No files to compare.")
998
+ return
999
+
1000
+ # Dictionary to store file sizes and their paths
1001
+ file_sizes = {}
1002
+ for file in files:
1003
+ size = os.path.getsize(file)
1004
+ if size in file_sizes:
1005
+ file_sizes[size].append(file)
1006
+ else:
1007
+ file_sizes[size] = [file]
1008
+
1009
+ # Remove all but the latest file for each size group
1010
+ for size, paths in file_sizes.items():
1011
+ if len(paths) > 1:
1012
+ latest_file = max(paths, key=os.path.getmtime)
1013
+ for file in paths:
1014
+ if file != latest_file:
1015
+ os.remove(file)
1016
+ st.success(f"Deleted {file} as a duplicate.")
1017
+ st.rerun()
1018
+
1019
+ # Function to get file size
1020
+ def get_file_size(file_path):
1021
+ return os.path.getsize(file_path)
1022
+
1023
+ def FileSidebar():
1024
+
1025
+ # File Sidebar for files 🌐View, 📂Open, ▶️Run, and 🗑Delete per file
1026
+ all_files = glob.glob("*.md")
1027
+ all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10] # exclude files with short names
1028
+ all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by filename length which puts similar prompts together - consider making date and time of file optional.
1029
+
1030
+ # ⬇️ Download
1031
+ Files1, Files2 = st.sidebar.columns(2)
1032
+ with Files1:
1033
+ if st.button("🗑 Delete All"):
1034
+ for file in all_files:
1035
+ os.remove(file)
1036
+ st.rerun()
1037
+ with Files2:
1038
+ if st.button("⬇️ Download"):
1039
+ zip_file = create_zip_of_files(all_files)
1040
+ st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)
1041
+ file_contents=''
1042
+ file_name=''
1043
+ next_action=''
1044
+
1045
+ # Add files 🌐View, 📂Open, ▶️Run, and 🗑Delete per file
1046
+ for file in all_files:
1047
+ col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1]) # adjust the ratio as needed
1048
+ with col1:
1049
+ if st.button("🌐", key="md_"+file): # md emoji button
1050
+ file_contents = load_file(file)
1051
+ file_name=file
1052
+ next_action='md'
1053
+ st.session_state['next_action'] = next_action
1054
+ with col2:
1055
+ st.markdown(get_table_download_link(file), unsafe_allow_html=True)
1056
+ with col3:
1057
+ if st.button("📂", key="open_"+file): # open emoji button
1058
+ file_contents = load_file(file)
1059
+ file_name=file
1060
+ next_action='open'
1061
+ st.session_state['lastfilename'] = file
1062
+ st.session_state['filename'] = file
1063
+ st.session_state['filetext'] = file_contents
1064
+ st.session_state['next_action'] = next_action
1065
+ with col4:
1066
+ if st.button("▶️", key="read_"+file): # search emoji button
1067
+ file_contents = load_file(file)
1068
+ file_name=file
1069
+ next_action='search'
1070
+ st.session_state['next_action'] = next_action
1071
+ with col5:
1072
+ if st.button("🗑", key="delete_"+file):
1073
+ os.remove(file)
1074
+ file_name=file
1075
+ st.rerun()
1076
+ next_action='delete'
1077
+ st.session_state['next_action'] = next_action
1078
+
1079
+
1080
+ # 🚩File duplicate detector - useful to prune and view all. Pruning works well by file size detection of two similar and flags the duplicate.
1081
+ file_sizes = [get_file_size(file) for file in all_files]
1082
+ previous_size = None
1083
+ st.sidebar.title("File Operations")
1084
+ for file, size in zip(all_files, file_sizes):
1085
+ duplicate_flag = "🚩" if size == previous_size else ""
1086
+ with st.sidebar.expander(f"File: {file} {duplicate_flag}"):
1087
+ st.text(f"Size: {size} bytes")
1088
+
1089
+ if st.button("View", key=f"view_{file}"):
1090
+ try:
1091
+ with open(file, "r", encoding='utf-8') as f: # Ensure the file is read with UTF-8 encoding
1092
+ file_content = f.read()
1093
+ st.code(file_content, language="markdown")
1094
+ except UnicodeDecodeError:
1095
+ st.error("Failed to decode the file with UTF-8. It might contain non-UTF-8 encoded characters.")
1096
+
1097
+ if st.button("Delete", key=f"delete3_{file}"):
1098
+ os.remove(file)
1099
+ st.rerun()
1100
+ previous_size = size # Update previous size for the next iteration
1101
+
1102
+ if len(file_contents) > 0:
1103
+ if next_action=='open': # For "open", prep session state if it hasn't been yet
1104
+ if 'lastfilename' not in st.session_state:
1105
+ st.session_state['lastfilename'] = ''
1106
+ if 'filename' not in st.session_state:
1107
+ st.session_state['filename'] = ''
1108
+ if 'filetext' not in st.session_state:
1109
+ st.session_state['filetext'] = ''
1110
+ open1, open2 = st.columns(spec=[.8,.2])
1111
+
1112
+ with open1:
1113
+ # Use onchange functions to autoexecute file name and text save functions.
1114
+ file_name_input = st.text_input(key='file_name_input', on_change=SaveFileNameClicked, label="File Name:",value=file_name )
1115
+ file_content_area = st.text_area(key='file_content_area', on_change=SaveFileTextClicked, label="File Contents:", value=file_contents, height=300)
1116
+
1117
+ ShowButtons = False # Buttons are redundant here: the on_change callbacks already save, which is faster and less context-breaking (lower impedance).
1118
+ if ShowButtons:
1119
+ bp1,bp2 = st.columns([.5,.5])
1120
+ with bp1:
1121
+ if st.button(label='💾 Save Name'):
1122
+ SaveFileNameClicked()
1123
+ with bp2:
1124
+ if st.button(label='💾 Save File'):
1125
+ SaveFileTextClicked()
1126
+
1127
+ new_file_content_area = st.session_state['file_content_area']
1128
+ if new_file_content_area != file_contents:
1129
+ st.markdown(new_file_content_area) #changed
1130
+
1131
+ if next_action=='search':
1132
+ filesearch = PromptPrefix + file_contents
1133
+ st.markdown(filesearch)
1134
+ process_text(filesearch)
1135
+
1136
+ if next_action=='md':
1137
+ st.markdown(file_contents)
1138
+ SpeechSynthesis(file_contents)
1139
+
1140
+ buttonlabel = '🔍Run'
1141
+ if st.button(key='Runmd', label = buttonlabel):
1142
+ MODEL = "gpt-4o-2024-05-13"
1143
+ openai.api_key = os.getenv('OPENAI_API_KEY')
1144
+ openai.organization = os.getenv('OPENAI_ORG_ID')
1145
+ client = OpenAI(api_key= os.getenv('OPENAI_API_KEY'), organization=os.getenv('OPENAI_ORG_ID'))
1146
+ st.session_state.messages.append({"role": "user", "content": transcript})
1147
+ with st.chat_message("user"):
1148
+ st.markdown(transcript)
1149
+ with st.chat_message("assistant"):
1150
+ completion = client.chat.completions.create(
1151
+ model=MODEL,
1152
+ messages = st.session_state.messages,
1153
+ stream=True
1154
+ )
1155
+ response = process_text2(text_input=prompt)
1156
+ st.session_state.messages.append({"role": "assistant", "content": response})
1157
+ #try:
1158
+ #search_glossary(file_contents)
1159
+ #except:
1160
+ #st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
1161
+
1162
+ if next_action=='search':
1163
+ file_content_area = st.text_area("File Contents:", file_contents, height=500)
1164
+ user_prompt = file_contents
1165
+ filesearch = PromptPrefix2 + file_content_area
1166
+ st.markdown(filesearch)
1167
+ if st.button(key='rerun', label='🔍Re-Code' ):
1168
+ search_arxiv(filesearch)
1169
+
1170
+ # ----------------------------------------------------- File Sidebar for Jump Gates ------------------------------------------
1171
+
1172
+ # Randomly select a title
1173
+ titles = [
1174
+ "🧠🎭 Semantic Symphonies 🎹🎸 & Episodic Encores 🥁🎻",
1175
+ "🌌🎼 AI Rhythms 🎺🎷 of Memory Lane 🏰",
1176
+ "🎭🎉 Cognitive Crescendos 🎹💃 & Neural Harmonies 🎸🎤",
1177
+ "🧠🎺 Mnemonic Melodies 🎷 & Synaptic Grooves 🥁",
1178
+ "🎼🎸 Straight Outta Cognition ⚙️",
1179
+ "🥁🎻 Jazzy 🎷 Jambalaya 🍛 of AI Memories",
1180
+ "🏰 Semantic 🧠 Soul 🙌 & Episodic 📜 Essence",
1181
+ "🥁🎻 The Music Of AI's Mind 🧠🎭🎉"
1182
+ ]
1183
+ selected_title = random.choice(titles)
1184
+ st.markdown(f"**{selected_title}**")
1185
+
1186
+ FileSidebar()
1187
+
1188
+
1189
+ # ---- Art Card Sidebar with Random Selection of image:
1190
+ def get_image_as_base64(url):
1191
+ response = requests.get(url)
1192
+ if response.status_code == 200:
1193
+ # Convert the image to base64
1194
+ return base64.b64encode(response.content).decode("utf-8")
1195
+ else:
1196
+ return None
1197
+
1198
+ def create_download_link(filename, base64_str):
1199
+ href = f'<a href="data:file/png;base64,{base64_str}" download="{filename}">Download Image</a>'
1200
+ return href
1201
+
1202
+ @st.cache_resource
1203
+ def SideBarImageShuffle():
1204
+ image_urls = [
1205
+ "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/cfhJIasuxLkT5fnaAE6Gj.png",
1206
+ "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/UMo4oWNrrd6RLLzsFxQAi.png",
1207
+ "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/o_EH4cTs5Qxiu7xTZw9I3.png",
1208
+ "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/cmCZ5RTdSx3usMm7MwwWK.png",
1209
+ ]
1210
+
1211
+ selected_image_url = random.choice(image_urls)
1212
+ selected_image_base64 = get_image_as_base64(selected_image_url)
1213
+ if selected_image_base64 is not None:
1214
+ with st.sidebar:
1215
+ st.markdown(f"![image](data:image/png;base64,{selected_image_base64})")
1216
+ else:
1217
+ st.sidebar.write("Failed to load the image.")
1218
+
1219
+ ShowSideImages=False
1220
+ if ShowSideImages:
1221
+ SideBarImageShuffle()
1222
+
1223
+
1224
+
1225
+ # Scoring for feedback: ----------------------------------------------------- emoji
1226
+
1227
+ # Ensure the directory for storing scores exists
1228
+ score_dir = "scores"
1229
+ os.makedirs(score_dir, exist_ok=True)
1230
+
1231
+ # Function to generate a unique key for each button, including an emoji
1232
+ def generate_key(label, header, idx):
1233
+ return f"{header}_{label}_{idx}_key"
1234
+
1235
+ # Function to increment and save score
1236
+ def update_score(key, increment=1):
1237
+ score_file = os.path.join(score_dir, f"{key}.json")
1238
+ if os.path.exists(score_file):
1239
+ with open(score_file, "r") as file:
1240
+ score_data = json.load(file)
1241
+ else:
1242
+ score_data = {"clicks": 0, "score": 0}
1243
+ score_data["clicks"] += increment
1244
+ score_data["score"] += increment
1245
+ with open(score_file, "w") as file:
1246
+ json.dump(score_data, file)
1247
+ return score_data["score"]
1248
+
1249
+ # Function to load score
1250
+ def load_score(key):
1251
+ score_file = os.path.join(score_dir, f"{key}.json")
1252
+ if os.path.exists(score_file):
1253
+ with open(score_file, "r") as file:
1254
+ score_data = json.load(file)
1255
+ return score_data["score"]
1256
+ return 0
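+ # Scores persist as one JSON file per key under scores/, e.g. scores/<key>.json -> {"clicks": 3, "score": 3}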
1257
+
1258
+
1259
+
1260
+ # Function to display the glossary in a structured format
1261
+ def display_glossary(glossary, area):
1262
+ if area in glossary:
1263
+ st.subheader(f"📘 Glossary for {area}")
1264
+ for game, terms in glossary[area].items():
1265
+ st.markdown(f"### {game}")
1266
+ for idx, term in enumerate(terms, start=1):
1267
+ st.write(f"{idx}. {term}")
1268
+
1269
+ # Image Prompt
1270
+ def display_images_and_wikipedia_summaries(num_columns=4):
1271
+ image_files = [f for f in os.listdir('.') if f.endswith('.png')]
1272
+ if not image_files:
1273
+ st.write("No PNG images found in the current directory.")
1274
+ return
1275
+
1276
+ image_files_sorted = sorted(image_files, key=lambda x: len(x.split('.')[0]))
1277
+
1278
+ cols = st.columns(num_columns) # Use specified num_columns for layout
1279
+ col_index = 0 # Initialize column index for cycling through columns
1280
+
1281
+ errored = False
1282
+ for image_file in image_files_sorted:
1283
+ with cols[col_index % num_columns]: # Cycle through columns based on num_columns
1284
+ try:
1285
+ image = Image.open(image_file)
1286
+ #st.image(image, caption=image_file, use_column_width=True)
1287
+ st.image(image, use_column_width=True)
1288
+ k = image_file.split('.')[0] # Assumes keyword is the file name without extension
1289
+ display_glossary_entity(k)
1290
+
1291
+ # Add text input for image file
1292
+ #image_text_input = st.text_input(f"Image Prompt for {image_file}", key=f"image_prompt_{image_file}")
1293
+ image_text_input = st.text_input(f"Image Prompt:", key=f"image_prompt_{image_file}")
1294
+ #image_text_input = st.text_input(key=f"image_prompt_{image_file}")
1295
+ if (len(image_text_input) > 0):
1296
+ #image_response = process_image(image, image_text_input)
1297
+ image_response = process_image(image_file, image_text_input)
1298
+
1299
+ with st.chat_message(name="ai", avatar="🦖"):
1300
+ st.markdown(image_response)
1301
+ except:
1302
+ errored = True
1303
+
1304
+ col_index += 1 # Increment to move to the next column in the next iteration
1305
+
1306
+ def display_videos_and_links(num_columns):
1307
+ #video_files = [f for f in os.listdir('.') if f.endswith('.mp4')]
1308
+ #video_files = [f for f in os.listdir('.') if f.endswith('.webm')]
1309
+ video_files = [f for f in os.listdir('.') if f.endswith(('.mp4', '.webm'))]
1310
+
1311
+ if not video_files:
1312
+ st.write("No MP4 videos found in the current directory.")
1313
+ return
1314
+
1315
+ video_files_sorted = sorted(video_files, key=lambda x: len(x.split('.')[0]))
1316
+ cols = st.columns(num_columns) # Define num_columns columns outside the loop
1317
+ col_index = 0 # Initialize column index
1318
+
1319
+ for video_file in video_files_sorted:
1320
+ with cols[col_index % num_columns]: # Use modulo to alternate between columns
1321
+ k = video_file.split('.')[0] # Assumes keyword is the file name without extension
1322
+ st.video(video_file, format='video/mp4', start_time=0)
1323
+ display_glossary_entity(k)
1324
+
1325
+ # Add text input for video file
1326
+ video_text_input = st.text_input(f"Video Prompt for {video_file}", key=f"video_prompt_{video_file}")
1327
+ if video_text_input:
1328
+ try:
1329
+ seconds_per_frame = 10
1330
+ process_video(video_file, seconds_per_frame)
1331
+ except ValueError:
1332
+ st.error(f"Invalid input for seconds per frame: {video_text_input}. Please enter a valid number.")
1333
+
1334
+ col_index += 1 # Increment column index to place the next video in the next column
1335
+
1336
+ def get_all_query_params(key):
1337
+ return st.query_params.get_all(key) # st.query_params is a property, not callable; get_all returns every value for the key
+
+ def clear_query_params():
+ st.query_params.clear() # removes all query parameters from the URL
1341
+
1342
+ # Function to display content or image based on a query
1343
+ #@st.cache_resource
1344
+ def display_content_or_image(query):
1345
+ for category, terms in transhuman_glossary.items():
1346
+ for term in terms:
1347
+ if query.lower() in term.lower():
1348
+ st.subheader(f"Found in {category}:")
1349
+ st.write(term)
1350
+ return True # Return after finding and displaying the first match
1351
+ image_dir = "images" # Example directory where images are stored
1352
+ image_path = f"{image_dir}/{query}.png" # Construct image path with query
1353
+ if os.path.exists(image_path):
1354
+ st.image(image_path, caption=f"Image for {query}")
1355
+ return True
1356
+ st.warning("No matching content or image found.")
1357
+ return False
1358
+
1359
+ game_emojis = {
1360
+ "Dungeons and Dragons": "🐉",
1361
+ "Call of Cthulhu": "🐙",
1362
+ "GURPS": "🎲",
1363
+ "Pathfinder": "🗺️",
1364
+ "Kindred of the East": "🌅",
1365
+ "Changeling": "🍃",
1366
+ }
1367
+
1368
+ topic_emojis = {
1369
+ "Core Rulebooks": "📚",
1370
+ "Maps & Settings": "🗺️",
1371
+ "Game Mechanics & Tools": "⚙️",
1372
+ "Monsters & Adversaries": "👹",
1373
+ "Campaigns & Adventures": "📜",
1374
+ "Creatives & Assets": "🎨",
1375
+ "Game Master Resources": "🛠️",
1376
+ "Lore & Background": "📖",
1377
+ "Character Development": "🧍",
1378
+ "Homebrew Content": "🔧",
1379
+ "General Topics": "🌍",
1380
+ }
1381
+
1382
+ # Adjusted display_buttons_with_scores function
1383
+ def display_buttons_with_scores(num_columns_text):
1384
+ for category, games in roleplaying_glossary.items():
1385
+ category_emoji = topic_emojis.get(category, "🔍") # Default to search icon if no match
1386
+ st.markdown(f"## {category_emoji} {category}")
1387
+ for game, terms in games.items():
1388
+ game_emoji = game_emojis.get(game, "🎮") # Default to generic game controller if no match
1389
+ for term in terms:
1390
+ key = f"{category}_{game}_{term}".replace(' ', '_').lower()
1391
+ score = load_score(key)
1392
+ if st.button(f"{game_emoji} {category} {game} {term} {score}", key=key):
1393
+ newscore = update_score(key.replace('?',''))
1394
+ query_prefix = f"{category_emoji} {game_emoji} ** {category} - {game} - {term} - **"
1395
+ st.markdown("Scored " + query_prefix + ' with score ' + str(newscore) + '.')
1396
+
1397
+
1398
1403
+
1404
+
1405
+
1406
+
1407
+ # 3. Stream Llama Response
1408
+ @st.cache_resource
1409
+ def StreamLLMChatResponse(prompt):
1410
+ try:
1411
+ endpoint_url = API_URL
1412
+ hf_token = API_KEY
1413
+ st.write('Running client ' + endpoint_url)
1414
+ client = InferenceClient(endpoint_url, token=hf_token)
1415
+ gen_kwargs = dict(
1416
+ max_new_tokens=512,
1417
+ top_k=30,
1418
+ top_p=0.9,
1419
+ temperature=0.2,
1420
+ repetition_penalty=1.02,
1421
+ stop_sequences=["\nUser:", "<|endoftext|>", "</s>"],
1422
+ )
1423
+ stream = client.text_generation(prompt, stream=True, details=True, **gen_kwargs)
1424
+ report=[]
1425
+ res_box = st.empty()
1426
+ collected_chunks=[]
1427
+ collected_messages=[]
1428
+ allresults=''
1429
+ for r in stream:
1430
+ if r.token.special:
1431
+ continue
1432
+ if r.token.text in gen_kwargs["stop_sequences"]:
1433
+ break
1434
+ collected_chunks.append(r.token.text)
1435
+ chunk_message = r.token.text
1436
+ collected_messages.append(chunk_message)
1437
+ try:
1438
+ report.append(r.token.text)
1439
+ if len(r.token.text) > 0:
1440
+ result="".join(report).strip()
1441
+ res_box.markdown(f'*{result}*')
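+ # partial output: re-render the accumulated text in the res_box placeholder as each streamed token arrives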
1442
+
1443
+ except:
1444
+ st.write('Stream llm issue')
1445
+ SpeechSynthesis(result)
1446
+ return result
1447
+ except:
1448
+ st.write('Llama model is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
1449
+
1450
+ # 4. Run query with payload
1451
+ def query(payload):
1452
+ response = requests.post(API_URL, headers=headers, json=payload)
1453
+ st.markdown(response.json())
1454
+ return response.json()
1455
+
1456
+ def get_output(prompt):
1457
+ return query({"inputs": prompt})
1458
+
1459
+ # 6. Speech transcription via OpenAI service
1460
+ def transcribe_audio(openai_key, file_path, model):
1461
+ openai.api_key = openai_key
1462
+ OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
1463
+ headers = {
1464
+ "Authorization": f"Bearer {openai_key}",
1465
+ }
1466
+ with open(file_path, 'rb') as f:
1467
+ data = {'file': f}
1468
+ st.write('STT transcript ' + OPENAI_API_URL)
1469
+ response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model})
1470
+ if response.status_code == 200:
1471
+ st.write(response.json())
1472
+ chatResponse = chat_with_model(response.json().get('text'), '') # *************************************
1473
+ transcript = response.json().get('text')
1474
+ filename = generate_filename(transcript, 'txt')
1475
+ response = chatResponse
1476
+ user_prompt = transcript
1477
+ create_file(filename, user_prompt, response, should_save)
1478
+ return transcript
1479
+ else:
1480
+ st.write(response.json())
1481
+ st.error("Error in API call.")
1482
+ return None
1483
+
1484
+ # 7. Auto stop on silence audio control for recording WAV files
1485
+ def save_and_play_audio(audio_recorder):
1486
+ audio_bytes = audio_recorder(key='audio_recorder')
1487
+ if audio_bytes:
1488
+ filename = generate_filename("Recording", "wav")
1489
+ with open(filename, 'wb') as f:
1490
+ f.write(audio_bytes)
1491
+ st.audio(audio_bytes, format="audio/wav")
1492
+ return filename
1493
+ return None
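+ # Writes the recorded bytes to a timestamped .wav file and plays it back inline; returns the filename, or None if nothing was recorded.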
1494
+
1495
+ # 8. File creator that interprets type and creates output file for text, markdown and code
1496
+ def create_file(filename, prompt, response, should_save=True):
1497
+ if not should_save:
1498
+ return
1499
+ base_filename, ext = os.path.splitext(filename)
1500
+ if ext in ['.txt', '.htm', '.md']:
1501
+
1502
+
1503
+ with open(f"{base_filename}.md", 'w', encoding='utf-8') as file:
1504
+ file.write(response)
1505
+
1506
+ # Code Interpreter
1507
+ #has_python_code = re.search(r"```python([\s\S]*?)```", prompt.strip() + '\r\n' + response)
1508
+ #has_python_code = bool(re.search(r"```python([\s\S]*?)```", prompt.strip() + '\r\n' + response))
1509
+ #if has_python_code:
1510
+ # python_code = re.findall(r"```python([\s\S]*?)```", response)[0].strip()
1511
+ # with open(f"{base_filename}-Code.py", 'w') as file:
1512
+ # file.write(python_code)
1513
+ # with open(f"{base_filename}.md", 'w') as file:
1514
+ # content = prompt.strip() + '\r\n' + response
1515
+ # file.write(content)
1516
+
1517
+ def truncate_document(document, length):
1518
+ return document[:length]
1519
+ def divide_document(document, max_length):
1520
+ return [document[i:i+max_length] for i in range(0, len(document), max_length)]
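+ # e.g. divide_document("abcdefgh", 3) -> ["abc", "def", "gh"]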
1521
+
1522
+ def CompressXML(xml_text):
1523
+ root = ET.fromstring(xml_text)
+ # ElementTree elements have no .parent attribute; build a child-to-parent map so comment-like nodes can be removed
+ parent_map = {child: parent for parent in root.iter() for child in parent}
+ for elem in list(root.iter()):
+ if isinstance(elem.tag, str) and 'Comment' in elem.tag:
+ if elem in parent_map:
+ parent_map[elem].remove(elem)
1527
+ return ET.tostring(root, encoding='unicode', method="xml")
1528
+
1529
+ # 10. Read in and provide UI for past files
1530
+ @st.cache_resource
1531
+ def read_file_content(file,max_length):
1532
+ if file.type == "application/json":
1533
+ content = json.load(file)
1534
+ return str(content)
1535
+ elif file.type == "text/html" or file.type == "text/htm":
1536
+ content = BeautifulSoup(file, "html.parser")
1537
+ return content.text
1538
+ elif file.type == "application/xml" or file.type == "text/xml":
1539
+ tree = ET.parse(file)
1540
+ root = tree.getroot()
1541
+ xml = CompressXML(ET.tostring(root, encoding='unicode'))
1542
+ return xml
1543
+ elif file.type == "text/markdown" or file.type == "text/md":
1544
+ md = mistune.create_markdown()
1545
+ content = md(file.read().decode())
1546
+ return content
1547
+ elif file.type == "text/plain":
1548
+ return file.getvalue().decode()
1549
+ else:
1550
+ return ""
1551
+
1552
+
1553
+ # 11. Chat with GPT - Caution on quota - now favoring fastest AI pipeline STT Whisper->LLM Llama->TTS
1554
+ @st.cache_resource
1555
+ def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'): # gpt-4-0125-preview gpt-3.5-turbo
1556
+ model = model_choice
1557
+ conversation = [{'role': 'system', 'content': 'You are a coder, inventor, and writer of quotes on wisdom as a helpful expert in all fields of health, math, development and AI using python.'}]
1558
+ conversation.append({'role': 'user', 'content': prompt})
1559
+ if len(document_section)>0:
1560
+ conversation.append({'role': 'assistant', 'content': document_section})
1561
+ start_time = time.time()
1562
+ report = []
1563
+ res_box = st.empty()
1564
+ collected_chunks = []
1565
+ collected_messages = []
1566
+
1567
+ for chunk in openai.ChatCompletion.create(model=model_choice, messages=conversation, temperature=0.5, stream=True):
1568
+ collected_chunks.append(chunk)
1569
+ chunk_message = chunk['choices'][0]['delta']
1570
+ collected_messages.append(chunk_message)
1571
+ content=chunk["choices"][0].get("delta",{}).get("content")
1572
+ try:
1573
+ report.append(content)
1574
+ if len(content) > 0:
1575
+ result = "".join(report).strip()
1576
+ res_box.markdown(f'*{result}*')
1577
+ except:
1578
+ st.write(' ')
1579
+ full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
1580
+ st.write("Elapsed time:")
1581
+ st.write(time.time() - start_time)
1582
+ return full_reply_content
1583
+
1584
+ # 11.1 Chat with model 4.5 (gpt-4-0125-preview)
1585
+ @st.cache_resource
1586
+ def chat_with_model45(prompt, document_section='', model_choice='gpt-4-0125-preview'): # gpt-4-0125-preview gpt-3.5-turbo
1587
+ model = model_choice
1588
+ conversation = [{'role': 'system', 'content': 'You are a coder, inventor, and writer of quotes on wisdom as a helpful expert in all fields of health, math, development and AI using python.'}]
1589
+ conversation.append({'role': 'user', 'content': prompt})
1590
+ if len(document_section)>0:
1591
+ conversation.append({'role': 'assistant', 'content': document_section})
1592
+ start_time = time.time()
1593
+ report = []
1594
+ res_box = st.empty()
1595
+ collected_chunks = []
1596
+ collected_messages = []
1597
+
1598
+ for chunk in openai.ChatCompletion.create(model=model_choice, messages=conversation, temperature=0.5, stream=True):
1599
+ collected_chunks.append(chunk)
1600
+ chunk_message = chunk['choices'][0]['delta']
1601
+ collected_messages.append(chunk_message)
1602
+ content=chunk["choices"][0].get("delta",{}).get("content")
1603
+ try:
1604
+ report.append(content)
1605
+ if len(content) > 0:
1606
+ result = "".join(report).strip()
1607
+ res_box.markdown(f'*{result}*')
1608
+ except:
1609
+ st.write(' ')
1610
+ full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
1611
+ st.write("Elapsed time:")
1612
+ st.write(time.time() - start_time)
1613
+ return full_reply_content
1614
+
1615
+ @st.cache_resource
1616
+ def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'): # gpt-4-0125-preview gpt-3.5-turbo
1617
+ #def chat_with_file_contents(prompt, file_content, model_choice='gpt-4-0125-preview'): # gpt-4-0125-preview gpt-3.5-turbo
1618
+ conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
1619
+ conversation.append({'role': 'user', 'content': prompt})
1620
+ if len(file_content)>0:
1621
+ conversation.append({'role': 'assistant', 'content': file_content})
1622
+ response = openai.ChatCompletion.create(model=model_choice, messages=conversation)
1623
+ return response['choices'][0]['message']['content']
1624
+
1625
+
1626
+ def extract_mime_type(file):
1627
+ if isinstance(file, str):
1628
+ pattern = r"type='(.*?)'"
1629
+ match = re.search(pattern, file)
1630
+ if match:
1631
+ return match.group(1)
1632
+ else:
1633
+ raise ValueError(f"Unable to extract MIME type from {file}")
1634
+ elif hasattr(file, "type"): # Streamlit UploadedFile objects expose .type; the bare name streamlit is not imported, so isinstance against streamlit.UploadedFile would fail
+ return file.type
+ else:
+ raise TypeError("Input should be a string or a Streamlit UploadedFile object")
1638
+
1639
+ def extract_file_extension(file):
1640
+ # get the file name directly from the UploadedFile object
1641
+ file_name = file.name
1642
+ pattern = r".*?\.(.*?)$"
1643
+ match = re.search(pattern, file_name)
1644
+ if match:
1645
+ return match.group(1)
1646
+ else:
1647
+ raise ValueError(f"Unable to extract file extension from {file_name}")
1648
+
1649
+ # Normalize input as text from PDF and other formats
1650
+ @st.cache_resource
1651
+ def pdf2txt(docs):
1652
+ text = ""
1653
+ for file in docs:
1654
+ file_extension = extract_file_extension(file)
1655
+ st.write(f"File type extension: {file_extension}")
1656
+ if file_extension.lower() in ['py', 'txt', 'html', 'htm', 'xml', 'json']:
1657
+ text += file.getvalue().decode('utf-8')
1658
+ elif file_extension.lower() == 'pdf':
1659
+ from PyPDF2 import PdfReader
1660
+ pdf = PdfReader(BytesIO(file.getvalue()))
1661
+ for page in range(len(pdf.pages)):
1662
+ text += pdf.pages[page].extract_text() # new PyPDF2 syntax
1663
+ return text
1664
+
1665
+ def txt2chunks(text):
1666
+ text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len)
1667
+ return text_splitter.split_text(text)
1668
+
1669
+ # Vector Store using FAISS
1670
+ @st.cache_resource
1671
+ def vector_store(text_chunks):
1672
+ embeddings = OpenAIEmbeddings(openai_api_key=key)
1673
+ return FAISS.from_texts(texts=text_chunks, embedding=embeddings)
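+ # Embeds each chunk with OpenAIEmbeddings (api key taken from the surrounding 'key' variable) and stores the vectors in an in-memory FAISS index.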
1674
+
1675
+ # Memory and Retrieval chains
1676
+ @st.cache_resource
1677
+ def get_chain(vectorstore):
1678
+ llm = ChatOpenAI()
1679
+ memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
1680
+ return ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectorstore.as_retriever(), memory=memory)
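+ # Wires the FAISS retriever, a ChatOpenAI LLM, and a conversation buffer memory into a LangChain ConversationalRetrievalChain.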
1681
+
1682
+ def process_user_input(user_question):
1683
+ response = st.session_state.conversation({'question': user_question})
1684
+ st.session_state.chat_history = response['chat_history']
1685
+ for i, message in enumerate(st.session_state.chat_history):
1686
+ template = user_template if i % 2 == 0 else bot_template
1687
+ st.write(template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
1688
+ filename = generate_filename(user_question, 'txt')
1689
+ response = message.content
1690
+ user_prompt = user_question
1691
+
1692
+ create_and_save_file(response, file_type="md", prompt=user_prompt, is_image=False, should_save=True) # the new
1693
+
1694
+ #create_file(filename, user_prompt, response, should_save)
1695
+
1696
+ def divide_prompt(prompt, max_length):
1697
+ words = prompt.split()
1698
+ chunks = []
1699
+ current_chunk = []
1700
+ current_length = 0
1701
+ for word in words:
1702
+ if len(word) + current_length <= max_length:
1703
+ current_length += len(word) + 1
1704
+ current_chunk.append(word)
1705
+ else:
1706
+ chunks.append(' '.join(current_chunk))
1707
+ current_chunk = [word]
1708
+ current_length = len(word)
1709
+ chunks.append(' '.join(current_chunk))
1710
+ return chunks
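+ # Splits a prompt on word boundaries into chunks of at most max_length characters (a simple greedy packing).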
1711
+
1712
+
1713
+
1714
+ API_URL_IE = f'https://tonpixzfvq3791u9.us-east-1.aws.endpoints.huggingface.cloud'
1715
+ API_URL_IE = "https://api-inference.huggingface.co/models/openai/whisper-small.en"
1716
+ MODEL2 = "openai/whisper-small.en"
1717
+ MODEL2_URL = "https://huggingface.co/openai/whisper-small.en"
1718
+ HF_KEY = st.secrets['HF_KEY']
1719
+ headers = {
1720
+ "Authorization": f"Bearer {HF_KEY}",
1721
+ "Content-Type": "audio/wav"
1722
+ }
1723
+
1724
+ def query(filename):
1725
+ with open(filename, "rb") as f:
1726
+ data = f.read()
1727
+ response = requests.post(API_URL_IE, headers=headers, data=data)
1728
+ return response.json()
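+ # Sends the raw audio bytes to the Hugging Face Inference API endpoint above (whisper-small.en) and returns the JSON transcription.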
1729
+
1730
+ def generate_filename(prompt, file_type):
1731
+ central = pytz.timezone('US/Central')
1732
+ safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
1733
+ replaced_prompt = prompt.replace(" ", "_").replace("\n", "_")
1734
+ safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90]
1735
+ return f"{safe_date_time}_{safe_prompt}.{file_type}"
1736
+
1737
+ # 15. Audio recorder to Wav file
1738
+ def save_and_play_audio(audio_recorder):
1739
+ audio_bytes = audio_recorder()
1740
+ if audio_bytes:
1741
+ filename = generate_filename("Recording", "wav")
1742
+ with open(filename, 'wb') as f:
1743
+ f.write(audio_bytes)
1744
+ st.audio(audio_bytes, format="audio/wav")
1745
+ return filename
1746
+
1747
+ # 16. Speech transcription to file output
1748
+ def transcribe_audio(filename):
1749
+ output = query(filename)
1750
+ return output
1751
+
1752
+
1753
+ # Sample function to demonstrate a response, replace with your own logic
1754
+ def StreamMedChatResponse(topic):
1755
+ st.write(f"Showing resources or questions related to: {topic}")
1756
+
1757
+ # Function to encode file to base64
1758
+ def get_base64_encoded_file(file_path):
1759
+ with open(file_path, "rb") as file:
1760
+ return base64.b64encode(file.read()).decode()
1761
+
1762
+ # Function to create a download link
1763
+ def get_audio_download_link(file_path):
1764
+ base64_file = get_base64_encoded_file(file_path)
1765
+ return f'<a href="data:file/wav;base64,{base64_file}" download="{os.path.basename(file_path)}">⬇️ Download Audio</a>'
1766
+
1767
+
1768
+
1769
+
1770
+
1771
+ GiveFeedback=False
1772
+ if GiveFeedback:
1773
+ with st.expander("Give your feedback 👍", expanded=False):
1774
+ feedback = st.radio("Step 8: Give your feedback", ("👍 Upvote", "👎 Downvote"))
1775
+ if feedback == "👍 Upvote":
1776
+ st.write("You upvoted 👍. Thank you for your feedback!")
1777
+ else:
1778
+ st.write("You downvoted 👎. Thank you for your feedback!")
1779
+ load_dotenv()
1780
+ st.write(css, unsafe_allow_html=True)
1781
+ st.header("Chat with documents :books:")
1782
+ user_question = st.text_input("Ask a question about your documents:")
1783
+ if user_question:
1784
+ process_user_input(user_question)
1785
+ with st.sidebar:
1786
+ st.subheader("Your documents")
1787
+ docs = st.file_uploader("import documents", accept_multiple_files=True)
1788
+ with st.spinner("Processing"):
1789
+ raw = pdf2txt(docs)
1790
+ if len(raw) > 0:
1791
+ length = str(len(raw))
1792
+ text_chunks = txt2chunks(raw)
1793
+ vectorstore = vector_store(text_chunks)
1794
+ st.session_state.conversation = get_chain(vectorstore)
1795
+ st.markdown('# AI Search Index of Length:' + length + ' Created.') # add timing
1796
+ filename = generate_filename(raw, 'txt')
1797
+ create_file(filename, raw, '', should_save)
1798
+
1799
+ # ⚙️q= Run ArXiv search from query parameters
1800
+ try:
1801
+ query_params = st.query_params
1802
+ query = (query_params.get('q') or query_params.get('query') or '') # st.query_params.get returns a string in the current API, so default to '' rather than a list
1803
+ if len(query) > 1:
1804
+ #result = search_arxiv(query)
1805
+ #result2 = search_glossary(result)
1806
+
1807
+ filesearch = PromptPrefix + query
1808
+ st.markdown(filesearch)
1809
+ process_text(filesearch)
1810
+ except:
1811
+ st.markdown(' ')
1812
+
1813
+ if 'action' in st.query_params:
1814
+ action = st.query_params['action'] # st.query_params is subscriptable and returns the 'action' value directly
1815
+ if action == 'show_message':
1816
+ st.success("Showing a message because 'action=show_message' was found in the URL.")
1817
+ elif action == 'clear':
1818
+ clear_query_params()
1819
+ #st.rerun()
1820
+
1821
+ if 'query' in st.query_params:
1822
+ query = st.query_params['query'] # Get the query parameter value (indexing [0] would return only its first character)
1823
+ # Display content or image based on the query
1824
+ display_content_or_image(query)
1825
+
1826
+ def transcribe_canary(filename):
1827
+ from gradio_client import Client
1828
+
1829
+ client = Client("https://awacke1-speech-recognition-canary-nvidiat4.hf.space/")
1830
+ result = client.predict(
1831
+ filename, # filepath in 'parameter_5' Audio component
1832
+ "English", # Literal['English', 'Spanish', 'French', 'German'] in 'Input audio is spoken in:' Dropdown component
1833
+ "English", # Literal['English', 'Spanish', 'French', 'German'] in 'Transcribe in language:' Dropdown component
1834
+ True, # bool in 'Punctuation & Capitalization in transcript?' Checkbox component
1835
+ api_name="/transcribe"
1836
+ )
1837
+ st.write(result)
1838
+ return result
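+ # Calls the hosted NVIDIA Canary Space over gradio_client; the positional arguments mirror the Space's /transcribe signature.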
1839
+
1840
+
1841
+ def transcribe_audio(file_path, model):
1842
+ key = os.getenv('OPENAI_API_KEY')
1843
+ headers = {
1844
+ "Authorization": f"Bearer {key}",
1845
+ }
1846
+ with open(file_path, 'rb') as f:
1847
+ data = {'file': f}
1848
+ st.write("Read file {file_path}", file_path)
1849
+ OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
1850
+ response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model})
1851
+ if response.status_code == 200:
1852
+ st.write(response.json())
1853
+ chatResponse = chat_with_model(response.json().get('text'), '') # *************************************
1854
+ transcript = response.json().get('text')
1855
+ #st.write('Responses:')
1856
+ #st.write(chatResponse)
1857
+ filename = generate_filename(transcript, 'txt')
1858
+ #create_file(filename, transcript, chatResponse)
1859
+ response = chatResponse
1860
+ user_prompt = transcript
1861
+ create_file(filename, user_prompt, response, should_save)
1862
+ return transcript
1863
+ else:
1864
+ st.write(response.json())
1865
+ st.error("Error in API call.")
1866
+ return None
1867
+
1868
+
1869
+ def transcribe_whisperLTurbo(filename):
1870
+ client = Client("hf-audio/whisper-large-v3-turbo")
1871
+ result = client.predict(
1872
+ inputs=handle_file('https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav'),
1873
+ task="transcribe",
1874
+ api_name="/predict"
1875
+ )
1876
+ st.write(result)
1877
+ return result
1878
+
1879
+ # Transcript to arxiv and client chat completion ------------------------- !!
1880
+ filename = save_and_play_audio(audio_recorder)
1881
+ if filename is not None: # whisper1
1882
+ try:
1883
+ transcript = transcribe_audio(filename, "whisper-1")
1884
+ st.markdown(transcript)
1885
+ result = search_arxiv(transcript)
1886
+ with st.chat_message("user"):
1887
+ st.markdown(transcript)
1888
+ st.session_state.messages.append({"role": "user", "content": transcript})
1889
+ with st.chat_message("assistant"):
1890
+ st.markdown(result)
1891
+ st.session_state.messages.append({"role": "assistant", "content": result})
1892
+ except:
1893
+ st.write(' ')
1894
+ filename = None
1895
+
1896
+
1897
+ # Scholary ArXiV Search ------------------------- !!
1898
+ # Use st.session_state so the search history survives reruns (a plain dict would be rebuilt on every run)
+ session_state = st.session_state
+ if "search_queries" not in session_state:
+ session_state["search_queries"] = []
1901
+
1902
+ example_input = st.text_input("AI Search ArXiV Scholarly Articles", value=session_state["search_queries"][-1] if session_state["search_queries"] else "")
1903
+
1904
+ if example_input:
1905
+ session_state["search_queries"].append(example_input)
1906
+ query=example_input
1907
+ if query:
1908
+ result = search_arxiv(query)
1909
+ #search_glossary(query)
1910
+ #search_glossary(result)
1911
+ st.markdown(' ')
1912
+
1913
+ #st.write("Search history:")
1914
+ for example_input in session_state["search_queries"]:
1915
+ st.write(example_input)
1916
+
1917
+ openai.api_key = os.getenv('OPENAI_API_KEY')
1918
+ if openai.api_key is None: openai.api_key = st.secrets['OPENAI_API_KEY']
1919
+ menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
1920
+ choice = st.sidebar.selectbox("Output File Type:", menu)
1921
+
1922
+ AddAFileForContext=False
1923
+ if AddAFileForContext:
1924
+
1925
+ collength, colupload = st.columns([2,3]) # adjust the ratio as needed
1926
+ with collength:
1927
+ #max_length = st.slider(key='maxlength', label="File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
1928
+ max_length = 128000
1929
+ with colupload:
1930
+ uploaded_file = st.file_uploader("Add a file for context:", type=["pdf", "xml", "json", "xlsx", "csv", "html", "htm", "md", "txt"])
1931
+ document_sections = deque()
1932
+ document_responses = {}
1933
+ if uploaded_file is not None:
1934
+ file_content = read_file_content(uploaded_file, max_length)
1935
+ document_sections.extend(divide_document(file_content, max_length))
1936
+
1937
+
1938
+ if len(document_sections) > 0:
1939
+ if st.button("👁️ View Upload"):
1940
+ st.markdown("**Sections of the uploaded file:**")
1941
+ for i, section in enumerate(list(document_sections)):
1942
+ st.markdown(f"**Section {i+1}**\n{section}")
1943
+
1944
+ st.markdown("**Chat with the model:**")
1945
+ for i, section in enumerate(list(document_sections)):
1946
+ if i in document_responses:
1947
+ st.markdown(f"**Section {i+1}**\n{document_responses[i]}")
1948
+ else:
1949
+ if st.button(f"Chat about Section {i+1}"):
1950
+ st.write('Reasoning with your inputs...')
+ user_prompt = f"Chat about Section {i+1}" # user_prompt was undefined in this branch
+ response = chat_with_model(user_prompt, section) # the original referenced response without ever producing one
+ st.write('Response:')
+ st.write(response)
+ document_responses[i] = response
+ filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
+ create_file(filename, user_prompt, response, should_save)
1956
+ st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
1957
+
1958
+
1959
+
1960
+
1961
+ def main():
1962
+ st.markdown("##### GPT-4o Omni Model: Text, Audio, Image, & Video")
1963
+ option = st.selectbox("Select an option", ("Text", "Image", "Audio", "Video"))
1964
+ if option == "Text":
1965
+ text_input = st.text_input("Enter your text:")
1966
+ if text_input: # any non-empty string
1967
+ textResponse = process_text(text_input)
1968
+
1969
+ elif option == "Image":
1970
+ text = "Help me understand what is in this picture and list ten facts as markdown outline with appropriate emojis that describes what you see."
1971
+ text_input = st.text_input(label="Enter text prompt to use with Image context.", value=text)
1972
+ image_input = st.file_uploader("Upload an image", type=["png"])
1973
+ if (image_input is not None):
1974
+ image_response = process_image(image_input, text_input)
1975
+
1976
+ with st.chat_message(name="ai", avatar="🦖"):
1977
+ st.markdown(image_response)
1978
+
1979
+ elif option == "Audio":
1980
+ text = "You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."
1981
+ text_input = st.text_input(label="Enter text prompt to use with Audio context.", value=text)
1982
+ uploaded_files = st.file_uploader("Upload an audio file", type=["mp3", "wav"], accept_multiple_files=True)
1983
+
1984
+ for audio_input in uploaded_files:
1985
+ st.write(audio_input.name)
1986
+ if audio_input is not None:
1987
+ process_audio(audio_input, text_input)
1988
+
1989
+ elif option == "Audio old":
1990
+ text = "You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."
1991
+ text_input = st.text_input(label="Enter text prompt to use with Audio context.", value=text)
1992
+
1993
+ uploaded_files = st.file_uploader("Upload an audio file", type=["mp3", "wav"], accept_multiple_files=True)
1994
+ for audio_input in uploaded_files:
1995
+ st.write(audio_input.name)
1996
+
1997
+ if audio_input is not None:
1998
+ # To read file as bytes:
1999
+ bytes_data = audio_input.getvalue() # read the file as bytes; uploaded_file was undefined here
2000
+
2001
+
2002
+ process_audio(audio_input, text_input)
2003
+
2004
+ elif option == "Video":
2005
+ video_input = st.file_uploader("Upload a video file", type=["mp4"])
2006
+ process_audio_and_video(video_input)
2007
+
2008
+ # Enter the GPT-4o omni model in streamlit chatbot
2009
+ current_messages=[]
2010
+ for message in st.session_state.messages:
2011
+ with st.chat_message(message["role"]):
2012
+ current_messages.append(message)
2013
+ st.markdown(message["content"])
2014
+
2015
+ # 🎵 Wav Audio files - Transcription History in Wav
2016
+ audio_files = glob.glob("*.wav")
2017
+ audio_files = [file for file in audio_files if len(os.path.splitext(file)[0]) >= 10] # exclude files with short names
2018
+ audio_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order
2019
+
2020
+ # 🖼 PNG Image files
2021
+ image_files = glob.glob("*.png")
2022
+ image_files = [file for file in image_files if len(os.path.splitext(file)[0]) >= 10] # exclude files with short names
2023
+ image_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order
2024
+
2025
+ # 🎥 MP4 Video files
2026
+ video_files = glob.glob("*.mp4")
2027
+ video_files = [file for file in video_files if len(os.path.splitext(file)[0]) >= 10] # exclude files with short names
2028
+ video_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order
2029
+
2030
+ # 🎵 MP3 Audio files
2031
+ video_files_mp3 = glob.glob("*.mp3")
2032
+ video_files_mp3 = [file for file in video_files_mp3 if len(os.path.splitext(file)[0]) >= 10] # exclude files with short names
2033
+ video_files_mp3.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order
2034
+
2035
+ main()
2036
+
2037
+ # Delete All button for each file type
2038
+ if st.sidebar.button("🗑 Delete All Audio"):
2039
+ for file in audio_files:
2040
+ os.remove(file)
2041
+ st.rerun()
2042
+
2043
+ if st.sidebar.button("🗑 Delete All Images"):
2044
+ for file in image_files:
2045
+ os.remove(file)
2046
+ st.rerun()
2047
+
2048
+ if st.sidebar.button("🗑 Delete All MP4 Videos"):
2049
+ for file in video_files:
2050
+ os.remove(file)
2051
+ st.rerun()
2052
+
2053
+ if st.sidebar.button("🗑 Delete All MP3 Videos"):
2054
+ for file in video_files_mp3:
2055
+ os.remove(file)
2056
+ st.rerun()
2057
+
2058
+ # Display and handle audio files
2059
+ for file in audio_files:
2060
+ col1, col2 = st.sidebar.columns([6, 1]) # adjust the ratio as needed
2061
+ with col1:
2062
+ st.markdown(file)
2063
+ if st.button("🎵", key="play_" + file): # play emoji button
2064
+ audio_file = open(file, 'rb')
2065
+ audio_bytes = audio_file.read()
2066
+ st.audio(audio_bytes, format='audio/wav')
2067
+ with col2:
2068
+ if st.button("🗑", key="delete_" + file):
2069
+ os.remove(file)
2070
+ st.rerun()
2071
+
2072
+ # Display and handle image files
2073
+ for file in image_files:
2074
+ col1, col2 = st.sidebar.columns([6, 1]) # adjust the ratio as needed
2075
+ with col1:
2076
+ st.markdown(file)
2077
+ if st.button("🖼", key="show_" + file): # show emoji button
2078
+ image = open(file, 'rb').read()
2079
+ st.image(image)
2080
+ with col2:
2081
+ if st.button("🗑", key="delete_" + file):
2082
+ os.remove(file)
2083
+ st.rerun()
2084
+
2085
+ # Display and handle MP4 video files
2086
+ for file in video_files:
2087
+ col1, col2 = st.sidebar.columns([6, 1]) # adjust the ratio as needed
2088
+ with col1:
2089
+ st.markdown(file)
2090
+ if st.button("🎥", key="play_" + file): # play emoji button
2091
+ video_file = open(file, 'rb')
2092
+ video_bytes = video_file.read()
2093
+ st.video(video_bytes)
2094
+ with col2:
2095
+ if st.button("🗑", key="delete_" + file):
2096
+ os.remove(file)
2097
+ st.rerun()
2098
+
2099
+ # Display and handle MP3 audio files
2100
+ for file in video_files_mp3:
2101
+ col1, col2 = st.sidebar.columns([6, 1]) # adjust the ratio as needed
2102
+ with col1:
2103
+ st.markdown(file)
2104
+ if st.button("🎥", key="play_" + file): # play emoji button
2105
+ video_file = open(file, 'rb')
2106
+ audio_bytes = video_file.read() # video_file_mp3 was undefined; read the handle opened above
+ st.audio(audio_bytes, format='audio/mp3') # MP3 is audio, so render it with st.audio rather than st.video
2108
+ with col2:
2109
+ if st.button("🗑", key="delete_" + file):
2110
+ os.remove(file)
2111
+ st.rerun()
2112
+
2113
+ # ChatBot Entry
2114
+ if prompt := st.chat_input("GPT-4o Multimodal ChatBot - What can I help you with?"):
2115
+ st.session_state.messages.append({"role": "user", "content": prompt})
2116
+ with st.chat_message("user"):
2117
+ st.markdown(prompt)
2118
+ with st.chat_message("assistant"):
2119
+ completion = client.chat.completions.create(
2120
+ model=MODEL,
2121
+ messages = st.session_state.messages,
2122
+ stream=True
2123
+ )
2124
+ response = process_text2(text_input=prompt)
2125
+ st.session_state.messages.append({"role": "assistant", "content": response})
2126
+
2127
+ # Image and Video Galleries
2128
+ num_columns_images=st.slider(key="num_columns_images", label="Choose Number of Image Columns", min_value=1, max_value=15, value=5)
2129
+ display_images_and_wikipedia_summaries(num_columns_images) # Image Jump Grid
2130
+
2131
+ num_columns_video=st.slider(key="num_columns_video", label="Choose Number of Video Columns", min_value=1, max_value=15, value=5)
2132
+ display_videos_and_links(num_columns_video) # Video Jump Grid
2133
+
2134
+ # Optional UI's
2135
+ showExtendedTextInterface=False
2136
+ if showExtendedTextInterface:
2137
+ display_glossary_grid(roleplaying_glossary) # Word Glossary Jump Grid - Dynamically calculates columns based on details length to keep topic together
2138
+ num_columns_text=st.slider(key="num_columns_text", label="Choose Number of Text Columns", min_value=1, max_value=15, value=4)
2139
+ display_buttons_with_scores(num_columns_text) # Feedback Jump Grid
2140
+ st.markdown(personality_factors)