Spaces: Running
Update app.py
Browse files

app.py CHANGED
@@ -26,6 +26,9 @@ import numpy as np
 from pylatexenc.latex2text import LatexNodes2Text
 import requests
 from urllib.parse import quote
+import geo_distance
+import geo_locate
+
 
 load_dotenv(override=True)
 key = os.getenv('OPENAI_API_KEY')
@@ -37,6 +40,7 @@ DEEPSEEK_KEY=os.getenv('DEEPSEEK_KEY')
 GROQ_KEY=os.getenv('GROQ_KEY')
 BRAVE_KEY=os.getenv('BRAVE_KEY')
 BRAVE_SEARCH_KEY=os.getenv('BRAVE_SEARCH_KEY')
+LOCATIONID_KEY=os.getenv('LOCATIONID_KEY')
 
 site = os.getenv('SITE')
 if site == 'local':
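Note: LOCATIONID_KEY is presumably the key for the geocoding service that geo_locate wraps; the geo_locate and geo_distance modules are added by this commit but their source is not part of this diff.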
@@ -64,6 +68,12 @@ special_chat_types = ['math', 'logic']
 
 news_interval_choices = [("Day", "pd"), ("Week", "pw"), ("Month", "pm"), ("Year", "py")]
 
+def get_distance(addr1, addr2):
+    (lat1, lon1) = geo_locate.get_geo_coords(addr1, LOCATIONID_KEY)
+    (lat2, lon2) = geo_locate.get_geo_coords(addr2, LOCATIONID_KEY)
+    distance = geo_distance.great_circle_distance_miles(lat1, lon1, lat2, lon2)
+    return distance
+
 def get_openai_file(file_id, container_id):
     url = f'https://api.openai.com/v1/containers/{container_id}/files/{file_id}/content'
     headers= {"Authorization": "Bearer " + key}
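geo_distance.great_circle_distance_miles is not shown in this diff; if it follows the standard haversine formula, a minimal sketch would look like the following (the module body is an assumption — only the name and signature come from the call sites above):

    # Hypothetical sketch of geo_distance.great_circle_distance_miles;
    # the real module is not included in this diff.
    import math

    EARTH_RADIUS_MILES = 3958.8  # mean Earth radius

    def great_circle_distance_miles(lat1, lon1, lat2, lon2):
        # Haversine formula on a spherical Earth, coordinates in degrees.
        phi1, phi2 = math.radians(lat1), math.radians(lat2)
        dphi = math.radians(lat2 - lat1)
        dlam = math.radians(lon2 - lon1)
        a = (math.sin(dphi / 2) ** 2
             + math.cos(phi1) * math.cos(phi2) * math.sin(dlam / 2) ** 2)
        return 2 * EARTH_RADIUS_MILES * math.asin(math.sqrt(a))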
@@ -753,14 +763,8 @@ def updatePassword(txt):
     # ref = len(txt[ref:loc]) + len(frag)
     # return txt
 
-def get_response(
-
-        instructions = '''
-You are a personal math tutor. When asked a math question,
-write and run code using the python tool to answer the question.
-'''
-    else:
-        instructions = '''
+def get_response(inputs, previous_response_id, container_id, image_file):
+    instructions = '''
 You are a helpful assistant who knows how to browse the web for info and to write and run python
 code.
 '''
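The rewritten get_response drops the old math-tutor branch and always uses the general-assistant instructions. It now takes the accumulated inputs list, the previous_response_id used to chain Responses API turns, the code-interpreter container_id, and an optional image_file path.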
@@ -769,15 +773,47 @@ Do not use latex for math expressions in text output.
 If a chart, table or plot is produced, return it as an image.
 If a powerpoint slide is created, return it as an image but do not offer a download link.
 If the user asks you to output a file, You must include the file you generate in the annotation
-of the output text
+of the output text.
 '''
+    if image_file != '':
+        with open(image_file, 'rt') as fp:
+            b64data = fp.read()
+        inputs.append(
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "input_image",
+                        "image_url": f'data:image/jpeg;base64, {b64data}',
+                    }
+                ]
+            }
+        )
+
     response = Client().responses.create(
         model= "gpt-5-mini", #"gpt-5-mini", "o4-mini",
         tools=[{ "type": "web_search_preview" },
-
+               { "type": "code_interpreter", "container": container_id}, #{'type': 'auto'}},
+               {"type": "function", "name": "get_distance",
+                "description": "get calculated straight-line (great-circle) distance between two locations or addresses.",
+                "parameters": {
+                    "type": "object", "properties": {
+                        "addr1": {
+                            "type": "string",
+                            "description": "The street address or other designation of a location.",
+                        },
+                        "addr2": {
+                            "type": "string",
+                            "description": "The street address or other designation of a location.",
+                        },
+                    },
+                    "required": ["addr1", "addr2"],
+                },
+               },
+              ],
         previous_response_id=previous_response_id,
         instructions = instructions,
-        input=
+        input=inputs,
         reasoning ={
             "effort": "medium",
             "summary": "auto"
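One caveat on the image block above: the file is opened in text mode ('rt') and its contents are interpolated directly into the data URL, so this only works if load_image already stored base64 text (and the space after "base64," is unusual in a data URL). If the file held raw image bytes instead, the encoding would look roughly like this sketch (an assumption about load_image's behavior, not a confirmed bug):

    import base64

    # Sketch: encode raw image bytes for the Responses API input_image part,
    # assuming image_file contains binary JPEG data rather than base64 text.
    with open(image_file, 'rb') as fp:
        b64data = base64.b64encode(fp.read()).decode('ascii')
    image_url = f'data:image/jpeg;base64,{b64data}'  # no space after the comma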
@@ -863,18 +899,28 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_ima
     using_groq = False
     reasoning = False
     prompt = prompt.strip()
-
+    need_turn = True
+    responses = []
+    inputs = []
+    prev_id = None
+    if mode == "Advanced":
+        if len(past):
+            (prev_id, container_id) = past.pop()
+    past = []
+    while mode == 'Advanced' and need_turn:
+        need_turn = False
     if len(past) == 0:
-        prev_id = None
         container_id = create_openai_container('My Container')
+        inputs.append(
+            {"role": "user", "content": f"{prompt}"}
+        )
     else:
-        (prev_id, container_id) = past
-        past.pop()
-    response = ''
+        (prev_id, container_id) = past.pop()
     for item in past:
         response += item
     try:
-        result = get_response(
+        result = get_response(inputs, prev_id, container_id, uploaded_image_file)
+        uploaded_image_file = ''
     except:
         return [[], "Sorry, there was an error getting the AI response",
             prompt, gptModel, uploaded_image_file, plot, image_out, file_out]
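The new need_turn loop is the standard function-calling round trip: run a model turn, and if it emitted a tool call, append the tool result to inputs and run another turn chained via previous_response_id. Stripped of app details, the pattern is (a paraphrase of the code above and below; make_tool_output is a hypothetical helper standing in for the inline dict built later):

    # Paraphrased skeleton of the turn loop introduced by this hunk.
    need_turn = True
    while need_turn:
        need_turn = False
        result = get_response(inputs, prev_id, container_id, image_file)
        for output in result.output:
            if output.type == 'function_call':
                inputs.append(make_tool_output(output))  # function_call_output item
                need_turn = True                         # one more turn to deliver it
        prev_id = result.id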
@@ -882,6 +928,7 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_ima
     image_done = False
     ann_files = []   # (container_id, file_id, filename)
     code_files = []  # (container_id, file_id, filename)
+    text = '??? AI returned no text for this query'
     for output in result.output:
         if output.type == 'message':
             for content in output.content:
@@ -919,15 +966,53 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_ima
                     code_files.append((cont_id, file_id, file_name))
                 except:
                     pass
+        elif output.type == 'function_call':
+            if output.name == 'get_distance':
+                args = json.loads(output.arguments)
+                distance = get_distance(args['addr1'], args['addr2'])
+                inputs.append({
+                    "type": "function_call_output",
+                    "call_id": f"{output.call_id}",
+                    "output": f"{float(distance):.2f}",
+                })
+                need_turn = True
+                continue
+        elif output.type == 'image_generation_call':
+            if result and not image_done:
+                image_done = True;
+                image_data = base64.b64decode(result)
+                fpath = dataDir + user_window + '.png'
+                with open(fpath,'wb') as fp:
+                    fp.write(image_data)
+                image_out = gr.Image(visible=True, value=fpath)
     do_file_download = False
+    ext = ''
+    backup_image = None
     if len(ann_files) > 0:
         (cont_id, file_id, file_name) = ann_files[-1]
+        ext = file_name.split('.')[-1].casefold()
         do_file_download = True
     elif len(code_files) > 0:
-
-
+        for i in range(len(code_files)):
+            (cont_id, file_id, file_name) = code_files[i]
+            if file_name.casefold().find('access') >= 0:
+                continue
+            ext = file_name.split('.')[-1].casefold()
+            if ext == 'png':
+                if not image_done:
+                    backup_image = code_files[i]
+            else:
+                do_file_download = True
+                break
+    if not do_file_download and not image_done and backup_image:
+        (cont_id, file_id, file_name) = backup_image
+        fpath = dataDir + user_window + '.png'
+        image_data = get_openai_file(file_id, cont_id).content
+        with open(fpath,'wb') as fp:
+            fp.write(image_data)
+        image_out = gr.Image(visible=True, value=fpath)
+
     if do_file_download:
-        ext = file_name.split('.')[-1]
         fpath = dataDir + user_window + '.' + ext
         try:
             data = get_openai_file(file_id, cont_id).content
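One caveat in the image_generation_call branch: base64.b64decode(result) is applied to the whole response object returned by get_response, whereas in the Responses API the base64 payload normally sits on the output item itself. If that was the intent, the decode would presumably be:

    image_data = base64.b64decode(output.result)  # assumed fix; payload lives on the output item

This hunk also relies on json and base64 being imported earlier in app.py; neither appears in the import hunk above.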
@@ -938,6 +1023,10 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_ima
         except:
             text += f'\nUnable to load code-generated file: {file_name}'
         # text += '\nIf a download link is given above, ignore it. Use the button below'
+    if need_turn:
+        # past.append(md(prompt))
+        past.append((result.id, container_id))
+        continue
     out_text = "\n".join(line for line in text.splitlines() if
                          'download' not in line.casefold())
     res = md("\n\n***YOU***: " + prompt + "\n\n***GPT***: " + out_text)
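When need_turn is set, the handler stores (result.id, container_id) in past and loops; on the next iteration that id becomes previous_response_id, so the API resumes the server-side conversation and only the new function_call_output items need to be sent.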
@@ -1289,10 +1378,10 @@ def show_help():
     can edit what's to be spoken. Except: In a chat conversation, spoken dialog will only include
     the latest prompt/response ("YOU:/GPT:") sequence.'''
     return str(txt).replace('```', ' ').replace('  ', ' ').replace('  ', ' ').replace('  ', ' ').replace('\n','<br>')
-def upload_image(prompt, user, password):
+def upload_image(prompt, user, password, mode):
     if not (user in unames and password == pwdList[unames.index(user)]):
         return [gr.Image(visible=False, interactive=True), "Incorrect user name and/or password"]
-    if len(prompt) < 3:
+    if len(prompt) < 3 and mode != 'Advanced':
         return [gr.Image(visible=False, interactive=True), "You must provide prompt/instructions (what to do with the image)"]
     return [gr.Image(visible=True, interactive=True), '']
 
@@ -1518,7 +1607,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     button_do_image.click(fn=make_image, inputs=[prompt_window, user_window, password], outputs=[image_window, output_window])
     image_window.change(fn=delete_image, inputs=[user])
     help_button.click(fn=show_help, outputs=output_window)
-    button_get_image.click(fn=upload_image, inputs = [prompt_window, user, password
+    button_get_image.click(fn=upload_image, inputs = [prompt_window, user, password, mode],
+                           outputs = [image_window2, output_window])
     image_window2.upload(fn=load_image, inputs=[image_window2, user], outputs=[uploaded_image_file, output_window])
     mode.change(fn=mode_change, inputs=mode, outputs=news_period)
     pwd_window.blur(updatePassword, inputs = pwd_window, outputs = [password, pwd_window, mode])