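# Simple password-protected GPT chat front end built with Gradio.
# Reads allowed user names, passwords, and the OpenAI API key from the
# environment, relays prompts to the OpenAI chat completions API, and
# keeps a per-user usage log under dataDir.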
import os
import gradio as gr
# import openai
from openai import OpenAI
from dotenv import load_dotenv
from pathlib import Path

load_dotenv()
key = os.getenv('OPENAI_API_KEY')
# LOGNAME and PASSWORD hold comma-separated lists of allowed user names and passwords
users = os.getenv('LOGNAME')
unames = users.split(',')
pwds = os.getenv('PASSWORD')
pwdList = pwds.split(',')

# Usage logs go under /data on the hosted Space, ./data when running locally
site = os.getenv('SITE')
if site == 'local':
    dp = Path('./data')
    dp.mkdir(exist_ok=True)
    dataDir = './data/'
else:
    dp = Path('/data')
    dp.mkdir(exist_ok=True)
    dataDir = '/data/'

client = OpenAI(api_key=key)

def genUsageStats(do_reset=False):
    # Tally the logged usage counts for each user; optionally delete the logs
    result = []
    for user in unames:
        total = 0
        fp = dataDir + user + '_log.txt'
        if os.path.exists(fp):
            with open(fp) as f:
                dataList = f.readlines()
            if do_reset:
                os.remove(fp)
            for line in dataList:
                (u, c) = line.split(':')
                total += int(c)
        else:
            total = 0
        result.append([user, str(total)])
    return result

def clear():
    # Reset prompt, history, and dialog display
    return [None, [], None]

def updatePassword(txt):
    # Stash the typed password in state and mask the visible textbox
    return [txt.lower(), "*********"]

def setModel(val):
    return val

def chat(prompt, user_window, pwd_window, past, response, gptModel):
    user_window = user_window.lower()
    # Admin-only commands: the first user in the list can view or reset usage stats
    if user_window == unames[0] and pwd_window == pwdList[0]:
        if prompt == 'stats':
            response = genUsageStats()
            return [past, response, None]
        if prompt == 'reset':
            response = genUsageStats(True)
            return [past, response, None]
    if user_window in unames and pwd_window in pwdList:
        past.append({"role": "user", "content": prompt})
        completion = client.chat.completions.create(model=gptModel,
                                                    messages=past)
        reply = completion.choices[0].message.content
        response += "\n\nYOU: " + prompt + "\nGPT: " + reply  # + "\nModel = " + gptModel
        if len(response) > 40000:
            response += "\n\nTHIS DIALOG IS GETTING TOO LONG. PLEASE CLEAR IT."
        past.append({"role": "assistant", "content": reply})
        # Append the current dialog length to the user's usage log
        dataFile = dataDir + user_window + '_log.txt'
        try:
            with open(dataFile, 'a') as f:
                f.write(f'{user_window}: {len(response)}\n')
            # with open(dataFile) as f:
            #     response += '\n' + f.read()
        except Exception as e:
            response += f"\nDATA LOG FAILED, path = {dataFile}\nmsg = {e}"
        # response += f'\npPath = {p.absolute()}'
        # response += '\nUsers = ' + ','.join(unames)
        return [past, response, None]
    else:
        return [[], "User name and/or password are incorrect", prompt]

with gr.Blocks() as demo:
    history = gr.State([])
    password = gr.State("")
    model = gr.State("gpt-3.5-turbo")
    gr.Markdown('# GPT Chat')
    gr.Markdown('Enter user name & password, then enter a prompt and click the submit button. '
                'GPT-3.5 is cheaper but GPT-4o may perform better. Restart the conversation if the topic changes.')
    # heading = gr.Label(value="GPT Chat", scale=2, color="Crimson")
    with gr.Row():
        user_window = gr.Textbox(label="User Name")
        pwd_window = gr.Textbox(label="Password")
        # When the password box loses focus, save its value to state and mask the display
        pwd_window.blur(updatePassword, pwd_window, [password, pwd_window])
    with gr.Row():
        clear_button = gr.Button(value="Restart Conversation")
        gpt_chooser = gr.Radio(choices=[("GPT-3.5", "gpt-3.5-turbo"), ("GPT-4o", "gpt-4o-mini")],
                               value="gpt-3.5-turbo", label="GPT Model", interactive=True)
        submit_window = gr.Button(value="Submit Prompt/Question")
    prompt_window = gr.Textbox(label="Prompt or Question")
    output_window = gr.Textbox(label="Dialog")
    submit_window.click(chat, inputs=[prompt_window, user_window, password, history, output_window, model],
                        outputs=[history, output_window, prompt_window])
    clear_button.click(clear, inputs=[], outputs=[prompt_window, history, output_window])
    gpt_chooser.input(fn=setModel, inputs=gpt_chooser, outputs=model)

demo.launch()