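# GPT Chat: a Gradio app (Hugging Face Space) offering OpenAI chat with voice
# entry and spoken replies, step-by-step math/logic solving, image generation,
# and image analysis, with simple per-user usage logging.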
import os
import gradio as gr
import openai
from openai import OpenAI
from dotenv import load_dotenv
from pathlib import Path
from time import sleep
import audioread
import threading
from glob import glob
import base64
from PIL import Image
from io import BytesIO
from pydantic import BaseModel
import pprint
load_dotenv(override=True)
key = os.getenv('OPENAI_API_KEY')
users = os.getenv('LOGNAME')
unames = users.split(',')
pwds = os.getenv('PASSWORD')
pwdList = pwds.split(',')

site = os.getenv('SITE')
if site == 'local':
    dp = Path('./data')
    dp.mkdir(exist_ok=True)
    dataDir = './data/'
else:
    dp = Path('/data')
    dp.mkdir(exist_ok=True)
    dataDir = '/data/'

speak_file = dataDir + "speak.wav"
# client = OpenAI(api_key = key)
# digits = ['zero: ','one: ','two: ','three: ','four: ','five: ','six: ','seven: ','eight: ','nine: ']
abbrevs = {'St. ': 'Saint ', 'Mr. ': 'mister ', 'Mrs. ': 'missus ', 'Ms. ': 'mizz '}
special_chat_types = ['math', 'logic']
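# Pydantic schema for OpenAI structured outputs: solve() asks the model to
# reply as a list of explanation/output steps plus a final answer.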
class Step(BaseModel):
    explanation: str
    output: str

class MathReasoning(BaseModel):
    steps: list[Step]
    final_answer: str

def Client():
    return OpenAI(api_key=key)
def solve(prompt, chatType):
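    """Solve a math or logic prompt via OpenAI structured outputs (MathReasoning).

    Returns (response_text, tokens_in, tokens_out, total_tokens).
    """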
    tokens_in = 0
    tokens_out = 0
    tokens = 0
    if chatType == 'math':
        instruction = "You are a helpful math tutor. Guide the user through the solution step by step."
    else:  # 'logic' is the only other special chat type
        instruction = "You are an expert in logic and reasoning. Guide the user through the solution step by step."
    try:
        completion = Client().beta.chat.completions.parse(
            model='gpt-4o-2024-08-06',
            messages=[
                {"role": "system", "content": instruction},
                {"role": "user", "content": prompt}
            ],
            response_format=MathReasoning,
            max_tokens=2000
        )
        tokens_in = completion.usage.prompt_tokens
        tokens_out = completion.usage.completion_tokens
        tokens = completion.usage.total_tokens
        msg = completion.choices[0].message
        if msg.parsed:
            dr = msg.parsed.model_dump()
            response = pprint.pformat(dr)
        elif msg.refusal:
            response = msg.refusal
        else:
            response = 'No response was returned'
    except Exception as e:
        if isinstance(e, openai.LengthFinishReasonError):
            response = 'Too many tokens'
        else:
            response = str(e)
    return (response, tokens_in, tokens_out, tokens)
def genUsageStats(do_reset=False):
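    """Collect per-user token, audio, speech, and image counts from the log files
    in dataDir; with do_reset=True, delete the logs instead. Returns a list of
    rows (one per user plus totals), or an error string on repeated file failures.
    """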
    result = []
    ttotal4o_in = 0
    ttotal4o_out = 0
    ttotal4mini_in = 0
    ttotal4mini_out = 0
    totalAudio = 0
    totalSpeech = 0
    totalImages = 0
    totalHdImages = 0
    if do_reset:
        dudPath = dataDir + '_speech.txt'
        if os.path.exists(dudPath):
            os.remove(dudPath)
    for user in unames:
        tokens4o_in = 0
        tokens4o_out = 0
        tokens4mini_in = 0
        tokens4mini_out = 0
        fp = dataDir + user + '_log.txt'
        if os.path.exists(fp):
            accessOk = False
            for i in range(3):
                try:
                    with open(fp) as f:
                        dataList = f.readlines()
                    if do_reset:
                        os.remove(fp)
                    else:
                        # each log line looks like 'user:tokens_in/tokens_out-model'
                        for line in dataList:
                            (u, t) = line.split(':')
                            (t, m) = t.split('-')
                            (tin, tout) = t.split('/')
                            incount = int(tin)
                            outcount = int(tout)
                            if 'mini' in m:
                                tokens4mini_in += incount
                                tokens4mini_out += outcount
                                ttotal4mini_in += incount
                                ttotal4mini_out += outcount
                            else:
                                tokens4o_in += incount
                                tokens4o_out += outcount
                                ttotal4o_in += incount
                                ttotal4o_out += outcount
                    accessOk = True
                    break
                except Exception:
                    sleep(3)
            if not accessOk:
                return f'File access failed reading stats for user: {user}'
        userAudio = 0
        fp = dataDir + user + '_audio.txt'
        if os.path.exists(fp):
            accessOk = False
            for i in range(3):
                try:
                    with open(fp) as f:
                        dataList = f.readlines()
                    if do_reset:
                        os.remove(fp)
                    else:
                        for line in dataList:
                            (dud, seconds) = line.split(':')
                            userAudio += int(seconds)
                        totalAudio += int(userAudio)
                    accessOk = True
                    break
                except Exception:
                    sleep(3)
            if not accessOk:
                return f'File access failed reading audio stats for user: {user}'
        userSpeech = 0
        fp = dataDir + user + '_speech.txt'
        if os.path.exists(fp):
            accessOk = False
            for i in range(3):
                try:
                    with open(fp) as f:
                        dataList = f.readlines()
                    if do_reset:
                        os.remove(fp)
                    else:
                        for line in dataList:
                            (dud, seconds) = line.split(':')
                            userSpeech += int(seconds)
                        totalSpeech += int(userSpeech)
                    accessOk = True
                    break
                except Exception:
                    sleep(3)
            if not accessOk:
                return f'File access failed reading speech stats for user: {user}'
        user_images = 0
        user_hd_images = 0
        fp = image_count_path(user)
        if os.path.exists(fp):
            accessOk = False
            for i in range(3):
                try:
                    with open(fp) as f:
                        dataList = f.readlines()
                    if do_reset:
                        os.remove(fp)
                    else:
                        for line in dataList:
                            x = line.strip()
                            if x == 'hd':
                                user_hd_images += 1
                                totalHdImages += 1
                            else:
                                user_images += 1
                                totalImages += 1
                    accessOk = True
                    break
                except Exception:
                    sleep(3)
            if not accessOk:
                return f'File access failed reading image gen stats for user: {user}'
        result.append([user, f'{tokens4mini_in}/{tokens4mini_out}', f'{tokens4o_in}/{tokens4o_out}',
                       f'audio:{userAudio}', f'speech:{userSpeech}', f'images:{user_images}/{user_hd_images}'])
    result.append(['totals', f'{ttotal4mini_in}/{ttotal4mini_out}', f'{ttotal4o_in}/{ttotal4o_out}',
                   f'audio:{totalAudio}', f'speech:{totalSpeech}', f'images:{totalImages}/{totalHdImages}'])
    return result
def new_conversation(user):
    clean_up(user)  # remove leftover .wav files
    flist = glob(f'{dataDir}{user}.png')
    flist.extend(glob(f'{dataDir}{user}_image.b64'))
    for fpath in flist:
        if os.path.exists(fpath):
            os.remove(fpath)
    return [None, [], None, gr.Image(visible=False, value=None), gr.Image(visible=False, value=None), '']

def updatePassword(txt):
    password = txt.lower().strip()
    return [password, "*********"]
# def parse_math(txt):
#     ref = 0
#     loc = txt.find(r'\(')
#     if loc == -1:
#         return txt
#     while (True):
#         loc2 = txt[ref:].find(r'\)')
#         if loc2 == -1:
#             break
#         loc = txt[ref:].find(r'\(')
#         if loc > -1:
#             loc2 += 2
#             slice = txt[ref:][loc:loc2]
#             frag = lconv.convert(slice)
#             txt = txt[:loc+ref] + frag + txt[loc2+ref:]
#             ref = len(txt[ref:loc]) + len(frag)
#     return txt
def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_image_file=''):
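    """Main submit handler. Routes the prompt to normal chat, step-by-step
    math/logic solving, or uploaded-image analysis, appends the exchange to the
    dialog, and logs token usage. The first configured user ("boss") also gets
    admin commands: stats, reset, gpt4, clean <user>, files.
    """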
    image_gen_model = 'gpt-4o-2024-08-06'
    user_window = user_window.lower().strip()
    isBoss = False
    if user_window == unames[0] and pwd_window == pwdList[0]:
        isBoss = True
        if prompt == 'stats':
            response = genUsageStats()
            return [past, response, None, gptModel, uploaded_image_file]
        if prompt == 'reset':
            response = genUsageStats(True)
            return [past, response, None, gptModel, uploaded_image_file]
        if prompt.startswith('gpt4'):
            gptModel = 'gpt-4o-2024-08-06'
            prompt = prompt[5:]
        if prompt.startswith("clean"):
            user = prompt[6:]
            response = f'cleaned all .wav and .b64 files for {user}'
            final_clean_up(user, True)
            return [past, response, None, gptModel, uploaded_image_file]
        if prompt.startswith('files'):
            (log_cnt, wav_cnt, other_cnt, others, log_list) = list_permanent_files()
            response = f'{log_cnt} log files\n{wav_cnt} .wav files\n{other_cnt} Other files:\n{others}\nlogs: {str(log_list)}'
            return [past, response, None, gptModel, uploaded_image_file]
    if user_window in unames and pwd_window == pwdList[unames.index(user_window)]:
        chatType = 'normal'
        prompt = prompt.strip()
        if prompt.lower().startswith('solve'):
            prompt = 'How do I solve ' + prompt[5:] + ' Do not use Latex for math expressions.'
            chatType = 'math'
        elif prompt.lower().startswith('puzzle'):
            chatType = 'logic'
            prompt = prompt[6:]
        past.append({"role": "user", "content": prompt})
        analyze_uploaded_image = (uploaded_image_file != '')
        if chatType in special_chat_types:
            (reply, tokens_in, tokens_out, tokens) = solve(prompt, chatType)
            reporting_model = image_gen_model
        elif not analyze_uploaded_image:
            completion = Client().chat.completions.create(model=gptModel, messages=past)
            reporting_model = gptModel
        else:
            (completion, msg) = analyze_image(user_window, image_gen_model, prompt)
            uploaded_image_file = ''
            reporting_model = image_gen_model
            if msg != 'ok':
                return [past, msg, None, gptModel, uploaded_image_file]
        if chatType not in special_chat_types:
            reply = completion.choices[0].message.content
            tokens_in = completion.usage.prompt_tokens
            tokens_out = completion.usage.completion_tokens
            tokens = completion.usage.total_tokens
        response += "\n\nYOU: " + prompt + "\nGPT: " + reply
        if isBoss:
            response += f"\n{reporting_model}: tokens in/out = {tokens_in}/{tokens_out}"
        if tokens > 40000:
            response += "\n\nTHIS DIALOG IS GETTING TOO LONG. PLEASE RESTART CONVERSATION SOON."
        past.append({"role": "assistant", "content": reply})
        accessOk = False
        for i in range(3):
            try:
                dataFile = log_file_path(user_window)
                with open(dataFile, 'a') as f:
                    m = '4o'
                    if 'mini' in reporting_model:
                        m = '4omini'
                    f.write(f'{user_window}:{tokens_in}/{tokens_out}-{m}\n')
                accessOk = True
                break
            except Exception:
                sleep(3)
        if not accessOk:
            response += f"\nDATA LOG FAILED, path = {dataFile}"
        return [past, response, None, gptModel, uploaded_image_file]
    else:
        return [[], "User name and/or password are incorrect", prompt, gptModel, uploaded_image_file]
def log_file_path(user):
    return dataDir + user + '_log.txt'

def image_count_path(user):
    return dataDir + user + '_image_count.txt'
def transcribe(user, pwd, fpath):
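    """Transcribe a recorded prompt with Whisper and log the audio duration."""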
    user = user.lower().strip()
    pwd = pwd.lower().strip()
    if not (user in unames and pwd in pwdList):
        return 'Bad credentials'
    with audioread.audio_open(fpath) as audio:
        duration = int(audio.duration)
    if duration > 0:
        with open(dataDir + user + '_audio.txt', 'a') as f:
            f.write(f'audio:{str(duration)}\n')
    with open(fpath, 'rb') as audio_file:
        transcript = Client().audio.transcriptions.create(
            model='whisper-1', file=audio_file, response_format='text')
    return str(transcript)
def pause_message():
    return "Audio input is paused. Resume or Stop as desired"

# def gen_output_audio(txt):
#     if len(txt) < 10:
#         txt = "This dialog is too short to mess with!"
#     response = Client().audio.speech.create(model="tts-1", voice="fable", input=txt)
#     with open(speak_file, 'wb') as fp:
#         fp.write(response.content)
#     return speak_file

def set_speak_button(txt):
    vis = len(txt) > 2
    return gr.Button(visible=vis)
def update_user(user_win):
    user_win = user_win.lower().strip()
    user = 'unknown'
    for s in unames:
        if user_win == s:
            user = s
            break
    return [user, user]
def speech_worker(chunks, q):
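    """Background TTS worker: render each text chunk to its pre-assigned .wav path."""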
    for chunk in chunks:
        fpath = q.pop(0)
        response = Client().audio.speech.create(model="tts-1", voice="fable", input=chunk,
                                                speed=0.85, response_format='wav')
        with open(fpath, 'wb') as fp:
            fp.write(response.content)
def gen_speech_file_names(user, cnt):
    return [dataDir + f'{user}_speech{i}.wav' for i in range(cnt)]
def final_clean_up(user, do_b64=False):
    user = user.strip().lower()
    if user == 'kill':
        flist = glob(dataDir + '*')
    elif user == 'all':
        flist = glob(dataDir + '*_speech*.wav')
        if do_b64:
            flist.extend(glob(dataDir + '*.b64'))
    else:
        flist = glob(dataDir + f'{user}_speech*.wav')
        if do_b64:
            flist.append(dataDir + user + '_image.b64')
    for fpath in flist:
        try:
            os.remove(fpath)
        except OSError:
            continue

def delete_image(user):
    fpath = dataDir + user + '.png'
    if os.path.exists(fpath):
        os.remove(fpath)
def list_permanent_files():
    flist = os.listdir(dataDir)
    others = []
    log_cnt = 0
    wav_cnt = 0
    list_logs = []
    for fpath in flist:
        if fpath.endswith('.txt'):
            log_cnt += 1
            list_logs.append(fpath)
        elif fpath.endswith('.wav'):
            wav_cnt += 1
        else:
            others.append(fpath)
    other_cnt = len(others)
    if log_cnt > 5:
        list_logs = []
    return (str(log_cnt), str(wav_cnt), str(other_cnt), str(others), list_logs)
def make_image(prompt, user, pwd):
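    """Generate an image with DALL-E 2 (or DALL-E 3 when the prompt starts with
    'hd ') and save it as <user>.png; returns the image component and a status message.
    """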
    user = user.lower().strip()
    msg = 'Error: unable to create image.'
    fpath = None
    model = 'dall-e-2'
    size = '512x512'
    quality = 'standard'
    if user in unames and pwd == pwdList[unames.index(user)]:
        if len(prompt.strip()) == 0:
            return [gr.Image(value=None, visible=False), 'You must provide a prompt describing the image you desire']
        if prompt.startswith('hd '):
            prompt = prompt[3:]
            model = 'dall-e-3'
            size = '1024x1024'
            quality = 'hd'
        try:
            response = Client().images.generate(model=model, prompt=prompt, size=size,
                                                quality=quality, response_format='b64_json')
        except Exception as ex:
            msg = str(ex)
            return [gr.Image(visible=False, value=None), msg]
        if len(response.data) == 0:
            msg = "OpenAI returned no image data"
            return [gr.Image(visible=False, value=None), msg]
        try:
            image_data = response.data[0].b64_json
            with Image.open(BytesIO(base64.b64decode(image_data))) as image:
                fpath = dataDir + user + '.png'
                image.save(fpath)
            with open(image_count_path(user), 'at') as fp:
                if quality == 'hd':
                    fp.write('hd\n')
                else:
                    fp.write('1\n')
            msg = 'Image created!'
        except Exception:
            return [gr.Image(visible=False, value=None), msg]
    else:
        msg = 'Incorrect user name or password'
        return [gr.Image(visible=False, value=None), msg]
    return [gr.Image(visible=True, value=fpath), msg]
def show_help():
    return '''
1. General:
      1.1 Login with user name and password (not case-sensitive)
      1.2 Type prompts (questions, instructions) into "Prompt or Question" window (OR) you can speak prompts by
          tapping the audio "Record" button, saying your prompt, then tapping the "Stop" button.
          Your prompt will appear in the Prompt window, and you can edit it there if needed.
      1.3 Text in the "Dialog" window can be spoken by tapping the "Speak Dialog" button.
2. Chat:
      2.1 Enter a prompt and tap the "Submit Prompt/Question" button. The responses appear in the Dialog window.
      2.2 Enter follow-up questions in the Prompt window either by typing or speaking. Tap the voice
          entry "Reset Voice Entry" button to enable additional voice entry. Then tap "Submit Prompt/Question".
      2.3 If the topic changes or when done chatting, tap the "Restart Conversation" button.
3. Solve math equations or logic problems with step-by-step analysis:
      3.1 Math: Make "solve" the first word in your prompt, followed by the equation, e.g., x^2 - x + 1 = 0
      3.2 Logic: Make "puzzle" the first word in your prompt, followed by a detailed description of a logic
          problem with the answer(s) you desire.
4. Make Image:
      4.1 Enter a description of the desired image in the prompt window via either typing or voice entry.
      4.2 Tap the "Make Image" button. This can take a few seconds.
      4.3 There is a download button on the image display if your system supports file downloads.
      4.4 When done viewing the image, tap the "Restart Conversation" button.
5. Analyze an Image you provide:
      5.1 Enter what you want to know about the image in the prompt window. You can include instructions
          to write a poem about something in the image, for example. Or just say "what's in this image?"
      5.2 Tap the "Upload Image to Analyze" button.
      5.3 An empty image box will appear lower left. Drag or upload an image into it. It also offers
          webcam or camera input.
      5.4 The image should appear. This can take some time with a slow internet connection and a large image.
      5.5 Tap the "Submit Prompt/Question" button to start the analysis. This initiates a chat dialog and
          you can ask follow-up questions. However, the image is not re-analyzed for follow-up dialog.
Hints:
      1. Better chat and image results are obtained by including detailed descriptions and instructions
         in the prompt.
      2. Always tap "Restart Conversation" before requesting an image or changing chat topics.
      3. Audio input and output functions depend on the hardware capability of your device.
      4. "Speak Dialog" will voice whatever is currently in the Dialog window. You can repeat it and you
         can edit what's to be spoken. Except: In a chat conversation, spoken dialog will only include
         the latest prompt/response ("YOU:/GPT:") sequence.'''
def upload_image(prompt, user, password):
    if not (user in unames and password == pwdList[unames.index(user)]):
        return [gr.Image(visible=False, interactive=True), "Incorrect user name and/or password"]
    if len(prompt) < 3:
        return [gr.Image(visible=False, interactive=True), "You must provide prompt/instructions (what to do with the image)"]
    return [gr.Image(visible=True, interactive=True), '']
def load_image(image, user):
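    """Base64-encode the uploaded image and save it as <user>_image.b64 for analysis."""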
    status = 'OK, image is ready! Tap "Submit Prompt/Question" to start analyzing'
    fpath = ''
    try:
        with open(image, 'rb') as image_file:
            base64_image = base64.b64encode(image_file.read()).decode('utf-8')
        fpath = dataDir + user + '_image.b64'
        with open(fpath, 'wt') as fp:
            fp.write(base64_image)
    except Exception:
        status = 'Unable to upload image'
    return [fpath, status]
def analyze_image(user, model, prompt):
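    """Send the saved base64 image plus the user's prompt to the vision-capable
    chat model; returns (completion, status).
    """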
    status = 'ok'
    try:
        with open(dataDir + user + '_image.b64', 'rt') as fp:
            base64_image = fp.read()
    except Exception:
        status = "base64 image file not found"
        return [None, status]
    completion = Client().chat.completions.create(
        model=model,
        messages=[
            {"role": "user",
             "content": [
                 {"type": "text", "text": prompt},
                 {"type": "image_url",
                  "image_url": {
                      "url": f"data:image/jpeg;base64,{base64_image}",
                      "detail": "high"
                  }}
             ]}
        ],
        max_tokens=500
    )
    return [completion, status]
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    history = gr.State([])
    password = gr.State("")
    user = gr.State("unknown")
    model = gr.State("gpt-4o-mini")
    q = gr.State([])
    qsave = gr.State([])
    uploaded_image_file = gr.State('')

    def clean_up(user):
        flist = glob(dataDir + f'{user}_speech*.wav')
        for fpath in flist:
            try:
                os.remove(fpath)
            except OSError:
                continue
    def initial_audio_output(txt, user):
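        """Prepare the dialog text for TTS: keep the latest YOU:/GPT: exchange,
        expand abbreviations, spell out decimal points, split the text into short
        chunks, synthesize the first chunk, and queue the rest for speech_worker.
        """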
        if user not in unames:
            return [gr.Audio(sources=None), []]
        clean_up(user)
        q = []
        if len(txt.strip()) < 5:
            return [None, q]
        try:
            loc = txt.rindex('YOU:')
            txt = txt[loc:]
        except ValueError:
            pass
        for s, x in abbrevs.items():
            txt = txt.replace(s, x)
        words_in = txt.replace('**', '').splitlines(False)
        words_out = []
        for s in words_in:
            s = s.lstrip('- *@#$%^&_=+-')
            if len(s) > 0:
                loc = s.find(' ')
                if loc > 1:
                    val = s[0:loc]
                    isnum = val.replace('.', '0').isdecimal()
                    if isnum:
                        # tag numbered lines so the number can be re-attached to each chunk
                        if val.endswith('.'):
                            val = val[:-1].replace('.', ' point ') + '., '
                        else:
                            val = val.replace('.', ' point ') + ', '
                        s = 'num' + val + s[loc:]
                words_out.append(s)
        chunklist = []
        for chunk in words_out:
            if chunk.strip() == '':
                continue
            isnumbered = chunk.startswith('num')
            number = ''
            loc = 0
            if isnumbered:
                chunk = chunk[3:]
                loc = chunk.index(',')
                number = chunk[0:loc]
                chunk = chunk[loc:]
            # voice embedded decimal points as "point"
            locs = []
            for i in range(1, len(chunk) - 1):
                (a, b, c) = chunk[i - 1:i + 2]
                if a.isdecimal() and b == '.' and c.isdecimal():
                    locs.append(i)
            for i in locs:
                chunk = chunk[:i] + ' point ' + chunk[i + 1:]
            if len(chunk) > 50:
                finechunks = chunk.split('.')
                for fchunk in finechunks:
                    if isnumbered:
                        fchunk = number + fchunk
                        isnumbered = False
                    if len(fchunk) > 0:
                        if fchunk != '"':
                            chunklist.append(fchunk)
            else:
                line = number + chunk
                if line != '"':
                    chunklist.append(line)
        if len(chunklist) == 0:
            return [None, []]
        total_speech = 0
        for chunk in chunklist:
            total_speech += len(chunk)
        with open(dataDir + user + '_speech.txt', 'a') as f:
            f.write(f'speech:{str(total_speech)}\n')
        chunk = chunklist[0]
        if chunk.strip() == '':
            return [None, []]
        fname_list = gen_speech_file_names(user, len(chunklist))
        q = fname_list.copy()
        qsave = fname_list.copy()
        fname = q.pop(0)
        if len(chunklist) > 1:
            threading.Thread(target=speech_worker, daemon=True,
                             args=(chunklist[1:], fname_list[1:])).start()
        response = Client().audio.speech.create(model="tts-1", voice="fable", input=chunk,
                                                speed=0.85, response_format='wav')
        with open(fname, 'wb') as fp:
            fp.write(response.content)
        return [fname, q]
    def gen_output_audio(q, user):
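        """Feed the next queued speech file to the audio player; called each time
        playback of the previous chunk stops.
        """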
        try:
            fname = q.pop(0)
        except IndexError:
            final_clean_up(user)
            return [None, gr.Audio(sources=None)]
        if not os.path.exists(fname):
            # give the background worker a moment to finish rendering this chunk
            sleep(3)
            if not os.path.exists(fname):
                response = Client().audio.speech.create(model="tts-1", voice="fable",
                        input='Sorry, text-to-speech is responding too slowly right now',
                        speed=0.85, response_format='wav')
                with open(fname, 'wb') as fp:
                    fp.write(response.content)
                q = []
        return [fname, q]
    gr.Markdown('# GPT Chat')
    gr.Markdown('Enter user name & password. Tap "Help & Hints" button for more instructions.')
    with gr.Row():
        user_window = gr.Textbox(label="User Name")
        user_window.blur(fn=update_user, inputs=user_window, outputs=[user, user_window])
        pwd_window = gr.Textbox(label="Password")
        pwd_window.blur(updatePassword, inputs=pwd_window, outputs=[password, pwd_window])
        help_button = gr.Button(value='Help & Hints')
    with gr.Row():
        audio_widget = gr.Audio(type='filepath', format='wav',
                waveform_options=gr.WaveformOptions(show_recording_waveform=True),
                sources=['microphone'], scale=3, label="Prompt/Question Voice Entry", max_length=120)
        reset_button = gr.ClearButton(value="Reset Voice Entry", scale=1)
    with gr.Row():
        clear_button = gr.Button(value="Restart Conversation")
        # gpt_chooser = gr.Radio(choices=[("GPT-3.5", "gpt-3.5-turbo"), ("GPT-4o", "gpt-4o-mini")],
        #                        value="gpt-3.5-turbo", label="GPT Model", interactive=True)
        button_do_image = gr.Button(value='Make Image')
        button_get_image = gr.Button(value='Upload Image to Analyze')
        submit_button = gr.Button(value="Submit Prompt/Question")
        speak_output = gr.Button(value="Speak Dialog", visible=False)
    prompt_window = gr.Textbox(label="Prompt or Question")
    output_window = gr.Textbox(label="Dialog")
    with gr.Row():
        with gr.Column():
            image_window2 = gr.Image(visible=False, interactive=True, label='Image to Analyze', type='filepath')
        with gr.Column():
            image_window = gr.Image(visible=False, label='Generated Image')
    submit_button.click(chat,
            inputs=[prompt_window, user_window, password, history, output_window, model, uploaded_image_file],
            outputs=[history, output_window, prompt_window, model, uploaded_image_file])
    clear_button.click(fn=new_conversation, inputs=user_window,
            outputs=[prompt_window, history, output_window, image_window, image_window2, uploaded_image_file])
    audio_widget.stop_recording(fn=transcribe, inputs=[user_window, password, audio_widget],
            outputs=[prompt_window])
    audio_widget.pause_recording(fn=pause_message, outputs=[prompt_window])
    reset_button.add(audio_widget)
    audio_out = gr.Audio(autoplay=True, visible=False)
    audio_out.stop(fn=gen_output_audio, inputs=[q, user_window], outputs=[audio_out, q])
    speak_output.click(fn=initial_audio_output, inputs=[output_window, user_window], outputs=[audio_out, q])
    output_window.change(fn=set_speak_button, inputs=output_window, outputs=speak_output)
    button_do_image.click(fn=make_image, inputs=[prompt_window, user_window, password],
            outputs=[image_window, output_window])
    image_window.change(fn=delete_image, inputs=[user])
    help_button.click(fn=show_help, outputs=output_window)
    button_get_image.click(fn=upload_image, inputs=[prompt_window, user, password],
            outputs=[image_window2, output_window])
    image_window2.upload(fn=load_image, inputs=[image_window2, user], outputs=[uploaded_image_file, output_window])
    # demo.unload(final_clean_up(user))

demo.launch(share=True)