dlflannery committed
Commit c152ad6 · verified · 1 Parent(s): 0ac5c98

Update app.py

counting image gens, logging, stats
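
In brief, the commit tracks image generations by appending one line to a per-user count file each time an image is created, then summing those lines when usage stats are generated (and deleting the file on reset). A minimal sketch of that bookkeeping, condensed from the diff below — only image_count_path comes from the commit; the helper names record_image/count_images and the dataDir value here are illustrative:

import os

dataDir = './'  # illustrative; app.py keeps per-user files under its own dataDir

def image_count_path(user):
    # one count file per user, as added in this commit
    return dataDir + user + '_image_count.txt'

def record_image(user):
    # make_image() appends a '1' line for every successfully generated image
    with open(image_count_path(user), 'at') as fp:
        fp.write('1\n')

def count_images(user, do_reset=False):
    # genUsageStats() sums the lines to get the per-user total;
    # when resetting stats it removes the count file instead
    fp = image_count_path(user)
    if not os.path.exists(fp):
        return 0
    with open(fp) as f:
        total = sum(int(line.strip()) for line in f)
    if do_reset:
        os.remove(fp)
    return total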

Files changed (1)
  1. app.py +59 -7
app.py CHANGED
@@ -49,6 +49,7 @@ def genUsageStats(do_reset=False):
     ttotal4mini_out = 0
     totalAudio = 0
     totalSpeech = 0
+    totalImages = 0
     for user in unames:
         tokens4o_in = 0
         tokens4o_out = 0
@@ -128,8 +129,29 @@ def genUsageStats(do_reset=False):
                     sleep(3)
             if not accessOk:
                 return f'File access failed reading speech stats for user: {user}'
-        result.append([user, f'{tokens4mini_in}/{tokens4mini_out}', f'{tokens4o_in}/{tokens4o_out}', f'audio:{userAudio}',f'speech:{userSpeech}'])
-    result.append(['totals', f'{ttotal4mini_in}/{ttotal4mini_out}', f'{ttotal4o_in}/{ttotal4o_out}', f'audio:{totalAudio}',f'speech:{totalSpeech}'])
+        user_images = 0
+        fp = image_count_path(user)
+        if os.path.exists(fp):
+            accessOk = False
+            for i in range(3):
+                try:
+                    with open(fp) as f:
+                        dataList = f.readlines()
+                    if do_reset:
+                        os.remove(fp)
+                    else:
+                        for line in dataList:
+                            cnt = line.strip()
+                            user_images += int(cnt)
+                        totalImages += int(user_images)
+                    accessOk = True
+                    break
+                except:
+                    sleep(3)
+            if not accessOk:
+                return f'File access failed reading image gen stats for user: {user}'
+        result.append([user, f'{tokens4mini_in}/{tokens4mini_out}', f'{tokens4o_in}/{tokens4o_out}', f'audio:{userAudio}',f'speech:{userSpeech}', f'images:{user_images}'])
+    result.append(['totals', f'{ttotal4mini_in}/{ttotal4mini_out}', f'{ttotal4o_in}/{ttotal4o_out}', f'audio:{totalAudio}',f'speech:{totalSpeech}', f'images:{totalImages}'])
     return result
 
 def new_conversation(user):
@@ -204,6 +226,10 @@ def new_func(user):
     dataFile = dataDir + user + '_log.txt'
     return dataFile
 
+def image_count_path(user):
+    fpath = dataDir + user + '_image_count.txt'
+    return fpath
+
 def transcribe(user, pwd, fpath):
     user = user.lower().strip()
     pwd = pwd.lower().strip()
@@ -298,7 +324,7 @@ def make_image(prompt, user, pwd):
     fpath = None
     if user in unames and pwd == pwdList[unames.index(user)]:
         if len(prompt.strip()) == 0:
-            return [None, 'You must provide a prompt']
+            return [None, 'You must provide a prompt describing image you desire']
         try:
             response = client.images.generate(model='dall-e-2', prompt=prompt,size='512x512',
                             quality='standard', response_format='b64_json')
@@ -306,6 +332,8 @@ def make_image(prompt, user, pwd):
             image = Image.open(BytesIO(base64.b64decode(image_data)))
             fpath = dataDir + user + '.png'
             image.save(fpath)
+            with open(image_count_path(user), 'at') as fp:
+                fp.write('1\n')
             msg = 'Image created!'
         except:
             return [None, msg]
@@ -314,6 +342,31 @@ def make_image(prompt, user, pwd):
         return [None, msg]
     return [fpath, msg]
 
+def show_help():
+    return '''
+1. Login with user name and password (not case-sensitive)
+2. Type prompts (questions, instructions) into prompt window (OR) you can speak prompts by
+   tapping the audio "Record" button, saying your prompt, then tapping the "Stop" button.
+   Your prompt will appear in the Prompt window, and you can edit it there if needed.
+3. Chat:
+   1.1 tap the "Submit Prompt/Question" button. The response will appear in the Dialog window.
+   1.2 To speak the response, tap the "Speak Dialog" button.
+   1.3 Enter follow-up questions in the Prompt window either by typing or speaking. Tap the voice
+       entry "Reset Voice Entry" button to enable additional voice entry. Then tap "Submit Prompt/Question".
+   1.4 If topic changes or when done chatting, tap the "Restart Conversation" button.
+4. Make Image:
+   1.1 Enter description of desired image in prompt window via either typing or voice entry
+   1.2 Tap the "Make Image" button. This can take a few seconds.
+   1.3 There is a download button on the image display if your system supports file downloads.
+   1.4 When done viewing image, tap the "Restart Conversation" button
+
+Hints:
+1. Better chat and image results are obtained by including detailed descriptions and instructions
+   in the prompt.
+2. Always tap "Restart Conversation" before requesting an image or changing chat topics.
+3. Audio input and output functions depend on the hardware capability of your device.'''
+
+
 
 with gr.Blocks() as demo:
     history = gr.State([])
@@ -419,15 +472,13 @@ with gr.Blocks() as demo:
 
 
     gr.Markdown('# GPT Chat')
-    gr.Markdown('Enter user name & password then enter prompt and click submit button. Restart conversation if topic changes. ' +
-                'You can enter prompts by voice. Tap "Record", speak, then tap "Stop". ' +
-                'Tap "Reset Voice Entry" to enter more voice. Tap "Speak Dialog" to hear dialog. ' +
-                'Note: first voice response may take a longer time.')
+    gr.Markdown('Enter user name & password. Tap "Help & Hints" button for more instructions.')
     with gr.Row():
         user_window = gr.Textbox(label = "User Name")
         user_window.blur(fn=update_user, inputs=user_window, outputs=[user, user_window])
         pwd_window = gr.Textbox(label = "Password")
         pwd_window.blur(updatePassword, inputs = pwd_window, outputs = [password, pwd_window])
+        help_button = gr.Button(value='Help & Hints')
     with gr.Row():
         audio_widget = gr.Audio(type='filepath', format='wav',waveform_options=gr.WaveformOptions(
             show_recording_waveform=True), sources=['microphone'], scale = 3, label="Prompt/Question Voice Entry", max_length=120)
@@ -455,5 +506,6 @@ with gr.Blocks() as demo:
     output_window.change(fn=set_speak_button, inputs=output_window,outputs=speak_output)
     button_do_image.click(fn=make_image, inputs=[prompt_window,user_window, password],outputs=[image_window, output_window])
     image_window.change(fn=delete_image, inputs=[user])
+    help_button.click(fn=show_help, outputs=output_window)
    # demo.unload(final_clean_up(user))
    demo.launch(share=True)
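
For reference, the new Help & Hints button uses the standard Gradio pattern of a click handler with no inputs whose return value fills an existing component. A minimal, self-contained sketch of that wiring under the same assumption about the Gradio API used in app.py — the help text here is shortened and the component labels are placeholders:

import gradio as gr

def show_help():
    # the real show_help() in app.py returns the full multi-line help text
    return '1. Login with user name and password\n2. Type or speak a prompt, then tap Submit.'

with gr.Blocks() as demo:
    output_window = gr.Textbox(label='Dialog')
    help_button = gr.Button(value='Help & Hints')
    # no inputs: show_help takes no arguments, and its return value is written to output_window
    help_button.click(fn=show_help, outputs=output_window)

demo.launch()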