dlflannery committed on
Commit
cd53d09
·
verified ·
1 Parent(s): f4990f9

Update app.py

Browse files

Added a permanent-files listing command.

Files changed (1) hide show
  1. app.py +33 -6
app.py CHANGED
@@ -3,7 +3,7 @@ from re import L
3
  import tempfile
4
  import gradio as gr
5
  # import openai
6
- from numpy._core.defchararray import isdecimal
7
  from openai import OpenAI
8
  from dotenv import load_dotenv
9
  from pathlib import Path
@@ -147,6 +147,7 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel):
147
  isBoss = True
148
  if prompt == 'stats':
149
  response = genUsageStats()
 
150
  return [past, response, None, gptModel]
151
  if prompt == 'reset':
152
  response = genUsageStats(True)
@@ -159,6 +160,10 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel):
159
  response = f'cleaned all .wav files for {user}'
160
  final_clean_up(user)
161
  return [past, response, None, gptModel]
 
 
 
 
162
  if user_window in unames and pwd_window == pwdList[unames.index(user_window)]:
163
  past.append({"role":"user", "content":prompt})
164
  completion = client.chat.completions.create(model=gptModel,
@@ -249,17 +254,38 @@ def gen_speech_file_names(user, cnt):
249
  return rv
250
 
251
  def final_clean_up(user):
252
- flist = glob(dataDir + f'{user}_speech*.wav')
 
 
 
253
  for fpath in flist:
254
  try:
255
  os.remove(fpath)
256
  except:
257
  continue
258
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
259
  with gr.Blocks() as demo:
260
  history = gr.State([])
261
  password = gr.State("")
262
- user = gr.State("")
263
  model = gr.State("gpt-4o-mini")
264
  q = gr.State([])
265
  qsave = gr.State([])
@@ -350,10 +376,11 @@ with gr.Blocks() as demo:
350
  fp.write(response.content)
351
  return [fname, q]
352
 
353
- def gen_output_audio(q):
354
  try:
355
  fname = q.pop(0)
356
  except:
 
357
  return [None, gr.Audio(sources=None)]
358
  return [fname, q]
359
 
@@ -388,8 +415,8 @@ with gr.Blocks() as demo:
388
  audio_widget.pause_recording(fn=pause_message, outputs=[prompt_window])
389
  reset_button.add(audio_widget)
390
  audio_out = gr.Audio(autoplay=True, visible=False)
391
- audio_out.stop(fn=gen_output_audio, inputs=q, outputs = [audio_out, q])
392
  speak_output.click(fn=initial_audio_output, inputs=[output_window, user_window], outputs=[audio_out, q])
393
  output_window.change(fn=set_speak_button, inputs=output_window,outputs=speak_output)
394
- demo.unload(final_clean_up(user_window))
395
  demo.launch(share=True)
 
3
  import tempfile
4
  import gradio as gr
5
  # import openai
6
+ from numpy._core.defchararray import endswith, isdecimal
7
  from openai import OpenAI
8
  from dotenv import load_dotenv
9
  from pathlib import Path
 
147
  isBoss = True
148
  if prompt == 'stats':
149
  response = genUsageStats()
150
+ list_permanent_files()
151
  return [past, response, None, gptModel]
152
  if prompt == 'reset':
153
  response = genUsageStats(True)
 
160
  response = f'cleaned all .wav files for {user}'
161
  final_clean_up(user)
162
  return [past, response, None, gptModel]
163
+ if prompt.startswith('files'):
164
+ (log_cnt, wav_cnt, other_cnt, others) = list_permanent_files()
165
+ response = f'{log_cnt} log files\n{wav_cnt} .wav files\n{other_cnt} Other files:\n{others}'
166
+ return [past, response, None, gptModel]
167
  if user_window in unames and pwd_window == pwdList[unames.index(user_window)]:
168
  past.append({"role":"user", "content":prompt})
169
  completion = client.chat.completions.create(model=gptModel,
 
254
  return rv
255
 
256
def final_clean_up(user):
    """Delete generated speech .wav files from the data directory.

    Args:
        user: name whose ``<user>_speech*.wav`` files are removed; the
            literal string ``'all'`` (any case, surrounding whitespace
            ignored) removes every user's speech files.

    Removal is best-effort: files that vanish or cannot be deleted are
    skipped silently rather than aborting the cleanup.
    """
    if user.strip().lower() == 'all':
        flist = glob(dataDir + '*_speech*.wav')
    else:
        flist = glob(dataDir + f'{user}_speech*.wav')
    for fpath in flist:
        try:
            os.remove(fpath)
        except OSError:
            # Narrowed from a bare except: only swallow filesystem errors,
            # never KeyboardInterrupt/SystemExit or programming errors.
            continue
266
 
267
+
268
def list_permanent_files():
    """Tally the files currently stored in the data directory.

    Returns:
        A 4-tuple of strings: (count of ``.txt`` log files, count of
        ``.wav`` audio files, count of other files, ``repr`` of the
        other files' names). Values are strings because the caller
        interpolates them directly into a chat response.
    """
    log_cnt = 0
    wav_cnt = 0
    others = []
    for fname in os.listdir(dataDir):
        if fname.endswith('.txt'):
            log_cnt += 1
        elif fname.endswith('.wav'):
            wav_cnt += 1
        else:
            others.append(fname)
    # other_cnt derives from the collected list; no separate counter needed
    return (str(log_cnt), str(wav_cnt), str(len(others)), str(others))
283
+
284
+
285
  with gr.Blocks() as demo:
286
  history = gr.State([])
287
  password = gr.State("")
288
+ user = gr.State("unknown")
289
  model = gr.State("gpt-4o-mini")
290
  q = gr.State([])
291
  qsave = gr.State([])
 
376
  fp.write(response.content)
377
  return [fname, q]
378
 
379
def gen_output_audio(q, user):
    """Pop the next queued speech file for playback; clean up when done.

    Args:
        q: list of generated .wav file paths queued for autoplay.
        user: user name forwarded to final_clean_up once the queue drains.

    Returns:
        ``[fname, q]`` with the next file and the remaining queue, or
        ``[None, gr.Audio(sources=None)]`` after deleting the user's
        temporary speech files when the queue is empty.
    """
    # Explicit empty-queue guard instead of a bare except around pop():
    # a bare except would also hide genuine errors (e.g. wrong q type).
    if not q:
        final_clean_up(user)
        return [None, gr.Audio(sources=None)]
    fname = q.pop(0)
    return [fname, q]
 
 
415
  audio_widget.pause_recording(fn=pause_message, outputs=[prompt_window])
416
  reset_button.add(audio_widget)
417
  audio_out = gr.Audio(autoplay=True, visible=False)
418
+ audio_out.stop(fn=gen_output_audio, inputs=[q, user], outputs = [audio_out, q])
419
  speak_output.click(fn=initial_audio_output, inputs=[output_window, user_window], outputs=[audio_out, q])
420
  output_window.change(fn=set_speak_button, inputs=output_window,outputs=speak_output)
421
+ # demo.unload(final_clean_up(user))
422
  demo.launch(share=True)