awacke1 commited on
Commit
67957ff
·
1 Parent(s): b002e6c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -12
app.py CHANGED
@@ -18,7 +18,6 @@ from xml.etree import ElementTree as ET
18
  from bs4 import BeautifulSoup
19
  from collections import deque
20
  from audio_recorder_streamlit import audio_recorder
21
-
22
  from dotenv import load_dotenv
23
  from PyPDF2 import PdfReader
24
  from langchain.text_splitter import CharacterTextSplitter
@@ -31,6 +30,7 @@ from templates import css, bot_template, user_template
31
 
32
 
33
 
 
34
  def generate_filename(prompt, file_type):
35
  central = pytz.timezone('US/Central')
36
  safe_date_time = datetime.now(central).strftime("%m%d_%H%M") # Date and time DD-HHMM
@@ -70,7 +70,9 @@ def save_and_play_audio(audio_recorder):
70
  return filename
71
  return None
72
 
73
- def create_file(filename, prompt, response):
 
 
74
  if filename.endswith(".txt"):
75
  with open(filename, 'w') as file:
76
  file.write(f"{prompt}\n{response}")
@@ -294,21 +296,21 @@ def divide_prompt(prompt, max_length):
294
  return chunks
295
 
296
  def main():
297
- # Sidebar and global
298
  openai.api_key = os.getenv('OPENAI_API_KEY')
299
- st.set_page_config(page_title="GPT Streamlit Document Reasoner",layout="wide")
300
 
301
  # File type for output, model choice
302
- menu = ["txt", "htm", "xlsx", "csv", "md", "py"] #619
303
  choice = st.sidebar.selectbox("Output File Type:", menu)
304
  model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
305
-
306
  # Audio, transcribe, GPT:
307
  filename = save_and_play_audio(audio_recorder)
308
  if filename is not None:
309
  transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
310
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
311
- filename=None # since transcription is finished next time just use the saved transcript
312
 
313
  # prompt interfaces
314
  user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
@@ -318,9 +320,15 @@ def main():
318
  with collength:
319
  max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
320
  with colupload:
321
- uploaded_file = st.file_uploader("Add a file for context:", type=["pdf", "xml", "json", "xlsx","csv","html", "htm", "md", "txt"])
 
 
 
322
 
 
 
323
  # Document section chat
 
324
  document_sections = deque()
325
  document_responses = {}
326
  if uploaded_file is not None:
@@ -343,7 +351,7 @@ def main():
343
  st.write(response)
344
  document_responses[i] = response
345
  filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
346
- create_file(filename, user_prompt, response)
347
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
348
 
349
  if st.button('💬 Chat'):
@@ -367,7 +375,7 @@ def main():
367
  st.write(response)
368
 
369
  filename = generate_filename(user_prompt, choice)
370
- create_file(filename, user_prompt, response)
371
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
372
 
373
  all_files = glob.glob("*.*")
@@ -411,7 +419,7 @@ def main():
411
  st.write('Reasoning with your inputs...')
412
  response = chat_with_model(user_prompt, file_contents, model_choice)
413
  filename = generate_filename(file_contents, choice)
414
- create_file(filename, file_contents, response)
415
 
416
  st.experimental_rerun()
417
  #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
@@ -439,4 +447,11 @@ with st.sidebar:
439
  st.session_state.conversation = get_chain(vectorstore)
440
  st.markdown('# AI Search Index of Length:' + length + ' Created.') # add timing
441
  filename = generate_filename(raw, 'txt')
442
- create_file(filename, raw, '')
 
 
 
 
 
 
 
 
18
  from bs4 import BeautifulSoup
19
  from collections import deque
20
  from audio_recorder_streamlit import audio_recorder
 
21
  from dotenv import load_dotenv
22
  from PyPDF2 import PdfReader
23
  from langchain.text_splitter import CharacterTextSplitter
 
30
 
31
 
32
 
33
+
34
  def generate_filename(prompt, file_type):
35
  central = pytz.timezone('US/Central')
36
  safe_date_time = datetime.now(central).strftime("%m%d_%H%M") # Date and time DD-HHMM
 
70
  return filename
71
  return None
72
 
73
+ def create_file(filename, prompt, response, should_save=True):
74
+ if not should_save:
75
+ return
76
  if filename.endswith(".txt"):
77
  with open(filename, 'w') as file:
78
  file.write(f"{prompt}\n{response}")
 
296
  return chunks
297
 
298
  def main():
299
+ # Sidebar and global setup
300
  openai.api_key = os.getenv('OPENAI_API_KEY')
301
+ st.set_page_config(page_title="GPT Streamlit Document Reasoner", layout="wide")
302
 
303
  # File type for output, model choice
304
+ menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
305
  choice = st.sidebar.selectbox("Output File Type:", menu)
306
  model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
307
+
308
  # Audio, transcribe, GPT:
309
  filename = save_and_play_audio(audio_recorder)
310
  if filename is not None:
311
  transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
312
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
313
+ filename = None
314
 
315
  # prompt interfaces
316
  user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
 
320
  with collength:
321
  max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
322
  with colupload:
323
+ uploaded_file = st.file_uploader("Add a file for context:", type=["pdf", "xml", "json", "xlsx", "csv", "html", "htm", "md", "txt"])
324
+
325
+ should_save = st.sidebar.checkbox("💾 Save")
326
+
327
 
328
+
329
+
330
  # Document section chat
331
+
332
  document_sections = deque()
333
  document_responses = {}
334
  if uploaded_file is not None:
 
351
  st.write(response)
352
  document_responses[i] = response
353
  filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
354
+ create_file(filename, user_prompt, response, should_save)
355
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
356
 
357
  if st.button('💬 Chat'):
 
375
  st.write(response)
376
 
377
  filename = generate_filename(user_prompt, choice)
378
+ create_file(filename, user_prompt, response, should_save)
379
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
380
 
381
  all_files = glob.glob("*.*")
 
419
  st.write('Reasoning with your inputs...')
420
  response = chat_with_model(user_prompt, file_contents, model_choice)
421
  filename = generate_filename(file_contents, choice)
422
+ create_file(filename, user_prompt, response, should_save)
423
 
424
  st.experimental_rerun()
425
  #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
 
447
  st.session_state.conversation = get_chain(vectorstore)
448
  st.markdown('# AI Search Index of Length:' + length + ' Created.') # add timing
449
  filename = generate_filename(raw, 'txt')
450
+ create_file(filename, raw, '', should_save)
451
+ #create_file(filename, raw, '')
452
+
453
+ # Added "Delete All" button
454
+ if st.sidebar.button("🗑 Delete All"):
455
+ for file in all_files:
456
+ os.remove(file)
457
+ st.experimental_rerun()