vishwask committed
Commit b770e0b · verified · 1 Parent(s): fb9a319

Update app.py

Files changed (1):
  1. app.py +40 -40
app.py CHANGED
@@ -246,55 +246,55 @@ def demo():
         When generating answers, it takes past questions into account (via conversational memory), and includes document references for clarity purposes.</i>
         <br><b>Warning:</b> This space uses the free CPU Basic hardware from Hugging Face. Some steps and LLM models used below (free inference endpoints) can take some time to generate an output.<br>
         """)
-        with gr.Tab("Step 1 - Document pre-processing"):
-            with gr.Row():
-                document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
-                # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
-            with gr.Row():
-                db_btn = gr.Radio(["ChromaDB"], label="Vector database type", value = "ChromaDB", type="index", info="Choose your vector database")
-            with gr.Accordion("Advanced options - Document text splitter", open=False):
-                with gr.Row():
-                    slider_chunk_size = gr.Slider(minimum = 100, maximum = 1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
-                with gr.Row():
-                    slider_chunk_overlap = gr.Slider(minimum = 10, maximum = 200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
-            with gr.Row():
-                db_progress = gr.Textbox(label="Vector database initialization", value="None")
-            with gr.Row():
-                db_btn = gr.Button("Generate vector database...")

-        with gr.Tab("Step 2 - QA chain initialization"):
-            with gr.Row():
-                llm_btn = gr.Radio(list_llm_simple, \
-                    label="LLM models", value = list_llm_simple[0], type="index", info="Choose your LLM model")
-            with gr.Accordion("Advanced options - LLM model", open=False):
-                with gr.Row():
-                    slider_temperature = gr.Slider(minimum = 0.0, maximum = 1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
-                with gr.Row():
-                    slider_maxtokens = gr.Slider(minimum = 224, maximum = 4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
-                with gr.Row():
-                    slider_topk = gr.Slider(minimum = 1, maximum = 10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
-            with gr.Row():
-                llm_progress = gr.Textbox(value="None",label="QA chain initialization")
-            with gr.Row():
-                qachain_btn = gr.Button("Initialize question-answering chain...")

-        with gr.Tab("Step 3 - Conversation with chatbot"):
-            chatbot = gr.Chatbot(height=300)
-            with gr.Accordion("Advanced - Document references", open=False):
-                with gr.Row():
-                    doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
-                    source1_page = gr.Number(label="Page", scale=1)
-                with gr.Row():
-                    doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
-                    source2_page = gr.Number(label="Page", scale=1)
-                with gr.Row():
-                    doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
-                    source3_page = gr.Number(label="Page", scale=1)
-            with gr.Row():
-                msg = gr.Textbox(placeholder="Type message", container=True)
-            with gr.Row():
-                submit_btn = gr.Button("Submit")
-                clear_btn = gr.ClearButton([msg, chatbot])
+
+        with gr.Row():
+            document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
+            # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
+        with gr.Row():
+            db_btn = gr.Radio(["ChromaDB"], label="Vector database type", value = "ChromaDB", type="index", info="Choose your vector database")
+        with gr.Accordion("Advanced options - Document text splitter", open=False):
+            with gr.Row():
+                slider_chunk_size = gr.Slider(minimum = 100, maximum = 1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
+            with gr.Row():
+                slider_chunk_overlap = gr.Slider(minimum = 10, maximum = 200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
+        with gr.Row():
+            db_progress = gr.Textbox(label="Vector database initialization", value="None")
+        with gr.Row():
+            db_btn = gr.Button("Generate vector database...")
+
+        with gr.Row():
+            llm_btn = gr.Radio(list_llm_simple, \
+                label="LLM models", value = list_llm_simple[0], type="index", info="Choose your LLM model")
+        with gr.Accordion("Advanced options - LLM model", open=False):
+            with gr.Row():
+                slider_temperature = gr.Slider(minimum = 0.0, maximum = 1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
+            with gr.Row():
+                slider_maxtokens = gr.Slider(minimum = 224, maximum = 4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
+            with gr.Row():
+                slider_topk = gr.Slider(minimum = 1, maximum = 10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
+        with gr.Row():
+            llm_progress = gr.Textbox(value="None",label="QA chain initialization")
+        with gr.Row():
+            qachain_btn = gr.Button("Initialize question-answering chain...")
+
+        chatbot = gr.Chatbot(height=300)
+        with gr.Accordion("Advanced - Document references", open=False):
+            with gr.Row():
+                doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
+                source1_page = gr.Number(label="Page", scale=1)
+            with gr.Row():
+                doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
+                source2_page = gr.Number(label="Page", scale=1)
+            with gr.Row():
+                doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
+                source3_page = gr.Number(label="Page", scale=1)
+        with gr.Row():
+            msg = gr.Textbox(placeholder="Type message", container=True)
+        with gr.Row():
+            submit_btn = gr.Button("Submit")
+            clear_btn = gr.ClearButton([msg, chatbot])

         # Preprocessing events
         #upload_btn.upload(upload_file, inputs=[upload_btn], outputs=[document])
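
For orientation: the hunk replaces the three gr.Tab containers with a single flat column of rows inside the same gr.Blocks layout, while the individual components (PDF upload, vector-database options, LLM options, chatbot and reference boxes) are carried over unchanged. The following is a minimal, runnable sketch of that flattened layout, not the actual app.py: the callbacks (fake_init_db, fake_init_qa, fake_chat), the model list, and the event wiring are placeholders standing in for the real functions connected in the "# Preprocessing events" block that follows this hunk. Note that in both versions of app.py the "Vector database type" radio and the "Generate vector database..." button are each assigned to db_btn; the sketch keeps only the button under that name.

    # Minimal sketch of the flattened (tab-free) layout this commit moves to.
    # Handlers and the model list are placeholders, not the real app.py logic.
    import gradio as gr

    list_llm_simple = ["Model A", "Model B"]  # placeholder for the real LLM list

    def fake_init_db(files, chunk_size, chunk_overlap):
        # Stand-in for the real vector-database initialization callback.
        n = len(files) if files else 0
        return f"Would index {n} file(s) (chunk size {chunk_size}, overlap {chunk_overlap})"

    def fake_init_qa(llm_index, temperature, max_tokens, top_k):
        # Stand-in for the real QA-chain initialization callback.
        return f"Would initialize {list_llm_simple[llm_index]} (T={temperature}, max_tokens={max_tokens}, top_k={top_k})"

    def fake_chat(message, history):
        # Stand-in for the real conversation callback (tuple-style history, as in
        # the original app; newer Gradio versions prefer gr.Chatbot(type="messages")).
        history = history + [(message, "(placeholder answer)")]
        return "", history

    with gr.Blocks() as demo:
        # Step 1: document upload and vector-database options (flat rows, no gr.Tab)
        with gr.Row():
            document = gr.Files(file_count="multiple", file_types=["pdf"],
                                label="Upload your PDF documents (single or multiple)")
        with gr.Accordion("Advanced options - Document text splitter", open=False):
            slider_chunk_size = gr.Slider(100, 1000, value=600, step=20, label="Chunk size")
            slider_chunk_overlap = gr.Slider(10, 200, value=40, step=10, label="Chunk overlap")
        db_progress = gr.Textbox(label="Vector database initialization", value="None")
        db_btn = gr.Button("Generate vector database...")

        # Step 2: LLM choice and sampling parameters
        llm_btn = gr.Radio(list_llm_simple, value=list_llm_simple[0], type="index", label="LLM models")
        with gr.Accordion("Advanced options - LLM model", open=False):
            slider_temperature = gr.Slider(0.0, 1.0, value=0.7, step=0.1, label="Temperature")
            slider_maxtokens = gr.Slider(224, 4096, value=1024, step=32, label="Max Tokens")
            slider_topk = gr.Slider(1, 10, value=3, step=1, label="top-k samples")
        llm_progress = gr.Textbox(label="QA chain initialization", value="None")
        qachain_btn = gr.Button("Initialize question-answering chain...")

        # Step 3: chatbot, message box, submit/clear buttons
        chatbot = gr.Chatbot(height=300)
        msg = gr.Textbox(placeholder="Type message")
        with gr.Row():
            submit_btn = gr.Button("Submit")
            clear_btn = gr.ClearButton([msg, chatbot])

        # Event wiring; the real app does this in its "# Preprocessing events" section.
        db_btn.click(fake_init_db,
                     inputs=[document, slider_chunk_size, slider_chunk_overlap],
                     outputs=[db_progress])
        qachain_btn.click(fake_init_qa,
                          inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk],
                          outputs=[llm_progress])
        submit_btn.click(fake_chat, inputs=[msg, chatbot], outputs=[msg, chatbot])
        msg.submit(fake_chat, inputs=[msg, chatbot], outputs=[msg, chatbot])

    if __name__ == "__main__":
        demo.launch()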