yogesh69 committed on
Commit
ca0df93
·
verified ·
1 Parent(s): 40cb125

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -57
app.py CHANGED
@@ -284,53 +284,53 @@ def demo():
284
  qa_chain = gr.State()
285
  collection_name = gr.State()
286
 
 
287
  gr.Markdown(
288
- """<center><h2>PDF-based chatbot</center></h2>
289
- <h3>Ask any questions about your PDF documents</h3>""")
 
290
  gr.Markdown(
291
- """<b>Note:</b> This AI assistant, using Langchain and open-source LLMs, performs retrieval-augmented generation (RAG) from your PDF documents. \
292
- The user interface explicitely shows multiple steps to help understand the RAG workflow.
293
- This chatbot takes past questions into account when generating answers (via conversational memory), and includes document references for clarity purposes.<br>
294
- <br><b>Warning:</b> This space uses the free CPU Basic hardware from Hugging Face. Some steps and LLM models used below (free inference endpoints) can take some time to generate a reply.
295
- """)
296
 
297
- with gr.Tab("Step 1 - Upload PDF"):
298
  with gr.Row():
299
- document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
300
- # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
301
-
302
- with gr.Tab("Step 2 - Process document"):
303
  with gr.Row():
304
- db_btn = gr.Radio(["ChromaDB"], label="Vector database type", value = "ChromaDB", type="index", info="Choose your vector database")
305
  with gr.Accordion("Advanced options - Document text splitter", open=False):
306
  with gr.Row():
307
- slider_chunk_size = gr.Slider(minimum = 100, maximum = 1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
308
  with gr.Row():
309
- slider_chunk_overlap = gr.Slider(minimum = 10, maximum = 200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
310
  with gr.Row():
311
- db_progress = gr.Textbox(label="Vector database initialization", value="None")
312
  with gr.Row():
313
- db_btn = gr.Button("Generate vector database")
314
-
315
- with gr.Tab("Step 3 - Initialize QA chain"):
316
  with gr.Row():
317
  llm_btn = gr.Radio(list_llm_simple, \
318
- label="LLM models", value = list_llm_simple[0], type="index", info="Choose your LLM model")
319
- with gr.Accordion("Advanced options - LLM model", open=False):
320
  with gr.Row():
321
- slider_temperature = gr.Slider(minimum = 0.01, maximum = 1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
322
  with gr.Row():
323
- slider_maxtokens = gr.Slider(minimum = 224, maximum = 4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
324
  with gr.Row():
325
- slider_topk = gr.Slider(minimum = 1, maximum = 10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
326
  with gr.Row():
327
- llm_progress = gr.Textbox(value="None",label="QA chain initialization")
328
  with gr.Row():
329
- qachain_btn = gr.Button("Initialize Question Answering chain")
330
 
331
- with gr.Tab("Step 4 - Chatbot"):
332
  chatbot = gr.Chatbot(height=300)
333
- with gr.Accordion("Advanced - Document references", open=False):
334
  with gr.Row():
335
  doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
336
  source1_page = gr.Number(label="Page", scale=1)
@@ -341,38 +341,14 @@ def demo():
341
  doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
342
  source3_page = gr.Number(label="Page", scale=1)
343
  with gr.Row():
344
- msg = gr.Textbox(placeholder="Type message (e.g. 'What is this document about?')", container=True)
345
  with gr.Row():
346
- submit_btn = gr.Button("Submit message")
347
- clear_btn = gr.ClearButton([msg, chatbot], value="Clear conversation")
348
 
349
- # Preprocessing events
350
- #upload_btn.upload(upload_file, inputs=[upload_btn], outputs=[document])
351
- db_btn.click(initialize_database, \
352
- inputs=[document, slider_chunk_size, slider_chunk_overlap], \
353
- outputs=[vector_db, collection_name, db_progress])
354
- qachain_btn.click(initialize_LLM, \
355
- inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db], \
356
- outputs=[qa_chain, llm_progress]).then(lambda:[None,"",0,"",0,"",0], \
357
- inputs=None, \
358
- outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
359
- queue=False)
360
-
361
- # Chatbot events
362
- msg.submit(conversation, \
363
- inputs=[qa_chain, msg, chatbot], \
364
- outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
365
- queue=False)
366
- submit_btn.click(conversation, \
367
- inputs=[qa_chain, msg, chatbot], \
368
- outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
369
- queue=False)
370
- clear_btn.click(lambda:[None,"",0,"",0,"",0], \
371
- inputs=None, \
372
- outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
373
- queue=False)
374
  demo.queue().launch(debug=True)
375
 
376
-
377
  if __name__ == "__main__":
378
  demo()
 
284
  qa_chain = gr.State()
285
  collection_name = gr.State()
286
 
287
+ # Update the introduction with BookMyDarshan branding
288
  gr.Markdown(
289
+ """<center><h2>BookMyDarshan: Your Personalized Spiritual Assistant</h2></center>
290
+ <h3>Explore Sacred Texts and Enhance Your Spiritual Journey</h3>""")
291
+
292
  gr.Markdown(
293
+ """<b>About BookMyDarshan.in:</b> We are a Hyderabad-based startup dedicated to providing pilgrims with exceptional temple darshan experiences.
294
+ Our platform offers a comprehensive suite of spiritual and religious services, customized to meet your devotional needs.<br><br>
295
+ <b>Note:</b> This spiritual assistant uses state-of-the-art AI to help you explore and understand your uploaded spiritual documents.
296
+ With a blend of technology and tradition, this tool assists in connecting you more deeply with your faith.<br>""")
 
297
 
298
+ with gr.Tab("Step 1 - Upload Your Spiritual Texts"):
299
  with gr.Row():
300
+ document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your spiritual PDFs (e.g., scriptures, pilgrim guides)")
301
+
302
+ with gr.Tab("Step 2 - Process Document"):
 
303
  with gr.Row():
304
+ db_btn = gr.Radio(["ChromaDB"], label="Vector database type", value="ChromaDB", type="index", info="Choose your vector database")
305
  with gr.Accordion("Advanced options - Document text splitter", open=False):
306
  with gr.Row():
307
+ slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size", info="Size of text chunks to process", interactive=True)
308
  with gr.Row():
309
+ slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap", info="Overlap between text chunks", interactive=True)
310
  with gr.Row():
311
+ db_progress = gr.Textbox(label="Database initialization status", value="None")
312
  with gr.Row():
313
+ db_btn = gr.Button("Generate Spiritual Knowledge Database")
314
+
315
+ with gr.Tab("Step 3 - Set Up Your Assistant"):
316
  with gr.Row():
317
  llm_btn = gr.Radio(list_llm_simple, \
318
+ label="LLM models", value=list_llm_simple[0], type="index", info="Choose an AI model to assist your queries")
319
+ with gr.Accordion("Advanced options - AI model settings", open=False):
320
  with gr.Row():
321
+ slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.7, step=0.1, label="Response Creativity", info="Controls the creativity of the model", interactive=True)
322
  with gr.Row():
323
+ slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Maximum response length", interactive=True)
324
  with gr.Row():
325
+ slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Top-k Responses", info="Number of alternative answers generated", interactive=True)
326
  with gr.Row():
327
+ llm_progress = gr.Textbox(value="None", label="Assistant Initialization Status")
328
  with gr.Row():
329
+ qachain_btn = gr.Button("Initialize Your Spiritual Assistant")
330
 
331
+ with gr.Tab("Step 4 - Engage with the Assistant"):
332
  chatbot = gr.Chatbot(height=300)
333
+ with gr.Accordion("Advanced - Document References", open=False):
334
  with gr.Row():
335
  doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
336
  source1_page = gr.Number(label="Page", scale=1)
 
341
  doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
342
  source3_page = gr.Number(label="Page", scale=1)
343
  with gr.Row():
344
+ msg = gr.Textbox(placeholder="Type your query (e.g., 'What is this scripture about?')", container=True)
345
  with gr.Row():
346
+ submit_btn = gr.Button("Submit Query")
347
+ clear_btn = gr.ClearButton([msg, chatbot], value="Clear Conversation")
348
 
349
+ # (Event handling and logic connections remain the same)
350
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
351
  demo.queue().launch(debug=True)
352
 
 
353
  if __name__ == "__main__":
354
  demo()