yogesh69 committed on
Commit ed78e95 · verified · 1 Parent(s): 089a548

Update app.py

Files changed (1)
  1. app.py +24 -17
app.py CHANGED
@@ -278,7 +278,7 @@ def upload_file(file_obj):
     return list_file_path
 
 
-import gradio as gr
+
 
 def demo():
     with gr.Blocks(theme="base") as demo:
@@ -287,15 +287,14 @@ def demo():
         collection_name = gr.State()
 
         gr.Markdown(
-            """<center><h2>PDF-based Chatbot</h2></center>
-            <h3>Ask any questions about your PDF documents</h3>"""
-        )
+            """<center><h2>BookMyDarshan: Your Personalized Spiritual Assistant</h2></center>
+            <h3>Explore Sacred Texts and Enhance Your Spiritual Journey</h3>""")
+
         gr.Markdown(
-            """<b>Note:</b> This AI assistant uses LangChain and open-source LLMs for retrieval-augmented generation (RAG) from your PDF documents.
-            The UI shows multiple steps to help you understand the RAG workflow. This chatbot considers past questions when generating answers (via conversational memory)
-            and includes document references for clarity.<br>
-            <br><b>Warning:</b> This demo uses free CPU Basic hardware from Hugging Face, so some steps may take time."""
-        )
+            """<b>About BookMyDarshan.in:</b> We are a Hyderabad-based startup dedicated to providing pilgrims with exceptional temple darshan experiences.
+            Our platform offers a comprehensive suite of spiritual and religious services, customized to meet your devotional needs.<br><br>
+            <b>Note:</b> This spiritual assistant uses state-of-the-art AI to help you explore and understand your uploaded spiritual documents.
+            With a blend of technology and tradition, this tool assists in connecting you more deeply with your faith.<br>""")
 
         with gr.Tab("Step 1: Upload PDF"):
             document = gr.Files(label="Upload your PDF documents", file_count="multiple", file_types=["pdf"], interactive=True)
@@ -308,14 +307,22 @@ def demo():
             db_progress = gr.Textbox(label="Vector Database Initialization Status", value="None", interactive=False)
             generate_db_btn = gr.Button("Generate Vector Database")
 
-        with gr.Tab("Step 3: Initialize QA Chain"):
-            llm_btn = gr.Radio(["LLM Model 1", "LLM Model 2"], label="Select LLM Model", value="LLM Model 1", info="Choose the LLM model")
-            with gr.Accordion("Advanced Options: LLM Model", open=False):
-                slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Adjust the model's creativity level")
-                slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Set the maximum number of tokens")
-                slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Top-K Samples", info="Select the number of top-k samples")
-            llm_progress = gr.Textbox(label="QA Chain Initialization Status", value="None", interactive=False)
-            qachain_btn = gr.Button("Initialize QA Chain")
+        with gr.Tab("Step 3 - Initialize QA chain"):
+            with gr.Row():
+                llm_btn = gr.Radio(list_llm_simple, \
+                    label="LLM models", value = list_llm_simple[0], type="index", info="Choose your LLM model")
+            with gr.Accordion("Advanced options - LLM model", open=False):
+                with gr.Row():
+                    slider_temperature = gr.Slider(minimum = 0.01, maximum = 1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
+                with gr.Row():
+                    slider_maxtokens = gr.Slider(minimum = 224, maximum = 4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
+                with gr.Row():
+                    slider_topk = gr.Slider(minimum = 1, maximum = 10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
+            with gr.Row():
+                llm_progress = gr.Textbox(value="None",label="QA chain initialization")
+            with gr.Row():
+                qachain_btn = gr.Button("Initialize Question Answering chain")
+
 
         with gr.Tab("Step 4: Chatbot"):
             chatbot = gr.Chatbot(label="Chat with your PDF", height=300)
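
For readers following the change: in the new "Step 3 - Initialize QA chain" tab, the Radio is built with type="index", so its value is an integer position into list_llm_simple (defined earlier in app.py, outside the hunks shown above), and the three sliders carry the generation parameters. The click wiring for qachain_btn is not part of this diff; the snippet below is a minimal, self-contained sketch of how such a tab is typically bound to a handler. The placeholder model names and the initialize_qa_chain function are illustrative assumptions, not code from this repository.

import gradio as gr

# Assumed stand-in; the real list_llm_simple is defined earlier in app.py.
list_llm_simple = ["Model A", "Model B"]


def initialize_qa_chain(llm_index, temperature, max_tokens, top_k):
    # Hypothetical handler: because the Radio uses type="index", llm_index is
    # an integer position into list_llm_simple; the sliders supply the
    # generation parameters.
    chosen = list_llm_simple[llm_index]
    return f"Initialized {chosen} (temperature={temperature}, max_tokens={max_tokens}, top_k={top_k})"


with gr.Blocks(theme="base") as demo:
    with gr.Tab("Step 3 - Initialize QA chain"):
        with gr.Row():
            llm_btn = gr.Radio(list_llm_simple, label="LLM models",
                               value=list_llm_simple[0], type="index",
                               info="Choose your LLM model")
        with gr.Accordion("Advanced options - LLM model", open=False):
            with gr.Row():
                slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.7,
                                               step=0.1, label="Temperature", interactive=True)
            with gr.Row():
                slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024,
                                             step=32, label="Max Tokens", interactive=True)
            with gr.Row():
                slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1,
                                        label="top-k samples", interactive=True)
        with gr.Row():
            llm_progress = gr.Textbox(value="None", label="QA chain initialization")
        with gr.Row():
            qachain_btn = gr.Button("Initialize Question Answering chain")

    # Bind the button: inputs arrive in the order listed; the return value
    # updates the status textbox.
    qachain_btn.click(
        initialize_qa_chain,
        inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk],
        outputs=llm_progress,
    )

if __name__ == "__main__":
    demo.launch()

In app.py itself the handler presumably also receives the vector-database state created in Step 2 and returns the QA chain consumed by the Step 4 chatbot; that wiring lies outside the hunks shown in this commit.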