Shreyas094 committed on
Commit
7038b6e
·
verified ·
1 Parent(s): 5593d08

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -60
app.py CHANGED
@@ -88,7 +88,7 @@ def update_vectors(files, parser):
88
 
89
  return f"Vector store updated successfully. Processed {total_chunks} chunks from {len(files)} files using {parser}."
90
 
91
- def generate_chunked_response(prompt, model, max_tokens=8000, num_calls=3, temperature=0.2, should_stop=False):
92
  print(f"Starting generate_chunked_response with {num_calls} calls")
93
  full_response = ""
94
  messages = [{"role": "user", "content": prompt}]
@@ -188,14 +188,14 @@ class CitingSources(BaseModel):
188
  ...,
189
  description="List of sources to cite. Should be an URL of the source."
190
  )
191
-
192
- def chatbot_interface(message, history, use_web_search, model, temperature, num_calls, is_interrupted=False, partial_response=""):
193
  if not message.strip():
194
  return "", history
195
 
196
  history = history + [(message, "")]
 
197
  try:
198
- for response in respond(message, history, model, temperature, num_calls, use_web_search, is_interrupted, partial_response):
199
  history[-1] = (message, response)
200
  yield history
201
  except gr.CancelledError:
@@ -214,7 +214,7 @@ def retry_last_response(history, use_web_search, model, temperature, num_calls):
214
 
215
  return chatbot_interface(last_user_msg, history, use_web_search, model, temperature, num_calls)
216
 
217
- def respond(message, history, model, temperature, num_calls, use_web_search, is_interrupted=False, partial_response=""):
218
  logging.info(f"User Query: {message}")
219
  logging.info(f"Model Used: {model}")
220
  logging.info(f"Search Type: {'Web Search' if use_web_search else 'PDF Search'}")
@@ -223,11 +223,9 @@ def respond(message, history, model, temperature, num_calls, use_web_search, is_
223
  if use_web_search:
224
  for main_content, sources in get_response_with_search(message, model, num_calls=num_calls, temperature=temperature):
225
  response = f"{main_content}\n\n{sources}"
226
- if is_interrupted:
227
- partial_response += response
228
- yield partial_response
229
- else:
230
- yield response
231
  else:
232
  embed = get_embeddings()
233
  if os.path.exists("faiss_database"):
@@ -237,21 +235,19 @@ def respond(message, history, model, temperature, num_calls, use_web_search, is_
237
  context_str = "\n".join([doc.page_content for doc in relevant_docs])
238
  else:
239
  context_str = "No documents available."
240
-
241
  if model == "@cf/meta/llama-3.1-8b-instruct":
242
- for response in get_response_from_cloudflare(prompt="", context=context_str, query=message, num_calls=num_calls, temperature=temperature, search_type="pdf"):
243
- if is_interrupted:
244
- partial_response += response
245
- yield partial_response
246
- else:
247
- yield response
248
  else:
249
- for response in get_response_from_pdf(message, model, num_calls=num_calls, temperature=temperature):
250
- if is_interrupted:
251
- partial_response += response
252
- yield partial_response
253
- else:
254
- yield response
255
  except Exception as e:
256
  logging.error(f"Error with {model}: {str(e)}")
257
  if "microsoft/Phi-3-mini-4k-instruct" in model:
@@ -260,8 +256,6 @@ def respond(message, history, model, temperature, num_calls, use_web_search, is_
260
  yield from respond(message, history, fallback_model, temperature, num_calls, use_web_search)
261
  else:
262
  yield f"An error occurred with the {model} model: {str(e)}. Please try again or select a different model."
263
- finally:
264
- pass
265
 
266
  logging.basicConfig(level=logging.DEBUG)
267
 
@@ -401,14 +395,12 @@ css = """
401
  use_web_search = gr.Checkbox(label="Use Web Search", value=False)
402
 
403
  demo = gr.ChatInterface(
404
- chatbot_interface,
405
  additional_inputs=[
406
  gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[0]),
407
  gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
408
  gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
409
- use_web_search,
410
- gr.Checkbox(value=False, label="Is Interrupted"), # Flag to indicate interruption
411
- gr.Textbox(label="Partial Response"), # Store partial response
412
  ],
413
  title="AI-powered Web Search and PDF Chat Assistant",
414
  description="Chat with your PDFs or use web search to answer questions.",
@@ -430,41 +422,12 @@ demo = gr.ChatInterface(
430
  color_accent_soft_dark="transparent",
431
  code_background_fill_dark="#140b0b"
432
  ),
 
433
  css=css,
434
  examples=[
435
  ["Tell me about the contents of the uploaded PDFs."],
436
  ["What are the main topics discussed in the documents?"],
437
- ["Can you summarize the key points from the PDFs?"],
438
- ],
439
- cache_examples=False,
440
- analytics_enabled=False,
441
- )
442
-
443
- continue_button = gr.Button("Continue Generation")
444
- continue_output = gr.Textbox(label="Response")
445
-
446
- message_input = gr.Textbox(label="Message")
447
-
448
- history = gr.Textbox(label="History", lines=10, visible=False)
449
-
450
- continue_button.click(
451
- chatbot_interface,
452
- inputs=[
453
- message_input,
454
- history,
455
- use_web_search,
456
- model_dropdown,
457
- temperature_slider,
458
- num_calls_slider,
459
- gr.Boolean(value=True, label="Is Interrupted"), # Set is_interrupted to True
460
- gr.Textbox(label="Partial Response"),
461
- ],
462
- outputs=continue_output, # Add outputs parameter
463
- css=css,
464
- examples=[
465
- ["Tell me about the contents of the uploaded PDFs."],
466
- ["What are the main topics discussed in the documents?"],
467
- ["Can you summarize the key points from the PDFs?"],
468
  ],
469
  cache_examples=False,
470
  analytics_enabled=False,
 
88
 
89
  return f"Vector store updated successfully. Processed {total_chunks} chunks from {len(files)} files using {parser}."
90
 
91
+ def generate_chunked_response(prompt, model, max_tokens=1000, num_calls=3, temperature=0.2, should_stop=False):
92
  print(f"Starting generate_chunked_response with {num_calls} calls")
93
  full_response = ""
94
  messages = [{"role": "user", "content": prompt}]
 
188
  ...,
189
  description="List of sources to cite. Should be an URL of the source."
190
  )
191
+ def chatbot_interface(message, history, use_web_search, model, temperature, num_calls):
 
192
  if not message.strip():
193
  return "", history
194
 
195
  history = history + [(message, "")]
196
+
197
  try:
198
+ for response in respond(message, history, model, temperature, num_calls, use_web_search):
199
  history[-1] = (message, response)
200
  yield history
201
  except gr.CancelledError:
 
214
 
215
  return chatbot_interface(last_user_msg, history, use_web_search, model, temperature, num_calls)
216
 
217
+ def respond(message, history, model, temperature, num_calls, use_web_search):
218
  logging.info(f"User Query: {message}")
219
  logging.info(f"Model Used: {model}")
220
  logging.info(f"Search Type: {'Web Search' if use_web_search else 'PDF Search'}")
 
223
  if use_web_search:
224
  for main_content, sources in get_response_with_search(message, model, num_calls=num_calls, temperature=temperature):
225
  response = f"{main_content}\n\n{sources}"
226
+ first_line = response.split('\n')[0] if response else ''
227
+ logging.info(f"Generated Response (first line): {first_line}")
228
+ yield response
 
 
229
  else:
230
  embed = get_embeddings()
231
  if os.path.exists("faiss_database"):
 
235
  context_str = "\n".join([doc.page_content for doc in relevant_docs])
236
  else:
237
  context_str = "No documents available."
238
+
239
  if model == "@cf/meta/llama-3.1-8b-instruct":
240
+ # Use Cloudflare API
241
+ for partial_response in get_response_from_cloudflare(prompt="", context=context_str, query=message, num_calls=num_calls, temperature=temperature, search_type="pdf"):
242
+ first_line = partial_response.split('\n')[0] if partial_response else ''
243
+ logging.info(f"Generated Response (first line): {first_line}")
244
+ yield partial_response
 
245
  else:
246
+ # Use Hugging Face API
247
+ for partial_response in get_response_from_pdf(message, model, num_calls=num_calls, temperature=temperature):
248
+ first_line = partial_response.split('\n')[0] if partial_response else ''
249
+ logging.info(f"Generated Response (first line): {first_line}")
250
+ yield partial_response
 
251
  except Exception as e:
252
  logging.error(f"Error with {model}: {str(e)}")
253
  if "microsoft/Phi-3-mini-4k-instruct" in model:
 
256
  yield from respond(message, history, fallback_model, temperature, num_calls, use_web_search)
257
  else:
258
  yield f"An error occurred with the {model} model: {str(e)}. Please try again or select a different model."
 
 
259
 
260
  logging.basicConfig(level=logging.DEBUG)
261
 
 
395
  use_web_search = gr.Checkbox(label="Use Web Search", value=False)
396
 
397
  demo = gr.ChatInterface(
398
+ respond,
399
  additional_inputs=[
400
  gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[0]),
401
  gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
402
  gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
403
+ use_web_search # Add this line to include the checkbox
 
 
404
  ],
405
  title="AI-powered Web Search and PDF Chat Assistant",
406
  description="Chat with your PDFs or use web search to answer questions.",
 
422
  color_accent_soft_dark="transparent",
423
  code_background_fill_dark="#140b0b"
424
  ),
425
+
426
  css=css,
427
  examples=[
428
  ["Tell me about the contents of the uploaded PDFs."],
429
  ["What are the main topics discussed in the documents?"],
430
+ ["Can you summarize the key points from the PDFs?"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
431
  ],
432
  cache_examples=False,
433
  analytics_enabled=False,