Pijush2023 committed
Commit bb26c86 · verified · 1 Parent(s): 1949723

Update app.py

Files changed (1):
  1. app.py +21 -113

app.py CHANGED
@@ -402,87 +402,6 @@ Answer:
 """
 
 
-# import re
-
-# def clean_response(response_text):
-#     # Remove any metadata-like information and focus on the main content
-#     # Removes "Document(metadata=...)" and other similar patterns
-#     cleaned_response = re.sub(r'Document\(metadata=.*?\),?\s*', '', response_text, flags=re.DOTALL)
-#     cleaned_response = re.sub(r'page_content=".*?"\),?', '', cleaned_response, flags=re.DOTALL)
-#     cleaned_response = re.sub(r'\[.*?\]', '', cleaned_response, flags=re.DOTALL)  # Remove content in brackets
-#     cleaned_response = re.sub(r'\s+', ' ', cleaned_response).strip()
-#     # Remove any unwanted follow-up questions or unnecessary text
-#     cleaned_response = re.sub(r'Question:.*\nAnswer:', '', response_text, flags=re.DOTALL).strip()
-#     return cleaned_response
-
-
-# def generate_answer(message, choice, retrieval_mode, selected_model):
-#     logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
-
-#     try:
-#         if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
-#             response = fetch_google_hotels()
-#             return response, extract_addresses(response)
-
-#         if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
-#             response = fetch_yelp_restaurants()
-#             return response, extract_addresses(response)
-
-#         if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
-#             response = fetch_google_flights()
-#             return response, extract_addresses(response)
-
-#         if retrieval_mode == "VDB":
-#             if selected_model == chat_model:
-#                 retriever = gpt_retriever
-#                 prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
-#                 context = retriever.get_relevant_documents(message)
-#                 prompt = prompt_template.format(context=context, question=message)
-
-#                 qa_chain = RetrievalQA.from_chain_type(
-#                     llm=chat_model,
-#                     chain_type="stuff",
-#                     retriever=retriever,
-#                     chain_type_kwargs={"prompt": prompt_template}
-#                 )
-#                 response = qa_chain({"query": message})
-#                 return response['result'], extract_addresses(response['result'])
-
-#             elif selected_model == phi_pipe:
-#                 retriever = phi_retriever
-#                 context = retriever.get_relevant_documents(message)
-#                 prompt = phi_short_template.format(context=context, question=message)
-
-#                 logging.debug(f"Phi-3.5 Prompt: {prompt}")
-
-#                 response = selected_model(prompt, **{
-#                     "max_new_tokens": 128,  # Increased to handle longer responses
-#                     "return_full_text": False,
-#                     "temperature": 0.7,  # Adjusted to avoid cutting off
-#                     "do_sample": True,  # Allow sampling to increase response diversity
-#                 })
-
-#                 if response:
-#                     generated_text = response[0]['generated_text']
-#                     logging.debug(f"Phi-3.5 Response: {generated_text}")
-#                     cleaned_response = clean_response(generated_text)
-#                     return cleaned_response, extract_addresses(cleaned_response)
-#                 else:
-#                     logging.error("Phi-3.5 did not return any response.")
-#                     return "No response generated.", []
-
-#         elif retrieval_mode == "KGF":
-#             response = chain_neo4j.invoke({"question": message})
-#             return response, extract_addresses(response)
-#         else:
-#             return "Invalid retrieval mode selected.", []
-
-#     except Exception as e:
-#         logging.error(f"Error in generate_answer: {e}")
-#         return "Sorry, I encountered an error while processing your request.", []
-
-
-
 import re
 
 def clean_response(response_text):
@@ -492,9 +411,10 @@ def clean_response(response_text):
     cleaned_response = re.sub(r'page_content=".*?"\),?', '', cleaned_response, flags=re.DOTALL)
     cleaned_response = re.sub(r'\[.*?\]', '', cleaned_response, flags=re.DOTALL)  # Remove content in brackets
     cleaned_response = re.sub(r'\s+', ' ', cleaned_response).strip()
-    # Remove any unwanted follow-up questions or unnecessary text
-    cleaned_response = re.sub(r'Question:.*\nAnswer:', '', cleaned_response, flags=re.DOTALL).strip()
+    # Remove any unwanted follow-up questions or unnecessary text
+    cleaned_response = re.sub(r'Question:.*\nAnswer:', '', response_text, flags=re.DOTALL).strip()
     return cleaned_response
+
 
 def generate_answer(message, choice, retrieval_mode, selected_model):
     logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
@@ -535,36 +455,21 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
 
                 logging.debug(f"Phi-3.5 Prompt: {prompt}")
 
-                # Initialize variables for complete response generation
-                total_response = ""
-                stop_condition = False
-
-                while not stop_condition:
-                    response = selected_model(prompt, **{
-                        "max_new_tokens": 128,  # Generate 128 tokens at a time
-                        "return_full_text": False,
-                        "temperature": 0.7,  # Adjusted to avoid cutting off
-                        "do_sample": True,  # Allow sampling to increase response diversity
-                    })
-
-                    if response:
-                        generated_text = response[0]['generated_text']
-                        total_response += generated_text
-                        logging.debug(f"Phi-3.5 Partial Response: {generated_text}")
-
-                        # Check if the response seems complete or if we've hit the maximum length
-                        if len(generated_text.strip()) < 128:  # Assuming shorter output indicates completion
-                            stop_condition = True
-                        else:
-                            # Update the prompt with the new context to continue generating
-                            prompt = generated_text.strip()
-
-                    else:
-                        logging.error("Phi-3.5 did not return any response.")
-                        return "No response generated.", []
-
-                cleaned_response = clean_response(total_response)
-                return cleaned_response, extract_addresses(cleaned_response)
+                response = selected_model(prompt, **{
+                    "max_new_tokens": 128,  # Increased to handle longer responses
+                    "return_full_text": False,
+                    "temperature": 0.7,  # Adjusted to avoid cutting off
+                    "do_sample": True,  # Allow sampling to increase response diversity
+                })
+
+                if response:
+                    generated_text = response[0]['generated_text']
+                    logging.debug(f"Phi-3.5 Response: {generated_text}")
+                    cleaned_response = clean_response(generated_text)
+                    return cleaned_response, extract_addresses(cleaned_response)
+                else:
+                    logging.error("Phi-3.5 did not return any response.")
+                    return "No response generated.", []
 
         elif retrieval_mode == "KGF":
             response = chain_neo4j.invoke({"question": message})
@@ -582,6 +487,9 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
 
 
 
+
+
+
 def bot(history, choice, tts_choice, retrieval_mode, model_choice):
     if not history:
         return history
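A note on the new clean_response: the final re.sub is applied to response_text (the raw input) rather than to cleaned_response, so the three substitutions above it are discarded, and once \s+ has collapsed all newlines the Question:.*\nAnswer: pattern could not match anyway. Below is a minimal standalone sketch that instead chains every filter through one variable and runs the Q/A filter before the whitespace collapse; that ordering is an assumption of the sketch, not the committed code, and the sample string is invented.

import re

def clean_response(response_text):
    # Strip LangChain Document reprs and metadata noise from the raw model output.
    cleaned = re.sub(r'Document\(metadata=.*?\),?\s*', '', response_text, flags=re.DOTALL)
    cleaned = re.sub(r'page_content=".*?"\),?', '', cleaned, flags=re.DOTALL)
    cleaned = re.sub(r'\[.*?\]', '', cleaned, flags=re.DOTALL)  # drop bracketed content
    # Strip echoed Q/A scaffolding while literal newlines still exist; the
    # pattern requires \n, which the whitespace collapse below removes.
    cleaned = re.sub(r'Question:.*\nAnswer:', '', cleaned, flags=re.DOTALL)
    return re.sub(r'\s+', ' ', cleaned).strip()

# Invented raw output carrying retriever debris and an echoed Q/A scaffold:
raw = 'Document(metadata={"source": "kb"}), The Vulcan statue [1] is in Birmingham.\nQuestion: Where is it?\nAnswer:'
print(clean_response(raw))  # -> The Vulcan statue is in Birmingham.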
 
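For context on the simplified Phi-3.5 branch: the commit replaces the old chunked continuation loop with a single bounded call using the same generation kwargs. Here is a self-contained sketch of that call pattern against a Hugging Face text-generation pipeline; the model id and prompt are stand-ins for the app's phi_pipe and its retriever-built prompt, which are defined elsewhere in app.py.

from transformers import pipeline

# Stand-in pipeline; the app constructs phi_pipe elsewhere.
generator = pipeline("text-generation", model="microsoft/Phi-3.5-mini-instruct")

prompt = (
    "Context: The Vulcan statue overlooks Birmingham from Red Mountain.\n"
    "Question: Where is the Vulcan statue?\n"
    "Answer:"
)

# One bounded call, mirroring the committed kwargs: sampling on for variety,
# generation capped at 128 new tokens, prompt excluded from the output.
response = generator(
    prompt,
    max_new_tokens=128,
    return_full_text=False,
    temperature=0.7,
    do_sample=True,
)

if response:
    print(response[0]["generated_text"].strip())
else:
    print("No response generated.")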