Pijush2023 committed
Commit 4a1881c · verified · 1 Parent(s): f14dc92

Update app.py

Files changed (1):
  app.py (+28 -22)
app.py CHANGED
@@ -389,16 +389,30 @@ chain_neo4j = (
     # return "Sorry, I encountered an error while processing your request.", []
 
 
-# # Short Prompt Template for Phi-3.5 Proprietary Model
+# Short Prompt Template for Phi-3.5 Proprietary Model
 
-# phi_short_template = f"""
-# As an expert on Birmingham, Alabama, I will provide concise, accurate, and informative responses to your queries based on 128 token limit . Given the sunny weather today, {current_date}, feel free to ask me anything you need to know about the city.
-# Provide only the direct answer to the question without any follow-up questions.
-# {{context}}
-# Question: {{question}}
-# Answer:
-# """
+phi_short_template = f"""
+As an expert on Birmingham, Alabama, I will provide concise, accurate, and informative responses to your queries within a 128-token limit. Given the sunny weather today, {current_date}, feel free to ask me anything you need to know about the city.
+Provide only the direct answer to the question without any follow-up questions.
+{{context}}
+Question: {{question}}
+Answer:
+"""
+
+
+import re
 
+def clean_response(response_text):
+    # Remove any metadata-like information and focus on the main content
+    # Removes "Document(metadata=...)" and other similar patterns
+    cleaned_response = re.sub(r'Document\(metadata=.*?\),?\s*', '', response_text, flags=re.DOTALL)
+    cleaned_response = re.sub(r'page_content=".*?"\),?', '', cleaned_response, flags=re.DOTALL)
+    cleaned_response = re.sub(r'\[.*?\]', '', cleaned_response, flags=re.DOTALL)  # Remove content in brackets
+    cleaned_response = re.sub(r'\s+', ' ', cleaned_response).strip()
+    # Remove any unwanted follow-up questions or unnecessary text
+    cleaned_response = re.sub(r'Question:.*\nAnswer:', '', cleaned_response, flags=re.DOTALL).strip()
+    return cleaned_response
+
 
 import re
 
@@ -418,24 +432,20 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
     logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
 
     try:
-        # Handle hotel-related queries
         if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
             response = fetch_google_hotels()
             return response, extract_addresses(response)
 
-        # Handle restaurant-related queries
         if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
             response = fetch_yelp_restaurants()
             return response, extract_addresses(response)
 
-        # Handle flight-related queries
         if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
             response = fetch_google_flights()
             return response, extract_addresses(response)
 
         if retrieval_mode == "VDB":
             if selected_model == chat_model:
-                # Use GPT-4o with its vector store and template
                 retriever = gpt_retriever
                 prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
                 context = retriever.get_relevant_documents(message)
@@ -451,28 +461,24 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
                 return response['result'], extract_addresses(response['result'])
 
             elif selected_model == phi_pipe:
-                # Use Phi-3.5 with its vector store and a simplified prompt
                 retriever = phi_retriever
                 context = retriever.get_relevant_documents(message)
-                prompt = f"""
-                Here is the information :
-                {context}
-                {message}
-                """
+                prompt = phi_short_template.format(context=context, question=message)
 
                 logging.debug(f"Phi-3.5 Prompt: {prompt}")
 
                 response = selected_model(prompt, **{
-                    "max_new_tokens": 128,
+                    "max_new_tokens": 128,   # cap answers at 128 new tokens
                     "return_full_text": False,
-                    "temperature": 0.5,
-                    "do_sample": False,
+                    "temperature": 0.7,      # raised from 0.5 for more natural phrasing
+                    "do_sample": True,       # allow sampling to increase response diversity
                 })
 
                 if response:
                     generated_text = response[0]['generated_text']
                     logging.debug(f"Phi-3.5 Response: {generated_text}")
-                    return generated_text, extract_addresses(generated_text)
+                    cleaned_response = clean_response(generated_text)
+                    return cleaned_response, extract_addresses(cleaned_response)
                 else:
                     logging.error("Phi-3.5 did not return any response.")
                     return "No response generated.", []
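Note on the uncommented template: phi_short_template is an f-string, so {current_date} is interpolated when the module loads, while the doubled braces {{context}} and {{question}} survive as literal {context}/{question} placeholders for the new phi_short_template.format(...) call in generate_answer. A minimal standalone sketch of this two-stage substitution (the date value and strings below are illustrative, not taken from app.py):

    from datetime import date

    current_date = date.today().strftime("%B %d, %Y")  # stand-in for app.py's current_date

    # Stage 1: f-string interpolation happens at definition time; the doubled
    # braces survive as literal {context} / {question} placeholders.
    template = f"""
    As of {current_date}, answer using only the context below.
    {{context}}
    Question: {{question}}
    Answer:
    """

    # Stage 2: .format() fills the placeholders once per query.
    prompt = template.format(
        context="Birmingham is the seat of Jefferson County, Alabama.",
        question="Which county is Birmingham in?",
    )
    print(prompt)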
 
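Note on the unchanged routing checks: Python's `and` binds tighter than `or`, so a condition like `"hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower()` matches any message that mentions a hotel, Birmingham or not. Grouping the keyword tests is the likely intent; a quick demonstration:

    m = "any good hotels downtown?".lower()  # no "birmingham" anywhere

    # `and` binds tighter than `or`, so the ungrouped check fires on any
    # hotel mention, Birmingham or not:
    print("hotel" in m or "hotels" in m and "birmingham" in m)    # True
    # Grouping applies the Birmingham requirement to both keywords:
    print(("hotel" in m or "hotels" in m) and "birmingham" in m)  # False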
 
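Note on the decoding change: switching from greedy decoding (do_sample=False, temperature 0.5) to sampling at temperature 0.7 trades determinism for variety, while max_new_tokens stays at 128 to match the template's stated limit. A minimal sketch of the equivalent standalone call, assuming phi_pipe is a Hugging Face text-generation pipeline (the model id is an assumption, not taken from app.py):

    from transformers import pipeline

    # Illustrative stand-in for app.py's phi_pipe (model id assumed)
    phi_pipe = pipeline("text-generation", model="microsoft/Phi-3.5-mini-instruct")

    response = phi_pipe(
        "Question: Which county is Birmingham, Alabama in?\nAnswer:",
        max_new_tokens=128,      # hard cap on answer length
        return_full_text=False,  # return only the completion, not the prompt
        do_sample=True,          # sample instead of greedy decoding
        temperature=0.7,         # moderate randomness
    )
    print(response[0]["generated_text"])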
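Note on the new clean_response helper: retriever.get_relevant_documents returns LangChain Document objects, so formatting that list straight into the prompt can leak reprs like Document(metadata=...) into the generated text; the helper strips them afterwards. Each re.sub must chain on cleaned_response (passing response_text to a later step would discard the earlier cleanup). A self-contained sketch on a hypothetical contaminated response:

    import re

    def clean_response(response_text):
        # Each step chains on cleaned_response so no cleanup is discarded.
        cleaned_response = re.sub(r'Document\(metadata=.*?\),?\s*', '', response_text, flags=re.DOTALL)
        cleaned_response = re.sub(r'page_content=".*?"\),?', '', cleaned_response, flags=re.DOTALL)
        cleaned_response = re.sub(r'\[.*?\]', '', cleaned_response, flags=re.DOTALL)
        cleaned_response = re.sub(r'\s+', ' ', cleaned_response).strip()
        cleaned_response = re.sub(r'Question:.*\nAnswer:', '', cleaned_response, flags=re.DOTALL).strip()
        return cleaned_response

    # Hypothetical raw output with leaked retrieval metadata:
    raw = '[Document(metadata={"source": "bhm.txt"}, page_content="...")] Vulcan Park overlooks the city.'
    print(clean_response(raw))  # -> Vulcan Park overlooks the city.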