Update app.py
app.py CHANGED
@@ -391,27 +391,24 @@ chain_neo4j = (
 
 # Short Prompt Template for Phi-3.5 Proprietary Model
 
-phi_short_template = f"""
-As an expert on Birmingham, Alabama, I will provide concise, accurate, and informative responses to your queries based on 128 token limit . Given the sunny weather today, {current_date}, feel free to ask me anything you need to know about the city.
-Provide only the direct answer to the question without any follow-up questions.
-{{context}}
-Question: {{question}}
-Answer:
+# phi_short_template = f"""
+# As an expert on Birmingham, Alabama, I will provide concise, accurate, and informative responses to your queries based on 128 token limit . Given the sunny weather today, {current_date}, feel free to ask me anything you need to know about the city.
+# Provide only the direct answer to the question without any follow-up questions.
+# {{context}}
+# Question: {{question}}
+# Answer:
+# """
+
+phi_custom_template = """
+<|system|>
+You are a helpful assistant.<|end|>
+<|user|>
+Context: {context}
+Question: {question}<|end|>
+<|assistant|>
 """
 
 
-import re
-
-def clean_response(response_text):
-    # Remove any metadata-like information and focus on the main content
-    # Removes "Document(metadata=...)" and other similar patterns
-    cleaned_response = re.sub(r'Document\(metadata=.*?\),?\s*', '', response_text, flags=re.DOTALL)
-    cleaned_response = re.sub(r'page_content=".*?"\),?', '', cleaned_response, flags=re.DOTALL)
-    cleaned_response = re.sub(r'\[.*?\]', '', cleaned_response, flags=re.DOTALL)  # Remove content in brackets
-    cleaned_response = re.sub(r'\s+', ' ', cleaned_response).strip()
-    #Remove any unwanted follow-up questions or unnecessary text
-    cleaned_response = re.sub(r'Question:.*\nAnswer:', '', response_text, flags=re.DOTALL).strip()
-    return cleaned_response
 
 
 import re
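For context: the new phi_custom_template follows the Phi-3.5 chat markup, with <|system|>, <|user|>, and <|assistant|> turns terminated by <|end|>. A minimal sketch of how the template is filled before being handed to the model; the sample context and question below are illustrative only, not taken from the app:

phi_custom_template = """
<|system|>
You are a helpful assistant.<|end|>
<|user|>
Context: {context}
Question: {question}<|end|>
<|assistant|>
"""

# str.format drops the retrieved context and the user's question into the
# chat markup; the trailing <|assistant|> tag cues the model to answer.
prompt = phi_custom_template.format(
    context="Birmingham is the most populous city in Alabama.",
    question="Which city is the most populous in Alabama?",
)
print(prompt)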
@@ -495,6 +492,78 @@ def clean_response(response_text):
 
 
 
+# def generate_answer(message, choice, retrieval_mode, selected_model):
+#     logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
+
+#     try:
+#         # Handle hotel-related queries
+#         if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
+#             response = fetch_google_hotels()
+#             return response, extract_addresses(response)
+
+#         # Handle restaurant-related queries
+#         if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
+#             response = fetch_yelp_restaurants()
+#             return response, extract_addresses(response)
+
+#         # Handle flight-related queries
+#         if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
+#             response = fetch_google_flights()
+#             return response, extract_addresses(response)
+
+#         if retrieval_mode == "VDB":
+#             if selected_model == chat_model:
+#                 retriever = gpt_retriever
+#                 prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
+#                 context = retriever.get_relevant_documents(message)
+#                 prompt = prompt_template.format(context=context, question=message)
+
+#                 qa_chain = RetrievalQA.from_chain_type(
+#                     llm=chat_model,
+#                     chain_type="stuff",
+#                     retriever=retriever,
+#                     chain_type_kwargs={"prompt": prompt_template}
+#                 )
+#                 response = qa_chain({"query": message})
+#                 return response['result'], extract_addresses(response['result'])
+
+#             elif selected_model == phi_pipe:
+#                 retriever = phi_retriever
+#                 context_documents = retriever.get_relevant_documents(message)
+#                 context = "\n".join([doc.page_content for doc in context_documents])
+
+#                 prompt = phi_short_template.format(context=context, question=message)
+
+#                 logging.debug(f"Phi-3.5 Prompt: {prompt}")
+
+#                 response = selected_model(prompt, **{
+#                     "max_new_tokens": 160,  # Increased to handle longer responses
+#                     "return_full_text": True,
+#                     "temperature": 0.7,  # Adjusted to avoid cutting off
+#                     "do_sample": True,  # Allow sampling to increase response diversity
+#                 })
+
+#                 if response:
+#                     generated_text = response[0]['generated_text']
+#                     logging.debug(f"Phi-3.5 Response: {generated_text}")
+#                     cleaned_response = clean_response(generated_text)
+#                     return cleaned_response, extract_addresses(cleaned_response)
+#                 else:
+#                     logging.error("Phi-3.5 did not return any response.")
+#                     return "No response generated.", []
+
+#         elif retrieval_mode == "KGF":
+#             response = chain_neo4j.invoke({"question": message})
+#             return response, extract_addresses(response)
+#         else:
+#             return "Invalid retrieval mode selected.", []
+
+#     except Exception as e:
+#         logging.error(f"Error in generate_answer: {e}")
+#         return "Sorry, I encountered an error while processing your request.", []
+
+
+
 def generate_answer(message, choice, retrieval_mode, selected_model):
     logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
 
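One detail worth flagging in the commented-out routing above (and worth re-checking in the replacement function, which this diff does not show in full): Python's "and" binds tighter than "or", so each keyword check parses as "hotel" in m or ("hotels" in m and "birmingham" in m), which matches any message mentioning a hotel regardless of city. A sketch of the presumably intended check; is_hotel_query is a hypothetical helper, not a function in app.py:

def is_hotel_query(message: str) -> bool:
    # Explicit parentheses express "mentions hotels AND mentions Birmingham".
    # Note that "hotel" in m already covers "hotels", since it is a substring.
    m = message.lower()
    return ("hotel" in m) and ("birmingham" in m)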
@@ -535,7 +604,8 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
             context_documents = retriever.get_relevant_documents(message)
             context = "\n".join([doc.page_content for doc in context_documents])
 
-            prompt = phi_short_template.format(context=context, question=message)
+            # Integrating the custom context and question into the base prompt template
+            prompt = phi_base_template.format(context=context, question=message)
 
             logging.debug(f"Phi-3.5 Prompt: {prompt}")
 
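A naming note on this hunk: it formats phi_base_template, but the template added in the first hunk is named phi_custom_template, and no phi_base_template appears anywhere in this diff. If that name is not defined elsewhere in app.py, this line raises a NameError at query time. A sketch of the same call wired to the template the diff actually adds:

# Assumes phi_custom_template from the first hunk; context and message are
# the variables already in scope at this point in generate_answer.
prompt = phi_custom_template.format(context=context, question=message)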
@@ -570,8 +640,6 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
 
 
 
-
-
 def bot(history, choice, tts_choice, retrieval_mode, model_choice):
     if not history:
         return history
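Taken together, a minimal end-to-end sketch of the new Phi-3.5 path, under stated assumptions: phi_pipe is a Hugging Face transformers text-generation pipeline (the model id below is an assumption; the loading code is not part of this diff), and the generation arguments mirror those visible in the commented-out function:

from transformers import pipeline

# Assumption: the Space loads Phi-3.5 as a transformers text-generation
# pipeline; app.py's actual loading code is not shown in this diff.
phi_pipe = pipeline("text-generation", model="microsoft/Phi-3.5-mini-instruct")

# A prompt in the chat format of phi_custom_template (see the first hunk).
prompt = (
    "<|system|>\nYou are a helpful assistant.<|end|>\n"
    "<|user|>\nContext: Birmingham is the most populous city in Alabama.\n"
    "Question: Which city is the most populous in Alabama?<|end|>\n"
    "<|assistant|>\n"
)

# Generation arguments mirror the commented-out code above; with
# return_full_text=False only the completion comes back, so the echoed
# prompt no longer needs to be stripped out the way clean_response did.
out = phi_pipe(prompt, max_new_tokens=160, do_sample=True,
               temperature=0.7, return_full_text=False)
print(out[0]["generated_text"].strip())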