Update app.py
app.py CHANGED
@@ -33,7 +33,7 @@ from langchain.chains.conversation.memory import ConversationBufferWindowMemory
 from langchain.agents import Tool, initialize_agent
 
 # Build prompt
-template1 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska.Based on todays weather is sunny bright day and
+template1 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska.Based on todays weather is sunny bright day and date is 17th june 2024 , Use the following pieces of context,
 memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end.If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
 Use fifteen sentences maximum. Keep the answer as detailed as possible. Always include the address, time, date, and
 event type and description. Always say "It was my pleasure!" at the end of the answer.
@@ -41,7 +41,7 @@ event type and description. Always say "It was my pleasure!" at the end of the a
 Question: {question}
 Helpful Answer:"""
 
-template2 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska.Based on todays weather is sunny bright day and
+template2 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska.Based on todays weather is sunny bright day and date is 17th june 2024 , Use the following pieces of context,
 memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end.If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
 Use fifteen sentences maximum. Keep the answer short and sweet crisp.Always say "It was my pleasure!" at the end of the answer.
 {context}
@@ -94,8 +94,6 @@ def initialize_agent_with_prompt(prompt_template):
 # Define the function to generate answers
 def generate_answer(message, choice):
     logging.debug(f"generate_answer called with prompt_choice: {choice}")
-    current_date = "17th June 2024"
-    today_weather = get_current_weather()
 
     if choice == "Details":
         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
@@ -104,8 +102,7 @@ def generate_answer(message, choice):
     else:
         logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
-    response = agent(message
-    #response = agent(message)
+    response = agent(message)
     return response['output']
 
 def bot(history, choice):
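In short, the commit bakes the fixed date and weather wording into template1/template2, drops the now-unused current_date and today_weather variables, and repairs the unbalanced agent(message call. The hunks only show the call sites; initialize_agent_with_prompt, QA_CHAIN_PROMPT_1, and QA_CHAIN_PROMPT_2 are defined elsewhere in app.py and are not part of this change. The following is a minimal, self-contained sketch of how the updated generate_answer path plausibly fits together after this commit, assuming the templates are wrapped in LangChain PromptTemplate objects and that initialize_agent_with_prompt returns a callable agent; stand-in stubs are used for anything this diff does not show.

# Minimal sketch of the post-commit generate_answer flow (reconstruction, not
# the Space's actual code). Assumptions: QA_CHAIN_PROMPT_1/2 are PromptTemplate
# objects built from template1/template2, and initialize_agent_with_prompt
# returns a callable LangChain agent; stubs stand in for both here.
import logging

from langchain.prompts import PromptTemplate

# Shortened stand-ins for the real templates; in app.py the weather and the
# "17th june 2024" date are hard-coded inside the template text itself, so
# generate_answer no longer computes current_date or today_weather at runtime.
template1 = "Detailed concierge prompt...\n{context}\nQuestion: {question}\nHelpful Answer:"
template2 = "Concise concierge prompt...\n{context}\nQuestion: {question}\nHelpful Answer:"

QA_CHAIN_PROMPT_1 = PromptTemplate(input_variables=["context", "question"], template=template1)
QA_CHAIN_PROMPT_2 = PromptTemplate(input_variables=["context", "question"], template=template2)

def initialize_agent_with_prompt(prompt_template):
    # Stand-in for the real helper, which wires the chosen prompt into a
    # LangChain agent; it only needs to return a callable that yields a dict
    # with an 'output' key, matching how generate_answer consumes it.
    return lambda message: {"output": f"(answered with {prompt_template.template[:8]}...) {message}"}

def generate_answer(message, choice):
    logging.debug(f"generate_answer called with prompt_choice: {choice}")

    # "Details" selects the verbose template; anything else falls back to the
    # conversational one. The real app also handles a "Conversational" choice
    # between these branches, which is outside the hunks shown above.
    if choice == "Details":
        agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
    else:
        agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)

    # The commit restores the closing parenthesis on this call, which was left
    # unbalanced as `agent(message` in the previous revision.
    response = agent(message)
    return response['output']

print(generate_answer("Any events downtown today?", "Details"))

Design note: with the date and weather frozen into the prompt strings, the templates will go stale after 17th June 2024; the removed current_date / get_current_weather() lines suggest the earlier revision intended to inject these values at call time instead.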