Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
@@ -33,7 +33,7 @@ from langchain.chains.conversation.memory import ConversationBufferWindowMemory
|
|
33 |
from langchain.agents import Tool, initialize_agent
|
34 |
|
35 |
# Build prompt
|
36 |
-
template1 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska.Based on the current weather condition as {
|
37 |
memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end.If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
|
38 |
Use fifteen sentences maximum. Keep the answer as detailed as possible. Always include the address, time, date, and
|
39 |
event type and description. Always say "It was my pleasure!" at the end of the answer.
|
@@ -41,7 +41,7 @@ event type and description. Always say "It was my pleasure!" at the end of the a
|
|
41 |
Question: {question}
|
42 |
Helpful Answer:"""
|
43 |
|
44 |
-
template2 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska.Based on the current weather condition as {
|
45 |
memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end.If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
|
46 |
Use fifteen sentences maximum. Keep the answer short and sweet crisp.Always say "It was my pleasure!" at the end of the answer.
|
47 |
{context}
|
@@ -94,6 +94,9 @@ def initialize_agent_with_prompt(prompt_template):
|
|
94 |
# Define the function to generate answers
|
95 |
def generate_answer(message, choice):
|
96 |
logging.debug(f"generate_answer called with prompt_choice: {choice}")
|
|
|
|
|
|
|
97 |
if choice == "Details":
|
98 |
agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
|
99 |
elif choice == "Conversational":
|
@@ -101,8 +104,8 @@ def generate_answer(message, choice):
|
|
101 |
else:
|
102 |
logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
|
103 |
agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
|
104 |
-
|
105 |
-
response = agent(message)
|
106 |
return response['output']
|
107 |
|
108 |
def bot(history, choice):
|
@@ -239,7 +242,22 @@ def fetch_local_news():
|
|
239 |
return news_html
|
240 |
else:
|
241 |
return "<p>Failed to fetch local news</p>"
|
242 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
243 |
# Function to fetch local events
|
244 |
def fetch_local_events():
|
245 |
api_key = os.environ['SERP_API']
|
@@ -457,8 +475,8 @@ def show_map_if_details(history,choice):
|
|
457 |
if choice in ["Details", "Conversational"]:
|
458 |
return gr.update(visible=True), update_map_with_response(history)
|
459 |
else:
|
460 |
-
|
461 |
-
return gr.update(visible(False), "")
|
462 |
|
463 |
def generate_audio_elevenlabs(text):
|
464 |
XI_API_KEY = os.environ['ELEVENLABS_API']
|
|
|
33 |
from langchain.agents import Tool, initialize_agent
|
34 |
|
35 |
# Build prompt
|
36 |
+
template1 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska.Based on the current weather condition as {today_weather} and current date as {current_date} , Use the following pieces of context,
|
37 |
memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end.If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
|
38 |
Use fifteen sentences maximum. Keep the answer as detailed as possible. Always include the address, time, date, and
|
39 |
event type and description. Always say "It was my pleasure!" at the end of the answer.
|
|
|
41 |
Question: {question}
|
42 |
Helpful Answer:"""
|
43 |
|
44 |
+
template2 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska.Based on the current weather condition as {today_weather} and current date as {current_date} , Use the following pieces of context,
|
45 |
memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end.If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
|
46 |
Use fifteen sentences maximum. Keep the answer short and sweet crisp.Always say "It was my pleasure!" at the end of the answer.
|
47 |
{context}
|
|
|
94 |
# Define the function to generate answers
|
95 |
def generate_answer(message, choice):
|
96 |
logging.debug(f"generate_answer called with prompt_choice: {choice}")
|
97 |
+
current_date = "17th June 2024"
|
98 |
+
today_weather = get_current_weather()
|
99 |
+
|
100 |
if choice == "Details":
|
101 |
agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
|
102 |
elif choice == "Conversational":
|
|
|
104 |
else:
|
105 |
logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
|
106 |
agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
|
107 |
+
response = agent(message, today_weather=today_weather, current_date=current_date)
|
108 |
+
#response = agent(message)
|
109 |
return response['output']
|
110 |
|
111 |
def bot(history, choice):
|
|
|
242 |
return news_html
|
243 |
else:
|
244 |
return "<p>Failed to fetch local news</p>"
|
245 |
+
# Function to get the current weather
def get_current_weather():
    """Return Omaha's current weather as a short string, e.g. "21.0°C and Clear".

    Reads the Visual Crossing API key from the WEATHER_API environment
    variable. Degrades gracefully to "N/A" on any failure so callers can
    safely interpolate the result into a prompt template.
    """
    try:
        api_key = os.environ['WEATHER_API']
    except KeyError:
        # Bug fix: a missing env var previously raised an uncaught KeyError
        # (the except clause below only catches RequestException); degrade
        # gracefully like the network-error path instead.
        logging.error("WEATHER_API environment variable is not set")
        return "N/A"
    url = (
        'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/omaha'
        f'?unitGroup=metric&include=events%2Calerts%2Chours%2Cdays%2Ccurrent&key={api_key}'
    )
    try:
        # Bug fix: requests.get() without a timeout can block forever and
        # hang the whole app; bound the wait explicitly.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        jsonData = response.json()
        # The Timeline API nests live readings under "currentConditions";
        # .get() defaults guard against a schema change or partial payload.
        current_conditions = jsonData.get("currentConditions", {})
        temp = current_conditions.get("temp", "N/A")
        condition = current_conditions.get("conditions", "N/A")
        return f"{temp}°C and {condition}"
    except (requests.exceptions.RequestException, ValueError) as e:
        # ValueError additionally covers a non-JSON body from response.json().
        logging.error(f"Failed to fetch local weather: {e}")
        return "N/A"
|
260 |
+
|
261 |
# Function to fetch local events
|
262 |
def fetch_local_events():
|
263 |
api_key = os.environ['SERP_API']
|
|
|
475 |
if choice in ["Details", "Conversational"]:
|
476 |
return gr.update(visible=True), update_map_with_response(history)
|
477 |
else:
|
478 |
+
return gr.update(visible=False), ""
|
479 |
+
#return gr.update(visible(False), "")
|
480 |
|
481 |
def generate_audio_elevenlabs(text):
|
482 |
XI_API_KEY = os.environ['ELEVENLABS_API']
|