Pijush2023 committed on
Commit c8f1081 · verified · 1 Parent(s): a0b0f95

Update app.py

Files changed (1): app.py (+297 −207)

app.py CHANGED
@@ -32,10 +32,19 @@ from pathlib import Path
  import torchaudio
  import numpy as np
 
- PYTORCH_USE_CUDA_DSA = 1
- CUDA_LAUNCH_BLOCKING = 1
-
- # Check if the token is already set in the environment variables
  hf_token = os.getenv("HF_TOKEN")
  if hf_token is None:
      print("Please set your Hugging Face token in the environment variables.")
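(Aside on the two deleted flags: as plain module-level Python assignments they had no effect, since CUDA debug switches are read from the process environment before torch initializes CUDA. A minimal sketch of the intended effect, not part of this commit:)

# Sketch only: export the CUDA debug flags as environment variables
# before any torch CUDA work, otherwise they are ignored.
import os
os.environ["PYTORCH_USE_CUDA_DSA"] = "1"   # enable device-side assertions
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"   # make kernel launches synchronous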
@@ -45,7 +54,6 @@ else:
  logging.basicConfig(level=logging.DEBUG)
 
  embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
-
  from pinecone import Pinecone
  pc = Pinecone(api_key=os.environ['PINECONE_API_KEY'])
 
@@ -61,167 +69,12 @@ conversational_memory = ConversationBufferWindowMemory(
      return_messages=True
  )
 
- # def get_current_time_and_date():
- #     now = datetime.now()
- #     return now.strftime("%Y-%m-%d %H:%M:%S")
-
- # current_time_and_date = get_current_time_and_date()
-
  def get_current_date():
      return datetime.now().strftime("%B %d, %Y")
 
  current_date = get_current_date()
 
- def fetch_local_events():
-     api_key = os.environ['SERP_API']
-     url = f'https://serpapi.com/search.json?engine=google_events&q=Events+in+Birmingham&hl=en&gl=us&api_key={api_key}'
-     response = requests.get(url)
-     if response.status_code == 200:
-         events_results = response.json().get("events_results", [])
-         events_html = """
-         <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Local Events</h2>
-         <style>
-             table {
-                 font-family: 'Verdana', sans-serif;
-                 color: #333;
-                 border-collapse: collapse;
-                 width: 100%;
-             }
-             th, td {
-                 border: 1px solid #fff !important;
-                 padding: 8px;
-             }
-             th {
-                 background-color: #f2f2f2;
-                 color: #333;
-                 text-align: left;
-             }
-             tr:hover {
-                 background-color: #f5f5f5;
-             }
-             .event-link {
-                 color: #1E90FF;
-                 text-decoration: none;
-             }
-             .event-link:hover {
-                 text-decoration: underline;
-             }
-         </style>
-         <table>
-             <tr>
-                 <th>Title</th>
-                 <th>Date and Time</th>
-                 <th>Location</th>
-             </tr>
-         """
-         for event in events_results:
-             title = event.get("title", "No title")
-             date_info = event.get("date", {})
-             date = f"{date_info.get('start_date', '')} {date_info.get('when', '')}".replace("{", "").replace("}", "")
-             location = event.get("address", "No location")
-             if isinstance(location, list):
-                 location = " ".join(location)
-             location = location.replace("[", "").replace("]", "")
-             link = event.get("link", "#")
-             events_html += f"""
-             <tr>
-                 <td><a class='event-link' href='{link}' target='_blank'>{title}</a></td>
-                 <td>{date}</td>
-                 <td>{location}</td>
-             </tr>
-             """
-         events_html += "</table>"
-         return events_html
-     else:
-         return "<p>Failed to fetch local events</p>"
-
- def fetch_local_weather():
-     try:
-         api_key = os.environ['WEATHER_API']
-         url = f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/birmingham?unitGroup=metric&include=events%2Calerts%2Chours%2Cdays%2Ccurrent&key={api_key}'
-         response = requests.get(url)
-         response.raise_for_status()
-         jsonData = response.json()
-
-         current_conditions = jsonData.get("currentConditions", {})
-         temp_celsius = current_conditions.get("temp", "N/A")
-
-         if temp_celsius != "N/A":
-             temp_fahrenheit = int((temp_celsius * 9/5) + 32)
-         else:
-             temp_fahrenheit = "N/A"
-
-         condition = current_conditions.get("conditions", "N/A")
-         humidity = current_conditions.get("humidity", "N/A")
-
-         weather_html = f"""
-         <div class="weather-theme">
-             <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Local Weather</h2>
-             <div class="weather-content">
-                 <div class="weather-icon">
-                     <img src="https://www.weatherbit.io/static/img/icons/{get_weather_icon(condition)}.png" alt="{condition}" style="width: 100px; height: 100px;">
-                 </div>
-                 <div class="weather-details">
-                     <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Temperature: {temp_fahrenheit}°F</p>
-                     <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Condition: {condition}</p>
-                     <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Humidity: {humidity}%</p>
-                 </div>
-             </div>
-         </div>
-         <style>
-             .weather-theme {{
-                 animation: backgroundAnimation 10s infinite alternate;
-                 border-radius: 10px;
-                 padding: 10px;
-                 margin-bottom: 15px;
-                 background: linear-gradient(45deg, #ffcc33, #ff6666, #ffcc33, #ff6666);
-                 background-size: 400% 400%;
-                 box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
-                 transition: box-shadow 0.3s ease, background-color 0.3s ease;
-             }}
-             .weather-theme:hover {{
-                 box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2);
-                 background-position: 100% 100%;
-             }}
-             @keyframes backgroundAnimation {{
-                 0% {{ background-position: 0% 50%; }}
-                 100% {{ background-position: 100% 50%; }}
-             }}
-             .weather-content {{
-                 display: flex;
-                 align-items: center;
-             }}
-             .weather-icon {{
-                 flex: 1;
-             }}
-             .weather-details {{
-                 flex 3;
-             }}
-         </style>
-         """
-         return weather_html
-     except requests.exceptions.RequestException as e:
-         return f"<p>Failed to fetch local weather: {e}</p>"
-
- def get_weather_icon(condition):
-     condition_map = {
-         "Clear": "c01d",
-         "Partly Cloudy": "c02d",
-         "Cloudy": "c03d",
-         "Overcast": "c04d",
-         "Mist": "a01d",
-         "Patchy rain possible": "r01d",
-         "Light rain": "r02d",
-         "Moderate rain": "r03d",
-         "Heavy rain": "r04d",
-         "Snow": "s01d",
-         "Thunderstorm": "t01d",
-         "Fog": "a05d",
-     }
-     return condition_map.get(condition, "c04d")
-
-
-
  template1 = f"""As an expert concierge in Birmingham, Alabama, known for being a helpful and renowned guide, I am here to assist you on this sunny bright day of {current_date}. Given the current weather conditions and date, I have access to a plethora of information regarding events, places, and activities in Birmingham that can enhance your experience.
  If you have any questions or need recommendations, feel free to ask. I have a wealth of knowledge of perennial events in Birmingham and can provide detailed information to ensure you make the most of your time here. Remember, I am here to assist you in any way possible.
  Now, let me guide you through some of the exciting events happening today in Birmingham, Alabama:
@@ -261,56 +114,153 @@ Helpful Answer:"""
  QA_CHAIN_PROMPT_1 = PromptTemplate(input_variables=["context", "question"], template=template1)
  QA_CHAIN_PROMPT_2 = PromptTemplate(input_variables=["context", "question"], template=template2)
 
- def build_qa_chain(prompt_template):
-     qa_chain = RetrievalQA.from_chain_type(
-         llm=chat_model,
-         chain_type="stuff",
-         retriever=retriever,
-         chain_type_kwargs={"prompt": prompt_template}
-     )
-     tools = [
-         Tool(
-             name='Knowledge Base',
-             func=qa_chain,
-             description='Use this tool when answering general knowledge queries to get more information about the topic'
          )
-     ]
-     return qa_chain, tools
-
- def initialize_agent_with_prompt(prompt_template):
-     qa_chain, tools = build_qa_chain(prompt_template)
-     agent = initialize_agent(
-         agent='chat-conversational-react-description',
-         tools=tools,
-         llm=chat_model,
-         verbose=False,
-         max_iteration=5,
-         early_stopping_method='generate',
-         memory=conversational_memory
      )
-     return agent
 
- def generate_answer(message, choice):
-     logging.debug(f"generate_answer called with prompt_choice: {choice}")
-
-     if choice == "Details":
-         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
-     elif choice == "Conversational":
-         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
      else:
-         logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
-         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
-     response = agent(message)
 
      addresses = extract_addresses(response['output'])
      return response['output'], addresses
 
-
- def bot(history, choice, tts_choice):
      if not history:
          return history
 
-     response, addresses = generate_answer(history[-1][0], choice)
      history[-1][1] = ""
 
      with concurrent.futures.ThreadPoolExecutor() as executor:
@@ -505,7 +455,6 @@ def show_map_if_details(history, choice):
      else:
          return gr.update(visible=False), ""
 
-
  def generate_audio_elevenlabs(text):
      XI_API_KEY = os.environ['ELEVENLABS_API']
      VOICE_ID = 'd9MIrwLnvDeH7aZb61E9'
@@ -676,11 +625,153 @@ def update_images():
      image_3 = generate_image(hardcoded_prompt_3)
      return image_1, image_2, image_3
 
 
- def clear_textbox():
-     return ""
 
 
  with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
      with gr.Row():
@@ -689,6 +780,7 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
 
      chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
      choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational")
 
      gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
 
@@ -713,7 +805,7 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
      retriever_sequence = (
          retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="Ask_Retriever")
          .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="voice_query")
-         .then(fn=bot, inputs=[chatbot, choice, tts_choice], outputs=[chatbot, audio_output], api_name="generate_voice_response")
          .then(fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="map_finder")
          .then(fn=clear_textbox, inputs=[], outputs=[chat_input])
      )
@@ -721,7 +813,7 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
      # Link the "Enter" key (submit event) to the same sequence of actions
      chat_input.submit(fn=stop_audio, inputs=[], outputs=[audio_output])
      chat_input.submit(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="voice_query").then(
-         fn=bot, inputs=[chatbot, choice, tts_choice], outputs=[chatbot, audio_output], api_name="generate_voice_response"
      ).then(
          fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="map_finder"
      ).then(
@@ -746,5 +838,3 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
 
  demo.queue()
  demo.launch(share=True)
-
-
 
  import torchaudio
  import numpy as np
 
+ # Neo4j imports
+ from langchain.chains import GraphCypherQAChain
+ from langchain_community.graphs import Neo4jGraph
+ from langchain_community.document_loaders import HuggingFaceDatasetLoader
+ from langchain_text_splitters import CharacterTextSplitter
+ from langchain_experimental.graph_transformers import LLMGraphTransformer
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.pydantic_v1 import BaseModel, Field
+ from langchain_core.messages import AIMessage, HumanMessage
+ from langchain_core.output_parsers import StrOutputParser
+ from langchain_core.runnables import RunnableBranch, RunnableLambda, RunnableParallel, RunnablePassthrough
+
+ # Pinecone setup
  hf_token = os.getenv("HF_TOKEN")
  if hf_token is None:
      print("Please set your Hugging Face token in the environment variables.")
 
  logging.basicConfig(level=logging.DEBUG)
 
  embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
  from pinecone import Pinecone
  pc = Pinecone(api_key=os.environ['PINECONE_API_KEY'])
 
      return_messages=True
  )
 
+ # Prompt templates
  def get_current_date():
      return datetime.now().strftime("%B %d, %Y")
 
  current_date = get_current_date()
 
  template1 = f"""As an expert concierge in Birmingham, Alabama, known for being a helpful and renowned guide, I am here to assist you on this sunny bright day of {current_date}. Given the current weather conditions and date, I have access to a plethora of information regarding events, places, and activities in Birmingham that can enhance your experience.
  If you have any questions or need recommendations, feel free to ask. I have a wealth of knowledge of perennial events in Birmingham and can provide detailed information to ensure you make the most of your time here. Remember, I am here to assist you in any way possible.
  Now, let me guide you through some of the exciting events happening today in Birmingham, Alabama:
 
  QA_CHAIN_PROMPT_1 = PromptTemplate(input_variables=["context", "question"], template=template1)
  QA_CHAIN_PROMPT_2 = PromptTemplate(input_variables=["context", "question"], template=template2)
 
+ # Neo4j setup
+ graph = Neo4jGraph(
+     url="neo4j+s://d63baadd.databases.neo4j.io",
+     username="neo4j",
+     password="XCSXe1Jl_gjyJqoBGXDqY1UrfgDc4Z_RT5YGrxPAy-g"
+ )
+
+ dataset_name = "Pijush2023/birmindata07312024"
+ page_content_column = 'events_description'
+ loader = HuggingFaceDatasetLoader(dataset_name, page_content_column)
+ data = loader.load()
+
+ text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=50)
+ documents = text_splitter.split_documents(data)
+
+ embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
+ llm = ChatOpenAI(temperature=0, model='gpt-4o', api_key=os.environ['OPENAI_API_KEY'])
+
+ llm_transformer = LLMGraphTransformer(llm=llm)
+ graph_documents = llm_transformer.convert_to_graph_documents(documents)
+ graph.add_graph_documents(graph_documents, baseEntityLabel=True, include_source=True)
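(Worth noting when reading this hunk: convert_to_graph_documents makes an LLM call per chunk, and because this runs at module level it re-ingests the whole dataset on every app start. A minimal guard, sketched under the assumption that add_graph_documents(include_source=True) has already created Document nodes on a previous run:)

# Sketch only, not part of the commit: skip re-ingestion when the graph
# already holds source documents from an earlier launch.
existing = graph.query("MATCH (d:Document) RETURN count(d) AS n")
if existing and existing[0]["n"] == 0:
    graph_documents = llm_transformer.convert_to_graph_documents(documents)
    graph.add_graph_documents(graph_documents, baseEntityLabel=True, include_source=True)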
+
+ class Entities(BaseModel):
+     names: list[str] = Field(..., description="All the person, organization, or business entities that appear in the text")
+
+ prompt = ChatPromptTemplate.from_messages([
+     ("system", "You are extracting organization and person entities from the text."),
+     ("human", "Use the given format to extract information from the following input: {question}"),
+ ])
+
+ entity_chain = prompt | llm.with_structured_output(Entities)
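(A quick sketch of how the new entity_chain behaves; the question string and output below are made up for illustration:)

# Hypothetical usage, not part of the commit: the chain returns an
# Entities object whose .names feed the full-text graph lookup below.
entities = entity_chain.invoke({"question": "What is the Birmingham Museum of Art hosting?"})
print(entities.names)  # e.g. ['Birmingham Museum of Art']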
+
+ def remove_lucene_chars(input: str) -> str:
+     return input.translate(str.maketrans({"\\": r"\\", "+": r"\+", "-": r"\-", "&": r"\&", "|": r"\|", "!": r"\!",
+                                           "(": r"\(", ")": r"\)", "{": r"\{", "}": r"\}", "[": r"\[", "]": r"\]",
+                                           "^": r"\^", "~": r"\~", "*": r"\*", "?": r"\?", ":": r"\:", '"': r'\"',
+                                           ";": r"\;", " ": r"\ "}))
+
+ def generate_full_text_query(input: str) -> str:
+     full_text_query = ""
+     words = [el for el in remove_lucene_chars(input).split() if el]
+     for word in words[:-1]:
+         full_text_query += f" {word}~2 AND"
+     full_text_query += f" {words[-1]}~2"
+     return full_text_query.strip()
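(For orientation, the fuzzy Lucene query built above looks like this. Note the function assumes a non-empty input; an empty string would raise IndexError on words[-1].)

# Hypothetical check, not part of the commit:
print(generate_full_text_query("Birmingham Museum"))  # -> 'Birmingham~2 AND Museum~2'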
+
+ def structured_retriever(question: str) -> str:
+     result = ""
+     entities = entity_chain.invoke({"question": question})
+     for entity in entities.names:
+         response = graph.query(
+             """CALL db.index.fulltext.queryNodes('entity', $query, {limit:2})
+             YIELD node, score
+             CALL {
+                 WITH node
+                 MATCH (node)-[r:!MENTIONS]->(neighbor)
+                 RETURN node.id + ' - ' + type(r) + ' -> ' + neighbor.id AS output
+                 UNION ALL
+                 WITH node
+                 MATCH (node)<-[r:!MENTIONS]-(neighbor)
+                 RETURN neighbor.id + ' - ' + type(r) + ' -> ' + node.id AS output
+             }
+             RETURN output LIMIT 50
+             """,
+             {"query": generate_full_text_query(entity)},
          )
+         result += "\n".join([el['output'] for el in response])
+     return result
+
+ def retriever_neo4j(question: str):
+     structured_data = structured_retriever(question)
+     return structured_data
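(structured_retriever queries a full-text index named 'entity', but the commit never creates it. A sketch of the one-time setup this presumably relies on, using the __Entity__ label that baseEntityLabel=True assigns; this is an assumption, not part of the commit:)

# Assumed one-time index setup so queryNodes('entity', ...) can resolve:
graph.query("CREATE FULLTEXT INDEX entity IF NOT EXISTS FOR (e:__Entity__) ON EACH [e.id]")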
+
+ _template = """Given the following conversation and a follow-up question, rephrase the follow-up question to be a standalone question,
+ in its original language.
+ Chat History:
+ {chat_history}
+ Follow Up Input: {question}
+ Standalone question:"""
+
+ CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
+
+ def _format_chat_history(chat_history: list[tuple[str, str]]) -> list:
+     buffer = []
+     for human, ai in chat_history:
+         buffer.append(HumanMessage(content=human))
+         buffer.append(AIMessage(content=ai))
+     return buffer
+
+ _search_query = RunnableBranch(
+     (
+         RunnableLambda(lambda x: bool(x.get("chat_history"))).with_config(
+             run_name="HasChatHistoryCheck"
+         ),
+         RunnablePassthrough.assign(
+             chat_history=lambda x: _format_chat_history(x["chat_history"])
+         )
+         | CONDENSE_QUESTION_PROMPT
+         | ChatOpenAI(temperature=0, api_key=os.environ['OPENAI_API_KEY'])
+         | StrOutputParser(),
+     ),
+     RunnableLambda(lambda x: x["question"]),
+ )
+
+ template = """Answer the question based only on the following context:
+ {context}
+
+ Question: {question}
+ Use natural language and be concise.
+ Answer:"""
+
+ prompt = ChatPromptTemplate.from_template(template)
+
+ chain_neo4j = (
+     RunnableParallel(
+         {
+             "context": _search_query | retriever_neo4j,
+             "question": RunnablePassthrough(),
+         }
      )
+     | prompt
+     | llm
+     | StrOutputParser()
+ )
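(End to end, the new chain takes a question dict, condenses follow-ups against chat history, retrieves graph triples as context, and answers with the prompt above. A hypothetical invocation, mirroring the Knowledge-Graph branch of generate_answer below; the question text is made up:)

# Hypothetical usage, not part of the commit:
answer = chain_neo4j.invoke({"question": "What events are happening in Birmingham this weekend?"})
print(answer)  # a concise natural-language answer grounded in the graph triples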
 
+ # Define a function to select between Pinecone and Neo4j
+ def generate_answer(message, choice, retrieval_mode):
+     logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
+
+     if retrieval_mode == "Vector":
+         qa_chain = build_qa_chain(QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2)
+         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2)
+         response = agent(message)
+     elif retrieval_mode == "Knowledge-Graph":
+         # chain_neo4j returns a plain string; wrap it in a dict so the
+         # shared response['output'] access below works for both modes.
+         response = {"output": chain_neo4j.invoke({"question": message})}
      else:
+         response = {"output": "Invalid retrieval mode selected."}
 
      addresses = extract_addresses(response['output'])
      return response['output'], addresses
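(With the dict normalization above, both retrieval modes flow through the same return path. A hypothetical direct call, bypassing the UI; argument values are made up:)

# Hypothetical usage, not part of the commit:
text, addresses = generate_answer("Any concerts downtown tonight?", "Conversational", "Knowledge-Graph")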
 
+ # The rest of the Gradio wiring follows.
+ def bot(history, choice, tts_choice, retrieval_mode):
      if not history:
          return history
 
+     response, addresses = generate_answer(history[-1][0], choice, retrieval_mode)
      history[-1][1] = ""
 
      with concurrent.futures.ThreadPoolExecutor() as executor:
 
      else:
          return gr.update(visible=False), ""
 
  def generate_audio_elevenlabs(text):
      XI_API_KEY = os.environ['ELEVENLABS_API']
      VOICE_ID = 'd9MIrwLnvDeH7aZb61E9'
 
      image_3 = generate_image(hardcoded_prompt_3)
      return image_1, image_2, image_3
 
+ def fetch_local_events():
+     api_key = os.environ['SERP_API']
+     url = f'https://serpapi.com/search.json?engine=google_events&q=Events+in+Birmingham&hl=en&gl=us&api_key={api_key}'
+     response = requests.get(url)
+     if response.status_code == 200:
+         events_results = response.json().get("events_results", [])
+         events_html = """
+         <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Local Events</h2>
+         <style>
+             table {
+                 font-family: 'Verdana', sans-serif;
+                 color: #333;
+                 border-collapse: collapse;
+                 width: 100%;
+             }
+             th, td {
+                 border: 1px solid #fff !important;
+                 padding: 8px;
+             }
+             th {
+                 background-color: #f2f2f2;
+                 color: #333;
+                 text-align: left;
+             }
+             tr:hover {
+                 background-color: #f5f5f5;
+             }
+             .event-link {
+                 color: #1E90FF;
+                 text-decoration: none;
+             }
+             .event-link:hover {
+                 text-decoration: underline;
+             }
+         </style>
+         <table>
+             <tr>
+                 <th>Title</th>
+                 <th>Date and Time</th>
+                 <th>Location</th>
+             </tr>
+         """
+         for event in events_results:
+             title = event.get("title", "No title")
+             date_info = event.get("date", {})
+             date = f"{date_info.get('start_date', '')} {date_info.get('when', '')}".replace("{", "").replace("}", "")
+             location = event.get("address", "No location")
+             if isinstance(location, list):
+                 location = " ".join(location)
+             location = location.replace("[", "").replace("]", "")
+             link = event.get("link", "#")
+             events_html += f"""
+             <tr>
+                 <td><a class='event-link' href='{link}' target='_blank'>{title}</a></td>
+                 <td>{date}</td>
+                 <td>{location}</td>
+             </tr>
+             """
+         events_html += "</table>"
+         return events_html
+     else:
+         return "<p>Failed to fetch local events</p>"
 
+ def get_weather_icon(condition):
+     condition_map = {
+         "Clear": "c01d",
+         "Partly Cloudy": "c02d",
+         "Cloudy": "c03d",
+         "Overcast": "c04d",
+         "Mist": "a01d",
+         "Patchy rain possible": "r01d",
+         "Light rain": "r02d",
+         "Moderate rain": "r03d",
+         "Heavy rain": "r04d",
+         "Snow": "s01d",
+         "Thunderstorm": "t01d",
+         "Fog": "a05d",
+     }
+     return condition_map.get(condition, "c04d")
 
+ def fetch_local_weather():
+     try:
+         api_key = os.environ['WEATHER_API']
+         url = f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/birmingham?unitGroup=metric&include=events%2Calerts%2Chours%2Cdays%2Ccurrent&key={api_key}'
+         response = requests.get(url)
+         response.raise_for_status()
+         jsonData = response.json()
+
+         current_conditions = jsonData.get("currentConditions", {})
+         temp_celsius = current_conditions.get("temp", "N/A")
+
+         if temp_celsius != "N/A":
+             temp_fahrenheit = int((temp_celsius * 9/5) + 32)
+         else:
+             temp_fahrenheit = "N/A"
+
+         condition = current_conditions.get("conditions", "N/A")
+         humidity = current_conditions.get("humidity", "N/A")
+
+         weather_html = f"""
+         <div class="weather-theme">
+             <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Local Weather</h2>
+             <div class="weather-content">
+                 <div class="weather-icon">
+                     <img src="https://www.weatherbit.io/static/img/icons/{get_weather_icon(condition)}.png" alt="{condition}" style="width: 100px; height: 100px;">
+                 </div>
+                 <div class="weather-details">
+                     <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Temperature: {temp_fahrenheit}°F</p>
+                     <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Condition: {condition}</p>
+                     <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Humidity: {humidity}%</p>
+                 </div>
+             </div>
+         </div>
+         <style>
+             .weather-theme {{
+                 animation: backgroundAnimation 10s infinite alternate;
+                 border-radius: 10px;
+                 padding: 10px;
+                 margin-bottom: 15px;
+                 background: linear-gradient(45deg, #ffcc33, #ff6666, #ffcc33, #ff6666);
+                 background-size: 400% 400%;
+                 box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
+                 transition: box-shadow 0.3s ease, background-color 0.3s ease;
+             }}
+             .weather-theme:hover {{
+                 box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2);
+                 background-position: 100% 100%;
+             }}
+             @keyframes backgroundAnimation {{
+                 0% {{ background-position: 0% 50%; }}
+                 100% {{ background-position: 100% 50%; }}
+             }}
+             .weather-content {{
+                 display: flex;
+                 align-items: center;
+             }}
+             .weather-icon {{
+                 flex: 1;
+             }}
+             .weather-details {{
+                 flex: 3;
+             }}
+         </style>
+         """
+         return weather_html
+     except requests.exceptions.RequestException as e:
+         return f"<p>Failed to fetch local weather: {e}</p>"
 
  with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
      with gr.Row():
 
      chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
      choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational")
+     retrieval_mode = gr.Radio(label="Retrieval Mode", choices=["Vector", "Knowledge-Graph"], value="Vector")
 
      gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
 
 
      retriever_sequence = (
          retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="Ask_Retriever")
          .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="voice_query")
+         .then(fn=bot, inputs=[chatbot, choice, tts_choice, retrieval_mode], outputs=[chatbot, audio_output], api_name="generate_voice_response")
          .then(fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="map_finder")
          .then(fn=clear_textbox, inputs=[], outputs=[chat_input])
      )
 
      # Link the "Enter" key (submit event) to the same sequence of actions
      chat_input.submit(fn=stop_audio, inputs=[], outputs=[audio_output])
      chat_input.submit(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="voice_query").then(
+         fn=bot, inputs=[chatbot, choice, tts_choice, retrieval_mode], outputs=[chatbot, audio_output], api_name="generate_voice_response"
      ).then(
          fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="map_finder"
      ).then(
 
 
  demo.queue()
  demo.launch(share=True)