Pijush2023 committed on
Commit 7bd6901 · verified · 1 Parent(s): 9711981

Update app.py

Files changed (1)
  1. app.py +982 -450
app.py CHANGED
@@ -1,73 +1,227 @@
1
- import os
2
- import re
3
- import time
4
- import requests
5
- import logging
6
- import folium
7
- import gradio as gr
8
- import tempfile
9
- import torch
10
- from datetime import datetime
11
- import numpy as np
12
- from gtts import gTTS
13
- from googlemaps import Client as GoogleMapsClient
14
- from diffusers import StableDiffusion3Pipeline
15
- import concurrent.futures
16
- from PIL import Image
17
-
18
- from langchain_openai import OpenAIEmbeddings, ChatOpenAI
19
- from langchain_pinecone import PineconeVectorStore
20
- from langchain.prompts import PromptTemplate
21
- from langchain.chains import RetrievalQA
22
- from langchain.chains.conversation.memory import ConversationBufferWindowMemory
23
- from langchain.agents import Tool, initialize_agent
24
- from huggingface_hub import login
25
-
26
- # Check if the token is already set in the environment variables
27
- hf_token = os.getenv("HF_TOKEN")
28
-
29
- if hf_token is None:
30
- # If the token is not set, prompt for it (this should be done securely)
31
- print("Please set your Hugging Face token in the environment variables.")
32
- else:
33
- # Login using the token
34
- login(token=hf_token)
35
-
36
- # Your application logic goes here
37
- print("Logged in successfully to Hugging Face Hub!")
38
-
39
-
40
-
41
- # Set up logging
42
- logging.basicConfig(level=logging.DEBUG)
43
-
44
- # Initialize OpenAI embeddings
45
- embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
46
-
47
- # Initialize Pinecone
48
- from pinecone import Pinecone
49
- pc = Pinecone(api_key=os.environ['PINECONE_API_KEY'])
50
-
51
- index_name = "omaha-details"
52
- vectorstore = PineconeVectorStore(index_name=index_name, embedding=embeddings)
53
- retriever = vectorstore.as_retriever(search_kwargs={'k': 5})
54
-
55
- # Initialize ChatOpenAI model
56
- chat_model = ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'],
57
- temperature=0, model='gpt-4o')
58
-
59
- conversational_memory = ConversationBufferWindowMemory(
60
- memory_key='chat_history',
61
- k=10,
62
- return_messages=True
63
- )
64
-
65
- def get_current_time_and_date():
66
- now = datetime.now()
67
- return now.strftime("%Y-%m-%d %H:%M:%S")
68
-
69
- # Example usage
70
- current_time_and_date = get_current_time_and_date()
71
 
72
  # def fetch_local_events():
73
  # api_key = os.environ['SERP_API']
@@ -77,60 +231,23 @@ current_time_and_date = get_current_time_and_date()
77
  # if response.status_code == 200:
78
  # events_results = response.json().get("events_results", [])
79
  # events_html = """
80
- # <h2 style="font-family: 'Georgia', serif; color: #4CAF50; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Local Events</h2>
81
  # <style>
82
  # .event-item {
83
  # font-family: 'Verdana', sans-serif;
84
  # color: #333;
85
- # background-color: #f0f8ff;
86
  # margin-bottom: 15px;
87
  # padding: 10px;
88
- # border: 1px solid #ddd;
89
- # border-radius: 5px;
90
- # border: 2px solid red; /* Added red border */
91
- # transition: box-shadow 0.3s ease, background-color 0.3s ease;
92
  # font-weight: bold;
93
  # }
94
- # .event-item:hover {
95
- # box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
96
- # background-color: #e6f7ff;
97
- # }
98
  # .event-item a {
99
  # color: #1E90FF;
100
  # text-decoration: none;
101
- # font-weight: bold;
102
  # }
103
  # .event-item a:hover {
104
  # text-decoration: underline;
105
  # }
106
- # .event-preview {
107
- # position: absolute;
108
- # display: none;
109
- # border: 1px solid #ccc;
110
- # border-radius: 5px;
111
- # box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2);
112
- # background-color: white;
113
- # z-index: 1000;
114
- # max-width: 300px;
115
- # padding: 10px;
116
- # font-family: 'Verdana', sans-serif;
117
- # color: #333;
118
- # }
119
  # </style>
120
- # <script>
121
- # function showPreview(event, previewContent) {
122
- # var previewBox = document.getElementById('event-preview');
123
- # previewBox.innerHTML = previewContent;
124
- # previewBox.style.left = event.pageX + 'px';
125
- # previewBox.style.top = event.pageY + 'px';
126
- # previewBox.style.display = 'block';
127
- # }
128
- # function hidePreview() {
129
- # var previewBox = document.getElementById('event-preview');
130
- # previewBox.style.display = 'none';
131
- # }
132
- # </script>
133
- # <div id="event-preview" class="event-preview"></div>
134
  # """
135
  # for index, event in enumerate(events_results):
136
  # title = event.get("title", "No title")
@@ -138,7 +255,7 @@ current_time_and_date = get_current_time_and_date()
138
  # location = event.get("address", "No location")
139
  # link = event.get("link", "#")
140
  # events_html += f"""
141
- # <div class="event-item" onmouseover="showPreview(event, 'Date: {date}<br>Location: {location}')" onmouseout="hidePreview()">
142
  # <a href='{link}' target='_blank'>{index + 1}. {title}</a>
143
  # <p>Date: {date}<br>Location: {location}</p>
144
  # </div>
@@ -147,17 +264,389 @@ current_time_and_date = get_current_time_and_date()
147
  # else:
148
  # return "<p>Failed to fetch local events</p>"
149
 
150
- # def fetch_local_events():
151
- # api_key = os.environ['SERP_API']
152
- # url = f'https://serpapi.com/search.json?engine=google_events&q=Events+in+Omaha&hl=en&gl=us&api_key={api_key}'
153
 
154
  # response = requests.get(url)
155
  # if response.status_code == 200:
156
- # events_results = response.json().get("events_results", [])
157
- # events_html = """
158
- # <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Local Events</h2>
159
  # <style>
160
- # .event-item {
161
  # font-family: 'Verdana', sans-serif;
162
  # color: #333;
163
  # background-color: #f0f8ff;
@@ -167,19 +656,19 @@ current_time_and_date = get_current_time_and_date()
167
  # transition: box-shadow 0.3s ease, background-color 0.3s ease;
168
  # font-weight: bold;
169
  # }
170
- # .event-item:hover {
171
  # box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
172
  # background-color: #e6f7ff;
173
  # }
174
- # .event-item a {
175
  # color: #1E90FF;
176
  # text-decoration: none;
177
  # font-weight: bold;
178
  # }
179
- # .event-item a:hover {
180
  # text-decoration: underline;
181
  # }
182
- # .event-preview {
183
  # position: absolute;
184
  # display: none;
185
  # border: 1px solid #ccc;
@@ -195,38 +684,283 @@ current_time_and_date = get_current_time_and_date()
195
  # </style>
196
  # <script>
197
  # function showPreview(event, previewContent) {
198
- # var previewBox = document.getElementById('event-preview');
199
  # previewBox.innerHTML = previewContent;
200
  # previewBox.style.left = event.pageX + 'px';
201
  # previewBox.style.top = event.pageY + 'px';
202
  # previewBox.style.display = 'block';
203
  # }
204
  # function hidePreview() {
205
- # var previewBox = document.getElementById('event-preview');
206
  # previewBox.style.display = 'none';
207
  # }
208
  # </script>
209
- # <div id="event-preview" class="event-preview"></div>
210
  # """
211
- # for index, event in enumerate(events_results):
212
- # title = event.get("title", "No title")
213
- # date = event.get("date", "No date")
214
- # location = event.get("address", "No location")
215
- # link = event.get("link", "#")
216
- # events_html += f"""
217
- # <div class="event-item" onmouseover="showPreview(event, 'Date: {date}<br>Location: {location}')" onmouseout="hidePreview()">
218
  # <a href='{link}' target='_blank'>{index + 1}. {title}</a>
219
- # <p>Date: {date}<br>Location: {location}</p>
220
  # </div>
221
  # """
222
- # return events_html
223
  # else:
224
- # return "<p>Failed to fetch local events</p>"
225
226
  def fetch_local_events():
227
  api_key = os.environ['SERP_API']
228
  url = f'https://serpapi.com/search.json?engine=google_events&q=Events+in+Omaha&hl=en&gl=us&api_key={api_key}'
229
-
230
  response = requests.get(url)
231
  if response.status_code == 200:
232
  events_results = response.json().get("events_results", [])
@@ -264,75 +998,6 @@ def fetch_local_events():
264
  else:
265
  return "<p>Failed to fetch local events</p>"
266
 
267
-
268
- # def fetch_local_weather():
269
- # try:
270
- # api_key = os.environ['WEATHER_API']
271
- # url = f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/omaha?unitGroup=metric&include=events%2Calerts%2Chours%2Cdays%2Ccurrent&key={api_key}'
272
- # response = requests.get(url)
273
- # response.raise_for_status()
274
- # jsonData = response.json()
275
-
276
- # current_conditions = jsonData.get("currentConditions", {})
277
- # temp_celsius = current_conditions.get("temp", "N/A")
278
-
279
- # if temp_celsius != "N/A":
280
- # temp_fahrenheit = int((temp_celsius * 9/5) + 32)
281
- # else:
282
- # temp_fahrenheit = "N/A"
283
-
284
- # condition = current_conditions.get("conditions", "N/A")
285
- # humidity = current_conditions.get("humidity", "N/A")
286
-
287
- # weather_html = f"""
288
- # <div class="weather-theme">
289
- # <h2 style="font-family: 'Georgia', serif; color: #4CAF50; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Local Weather</h2>
290
- # <div class="weather-content">
291
- # <div class="weather-icon">
292
- # <img src="https://www.weatherbit.io/static/img/icons/{get_weather_icon(condition)}.png" alt="{condition}" style="width: 100px; height: 100px;">
293
- # </div>
294
- # <div class="weather-details">
295
- # <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Temperature: {temp_fahrenheit}°F</p>
296
- # <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Condition: {condition}</p>
297
- # <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Humidity: {humidity}%</p>
298
- # </div>
299
- # </div>
300
- # </div>
301
- # <style>
302
- # .weather-theme {{
303
- # animation: backgroundAnimation 10s infinite alternate;
304
- # border: 2px solid red; /* Added red border */
305
- # border-radius: 10px;
306
- # padding: 10px;
307
- # margin-bottom: 15px;
308
- # background: linear-gradient(45deg, #ffcc33, #ff6666, #ffcc33, #ff6666);
309
- # background-size: 400% 400%;
310
- # box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
311
- # transition: box-shadow 0.3s ease, background-color 0.3s ease;
312
- # }}
313
- # .weather-theme:hover {{
314
- # box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2);
315
- # background-position: 100% 100%;
316
- # }}
317
- # @keyframes backgroundAnimation {{
318
- # 0% {{ background-position: 0% 50%; }}
319
- # 100% {{ background-position: 100% 50%; }}
320
- # }}
321
- # .weather-content {{
322
- # display: flex;
323
- # align-items: center;
324
- # }}
325
- # .weather-icon {{
326
- # flex: 1;
327
- # }}
328
- # .weather-details {{
329
- # flex: 3;
330
- # }}
331
- # </style>
332
- # """
333
- # return weather_html
334
- # except requests.exceptions.RequestException as e:
335
- # return f"<p>Failed to fetch local weather: {e}</p>"
336
  def fetch_local_weather():
337
  try:
338
  api_key = os.environ['WEATHER_API']
@@ -340,15 +1005,12 @@ def fetch_local_weather():
340
  response = requests.get(url)
341
  response.raise_for_status()
342
  jsonData = response.json()
343
-
344
  current_conditions = jsonData.get("currentConditions", {})
345
  temp_celsius = current_conditions.get("temp", "N/A")
346
-
347
  if temp_celsius != "N/A":
348
  temp_fahrenheit = int((temp_celsius * 9/5) + 32)
349
  else:
350
  temp_fahrenheit = "N/A"
351
-
352
  condition = current_conditions.get("conditions", "N/A")
353
  humidity = current_conditions.get("humidity", "N/A")
354
 
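For context on the hunk above: fetch_local_weather reads metric temperatures from the Visual Crossing response and converts them to Fahrenheit inline. A minimal sketch of that conversion and its "N/A" guard (the sample value is illustrative, not an API result):

def celsius_to_fahrenheit(temp_celsius):
    # Same rule the function applies: F = C * 9/5 + 32, truncated to int.
    if temp_celsius == "N/A":
        return "N/A"
    return int((temp_celsius * 9 / 5) + 32)

print(celsius_to_fahrenheit(20))  # 68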
@@ -418,14 +1080,82 @@ def get_weather_icon(condition):
418
  }
419
  return condition_map.get(condition, "c04d")
420
 
421
- # Update prompt templates to include fetched details
422
-
423
- current_time_and_date = get_current_time_and_date()
424
-
425
-
 
426
 
427
  # Define prompt templates
428
- template1 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska. Based on weather being a sunny bright day and the today's date is 20th june 2024, use the following pieces of context,
429
  memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end. If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
430
  Use fifteen sentences maximum. Keep the answer as detailed as possible. Always include the address, time, date, and
431
  event type and description. Always say "It was my pleasure!" at the end of the answer.
@@ -433,20 +1163,42 @@ event type and description. Always say "It was my pleasure!" at the end of the a
433
  Question: {question}
434
  Helpful Answer:"""
435
 
436
- template2 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska. Based on today's weather being a sunny bright day and today's date is 20th june 2024, take the location or address but don't show the location or address on the output prompts. Use the following pieces of context,
437
  memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end. If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
438
  Keep the answer short and sweet and crisp. Always say "It was my pleasure!" at the end of the answer.
439
  {context}
440
  Question: {question}
441
  Helpful Answer:"""
442
 
 
 
443
 
 
 
 
 
 
444
 
445
  QA_CHAIN_PROMPT_1 = PromptTemplate(input_variables=["context", "question"], template=template1)
446
  QA_CHAIN_PROMPT_2 = PromptTemplate(input_variables=["context", "question"], template=template2)
447
 
448
-
449
- # Define the retrieval QA chain
450
  def build_qa_chain(prompt_template):
451
  qa_chain = RetrievalQA.from_chain_type(
452
  llm=chat_model,
@@ -463,7 +1215,6 @@ def build_qa_chain(prompt_template):
463
  ]
464
  return qa_chain, tools
465
 
466
- # Define the agent initializer
467
  def initialize_agent_with_prompt(prompt_template):
468
  qa_chain, tools = build_qa_chain(prompt_template)
469
  agent = initialize_agent(
@@ -477,10 +1228,8 @@ def initialize_agent_with_prompt(prompt_template):
477
  )
478
  return agent
479
 
480
- # Define the function to generate answers
481
  def generate_answer(message, choice):
482
  logging.debug(f"generate_answer called with prompt_choice: {choice}")
483
-
484
  if choice == "Details":
485
  agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
486
  elif choice == "Conversational":
@@ -489,32 +1238,23 @@ def generate_answer(message, choice):
489
  logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
490
  agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
491
  response = agent(message)
492
-
493
- # Extract addresses for mapping regardless of the choice
494
  addresses = extract_addresses(response['output'])
495
  return response['output'], addresses
496
-
497
-
498
 
499
  def bot(history, choice):
500
  if not history:
501
  return history
502
  response, addresses = generate_answer(history[-1][0], choice)
503
  history[-1][1] = ""
504
-
505
- # Generate audio for the entire response in a separate thread
506
  with concurrent.futures.ThreadPoolExecutor() as executor:
507
  audio_future = executor.submit(generate_audio_elevenlabs, response)
508
-
509
  for character in response:
510
  history[-1][1] += character
511
- time.sleep(0.05) # Adjust the speed of text appearance
512
  yield history, None
513
-
514
  audio_path = audio_future.result()
515
  yield history, audio_path
516
 
517
-
518
  def add_message(history, message):
519
  history.append((message, None))
520
  return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)
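The bot generator in this hunk streams the reply one character at a time while the ElevenLabs audio renders on a worker thread. A minimal sketch of that pattern, with generate_audio as a stand-in for generate_audio_elevenlabs:

import concurrent.futures
import time

def generate_audio(text):
    # Stand-in for the slow TTS call; returns a fake file path.
    time.sleep(0.5)
    return f"/tmp/audio_{len(text)}.mp3"

def stream_reply(response):
    with concurrent.futures.ThreadPoolExecutor() as executor:
        audio_future = executor.submit(generate_audio, response)  # start TTS early
        shown = ""
        for character in response:
            shown += character
            yield shown, None               # partial text, no audio yet
        yield shown, audio_future.result()  # full text plus finished audio path

for text, audio in stream_reply("Hello from Omaha!"):
    pass
print(audio)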
@@ -544,12 +1284,9 @@ all_addresses = []
544
  def generate_map(location_names):
545
  global all_addresses
546
  all_addresses.extend(location_names)
547
-
548
  api_key = os.environ['GOOGLEMAPS_API_KEY']
549
  gmaps = GoogleMapsClient(key=api_key)
550
-
551
  m = folium.Map(location=[41.2565, -95.9345], zoom_start=12)
552
-
553
  for location_name in all_addresses:
554
  geocode_result = gmaps.geocode(location_name)
555
  if geocode_result:
@@ -558,201 +1295,9 @@ def generate_map(location_names):
558
  [location['lat'], location['lng']],
559
  tooltip=f"{geocode_result[0]['formatted_address']}"
560
  ).add_to(m)
561
-
562
  map_html = m._repr_html_()
563
  return map_html
564
 
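generate_map above geocodes each extracted address through the Google Maps client and adds a folium marker for it. A minimal offline sketch of the folium side, with hard-coded coordinates standing in for gmaps.geocode() results:

import folium

# Illustrative stand-ins for geocoding results (address -> lat/lng).
locations = {
    "Old Market, Omaha, NE 68102": (41.2541, -95.9290),
    "Henry Doorly Zoo, Omaha, NE 68107": (41.2253, -95.9245),
}

m = folium.Map(location=[41.2565, -95.9345], zoom_start=12)  # centered on Omaha
for address, (lat, lng) in locations.items():
    folium.Marker([lat, lng], tooltip=address).add_to(m)

map_html = m._repr_html_()  # embeddable HTML, as handed to the Gradio HTML component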
565
- # def fetch_local_news():
566
- # api_key = os.environ['SERP_API']
567
- # url = f'https://serpapi.com/search.json?engine=google_news&q=omaha headline&api_key={api_key}'
568
- # response = requests.get(url)
569
- # if response.status_code == 200:
570
- # results = response.json().get("news_results", [])
571
- # news_html = """
572
- # <h2 style="font-family: 'Georgia', serif; color: #4CAF50; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Omaha Today </h2>
573
- # <style>
574
- # .news-item {
575
- # font-family: 'Verdana', sans-serif;
576
- # color: #333;
577
- # background-color: #f0f8ff;
578
- # margin-bottom: 15px;
579
- # padding: 10px;
580
- # border: 2px solid red; /* Added red border */
581
- # border-radius: 5px;
582
- # transition: box-shadow 0.3s ease, background-color 0.3s ease;
583
- # font-weight: bold;
584
- # }
585
- # .news-item:hover {
586
- # box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
587
- # background-color: #e6f7ff;
588
- # }
589
- # .news-item a {
590
- # color: #1E90FF;
591
- # text-decoration: none;
592
- # font-weight: bold;
593
- # }
594
- # .news-item a:hover {
595
- # text-decoration: underline;
596
- # }
597
- # .news-preview {
598
- # position: absolute;
599
- # display: none;
600
- # border: 1px solid #ccc;
601
- # border-radius: 5px;
602
- # box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2);
603
- # background-color: white;
604
- # z-index: 1000;
605
- # max-width: 300px;
606
- # padding: 10px;
607
- # font-family: 'Verdana', sans-serif;
608
- # color: #333;
609
- # }
610
- # </style>
611
- # <script>
612
- # function showPreview(event, previewContent) {
613
- # var previewBox = document.getElementById('news-preview');
614
- # previewBox.innerHTML = previewContent;
615
- # previewBox.style.left = event.pageX + 'px';
616
- # previewBox.style.top = event.pageY + 'px';
617
- # previewBox.style.display = 'block';
618
- # }
619
- # function hidePreview() {
620
- # var previewBox = document.getElementById('news-preview');
621
- # previewBox.style.display = 'none';
622
- # }
623
- # </script>
624
- # <div id="news-preview" class="news-preview"></div>
625
- # """
626
- # for index, result in enumerate(results[:7]):
627
- # title = result.get("title", "No title")
628
- # link = result.get("link", "#")
629
- # snippet = result.get("snippet", "")
630
- # news_html += f"""
631
- # <div class="news-item" onmouseover="showPreview(event, '{snippet}')" onmouseout="hidePreview()">
632
- # <a href='{link}' target='_blank'>{index + 1}. {title}</a>
633
- # <p>{snippet}</p>
634
- # </div>
635
- # """
636
- # return news_html
637
- # else:
638
- # return "<p>Failed to fetch local news</p>"
639
-
640
- def fetch_local_news():
641
- api_key = os.environ['SERP_API']
642
- url = f'https://serpapi.com/search.json?engine=google_news&q=omaha headline&api_key={api_key}'
643
- response = requests.get(url)
644
- if response.status_code == 200:
645
- results = response.json().get("news_results", [])
646
- news_html = """
647
- <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Omaha Today</h2>
648
- <style>
649
- .news-item {
650
- font-family: 'Verdana', sans-serif;
651
- color: #333;
652
- background-color: #f0f8ff;
653
- margin-bottom: 15px;
654
- padding: 10px;
655
- border-radius: 5px;
656
- transition: box-shadow 0.3s ease, background-color 0.3s ease;
657
- font-weight: bold;
658
- }
659
- .news-item:hover {
660
- box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
661
- background-color: #e6f7ff;
662
- }
663
- .news-item a {
664
- color: #1E90FF;
665
- text-decoration: none;
666
- font-weight: bold;
667
- }
668
- .news-item a:hover {
669
- text-decoration: underline;
670
- }
671
- .news-preview {
672
- position: absolute;
673
- display: none;
674
- border: 1px solid #ccc;
675
- border-radius: 5px;
676
- box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2);
677
- background-color: white;
678
- z-index: 1000;
679
- max-width: 300px;
680
- padding: 10px;
681
- font-family: 'Verdana', sans-serif;
682
- color: #333;
683
- }
684
- </style>
685
- <script>
686
- function showPreview(event, previewContent) {
687
- var previewBox = document.getElementById('news-preview');
688
- previewBox.innerHTML = previewContent;
689
- previewBox.style.left = event.pageX + 'px';
690
- previewBox.style.top = event.pageY + 'px';
691
- previewBox.style.display = 'block';
692
- }
693
- function hidePreview() {
694
- var previewBox = document.getElementById('news-preview');
695
- previewBox.style.display = 'none';
696
- }
697
- </script>
698
- <div id="news-preview" class="news-preview"></div>
699
- """
700
- for index, result in enumerate(results[:7]):
701
- title = result.get("title", "No title")
702
- link = result.get("link", "#")
703
- snippet = result.get("snippet", "")
704
- news_html += f"""
705
- <div class="news-item" onmouseover="showPreview(event, '{snippet}')" onmouseout="hidePreview()">
706
- <a href='{link}' target='_blank'>{index + 1}. {title}</a>
707
- <p>{snippet}</p>
708
- </div>
709
- """
710
- return news_html
711
- else:
712
- return "<p>Failed to fetch local news</p>"
713
-
714
-
715
- # Voice Control
716
- import numpy as np
717
- import torch
718
- from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
719
-
720
- model_id = 'openai/whisper-large-v3'
721
- device = "cuda:0" if torch.cuda.is_available() else "cpu"
722
- torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
723
- model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype,
724
- #low_cpu_mem_usage=True,
725
- use_safetensors=True).to(device)
726
- processor = AutoProcessor.from_pretrained(model_id)
727
-
728
- # Optimized ASR pipeline
729
- pipe_asr = pipeline("automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, chunk_length_s=15, batch_size=16, torch_dtype=torch_dtype, device=device, return_timestamps=True)
730
-
731
- base_audio_drive = "/data/audio"
732
-
733
- import numpy as np
734
-
735
- def transcribe_function(stream, new_chunk):
736
- try:
737
- sr, y = new_chunk[0], new_chunk[1]
738
- except TypeError:
739
- print(f"Error chunk structure: {type(new_chunk)}, content: {new_chunk}")
740
- return stream, "", None
741
-
742
- y = y.astype(np.float32) / np.max(np.abs(y))
743
-
744
- if stream is not None:
745
- stream = np.concatenate([stream, y])
746
- else:
747
- stream = y
748
-
749
- result = pipe_asr({"array": stream, "sampling_rate": sr}, return_timestamps=False)
750
-
751
- full_text = result.get("text", "")
752
-
753
- return stream, full_text, result
754
-
755
-
756
  def update_map_with_response(history):
757
  if not history:
758
  return ""
@@ -760,22 +1305,18 @@ def update_map_with_response(history):
760
  addresses = extract_addresses(response)
761
  return generate_map(addresses)
762
 
763
-
764
-
765
  def clear_textbox():
766
- return ""
767
 
768
- def show_map_if_details(history,choice):
769
  if choice in ["Details", "Conversational"]:
770
  return gr.update(visible=True), update_map_with_response(history)
771
  else:
772
- return gr.update(visible(False), "")
773
-
774
-
775
 
776
  def generate_audio_elevenlabs(text):
777
  XI_API_KEY = os.environ['ELEVENLABS_API']
778
- VOICE_ID = 'd9MIrwLnvDeH7aZb61E9' # Replace with your voice ID
779
  tts_url = f"https://api.elevenlabs.io/v1/text-to-speech/{VOICE_ID}/stream"
780
  headers = {
781
  "Accept": "application/json",
@@ -787,7 +1328,7 @@ def generate_audio_elevenlabs(text):
787
  "voice_settings": {
788
  "stability": 1.0,
789
  "similarity_boost": 0.0,
790
- "style": 0.60, # Adjust style for more romantic tone
791
  "use_speaker_boost": False
792
  }
793
  }
@@ -816,14 +1357,10 @@ def generate_image(prompt):
816
  ).images[0]
817
  return image
818
 
819
- # Hardcoded prompt for image generation
820
- # hardcoded_prompt_1 = "Useing The top events like 'Summer Art Festival'and Date - 06/19/2024 ,Weather-Sunny Bright Day.Create Highly Visually Compelling High Resolution and High Quality Photographics Advatizement for 'Toyota'"
821
- hardcoded_prompt_1="Give a high quality photograph of a great looking red 2026 toyota coupe against a skyline setting in th night, michael mann style in omaha enticing the consumer to buy this product"
822
- # hardcoded_prompt_2 = "Create a vibrant poster of Nebraska with beautiful weather, featuring picturesque landscapes, clear skies, and the word 'Nebraska' prominently displayed."
823
- hardcoded_prompt_2="A vibrant and dynamic football game scene in the style of Peter Paul Rubens, showcasing the intense match between Alabama and Nebraska. The players are depicted with the dramatic, muscular physiques and expressive faces typical of Rubens' style. The Alabama team is wearing their iconic crimson and white uniforms, while the Nebraska team is in their classic red and white attire. The scene is filled with action, with players in mid-motion, tackling, running, and catching the ball. The background features a grand stadium filled with cheering fans, banners, and the natural landscape in the distance. The colors are rich and vibrant, with a strong use of light and shadow to create depth and drama. The overall atmosphere captures the intensity and excitement of the game, infused with the grandeur and dynamism characteristic of Rubens' work."
824
  hardcoded_prompt_3 = "Create a high-energy scene of a DJ performing on a large stage with vibrant lights, colorful lasers, a lively dancing crowd, and various electronic equipment in the background."
825
 
826
-
827
  def update_images():
828
  image_1 = generate_image(hardcoded_prompt_1)
829
  image_2 = generate_image(hardcoded_prompt_2)
@@ -831,14 +1368,11 @@ def update_images():
831
  return image_1, image_2, image_3
832
 
833
  with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
834
-
835
  with gr.Row():
836
  with gr.Column():
837
  state = gr.State()
838
-
839
  chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
840
  choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational")
841
-
842
  gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
843
  chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!")
844
  chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
@@ -847,33 +1381,31 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
847
  chatbot.like(print_like_dislike, None, None)
848
  clear_button = gr.Button("Clear")
849
  clear_button.click(fn=clear_textbox, inputs=None, outputs=chat_input)
850
-
851
-
852
  audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy')
853
  audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="SAMLOne_real_time")
854
-
855
  gr.Markdown("<h1 style='color: red;'>Map</h1>", elem_id="location-markdown")
856
  location_output = gr.HTML()
857
  bot_msg.then(show_map_if_details, [chatbot, choice], [location_output, location_output])
858
-
859
  with gr.Column():
860
  weather_output = gr.HTML(value=fetch_local_weather())
861
  news_output = gr.HTML(value=fetch_local_news())
862
- news_output = gr.HTML(value=fetch_local_events())
863
-
864
  with gr.Column():
865
-
866
  image_output_1 = gr.Image(value=generate_image(hardcoded_prompt_1), width=400, height=400)
867
  image_output_2 = gr.Image(value=generate_image(hardcoded_prompt_2), width=400, height=400)
868
  image_output_3 = gr.Image(value=generate_image(hardcoded_prompt_3), width=400, height=400)
869
-
870
-
871
  refresh_button = gr.Button("Refresh Images")
872
  refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3])
873
 
 
 
 
874
  demo.queue()
875
  demo.launch(share=True)
876
 
 
 
 
877
 
878
 
879
 
 
1
+ # import os
2
+ # import re
3
+ # import time
4
+ # import requests
5
+ # import logging
6
+ # import folium
7
+ # import gradio as gr
8
+ # import tempfile
9
+ # import torch
10
+ # from datetime import datetime
11
+ # import numpy as np
12
+ # from gtts import gTTS
13
+ # from googlemaps import Client as GoogleMapsClient
14
+ # from diffusers import StableDiffusion3Pipeline
15
+ # import concurrent.futures
16
+ # from PIL import Image
17
+
18
+ # from langchain_openai import OpenAIEmbeddings, ChatOpenAI
19
+ # from langchain_pinecone import PineconeVectorStore
20
+ # from langchain.prompts import PromptTemplate
21
+ # from langchain.chains import RetrievalQA
22
+ # from langchain.chains.conversation.memory import ConversationBufferWindowMemory
23
+ # from langchain.agents import Tool, initialize_agent
24
+ # from huggingface_hub import login
25
+
26
+ # # Check if the token is already set in the environment variables
27
+ # hf_token = os.getenv("HF_TOKEN")
28
+
29
+ # if hf_token is None:
30
+ # # If the token is not set, prompt for it (this should be done securely)
31
+ # print("Please set your Hugging Face token in the environment variables.")
32
+ # else:
33
+ # # Login using the token
34
+ # login(token=hf_token)
35
+
36
+ # # Your application logic goes here
37
+ # print("Logged in successfully to Hugging Face Hub!")
38
+
39
+
40
+
41
+ # # Set up logging
42
+ # logging.basicConfig(level=logging.DEBUG)
43
+
44
+ # # Initialize OpenAI embeddings
45
+ # embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
46
+
47
+ # # Initialize Pinecone
48
+ # from pinecone import Pinecone
49
+ # pc = Pinecone(api_key=os.environ['PINECONE_API_KEY'])
50
+
51
+ # index_name = "omaha-details"
52
+ # vectorstore = PineconeVectorStore(index_name=index_name, embedding=embeddings)
53
+ # retriever = vectorstore.as_retriever(search_kwargs={'k': 5})
54
+
55
+ # # Initialize ChatOpenAI model
56
+ # chat_model = ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'],
57
+ # temperature=0, model='gpt-4o')
58
+
59
+ # conversational_memory = ConversationBufferWindowMemory(
60
+ # memory_key='chat_history',
61
+ # k=10,
62
+ # return_messages=True
63
+ # )
64
+
65
+ # def get_current_time_and_date():
66
+ # now = datetime.now()
67
+ # return now.strftime("%Y-%m-%d %H:%M:%S")
68
+
69
+ # # Example usage
70
+ # current_time_and_date = get_current_time_and_date()
71
+
72
+ # # def fetch_local_events():
73
+ # # api_key = os.environ['SERP_API']
74
+ # # url = f'https://serpapi.com/search.json?engine=google_events&q=Events+in+Omaha&hl=en&gl=us&api_key={api_key}'
75
+
76
+ # # response = requests.get(url)
77
+ # # if response.status_code == 200:
78
+ # # events_results = response.json().get("events_results", [])
79
+ # # events_html = """
80
+ # # <h2 style="font-family: 'Georgia', serif; color: #4CAF50; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Local Events</h2>
81
+ # # <style>
82
+ # # .event-item {
83
+ # # font-family: 'Verdana', sans-serif;
84
+ # # color: #333;
85
+ # # background-color: #f0f8ff;
86
+ # # margin-bottom: 15px;
87
+ # # padding: 10px;
88
+ # # border: 1px solid #ddd;
89
+ # # border-radius: 5px;
90
+ # # border: 2px solid red; /* Added red border */
91
+ # # transition: box-shadow 0.3s ease, background-color 0.3s ease;
92
+ # # font-weight: bold;
93
+ # # }
94
+ # # .event-item:hover {
95
+ # # box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
96
+ # # background-color: #e6f7ff;
97
+ # # }
98
+ # # .event-item a {
99
+ # # color: #1E90FF;
100
+ # # text-decoration: none;
101
+ # # font-weight: bold;
102
+ # # }
103
+ # # .event-item a:hover {
104
+ # # text-decoration: underline;
105
+ # # }
106
+ # # .event-preview {
107
+ # # position: absolute;
108
+ # # display: none;
109
+ # # border: 1px solid #ccc;
110
+ # # border-radius: 5px;
111
+ # # box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2);
112
+ # # background-color: white;
113
+ # # z-index: 1000;
114
+ # # max-width: 300px;
115
+ # # padding: 10px;
116
+ # # font-family: 'Verdana', sans-serif;
117
+ # # color: #333;
118
+ # # }
119
+ # # </style>
120
+ # # <script>
121
+ # # function showPreview(event, previewContent) {
122
+ # # var previewBox = document.getElementById('event-preview');
123
+ # # previewBox.innerHTML = previewContent;
124
+ # # previewBox.style.left = event.pageX + 'px';
125
+ # # previewBox.style.top = event.pageY + 'px';
126
+ # # previewBox.style.display = 'block';
127
+ # # }
128
+ # # function hidePreview() {
129
+ # # var previewBox = document.getElementById('event-preview');
130
+ # # previewBox.style.display = 'none';
131
+ # # }
132
+ # # </script>
133
+ # # <div id="event-preview" class="event-preview"></div>
134
+ # # """
135
+ # # for index, event in enumerate(events_results):
136
+ # # title = event.get("title", "No title")
137
+ # # date = event.get("date", "No date")
138
+ # # location = event.get("address", "No location")
139
+ # # link = event.get("link", "#")
140
+ # # events_html += f"""
141
+ # # <div class="event-item" onmouseover="showPreview(event, 'Date: {date}<br>Location: {location}')" onmouseout="hidePreview()">
142
+ # # <a href='{link}' target='_blank'>{index + 1}. {title}</a>
143
+ # # <p>Date: {date}<br>Location: {location}</p>
144
+ # # </div>
145
+ # # """
146
+ # # return events_html
147
+ # # else:
148
+ # # return "<p>Failed to fetch local events</p>"
149
+
150
+ # # def fetch_local_events():
151
+ # # api_key = os.environ['SERP_API']
152
+ # # url = f'https://serpapi.com/search.json?engine=google_events&q=Events+in+Omaha&hl=en&gl=us&api_key={api_key}'
153
+
154
+ # # response = requests.get(url)
155
+ # # if response.status_code == 200:
156
+ # # events_results = response.json().get("events_results", [])
157
+ # # events_html = """
158
+ # # <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Local Events</h2>
159
+ # # <style>
160
+ # # .event-item {
161
+ # # font-family: 'Verdana', sans-serif;
162
+ # # color: #333;
163
+ # # background-color: #f0f8ff;
164
+ # # margin-bottom: 15px;
165
+ # # padding: 10px;
166
+ # # border-radius: 5px;
167
+ # # transition: box-shadow 0.3s ease, background-color 0.3s ease;
168
+ # # font-weight: bold;
169
+ # # }
170
+ # # .event-item:hover {
171
+ # # box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
172
+ # # background-color: #e6f7ff;
173
+ # # }
174
+ # # .event-item a {
175
+ # # color: #1E90FF;
176
+ # # text-decoration: none;
177
+ # # font-weight: bold;
178
+ # # }
179
+ # # .event-item a:hover {
180
+ # # text-decoration: underline;
181
+ # # }
182
+ # # .event-preview {
183
+ # # position: absolute;
184
+ # # display: none;
185
+ # # border: 1px solid #ccc;
186
+ # # border-radius: 5px;
187
+ # # box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2);
188
+ # # background-color: white;
189
+ # # z-index: 1000;
190
+ # # max-width: 300px;
191
+ # # padding: 10px;
192
+ # # font-family: 'Verdana', sans-serif;
193
+ # # color: #333;
194
+ # # }
195
+ # # </style>
196
+ # # <script>
197
+ # # function showPreview(event, previewContent) {
198
+ # # var previewBox = document.getElementById('event-preview');
199
+ # # previewBox.innerHTML = previewContent;
200
+ # # previewBox.style.left = event.pageX + 'px';
201
+ # # previewBox.style.top = event.pageY + 'px';
202
+ # # previewBox.style.display = 'block';
203
+ # # }
204
+ # # function hidePreview() {
205
+ # # var previewBox = document.getElementById('event-preview');
206
+ # # previewBox.style.display = 'none';
207
+ # # }
208
+ # # </script>
209
+ # # <div id="event-preview" class="event-preview"></div>
210
+ # # """
211
+ # # for index, event in enumerate(events_results):
212
+ # # title = event.get("title", "No title")
213
+ # # date = event.get("date", "No date")
214
+ # # location = event.get("address", "No location")
215
+ # # link = event.get("link", "#")
216
+ # # events_html += f"""
217
+ # # <div class="event-item" onmouseover="showPreview(event, 'Date: {date}<br>Location: {location}')" onmouseout="hidePreview()">
218
+ # # <a href='{link}' target='_blank'>{index + 1}. {title}</a>
219
+ # # <p>Date: {date}<br>Location: {location}</p>
220
+ # # </div>
221
+ # # """
222
+ # # return events_html
223
+ # # else:
224
+ # # return "<p>Failed to fetch local events</p>"
225
 
226
  # def fetch_local_events():
227
  # api_key = os.environ['SERP_API']
 
231
  # if response.status_code == 200:
232
  # events_results = response.json().get("events_results", [])
233
  # events_html = """
234
+ # <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Local Events</h2>
235
  # <style>
236
  # .event-item {
237
  # font-family: 'Verdana', sans-serif;
238
  # color: #333;
 
239
  # margin-bottom: 15px;
240
  # padding: 10px;
 
 
 
 
241
  # font-weight: bold;
242
  # }
 
 
 
 
243
  # .event-item a {
244
  # color: #1E90FF;
245
  # text-decoration: none;
 
246
  # }
247
  # .event-item a:hover {
248
  # text-decoration: underline;
249
  # }
 
 
 
 
 
 
 
 
 
 
 
 
 
250
  # </style>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
251
  # """
252
  # for index, event in enumerate(events_results):
253
  # title = event.get("title", "No title")
 
255
  # location = event.get("address", "No location")
256
  # link = event.get("link", "#")
257
  # events_html += f"""
258
+ # <div class="event-item">
259
  # <a href='{link}' target='_blank'>{index + 1}. {title}</a>
260
  # <p>Date: {date}<br>Location: {location}</p>
261
  # </div>
 
264
  # else:
265
  # return "<p>Failed to fetch local events</p>"
266
 
 
 
 
267
 
268
+ # # def fetch_local_weather():
269
+ # # try:
270
+ # # api_key = os.environ['WEATHER_API']
271
+ # # url = f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/omaha?unitGroup=metric&include=events%2Calerts%2Chours%2Cdays%2Ccurrent&key={api_key}'
272
+ # # response = requests.get(url)
273
+ # # response.raise_for_status()
274
+ # # jsonData = response.json()
275
+
276
+ # # current_conditions = jsonData.get("currentConditions", {})
277
+ # # temp_celsius = current_conditions.get("temp", "N/A")
278
+
279
+ # # if temp_celsius != "N/A":
280
+ # # temp_fahrenheit = int((temp_celsius * 9/5) + 32)
281
+ # # else:
282
+ # # temp_fahrenheit = "N/A"
283
+
284
+ # # condition = current_conditions.get("conditions", "N/A")
285
+ # # humidity = current_conditions.get("humidity", "N/A")
286
+
287
+ # # weather_html = f"""
288
+ # # <div class="weather-theme">
289
+ # # <h2 style="font-family: 'Georgia', serif; color: #4CAF50; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Local Weather</h2>
290
+ # # <div class="weather-content">
291
+ # # <div class="weather-icon">
292
+ # # <img src="https://www.weatherbit.io/static/img/icons/{get_weather_icon(condition)}.png" alt="{condition}" style="width: 100px; height: 100px;">
293
+ # # </div>
294
+ # # <div class="weather-details">
295
+ # # <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Temperature: {temp_fahrenheit}°F</p>
296
+ # # <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Condition: {condition}</p>
297
+ # # <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Humidity: {humidity}%</p>
298
+ # # </div>
299
+ # # </div>
300
+ # # </div>
301
+ # # <style>
302
+ # # .weather-theme {{
303
+ # # animation: backgroundAnimation 10s infinite alternate;
304
+ # # border: 2px solid red; /* Added red border */
305
+ # # border-radius: 10px;
306
+ # # padding: 10px;
307
+ # # margin-bottom: 15px;
308
+ # # background: linear-gradient(45deg, #ffcc33, #ff6666, #ffcc33, #ff6666);
309
+ # # background-size: 400% 400%;
310
+ # # box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
311
+ # # transition: box-shadow 0.3s ease, background-color 0.3s ease;
312
+ # # }}
313
+ # # .weather-theme:hover {{
314
+ # # box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2);
315
+ # # background-position: 100% 100%;
316
+ # # }}
317
+ # # @keyframes backgroundAnimation {{
318
+ # # 0% {{ background-position: 0% 50%; }}
319
+ # # 100% {{ background-position: 100% 50%; }}
320
+ # # }}
321
+ # # .weather-content {{
322
+ # # display: flex;
323
+ # # align-items: center;
324
+ # # }}
325
+ # # .weather-icon {{
326
+ # # flex: 1;
327
+ # # }}
328
+ # # .weather-details {{
329
+ # # flex: 3;
330
+ # # }}
331
+ # # </style>
332
+ # # """
333
+ # # return weather_html
334
+ # # except requests.exceptions.RequestException as e:
335
+ # # return f"<p>Failed to fetch local weather: {e}</p>"
336
+ # def fetch_local_weather():
337
+ # try:
338
+ # api_key = os.environ['WEATHER_API']
339
+ # url = f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/omaha?unitGroup=metric&include=events%2Calerts%2Chours%2Cdays%2Ccurrent&key={api_key}'
340
+ # response = requests.get(url)
341
+ # response.raise_for_status()
342
+ # jsonData = response.json()
343
+
344
+ # current_conditions = jsonData.get("currentConditions", {})
345
+ # temp_celsius = current_conditions.get("temp", "N/A")
346
+
347
+ # if temp_celsius != "N/A":
348
+ # temp_fahrenheit = int((temp_celsius * 9/5) + 32)
349
+ # else:
350
+ # temp_fahrenheit = "N/A"
351
+
352
+ # condition = current_conditions.get("conditions", "N/A")
353
+ # humidity = current_conditions.get("humidity", "N/A")
354
+
355
+ # weather_html = f"""
356
+ # <div class="weather-theme">
357
+ # <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Local Weather</h2>
358
+ # <div class="weather-content">
359
+ # <div class="weather-icon">
360
+ # <img src="https://www.weatherbit.io/static/img/icons/{get_weather_icon(condition)}.png" alt="{condition}" style="width: 100px; height: 100px;">
361
+ # </div>
362
+ # <div class="weather-details">
363
+ # <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Temperature: {temp_fahrenheit}°F</p>
364
+ # <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Condition: {condition}</p>
365
+ # <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Humidity: {humidity}%</p>
366
+ # </div>
367
+ # </div>
368
+ # </div>
369
+ # <style>
370
+ # .weather-theme {{
371
+ # animation: backgroundAnimation 10s infinite alternate;
372
+ # border-radius: 10px;
373
+ # padding: 10px;
374
+ # margin-bottom: 15px;
375
+ # background: linear-gradient(45deg, #ffcc33, #ff6666, #ffcc33, #ff6666);
376
+ # background-size: 400% 400%;
377
+ # box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
378
+ # transition: box-shadow 0.3s ease, background-color 0.3s ease;
379
+ # }}
380
+ # .weather-theme:hover {{
381
+ # box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2);
382
+ # background-position: 100% 100%;
383
+ # }}
384
+ # @keyframes backgroundAnimation {{
385
+ # 0% {{ background-position: 0% 50%; }}
386
+ # 100% {{ background-position: 100% 50%; }}
387
+ # }}
388
+ # .weather-content {{
389
+ # display: flex;
390
+ # align-items: center;
391
+ # }}
392
+ # .weather-icon {{
393
+ # flex: 1;
394
+ # }}
395
+ # .weather-details {{
396
+ # flex: 3;
397
+ # }}
398
+ # </style>
399
+ # """
400
+ # return weather_html
401
+ # except requests.exceptions.RequestException as e:
402
+ # return f"<p>Failed to fetch local weather: {e}</p>"
403
+
404
+ # def get_weather_icon(condition):
405
+ # condition_map = {
406
+ # "Clear": "c01d",
407
+ # "Partly Cloudy": "c02d",
408
+ # "Cloudy": "c03d",
409
+ # "Overcast": "c04d",
410
+ # "Mist": "a01d",
411
+ # "Patchy rain possible": "r01d",
412
+ # "Light rain": "r02d",
413
+ # "Moderate rain": "r03d",
414
+ # "Heavy rain": "r04d",
415
+ # "Snow": "s01d",
416
+ # "Thunderstorm": "t01d",
417
+ # "Fog": "a05d",
418
+ # }
419
+ # return condition_map.get(condition, "c04d")
420
+
421
+ # # Update prompt templates to include fetched details
422
+
423
+ # current_time_and_date = get_current_time_and_date()
424
+
425
+
426
+
427
+ # # Define prompt templates
428
+ # template1 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska. Based on weather being a sunny bright day and the today's date is 20th june 2024, use the following pieces of context,
429
+ # memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end. If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
430
+ # Use fifteen sentences maximum. Keep the answer as detailed as possible. Always include the address, time, date, and
431
+ # event type and description. Always say "It was my pleasure!" at the end of the answer.
432
+ # {context}
433
+ # Question: {question}
434
+ # Helpful Answer:"""
435
+
436
+ # template2 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska. Based on today's weather being a sunny bright day and today's date is 20th june 2024, take the location or address but don't show the location or address on the output prompts. Use the following pieces of context,
437
+ # memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end. If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
438
+ # Keep the answer short and sweet and crisp. Always say "It was my pleasure!" at the end of the answer.
439
+ # {context}
440
+ # Question: {question}
441
+ # Helpful Answer:"""
442
+
443
+
444
+
445
+ # QA_CHAIN_PROMPT_1 = PromptTemplate(input_variables=["context", "question"], template=template1)
446
+ # QA_CHAIN_PROMPT_2 = PromptTemplate(input_variables=["context", "question"], template=template2)
447
+
448
+
449
+ # # Define the retrieval QA chain
450
+ # def build_qa_chain(prompt_template):
451
+ # qa_chain = RetrievalQA.from_chain_type(
452
+ # llm=chat_model,
453
+ # chain_type="stuff",
454
+ # retriever=retriever,
455
+ # chain_type_kwargs={"prompt": prompt_template}
456
+ # )
457
+ # tools = [
458
+ # Tool(
459
+ # name='Knowledge Base',
460
+ # func=qa_chain,
461
+ # description='Use this tool when answering general knowledge queries to get more information about the topic'
462
+ # )
463
+ # ]
464
+ # return qa_chain, tools
465
+
466
+ # # Define the agent initializer
467
+ # def initialize_agent_with_prompt(prompt_template):
468
+ # qa_chain, tools = build_qa_chain(prompt_template)
469
+ # agent = initialize_agent(
470
+ # agent='chat-conversational-react-description',
471
+ # tools=tools,
472
+ # llm=chat_model,
473
+ # verbose=False,
474
+ # max_iteration=5,
475
+ # early_stopping_method='generate',
476
+ # memory=conversational_memory
477
+ # )
478
+ # return agent
479
+
480
+ # # Define the function to generate answers
481
+ # def generate_answer(message, choice):
482
+ # logging.debug(f"generate_answer called with prompt_choice: {choice}")
483
+
484
+ # if choice == "Details":
485
+ # agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
486
+ # elif choice == "Conversational":
487
+ # agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
488
+ # else:
489
+ # logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
490
+ # agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
491
+ # response = agent(message)
492
+
493
+ # # Extract addresses for mapping regardless of the choice
494
+ # addresses = extract_addresses(response['output'])
495
+ # return response['output'], addresses
496
+
497
+
498
+
499
+ # def bot(history, choice):
500
+ # if not history:
501
+ # return history
502
+ # response, addresses = generate_answer(history[-1][0], choice)
503
+ # history[-1][1] = ""
504
+
505
+ # # Generate audio for the entire response in a separate thread
506
+ # with concurrent.futures.ThreadPoolExecutor() as executor:
507
+ # audio_future = executor.submit(generate_audio_elevenlabs, response)
508
+
509
+ # for character in response:
510
+ # history[-1][1] += character
511
+ # time.sleep(0.05) # Adjust the speed of text appearance
512
+ # yield history, None
513
+
514
+ # audio_path = audio_future.result()
515
+ # yield history, audio_path
516
+
517
+
518
+ # def add_message(history, message):
519
+ # history.append((message, None))
520
+ # return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)
521
+
522
+ # def print_like_dislike(x: gr.LikeData):
523
+ # print(x.index, x.value, x.liked)
524
+
525
+ # def extract_addresses(response):
526
+ # if not isinstance(response, str):
527
+ # response = str(response)
528
+ # address_patterns = [
529
+ # r'([A-Z].*,\sOmaha,\sNE\s\d{5})',
530
+ # r'(\d{4}\s.*,\sOmaha,\sNE\s\d{5})',
531
+ # r'([A-Z].*,\sNE\s\d{5})',
532
+ # r'([A-Z].*,.*\sSt,\sOmaha,\sNE\s\d{5})',
533
+ # r'([A-Z].*,.*\sStreets,\sOmaha,\sNE\s\d{5})',
534
+ # r'(\d{2}.*\sStreets)',
535
+ # r'([A-Z].*\s\d{2},\sOmaha,\sNE\s\d{5})'
536
+ # ]
537
+ # addresses = []
538
+ # for pattern in address_patterns:
539
+ # addresses.extend(re.findall(pattern, response))
540
+ # return addresses
541
+
542
+ # all_addresses = []
543
+
544
+ # def generate_map(location_names):
545
+ # global all_addresses
546
+ # all_addresses.extend(location_names)
547
+
548
+ # api_key = os.environ['GOOGLEMAPS_API_KEY']
549
+ # gmaps = GoogleMapsClient(key=api_key)
550
+
551
+ # m = folium.Map(location=[41.2565, -95.9345], zoom_start=12)
552
+
553
+ # for location_name in all_addresses:
554
+ # geocode_result = gmaps.geocode(location_name)
555
+ # if geocode_result:
556
+ # location = geocode_result[0]['geometry']['location']
557
+ # folium.Marker(
558
+ # [location['lat'], location['lng']],
559
+ # tooltip=f"{geocode_result[0]['formatted_address']}"
560
+ # ).add_to(m)
561
+
562
+ # map_html = m._repr_html_()
563
+ # return map_html
564
+
565
+ # # def fetch_local_news():
566
+ # # api_key = os.environ['SERP_API']
567
+ # # url = f'https://serpapi.com/search.json?engine=google_news&q=omaha headline&api_key={api_key}'
568
+ # # response = requests.get(url)
569
+ # # if response.status_code == 200:
570
+ # # results = response.json().get("news_results", [])
571
+ # # news_html = """
572
+ # # <h2 style="font-family: 'Georgia', serif; color: #4CAF50; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Omaha Today </h2>
573
+ # # <style>
574
+ # # .news-item {
575
+ # # font-family: 'Verdana', sans-serif;
576
+ # # color: #333;
577
+ # # background-color: #f0f8ff;
578
+ # # margin-bottom: 15px;
579
+ # # padding: 10px;
580
+ # # border: 2px solid red; /* Added red border */
581
+ # # border-radius: 5px;
582
+ # # transition: box-shadow 0.3s ease, background-color 0.3s ease;
583
+ # # font-weight: bold;
584
+ # # }
585
+ # # .news-item:hover {
586
+ # # box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
587
+ # # background-color: #e6f7ff;
588
+ # # }
589
+ # # .news-item a {
590
+ # # color: #1E90FF;
591
+ # # text-decoration: none;
592
+ # # font-weight: bold;
593
+ # # }
594
+ # # .news-item a:hover {
595
+ # # text-decoration: underline;
596
+ # # }
597
+ # # .news-preview {
598
+ # # position: absolute;
599
+ # # display: none;
600
+ # # border: 1px solid #ccc;
601
+ # # border-radius: 5px;
602
+ # # box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2);
603
+ # # background-color: white;
604
+ # # z-index: 1000;
605
+ # # max-width: 300px;
606
+ # # padding: 10px;
607
+ # # font-family: 'Verdana', sans-serif;
608
+ # # color: #333;
609
+ # # }
610
+ # # </style>
611
+ # # <script>
612
+ # # function showPreview(event, previewContent) {
613
+ # # var previewBox = document.getElementById('news-preview');
614
+ # # previewBox.innerHTML = previewContent;
615
+ # # previewBox.style.left = event.pageX + 'px';
616
+ # # previewBox.style.top = event.pageY + 'px';
617
+ # # previewBox.style.display = 'block';
618
+ # # }
619
+ # # function hidePreview() {
620
+ # # var previewBox = document.getElementById('news-preview');
621
+ # # previewBox.style.display = 'none';
622
+ # # }
623
+ # # </script>
624
+ # # <div id="news-preview" class="news-preview"></div>
625
+ # # """
626
+ # # for index, result in enumerate(results[:7]):
627
+ # # title = result.get("title", "No title")
628
+ # # link = result.get("link", "#")
629
+ # # snippet = result.get("snippet", "")
630
+ # # news_html += f"""
631
+ # # <div class="news-item" onmouseover="showPreview(event, '{snippet}')" onmouseout="hidePreview()">
632
+ # # <a href='{link}' target='_blank'>{index + 1}. {title}</a>
633
+ # # <p>{snippet}</p>
634
+ # # </div>
635
+ # # """
636
+ # # return news_html
637
+ # # else:
638
+ # # return "<p>Failed to fetch local news</p>"
639
+
640
+ # def fetch_local_news():
+ # api_key = os.environ['SERP_API']
+ # url = f'https://serpapi.com/search.json?engine=google_news&q=omaha headline&api_key={api_key}'
# response = requests.get(url)
# if response.status_code == 200:
+ # results = response.json().get("news_results", [])
+ # news_html = """
+ # <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Omaha Today</h2>
# <style>
+ # .news-item {
# font-family: 'Verdana', sans-serif;
# color: #333;
# background-color: #f0f8ff;

# transition: box-shadow 0.3s ease, background-color 0.3s ease;
# font-weight: bold;
# }
+ # .news-item:hover {
# box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
# background-color: #e6f7ff;
# }
+ # .news-item a {
# color: #1E90FF;
# text-decoration: none;
# font-weight: bold;
# }
+ # .news-item a:hover {
# text-decoration: underline;
# }
+ # .news-preview {
# position: absolute;
# display: none;
# border: 1px solid #ccc;

# </style>
# <script>
# function showPreview(event, previewContent) {
+ # var previewBox = document.getElementById('news-preview');
# previewBox.innerHTML = previewContent;
# previewBox.style.left = event.pageX + 'px';
# previewBox.style.top = event.pageY + 'px';
# previewBox.style.display = 'block';
# }
# function hidePreview() {
+ # var previewBox = document.getElementById('news-preview');
# previewBox.style.display = 'none';
# }
# </script>
+ # <div id="news-preview" class="news-preview"></div>
# """
+ # for index, result in enumerate(results[:7]):
+ # title = result.get("title", "No title")
+ # link = result.get("link", "#")
+ # snippet = result.get("snippet", "")
+ # news_html += f"""
+ # <div class="news-item" onmouseover="showPreview(event, '{snippet}')" onmouseout="hidePreview()">
# <a href='{link}' target='_blank'>{index + 1}. {title}</a>
+ # <p>{snippet}</p>
# </div>
# """
+ # return news_html
# else:
+ # return "<p>Failed to fetch local news</p>"
+
+
+ # # Voice Control
+ # import numpy as np
+ # import torch
+ # from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
+
+ # model_id = 'openai/whisper-large-v3'
+ # device = "cuda:0" if torch.cuda.is_available() else "cpu"
+ # torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+ # model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype,
+ # #low_cpu_mem_usage=True,
+ # use_safetensors=True).to(device)
+ # processor = AutoProcessor.from_pretrained(model_id)
+
+ # # Optimized ASR pipeline
+ # pipe_asr = pipeline("automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, chunk_length_s=15, batch_size=16, torch_dtype=torch_dtype, device=device, return_timestamps=True)
+
+ # base_audio_drive = "/data/audio"
+
+ # import numpy as np
+
+ # def transcribe_function(stream, new_chunk):
+ # try:
+ # sr, y = new_chunk[0], new_chunk[1]
+ # except TypeError:
+ # print(f"Error chunk structure: {type(new_chunk)}, content: {new_chunk}")
+ # return stream, "", None
+
+ # y = y.astype(np.float32) / np.max(np.abs(y))
+
+ # if stream is not None:
+ # stream = np.concatenate([stream, y])
+ # else:
+ # stream = y
+
+ # result = pipe_asr({"array": stream, "sampling_rate": sr}, return_timestamps=False)
+
+ # full_text = result.get("text", "")
+
+ # return stream, full_text, result
+
+
+ # def update_map_with_response(history):
+ # if not history:
+ # return ""
+ # response = history[-1][1]
+ # addresses = extract_addresses(response)
+ # return generate_map(addresses)
+
+
+
+ # def clear_textbox():
+ # return ""
+
+ # def show_map_if_details(history,choice):
+ # if choice in ["Details", "Conversational"]:
+ # return gr.update(visible=True), update_map_with_response(history)
+ # else:
+ # return gr.update(visible(False), "")
+
+
+
+ # def generate_audio_elevenlabs(text):
+ # XI_API_KEY = os.environ['ELEVENLABS_API']
+ # VOICE_ID = 'd9MIrwLnvDeH7aZb61E9' # Replace with your voice ID
+ # tts_url = f"https://api.elevenlabs.io/v1/text-to-speech/{VOICE_ID}/stream"
+ # headers = {
+ # "Accept": "application/json",
+ # "xi-api-key": XI_API_KEY
+ # }
+ # data = {
+ # "text": str(text),
+ # "model_id": "eleven_multilingual_v2",
+ # "voice_settings": {
+ # "stability": 1.0,
+ # "similarity_boost": 0.0,
+ # "style": 0.60, # Adjust style for more romantic tone
+ # "use_speaker_boost": False
+ # }
+ # }
+ # response = requests.post(tts_url, headers=headers, json=data, stream=True)
+ # if response.ok:
+ # with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as f:
+ # for chunk in response.iter_content(chunk_size=1024):
+ # f.write(chunk)
+ # temp_audio_path = f.name
+ # logging.debug(f"Audio saved to {temp_audio_path}")
+ # return temp_audio_path
+ # else:
+ # logging.error(f"Error generating audio: {response.text}")
+ # return None
+
+ # # Stable Diffusion setup
+ # pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16)
+ # pipe = pipe.to("cuda")
+
+ # def generate_image(prompt):
+ # image = pipe(
+ # prompt,
+ # negative_prompt="",
+ # num_inference_steps=28,
+ # guidance_scale=3.0,
+ # ).images[0]
+ # return image
+
+ # # Hardcoded prompt for image generation
+ # # hardcoded_prompt_1 = "Useing The top events like 'Summer Art Festival'and Date - 06/19/2024 ,Weather-Sunny Bright Day.Create Highly Visually Compelling High Resolution and High Quality Photographics Advatizement for 'Toyota'"
+ # hardcoded_prompt_1="Give a high quality photograph of a great looking red 2026 toyota coupe against a skyline setting in th night, michael mann style in omaha enticing the consumer to buy this product"
+ # # hardcoded_prompt_2 = "Create a vibrant poster of Nebraska with beautiful weather, featuring picturesque landscapes, clear skies, and the word 'Nebraska' prominently displayed."
+ # hardcoded_prompt_2="A vibrant and dynamic football game scene in the style of Peter Paul Rubens, showcasing the intense match between Alabama and Nebraska. The players are depicted with the dramatic, muscular physiques and expressive faces typical of Rubens' style. The Alabama team is wearing their iconic crimson and white uniforms, while the Nebraska team is in their classic red and white attire. The scene is filled with action, with players in mid-motion, tackling, running, and catching the ball. The background features a grand stadium filled with cheering fans, banners, and the natural landscape in the distance. The colors are rich and vibrant, with a strong use of light and shadow to create depth and drama. The overall atmosphere captures the intensity and excitement of the game, infused with the grandeur and dynamism characteristic of Rubens' work."
+ # hardcoded_prompt_3 = "Create a high-energy scene of a DJ performing on a large stage with vibrant lights, colorful lasers, a lively dancing crowd, and various electronic equipment in the background."
+
+
+ # def update_images():
+ # image_1 = generate_image(hardcoded_prompt_1)
+ # image_2 = generate_image(hardcoded_prompt_2)
+ # image_3 = generate_image(hardcoded_prompt_3)
+ # return image_1, image_2, image_3
+
+ # with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
+
+ # with gr.Row():
+ # with gr.Column():
+ # state = gr.State()
+
+ # chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
+ # choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational")
+
+ # gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
+ # chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!")
+ # chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
+ # bot_msg = chat_msg.then(bot, [chatbot, choice], [chatbot, gr.Audio(interactive=False, autoplay=True)])
+ # bot_msg.then(lambda: gr.Textbox(value="", interactive=True, placeholder="Ask Radar!!!...", show_label=False), None, [chat_input])
+ # chatbot.like(print_like_dislike, None, None)
+ # clear_button = gr.Button("Clear")
+ # clear_button.click(fn=clear_textbox, inputs=None, outputs=chat_input)
+
+
+ # audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy')
+ # audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="SAMLOne_real_time")
+
+ # gr.Markdown("<h1 style='color: red;'>Map</h1>", elem_id="location-markdown")
+ # location_output = gr.HTML()
+ # bot_msg.then(show_map_if_details, [chatbot, choice], [location_output, location_output])
+
+ # with gr.Column():
+ # weather_output = gr.HTML(value=fetch_local_weather())
+ # news_output = gr.HTML(value=fetch_local_news())
+ # news_output = gr.HTML(value=fetch_local_events())
+
+ # with gr.Column():
+
+ # image_output_1 = gr.Image(value=generate_image(hardcoded_prompt_1), width=400, height=400)
+ # image_output_2 = gr.Image(value=generate_image(hardcoded_prompt_2), width=400, height=400)
+ # image_output_3 = gr.Image(value=generate_image(hardcoded_prompt_3), width=400, height=400)
+
+
+ # refresh_button = gr.Button("Refresh Images")
+ # refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3])
+
+ # demo.queue()
+ # demo.launch(share=True)
+
+
+ import os
+ import re
+ import time
+ import requests
+ import logging
+ import folium
+ import gradio as gr
+ import tempfile
+ import torch
+ from datetime import datetime
+ import numpy as np
+ from gtts import gTTS
+ from googlemaps import Client as GoogleMapsClient
+ from diffusers import StableDiffusion3Pipeline
+ import concurrent.futures
+ from PIL import Image
+ from flask import Flask, redirect, url_for, session
+ from authlib.integrations.flask_client import OAuth
+
+ # Initialize Flask app
+ app = Flask(__name__)
+ app.secret_key = os.urandom(24)
+
+ # OAuth setup
+ oauth = OAuth(app)
+ google = oauth.register(
+     name='google',
+     client_id=os.environ['GOOGLE_CLIENT_ID'],
+     client_secret=os.environ['GOOGLE_CLIENT_SECRET'],
+     access_token_url='https://accounts.google.com/o/oauth2/token',
+     authorize_url='https://accounts.google.com/o/oauth2/auth',
+     authorize_params=None,
+     access_token_params=None,
+     refresh_token_url=None,
+     redirect_uri='http://localhost:7860/oauth2callback',
+     client_kwargs={'scope': 'openid profile email'},
+ )
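+ # NOTE (editorial): the redirect_uri above is hardcoded to http://localhost:7860/oauth2callback.
+ # It is an assumption that this exact URI is also registered for the client in the Google Cloud
+ # Console; Google rejects the flow with redirect_uri_mismatch otherwise, so a hosted deployment
+ # would need its public callback URL here instead.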
+
+ @app.route('/')
+ def home():
+     email = dict(session).get('email', None)
+     if email:
+         return f'Hello, {email}!'
+     return redirect('/login')
+
+ @app.route('/login')
+ def login():
+     return google.authorize_redirect(url_for('authorize', _external=True))
+
+ @app.route('/logout')
+ def logout():
+     session.pop('email', None)
+     return redirect('/')
+
+ @app.route('/authorize')
+ def authorize():
+     token = google.authorize_access_token()
+     resp = google.get('https://www.googleapis.com/oauth2/v1/userinfo')
+     user_info = resp.json()
+     session['email'] = user_info['email']
+     return redirect('/')
+
+ @app.route('/oauth2callback')
+ def oauth2callback():
+     token = google.authorize_access_token()
+     resp = google.get('https://www.googleapis.com/oauth2/v1/userinfo')
+     user_info = resp.json()
+     session['email'] = user_info['email']
+     return redirect('/')
+
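+ # NOTE (editorial): the /authorize and /oauth2callback handlers above are identical. login()
+ # points Google at url_for('authorize') while the client registration names /oauth2callback as
+ # its redirect_uri, so once the callback URL is made consistent, one of the two routes is
+ # presumably redundant.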
+ # Your Gradio interface and other functions go here
+
+ # Initialize logging
+ logging.basicConfig(level=logging.DEBUG)
+
+ # Define function to get current time and date
+ def get_current_time_and_date():
+     now = datetime.now()
+     return now.strftime("%Y-%m-%d %H:%M:%S")
+
+ current_time_and_date = get_current_time_and_date()
+
+ # Define functions for fetching events, weather, and news
def fetch_local_events():
    api_key = os.environ['SERP_API']
    url = f'https://serpapi.com/search.json?engine=google_events&q=Events+in+Omaha&hl=en&gl=us&api_key={api_key}'
    response = requests.get(url)
    if response.status_code == 200:
        events_results = response.json().get("events_results", [])

    else:
        return "<p>Failed to fetch local events</p>"

def fetch_local_weather():
    try:
        api_key = os.environ['WEATHER_API']

        response = requests.get(url)
        response.raise_for_status()
        jsonData = response.json()
        current_conditions = jsonData.get("currentConditions", {})
        temp_celsius = current_conditions.get("temp", "N/A")
        if temp_celsius != "N/A":
            temp_fahrenheit = int((temp_celsius * 9/5) + 32)
        else:
            temp_fahrenheit = "N/A"
        condition = current_conditions.get("conditions", "N/A")
        humidity = current_conditions.get("humidity", "N/A")

    }
    return condition_map.get(condition, "c04d")

+ def fetch_local_news():
+     api_key = os.environ['SERP_API']
+     url = f'https://serpapi.com/search.json?engine=google_news&q=omaha headline&api_key={api_key}'
+     response = requests.get(url)
+     if response.status_code == 200:
+         results = response.json().get("news_results", [])
+         news_html = """
+         <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Omaha Today</h2>
+         <style>
+             .news-item {
+                 font-family: 'Verdana', sans-serif;
+                 color: #333;
+                 background-color: #f0f8ff;
+                 margin-bottom: 15px;
+                 padding: 10px;
+                 border-radius: 5px;
+                 transition: box-shadow 0.3s ease, background-color 0.3s ease;
+                 font-weight: bold;
+             }
+             .news-item:hover {
+                 box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
+                 background-color: #e6f7ff;
+             }
+             .news-item a {
+                 color: #1E90FF;
+                 text-decoration: none;
+                 font-weight: bold;
+             }
+             .news-item a:hover {
+                 text-decoration: underline;
+             }
+             .news-preview {
+                 position: absolute;
+                 display: none;
+                 border: 1px solid #ccc;
+                 border-radius: 5px;
+                 box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2);
+                 background-color: white;
+                 z-index: 1000;
+                 max-width: 300px;
+                 padding: 10px;
+                 font-family: 'Verdana', sans-serif;
+                 color: #333;
+             }
+         </style>
+         <script>
+             function showPreview(event, previewContent) {
+                 var previewBox = document.getElementById('news-preview');
+                 previewBox.innerHTML = previewContent;
+                 previewBox.style.left = event.pageX + 'px';
+                 previewBox.style.top = event.pageY + 'px';
+                 previewBox.style.display = 'block';
+             }
+             function hidePreview() {
+                 var previewBox = document.getElementById('news-preview');
+                 previewBox.style.display = 'none';
+             }
+         </script>
+         <div id="news-preview" class="news-preview"></div>
+         """
+         for index, result in enumerate(results[:7]):
+             title = result.get("title", "No title")
+             link = result.get("link", "#")
+             snippet = result.get("snippet", "")
+             news_html += f"""
+             <div class="news-item" onmouseover="showPreview(event, '{snippet}')" onmouseout="hidePreview()">
+                 <a href='{link}' target='_blank'>{index + 1}. {title}</a>
+                 <p>{snippet}</p>
+             </div>
+             """
+         return news_html
+     else:
+         return "<p>Failed to fetch local news</p>"
 
# Define prompt templates
+ template1 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska. Based on the weather being a sunny bright day and today's date being 20th June 2024, use the following pieces of context,
memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end. If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
Use fifteen sentences maximum. Keep the answer as detailed as possible. Always include the address, time, date, and
event type and description. Always say "It was my pleasure!" at the end of the answer.

Question: {question}
Helpful Answer:"""

+ template2 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska. Based on today's weather being a sunny bright day and today's date being 20th June 2024, take the location or address into account but do not show it in the output. Use the following pieces of context,
memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end. If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
Keep the answer short and sweet and crisp. Always say "It was my pleasure!" at the end of the answer.
{context}
Question: {question}
Helpful Answer:"""

+ # Initialize ChatOpenAI model
+ from langchain_openai import OpenAIEmbeddings, ChatOpenAI
+ from langchain_pinecone import PineconeVectorStore
+ from langchain.prompts import PromptTemplate
+ from langchain.chains import RetrievalQA
+ from langchain.chains.conversation.memory import ConversationBufferWindowMemory
+ from langchain.agents import Tool, initialize_agent
+
+ embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
+
+ # Initialize Pinecone
+ from pinecone import Pinecone
+ pc = Pinecone(api_key=os.environ['PINECONE_API_KEY'])
+
+ index_name = "omaha-details"
+ vectorstore = PineconeVectorStore(index_name=index_name, embedding=embeddings)
+ retriever = vectorstore.as_retriever(search_kwargs={'k': 5})
+
+ chat_model = ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model='gpt-4o')

+ conversational_memory = ConversationBufferWindowMemory(
+     memory_key='chat_history',
+     k=10,
+     return_messages=True
+ )

QA_CHAIN_PROMPT_1 = PromptTemplate(input_variables=["context", "question"], template=template1)
QA_CHAIN_PROMPT_2 = PromptTemplate(input_variables=["context", "question"], template=template2)

def build_qa_chain(prompt_template):
    qa_chain = RetrievalQA.from_chain_type(
        llm=chat_model,

    ]
    return qa_chain, tools

def initialize_agent_with_prompt(prompt_template):
    qa_chain, tools = build_qa_chain(prompt_template)
    agent = initialize_agent(

    )
    return agent

def generate_answer(message, choice):
    logging.debug(f"generate_answer called with prompt_choice: {choice}")
    if choice == "Details":
        agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
    elif choice == "Conversational":

        logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
        agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
    response = agent(message)
    addresses = extract_addresses(response['output'])
    return response['output'], addresses
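+ # Usage sketch (editorial, not part of the original code): the agent returns a dict with an
+ # 'output' key, so a direct call looks roughly like:
+ #     answer, addresses = generate_answer("What's happening in Omaha tonight?", "Conversational")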
 
def bot(history, choice):
    if not history:
        return history
    response, addresses = generate_answer(history[-1][0], choice)
    history[-1][1] = ""

    with concurrent.futures.ThreadPoolExecutor() as executor:
        audio_future = executor.submit(generate_audio_elevenlabs, response)

        for character in response:
            history[-1][1] += character
+             time.sleep(0.05)
            yield history, None

        audio_path = audio_future.result()
        yield history, audio_path
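+ # NOTE (editorial): the ThreadPoolExecutor above starts ElevenLabs TTS for the whole response in
+ # the background while the text is streamed to the chat one character at a time (the added
+ # time.sleep(0.05) only paces the typewriter effect); the audio path is yielded once the future
+ # resolves.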
 
 
def add_message(history, message):
    history.append((message, None))
    return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)

def generate_map(location_names):
    global all_addresses
    all_addresses.extend(location_names)
    api_key = os.environ['GOOGLEMAPS_API_KEY']
    gmaps = GoogleMapsClient(key=api_key)
    m = folium.Map(location=[41.2565, -95.9345], zoom_start=12)
    for location_name in all_addresses:
        geocode_result = gmaps.geocode(location_name)
        if geocode_result:

                [location['lat'], location['lng']],
                tooltip=f"{geocode_result[0]['formatted_address']}"
            ).add_to(m)
    map_html = m._repr_html_()
    return map_html
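+ # NOTE (editorial): all_addresses is a module-level list that only grows, so markers from every
+ # previous answer stay on the map. If a per-response map is wanted, a minimal change (an
+ # assumption, not part of this commit) would be to call all_addresses.clear() before extending
+ # it with the new location_names.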
 
def update_map_with_response(history):
    if not history:
        return ""

    addresses = extract_addresses(response)
    return generate_map(addresses)

def clear_textbox():
+     return ""

+ def show_map_if_details(history, choice):
    if choice in ["Details", "Conversational"]:
        return gr.update(visible=True), update_map_with_response(history)
    else:
+         return gr.update(visible=False), ""

def generate_audio_elevenlabs(text):
    XI_API_KEY = os.environ['ELEVENLABS_API']
+     VOICE_ID = 'd9MIrwLnvDeH7aZb61E9'
    tts_url = f"https://api.elevenlabs.io/v1/text-to-speech/{VOICE_ID}/stream"
    headers = {
        "Accept": "application/json",

        "voice_settings": {
            "stability": 1.0,
            "similarity_boost": 0.0,
+             "style": 0.60,
            "use_speaker_boost": False
        }
    }

    ).images[0]
    return image

+ hardcoded_prompt_1 = "Give a high quality photograph of a great looking red 2026 toyota coupe against a skyline setting in the night, michael mann style in omaha enticing the consumer to buy this product"
+ hardcoded_prompt_2 = "A vibrant and dynamic football game scene in the style of Peter Paul Rubens, showcasing the intense match between Alabama and Nebraska. The players are depicted with the dramatic, muscular physiques and expressive faces typical of Rubens' style. The Alabama team is wearing their iconic crimson and white uniforms, while the Nebraska team is in their classic red and white attire. The scene is filled with action, with players in mid-motion, tackling, running, and catching the ball. The background features a grand stadium filled with cheering fans, banners, and the natural landscape in the distance. The colors are rich and vibrant, with a strong use of light and shadow to create depth and drama. The overall atmosphere captures the intensity and excitement of the game, infused with the grandeur and dynamism characteristic of Rubens' work."
hardcoded_prompt_3 = "Create a high-energy scene of a DJ performing on a large stage with vibrant lights, colorful lasers, a lively dancing crowd, and various electronic equipment in the background."

def update_images():
    image_1 = generate_image(hardcoded_prompt_1)
    image_2 = generate_image(hardcoded_prompt_2)

    return image_1, image_2, image_3

with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
    with gr.Row():
        with gr.Column():
            state = gr.State()
            chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
            choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational")
            gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
            chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!")
            chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])

            chatbot.like(print_like_dislike, None, None)
            clear_button = gr.Button("Clear")
            clear_button.click(fn=clear_textbox, inputs=None, outputs=chat_input)

            audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy')
            audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="SAMLOne_real_time")

            gr.Markdown("<h1 style='color: red;'>Map</h1>", elem_id="location-markdown")
            location_output = gr.HTML()
            bot_msg.then(show_map_if_details, [chatbot, choice], [location_output, location_output])

        with gr.Column():
            weather_output = gr.HTML(value=fetch_local_weather())
            news_output = gr.HTML(value=fetch_local_news())
+             events_output = gr.HTML(value=fetch_local_events())

        with gr.Column():
            image_output_1 = gr.Image(value=generate_image(hardcoded_prompt_1), width=400, height=400)
            image_output_2 = gr.Image(value=generate_image(hardcoded_prompt_2), width=400, height=400)
            image_output_3 = gr.Image(value=generate_image(hardcoded_prompt_3), width=400, height=400)

            refresh_button = gr.Button("Refresh Images")
            refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3])

+     login_button = gr.Button("Login with Google")
+     login_button.click(fn=lambda: redirect('/login'))
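+     # NOTE (editorial): a Gradio click handler cannot issue a Flask redirect; flask.redirect()
+     # here only builds a Response object that Gradio discards, and no outputs are wired up.
+     # The browser would have to navigate to the /login route itself (for example via a plain
+     # HTML link rendered in the page) for the OAuth flow to actually start.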
+
demo.queue()
demo.launch(share=True)

+ if __name__ == "__main__":
+     app.run(port=7860)
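+ # NOTE (editorial): demo.launch(share=True) blocks at import time, so the app.run() call under
+ # __main__ is unlikely to ever execute as written, and both servers would contend for port 7860.
+ # Assuming a recent Gradio, one sketch is demo.launch(share=True, prevent_thread_lock=True) with
+ # Flask moved to a different port; alternatively the standalone Flask server could be dropped and
+ # login handled outside the Space.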