Pijush2023 committed on
Commit 1407881 · verified · 1 Parent(s): a5dd543

Update app.py

Files changed (1)
  1. app.py +200 -188
app.py CHANGED
@@ -890,7 +890,7 @@ from datetime import datetime
  import numpy as np
  from gtts import gTTS
  from googlemaps import Client as GoogleMapsClient
- from diffusers import StableDiffusionPipeline
+ from diffusers import StableDiffusion3Pipeline
  import concurrent.futures
  from PIL import Image
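The import swap above moves image generation from StableDiffusionPipeline to StableDiffusion3Pipeline. The diff does not show how generate_image() loads the model, so the following is only a minimal sketch of the SD3 loading path in diffusers; the checkpoint id, device, dtype, and sampling settings are assumptions rather than values taken from app.py.

# Illustrative sketch only -- checkpoint id and settings are assumptions, not
# values from this commit; assumes a CUDA-capable GPU is available.
import torch
from diffusers import StableDiffusion3Pipeline

pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",  # assumed checkpoint
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    "A high quality photograph of a red coupe against a night skyline",
    num_inference_steps=28,
    guidance_scale=7.0,
).images[0]
image.save("preview.png")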
 
@@ -903,16 +903,19 @@ from langchain.agents import Tool, initialize_agent
  from huggingface_hub import login
 
  import sqlite3
- import hashlib
+ from passlib.hash import bcrypt
 
  # Check if the token is already set in the environment variables
  hf_token = os.getenv("HF_TOKEN")
 
  if hf_token is None:
+     # If the token is not set, prompt for it (this should be done securely)
      print("Please set your Hugging Face token in the environment variables.")
  else:
+     # Login using the token
      login(token=hf_token)
 
+     # Your application logic goes here
      print("Logged in successfully to Hugging Face Hub!")
 
  # Set up logging
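The hashing import above switches from hashlib to passlib's bcrypt, which the new signup/login code later in this commit relies on. A small illustration of the practical difference, using throwaway values: a SHA-256 digest of the same password is unsalted and identical on every call, while a bcrypt hash embeds a random salt and is checked with verify() rather than string equality.

# Quick contrast between the removed and the added hashing approach.
import hashlib
from passlib.hash import bcrypt

password = "example-password"  # throwaway value for illustration

sha_digest = hashlib.sha256(password.encode()).hexdigest()           # unsalted, deterministic
print(sha_digest == hashlib.sha256(password.encode()).hexdigest())   # True every time

bcrypt_hash = bcrypt.hash(password)          # salted, different output on each call
print(bcrypt.verify(password, bcrypt_hash))  # True -- compare via verify(), not ==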
@@ -931,7 +934,7 @@ retriever = vectorstore.as_retriever(search_kwargs={'k': 5})
 
  # Initialize ChatOpenAI model
  chat_model = ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'],
-                         temperature=0, model='gpt-4')
+                         temperature=0, model='gpt-4o')
 
  conversational_memory = ConversationBufferWindowMemory(
      memory_key='chat_history',
@@ -943,6 +946,7 @@ def get_current_time_and_date():
      now = datetime.now()
      return now.strftime("%Y-%m-%d %H:%M:%S")
 
+ # Example usage
  current_time_and_date = get_current_time_and_date()
 
  def fetch_local_events():
@@ -1071,6 +1075,140 @@ def get_weather_icon(condition):
      }
      return condition_map.get(condition, "c04d")
 
+ current_time_and_date = get_current_time_and_date()
+
+ # Define prompt templates
+ template1 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska. Based on weather being a sunny bright day and the today's date is 20th june 2024, use the following pieces of context,
+ memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end. If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
+ Use fifteen sentences maximum. Keep the answer as detailed as possible. Always include the address, time, date, and
+ event type and description. Always say "It was my pleasure!" at the end of the answer.
+ {context}
+ Question: {question}
+ Helpful Answer:"""
+
+ template2 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska. Based on today's weather being a sunny bright day and today's date is 20th june 2024, take the location or address but don't show the location or address on the output prompts. Use the following pieces of context,
+ memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end. If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
+ Keep the answer short and sweet and crisp. Always say "It was my pleasure!" at the end of the answer.
+ {context}
+ Question: {question}
+ Helpful Answer:"""
+
+ QA_CHAIN_PROMPT_1 = PromptTemplate(input_variables=["context", "question"], template=template1)
+ QA_CHAIN_PROMPT_2 = PromptTemplate(input_variables=["context", "question"], template=template2)
+
+ # Define the retrieval QA chain
+ def build_qa_chain(prompt_template):
+     qa_chain = RetrievalQA.from_chain_type(
+         llm=chat_model,
+         chain_type="stuff",
+         retriever=retriever,
+         chain_type_kwargs={"prompt": prompt_template}
+     )
+     tools = [
+         Tool(
+             name='Knowledge Base',
+             func=qa_chain,
+             description='Use this tool when answering general knowledge queries to get more information about the topic'
+         )
+     ]
+     return qa_chain, tools
+
+ # Define the agent initializer
+ def initialize_agent_with_prompt(prompt_template):
+     qa_chain, tools = build_qa_chain(prompt_template)
+     agent = initialize_agent(
+         agent='chat-conversational-react-description',
+         tools=tools,
+         llm=chat_model,
+         verbose=False,
+         max_iteration=5,
+         early_stopping_method='generate',
+         memory=conversational_memory
+     )
+     return agent
+
+ # Define the function to generate answers
+ def generate_answer(message, choice):
+     logging.debug(f"generate_answer called with prompt_choice: {choice}")
+
+     if choice == "Details":
+         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
+     elif choice == "Conversational":
+         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
+     else:
+         logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
+         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
+     response = agent(message)
+
+     # Extract addresses for mapping regardless of the choice
+     addresses = extract_addresses(response['output'])
+     return response['output'], addresses
+
+ def bot(history, choice):
+     if not history:
+         return history
+     response, addresses = generate_answer(history[-1][0], choice)
+     history[-1][1] = ""
+
+     # Generate audio for the entire response in a separate thread
+     with concurrent.futures.ThreadPoolExecutor() as executor:
+         audio_future = executor.submit(generate_audio_elevenlabs, response)
+
+     for character in response:
+         history[-1][1] += character
+         time.sleep(0.05)  # Adjust the speed of text appearance
+         yield history, None
+
+     audio_path = audio_future.result()
+     yield history, audio_path
+
+ def add_message(history, message):
+     history.append((message, None))
+     return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)
+
+ def print_like_dislike(x: gr.LikeData):
+     print(x.index, x.value, x.liked)
+
+ def extract_addresses(response):
+     if not isinstance(response, str):
+         response = str(response)
+     address_patterns = [
+         r'([A-Z].*,\sOmaha,\sNE\s\d{5})',
+         r'(\d{4}\s.*,\sOmaha,\sNE\s\d{5})',
+         r'([A-Z].*,\sNE\s\d{5})',
+         r'([A-Z].*,.*\sSt,\sOmaha,\sNE\s\d{5})',
+         r'([A-Z].*,.*\sStreets,\sOmaha,\sNE\s\d{5})',
+         r'(\d{2}.*\sStreets)',
+         r'([A-Z].*\s\d{2},\sOmaha,\sNE\s\d{5})'
+     ]
+     addresses = []
+     for pattern in address_patterns:
+         addresses.extend(re.findall(pattern, response))
+     return addresses
+
+ all_addresses = []
+
+ def generate_map(location_names):
+     global all_addresses
+     all_addresses.extend(location_names)
+
+     api_key = os.environ['GOOGLEMAPS_API_KEY']
+     gmaps = GoogleMapsClient(key=api_key)
+
+     m = folium.Map(location=[41.2565, -95.9345], zoom_start=12)
+
+     for location_name in all_addresses:
+         geocode_result = gmaps.geocode(location_name)
+         if geocode_result:
+             location = geocode_result[0]['geometry']['location']
+             folium.Marker(
+                 [location['lat'], location['lng']],
+                 tooltip=f"{geocode_result[0]['formatted_address']}"
+             ).add_to(m)
+
+     map_html = m._repr_html_()
+     return map_html
+
  def fetch_local_news():
      api_key = os.environ['SERP_API']
      url = f'https://serpapi.com/search.json?engine=google_news&q=omaha headline&api_key={api_key}'
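The added block above moves the prompt templates, the RetrievalQA/agent plumbing, and the address and map helpers earlier in the file. As a standalone illustration of how the extract_addresses() patterns behave, here is a sketch run against an invented sentence; several of the overlapping patterns can capture the same address, so the raw list may contain near-duplicates.

# Standalone illustration of the address patterns added in this commit.
# The sample sentence is made up for demonstration purposes.
import re

address_patterns = [
    r'([A-Z].*,\sOmaha,\sNE\s\d{5})',
    r'(\d{4}\s.*,\sOmaha,\sNE\s\d{5})',
    r'([A-Z].*,\sNE\s\d{5})',
]

sample = "The show starts at The Slowdown, 729 N 14th St, Omaha, NE 68102 tonight."
matches = []
for pattern in address_patterns:
    matches.extend(re.findall(pattern, sample))

# Patterns that hit each capture a span ending in the ZIP code, so expect
# overlapping entries rather than one clean address string.
print(matches)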
@@ -1154,6 +1292,7 @@ model_id = 'openai/whisper-large-v3'
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
  model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype,
+                                                   #low_cpu_mem_usage=True,
                                                    use_safetensors=True).to(device)
  processor = AutoProcessor.from_pretrained(model_id)
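The Whisper model and processor configured above feed the pipe_asr pipeline named in the next hunk header, and transcribe_function() reads a "text" field from its output. A hedged sketch of a direct call, assuming pipe_asr is the pipeline object from app.py and substituting a dummy one-second chunk for streamed microphone audio:

# Illustrative call into the ASR pipeline configured above; the dummy audio
# below stands in for the streamed microphone chunks.
import numpy as np

sr = 16000
y = np.zeros(sr, dtype=np.float32)   # one second of silence as a placeholder
y = y / (np.max(np.abs(y)) or 1.0)   # normalize, guarding against an all-zero chunk

result = pipe_asr({"raw": y, "sampling_rate": sr}, return_timestamps=False)
print(result.get("text", ""))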
 
@@ -1162,8 +1301,6 @@ pipe_asr = pipeline("automatic-speech-recognition", model=model, tokenizer=proce
 
  base_audio_drive = "/data/audio"
 
- import numpy as np
-
  def transcribe_function(stream, new_chunk):
      try:
          sr, y = new_chunk[0], new_chunk[1]
@@ -1183,7 +1320,6 @@ def transcribe_function(stream, new_chunk):
      full_text = result.get("text", "")
 
      return stream, full_text, result
-
 
  def update_map_with_response(history):
      if not history:
@@ -1199,7 +1335,7 @@ def show_map_if_details(history,choice):
      if choice in ["Details", "Conversational"]:
          return gr.update(visible=True), update_map_with_response(history)
      else:
-         return gr.update(visible=False), ""
+         return gr.update(visible(False), "")
 
  def generate_audio_elevenlabs(text):
      XI_API_KEY = os.environ['ELEVENLABS_API']
@@ -1245,8 +1381,8 @@ def generate_image(prompt):
      return image
 
  # Hardcoded prompt for image generation
- hardcoded_prompt_1="Give a high quality photograph of a great looking red 2026 toyota coupe against a skyline setting in the night, michael mann style in omaha enticing the consumer to buy this product"
- hardcoded_prompt_2="A vibrant and dynamic football game scene in the style of Peter Paul Rubens, showcasing the intense match between Alabama and Nebraska. The players are depicted with the dramatic, muscular physiques and expressive faces typical of Rubens' style. The Alabama team is wearing their iconic crimson and white uniforms, while the Nebraska team is in their classic red and white attire. The scene is filled with action, with players in mid-motion, tackling, running, and catching the ball. The background features a grand stadium filled with cheering fans, banners, and the natural landscape in the distance. The colors are rich and vibrant, with a strong use of light and shadow to create depth and drama. The overall atmosphere captures the intensity and excitement of the game, infused with the grandeur and dynamism characteristic of Rubens' work."
+ hardcoded_prompt_1 = "Give a high quality photograph of a great looking red 2026 toyota coupe against a skyline setting in the night, michael mann style in omaha enticing the consumer to buy this product"
+ hardcoded_prompt_2 = "A vibrant and dynamic football game scene in the style of Peter Paul Rubens, showcasing the intense match between Alabama and Nebraska. The players are depicted with the dramatic, muscular physiques and expressive faces typical of Rubens' style. The Alabama team is wearing their iconic crimson and white uniforms, while the Nebraska team is in their classic red and white attire. The scene is filled with action, with players in mid-motion, tackling, running, and catching the ball. The background features a grand stadium filled with cheering fans, banners, and the natural landscape in the distance. The colors are rich and vibrant, with a strong use of light and shadow to create depth and drama. The overall atmosphere captures the intensity and excitement of the game, infused with the grandeur and dynamism characteristic of Rubens' work."
  hardcoded_prompt_3 = "Create a high-energy scene of a DJ performing on a large stage with vibrant lights, colorful lasers, a lively dancing crowd, and various electronic equipment in the background."
 
  def update_images():
@@ -1255,197 +1391,56 @@ def update_images():
  image_3 = generate_image(hardcoded_prompt_3)
  return image_1, image_2, image_3
 
- # Initialize database and create user table if not exists
- def init_db():
-     conn = sqlite3.connect('user_data.db')
-     c = conn.cursor()
-     c.execute('''CREATE TABLE IF NOT EXISTS users
-                  (username TEXT PRIMARY KEY, password TEXT)''')
+ # Database setup
+ def create_database():
+     conn = sqlite3.connect('user_credentials.db')
+     cursor = conn.cursor()
+     cursor.execute('''
+         CREATE TABLE IF NOT EXISTS users (
+             id INTEGER PRIMARY KEY,
+             username TEXT UNIQUE,
+             password TEXT
+         )
+     ''')
      conn.commit()
      conn.close()
 
- init_db()
-
- def hash_password(password):
-     return hashlib.sha256(password.encode()).hexdigest()
-
  def signup_user(username, password):
-     conn = sqlite3.connect('user_data.db')
-     c = conn.cursor()
+     conn = sqlite3.connect('user_credentials.db')
+     cursor = conn.cursor()
+     hashed_password = bcrypt.hash(password)
      try:
-         c.execute("INSERT INTO users (username, password) VALUES (?, ?)", (username, hash_password(password)))
+         cursor.execute('INSERT INTO users (username, password) VALUES (?, ?)', (username, hashed_password))
          conn.commit()
-         return True
+         return "Signup successful!"
      except sqlite3.IntegrityError:
-         return False
+         return "Username already exists!"
      finally:
          conn.close()
 
  def login_user(username, password):
-     conn = sqlite3.connect('user_data.db')
-     c = conn.cursor()
-     c.execute("SELECT * FROM users WHERE username=? AND password=?", (username, hash_password(password)))
-     user = c.fetchone()
+     conn = sqlite3.connect('user_credentials.db')
+     cursor = conn.cursor()
+     cursor.execute('SELECT password FROM users WHERE username = ?', (username,))
+     result = cursor.fetchone()
      conn.close()
-     return user is not None
-
- def handle_signup(username, password):
-     if '@mangoes.ai' not in username:
-         return "Username must include '@mangoes.ai'."
-     if signup_user(username, password):
-         return "Signup successful! You can now log in."
-     else:
-         return "Signup failed! Username may already be taken."
-
- def handle_login(username, password):
-     if login_user(username, password):
+     if result and bcrypt.verify(password, result[0]):
          return "Login successful!"
      else:
-         return "Login failed! Please check your credentials."
-
- def build_qa_chain(prompt_template):
-     qa_chain = RetrievalQA.from_chain_type(
-         llm=chat_model,
-         chain_type="stuff",
-         retriever=retriever,
-         chain_type_kwargs={"prompt": prompt_template}
-     )
-     tools = [
-         Tool(
-             name='Knowledge Base',
-             func=qa_chain,
-             description='Use this tool when answering general knowledge queries to get more information about the topic'
-         )
-     ]
-     return qa_chain, tools
-
- def initialize_agent_with_prompt(prompt_template):
-     qa_chain, tools = build_qa_chain(prompt_template)
-     agent = initialize_agent(
-         agent='chat-conversational-react-description',
-         tools=tools,
-         llm=chat_model,
-         verbose=False,
-         max_iteration=5,
-         early_stopping_method='generate',
-         memory=conversational_memory
-     )
-     return agent
-
- def generate_answer(message, choice):
-     logging.debug(f"generate_answer called with prompt_choice: {choice}")
-
-     if choice == "Details":
-         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
-     elif choice == "Conversational":
-         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
-     else:
-         logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
-         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
-     response = agent(message)
-
-     # Extract addresses for mapping regardless of the choice
-     addresses = extract_addresses(response['output'])
-     return response['output'], addresses
-
- def bot(history, choice):
-     if not history:
-         return history
-     response, addresses = generate_answer(history[-1][0], choice)
-     history[-1][1] = ""
-
-     # Generate audio for the entire response in a separate thread
-     with concurrent.futures.ThreadPoolExecutor() as executor:
-         audio_future = executor.submit(generate_audio_elevenlabs, response)
-
-     for character in response:
-         history[-1][1] += character
-         time.sleep(0.05)  # Adjust the speed of text appearance
-         yield history, None
-
-     audio_path = audio_future.result()
-     yield history, audio_path
-
- def add_message(history, message):
-     history.append((message, None))
-     return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)
-
- def print_like_dislike(x: gr.LikeData):
-     print(x.index, x.value, x.liked)
-
- def extract_addresses(response):
-     if not isinstance(response, str):
-         response = str(response)
-     address_patterns = [
-         r'([A-Z].*,\sOmaha,\sNE\s\d{5})',
-         r'(\d{4}\s.*,\sOmaha,\sNE\s\d{5})',
-         r'([A-Z].*,\sNE\s\d{5})',
-         r'([A-Z].*,.*\sSt,\sOmaha,\sNE\s\d{5})',
-         r'([A-Z].*,.*\sStreets,\sOmaha,\sNE\s\d{5})',
-         r'(\d{2}.*\sStreets)',
-         r'([A-Z].*\s\d{2},\sOmaha,\sNE\s\d{5})'
-     ]
-     addresses = []
-     for pattern in address_patterns:
-         addresses.extend(re.findall(pattern, response))
-     return addresses
-
- all_addresses = []
-
- def generate_map(location_names):
-     global all_addresses
-     all_addresses.extend(location_names)
-
-     api_key = os.environ['GOOGLEMAPS_API_KEY']
-     gmaps = GoogleMapsClient(key=api_key)
-
-     m = folium.Map(location=[41.2565, -95.9345], zoom_start=12)
-
-     for location_name in all_addresses:
-         geocode_result = gmaps.geocode(location_name)
-         if geocode_result:
-             location = geocode_result[0]['geometry']['location']
-             folium.Marker(
-                 [location['lat'], location['lng']],
-                 tooltip=f"{geocode_result[0]['formatted_address']}"
-             ).add_to(m)
-
-     map_html = m._repr_html_()
-     return map_html
+         return "Invalid username or password."
 
- template1 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska. Based on weather being a sunny bright day and the today's date is 20th june 2024, use the following pieces of context,
- memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end. If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
- Use fifteen sentences maximum. Keep the answer as detailed as possible. Always include the address, time, date, and
- event type and description. Always say "It was my pleasure!" at the end of the answer.
- {context}
- Question: {question}
- Helpful Answer:"""
+ # Initialize database
+ create_database()
 
- template2 = """You are an expert concierge who is helpful and a renowned guide for Omaha, Nebraska. Based on today's weather being a sunny bright day and today's date is 20th june 2024, take the location or address but don't show the location or address on the output prompts. Use the following pieces of context,
- memory, and message history, along with your knowledge of perennial events in Omaha, Nebraska, to answer the question at the end. If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
- Keep the answer short and sweet and crisp. Always say "It was my pleasure!" at the end of the answer.
- {context}
- Question: {question}
- Helpful Answer:"""
+ def signup(username, password, password_confirmation):
+     if password != password_confirmation:
+         return "Passwords do not match."
+     return signup_user(username, password)
 
- QA_CHAIN_PROMPT_1 = PromptTemplate(input_variables=["context", "question"], template=template1)
- QA_CHAIN_PROMPT_2 = PromptTemplate(input_variables=["context", "question"], template=template2)
+ def login(username, password):
+     return login_user(username, password)
 
  with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
-     with gr.Tab("Login"):
-         login_username = gr.Textbox(label="Username")
-         login_password = gr.Password(label="Password")
-         login_button = gr.Button("Login")
-         login_output = gr.Textbox(label="Login Status", interactive=False)
-         login_button.click(handle_login, inputs=[login_username, login_password], outputs=login_output)
-
-     with gr.Tab("Signup"):
-         signup_username = gr.Textbox(label="Username")
-         signup_password = gr.Password(label="Password")
-         signup_button = gr.Button("Signup")
-         signup_output = gr.Textbox(label="Signup Status", interactive=False)
-         signup_button.click(handle_signup, inputs=[signup_username, signup_password], outputs=signup_output)
-
      with gr.Row():
          with gr.Column():
              state = gr.State()
@@ -1462,6 +1457,7 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
              clear_button = gr.Button("Clear")
              clear_button.click(fn=clear_textbox, inputs=None, outputs=chat_input)
 
+
              audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy')
              audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="SAMLOne_real_time")
 
@@ -1472,20 +1468,36 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
          with gr.Column():
              weather_output = gr.HTML(value=fetch_local_weather())
              news_output = gr.HTML(value=fetch_local_news())
-             events_output = gr.HTML(value=fetch_local_events())
-
+             news_output = gr.HTML(value=fetch_local_events())
+
          with gr.Column():
+
              image_output_1 = gr.Image(value=generate_image(hardcoded_prompt_1), width=400, height=400)
              image_output_2 = gr.Image(value=generate_image(hardcoded_prompt_2), width=400, height=400)
              image_output_3 = gr.Image(value=generate_image(hardcoded_prompt_3), width=400, height=400)
 
              refresh_button = gr.Button("Refresh Images")
              refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3])
+
+     with gr.Row():
+         with gr.Column():
+             gr.Markdown("<h2>Signup</h2>")
+             signup_username = gr.Textbox(placeholder="Username")
+             signup_password = gr.Textbox(placeholder="Password", type="password")
+             signup_password_confirmation = gr.Textbox(placeholder="Confirm Password", type="password")
+             signup_button = gr.Button("Signup")
+             signup_message = gr.Textbox(interactive=False)
+
+             signup_button.click(fn=signup, inputs=[signup_username, signup_password, signup_password_confirmation], outputs=[signup_message])
+
+         with gr.Column():
+             gr.Markdown("<h2>Login</h2>")
+             login_username = gr.Textbox(placeholder="Username")
+             login_password = gr.Textbox(placeholder="Password", type="password")
+             login_button = gr.Button("Login")
+             login_message = gr.Textbox(interactive=False)
+
+             login_button.click(fn=login, inputs=[login_username, login_password], outputs=[login_message])
 
  demo.queue()
  demo.launch(share=True)
-
-
-
-
-
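Taken together, the commit's new auth pieces store a bcrypt hash per user in SQLite and verify it at login. The sketch below condenses that flow into a standalone form; the in-memory database and demo credentials are placeholders for illustration, not values from app.py.

# Condensed version of the signup/login flow this commit adds; uses an
# in-memory SQLite database and placeholder credentials.
import sqlite3
from passlib.hash import bcrypt

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, username TEXT UNIQUE, password TEXT)")

# Signup: store only the bcrypt hash, never the plain password.
conn.execute("INSERT INTO users (username, password) VALUES (?, ?)",
             ("demo_user", bcrypt.hash("demo_pass")))
conn.commit()

# Login: fetch the stored hash and let bcrypt.verify do the comparison.
row = conn.execute("SELECT password FROM users WHERE username = ?", ("demo_user",)).fetchone()
print("Login successful!" if row and bcrypt.verify("demo_pass", row[0]) else "Invalid username or password.")
conn.close()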