Update app.py
app.py CHANGED
@@ -890,7 +890,7 @@ from datetime import datetime
 import numpy as np
 from gtts import gTTS
 from googlemaps import Client as GoogleMapsClient
-from diffusers import
+from diffusers import StableDiffusionPipeline
 import concurrent.futures
 from PIL import Image
 
@@ -909,13 +909,10 @@ import hashlib
 hf_token = os.getenv("HF_TOKEN")
 
 if hf_token is None:
-    # If the token is not set, prompt for it (this should be done securely)
     print("Please set your Hugging Face token in the environment variables.")
 else:
-    # Login using the token
     login(token=hf_token)
 
-    # Your application logic goes here
     print("Logged in successfully to Hugging Face Hub!")
 
 # Set up logging
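The removed comments were noise; the token check itself is sound. For reference, a minimal standalone sketch of the same login flow, assuming only that huggingface_hub's login() is available and HF_TOKEN is set as a Space secret:

import os
from huggingface_hub import login

hf_token = os.getenv("HF_TOKEN")
if hf_token is None:
    # Fail fast: a Space has no interactive prompt to fall back on.
    raise RuntimeError("Set HF_TOKEN in the environment (e.g. Space secrets).")
login(token=hf_token)  # authenticates this process against the Hugging Face Hub
print("Logged in successfully to Hugging Face Hub!")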
@@ -934,7 +931,7 @@ retriever = vectorstore.as_retriever(search_kwargs={'k': 5})
 
 # Initialize ChatOpenAI model
 chat_model = ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'],
-                        temperature=0, model='gpt-
+                        temperature=0, model='gpt-4')
 
 conversational_memory = ConversationBufferWindowMemory(
     memory_key='chat_history',
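For context, this is how the model and the window memory are typically wired together in LangChain. A sketch assuming the langchain-openai package; k=5 is an assumed window size, the real value sits outside this hunk:

import os
from langchain_openai import ChatOpenAI
from langchain.memory import ConversationBufferWindowMemory

chat_model = ChatOpenAI(
    api_key=os.environ['OPENAI_API_KEY'],
    temperature=0,   # deterministic replies for retrieval-style QA
    model='gpt-4',
)
conversational_memory = ConversationBufferWindowMemory(
    memory_key='chat_history',
    k=5,                   # assumed: keep only the last 5 exchanges
    return_messages=True,
)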
@@ -946,7 +943,6 @@ def get_current_time_and_date():
     now = datetime.now()
     return now.strftime("%Y-%m-%d %H:%M:%S")
 
-# Example usage
current_time_and_date = get_current_time_and_date()
 
 def fetch_local_events():
@@ -1203,7 +1199,7 @@ def show_map_if_details(history,choice):
     if choice in ["Details", "Conversational"]:
         return gr.update(visible=True), update_map_with_response(history)
     else:
-        return gr.update(visible
+        return gr.update(visible=False), ""
 
 def generate_audio_elevenlabs(text):
     XI_API_KEY = os.environ['ELEVENLABS_API']
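The one-line fix matters because a Gradio event handler must return exactly one value per declared output, and the else branch previously returned a single (truncated) value. A runnable sketch of the corrected shape, with a stub standing in for the app's real map renderer:

import gradio as gr

def update_map_with_response(history):
    return "<div>map placeholder</div>"  # stub for the app's real map HTML

def show_map_if_details(history, choice):
    if choice in ["Details", "Conversational"]:
        return gr.update(visible=True), update_map_with_response(history)
    # Both branches now return two values: a visibility update and the map HTML.
    return gr.update(visible=False), ""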
@@ -1236,7 +1232,7 @@ def generate_audio_elevenlabs(text):
         return None
 
 # Stable Diffusion setup
-pipe =
+pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-3-base", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
 
 def generate_image(prompt):
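The assignment was previously truncated mid-line, which would crash at import time. One caveat with the restored version: the unconditional pipe.to("cuda") raises on CPU-only hardware. A hedged sketch with a device guard, keeping the model id exactly as the diff has it (whether that repo exists on the Hub is not verified here):

import torch
from diffusers import StableDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32  # fp16 needs a GPU

pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-base",  # id copied from the diff, unverified
    torch_dtype=dtype,
)
pipe = pipe.to(device)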
@@ -1466,7 +1462,6 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
     clear_button = gr.Button("Clear")
     clear_button.click(fn=clear_textbox, inputs=None, outputs=chat_input)
 
-
     audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy')
     audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="SAMLOne_real_time")
 
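For reference, the callback passed to .stream(...) receives the running state plus each new (sample_rate, ndarray) chunk and must return the updated state and the text for chat_input. A sketch of that contract; the real transcribe_function body lives elsewhere in app.py and the decoding step is stubbed:

import numpy as np

def transcribe_function(state, new_chunk):
    sample_rate, audio = new_chunk   # streaming numpy audio arrives as (rate, array)
    state = state or []
    state.append(audio)              # accumulate chunks across calls
    full_audio = np.concatenate(state)
    text = ""                        # an ASR model would decode full_audio here
    return state, text               # updated state + text shown in chat_input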
@@ -1477,8 +1472,8 @@
     with gr.Column():
         weather_output = gr.HTML(value=fetch_local_weather())
         news_output = gr.HTML(value=fetch_local_news())
-
-
+        events_output = gr.HTML(value=fetch_local_events())
+
     with gr.Column():
         image_output_1 = gr.Image(value=generate_image(hardcoded_prompt_1), width=400, height=400)
         image_output_2 = gr.Image(value=generate_image(hardcoded_prompt_2), width=400, height=400)
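The newly wired events_output assumes fetch_local_events() returns an HTML string, mirroring the weather and news widgets. A minimal sketch of such a fetcher; the endpoint and JSON fields are placeholders, not the app's real data source:

import requests

def fetch_local_events():
    resp = requests.get("https://example.com/events.json", timeout=10)  # placeholder URL
    events = resp.json().get("events", [])[:5]
    items = "".join(f"<li>{e.get('name', '')} ({e.get('date', '')})</li>" for e in events)
    return f"<ul>{items}</ul>"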