Update app.py
app.py CHANGED
@@ -689,62 +689,96 @@ def generate_map(location_names):
     map_html = m._repr_html_()
     return map_html
 
-#Flux Code
-
-import spaces
-import gradio as gr
-import torch
-from PIL import Image
 from diffusers import DiffusionPipeline
-import
+import torch
+
+# Load the FLUX model
+def load_flux_model():
+    pipeline = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell")
+    if torch.cuda.is_available():
+        pipeline = pipeline.to("cuda")
+    return pipeline
+
+flux_pipeline = load_flux_model()
+
+# Function to generate an image based on a selected prompt
+def generate_flux_image(selected_prompt):
+    prompt_map = {
+        "Toyota Truck in Birmingham": "A high quality cinematic image for Toyota Truck in Birmingham skyline shot in the style of Michael Mann",
+        "Alabama Quarterback": "A high quality cinematic image for Alabama Quarterback close up emotional shot in the style of Michael Mann",
+        "Taylor Swift Concert": "A high quality cinematic image for Taylor Swift concert in Birmingham skyline style of Michael Mann"
+    }
+    prompt = prompt_map.get(selected_prompt, "")
+    if not prompt:
+        return None # In case no prompt is matched
+
+    # Generate the image using the FLUX model
+    image = flux_pipeline(prompt).images[0]
+
+    # Save the image to a temporary file
+    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f:
+        image.save(f.name)
+        return f.name
+
+# Predefine the three prompts for automatic image generation
+default_prompts = [
+    "Toyota Truck in Birmingham",
+    "Alabama Quarterback",
+    "Taylor Swift Concert"
+]
+
+# Add image-related components and integrate it with Gradio
+with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
+
+    with gr.Row():
+        with gr.Column():
+            state = gr.State()
+
+            chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
+            choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational")
+            retrieval_mode = gr.Radio(label="Retrieval Mode", choices=["VDB", "KGF"], value="VDB")
+            model_choice = gr.Dropdown(label="Choose Model", choices=["LM-1", "LM-2", "LM-3"], value="LM-1")
 
-#
-
-pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
+            # Link the dropdown change to handle_model_choice_change
+            model_choice.change(fn=handle_model_choice_change, inputs=model_choice, outputs=[retrieval_mode, choice, choice])
 
-
-trigger_word = "" # Leave trigger_word blank if not used.
-pipe.load_lora_weights(lora_repo)
+            gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
 
-
+            chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!", placeholder="Hey Radar...!!")
+            tts_choice = gr.Radio(label="Select TTS System", choices=["Alpha", "Beta"], value="Alpha")
+
+            retriever_button = gr.Button("Retriever")
 
-
+            clear_button = gr.Button("Clear")
+            clear_button.click(lambda: [None, None], outputs=[chat_input, state])
 
-
-
-
-hardcoded_prompt_3 = "A high quality cinematic image for Taylor Swift concert in Birmingham skyline style of Michael Mann"
+            gr.Markdown("<h1 style='color: red;'>Radar Map</h1>", elem_id="Map-Radar")
+            location_output = gr.HTML()
+            audio_output = gr.Audio(interactive=False, autoplay=True)
 
-
-
-
-
-
+            # Image output section
+            gr.Markdown("<h2>Generated Images on Load</h2>")
+            image_output_1 = gr.Image(type="filepath", label="Generated Image 1", width=400, height=400)
+            image_output_2 = gr.Image(type="filepath", label="Generated Image 2", width=400, height=400)
+            image_output_3 = gr.Image(type="filepath", label="Generated Image 3", width=400, height=400)
 
-
-
+            # Automatically generate and display the three images on startup using the predefined prompts
+            startup_image_1 = generate_flux_image(default_prompts[0])
+            startup_image_2 = generate_flux_image(default_prompts[1])
+            startup_image_3 = generate_flux_image(default_prompts[2])
 
-
-
-
-if i % (steps // 10) == 0: # Update every 10% of the steps
-    progress(i / steps * 100, f"Processing step {i} of {steps}...")
+            image_output_1.update(value=startup_image_1)
+            image_output_2.update(value=startup_image_2)
+            image_output_3.update(value=startup_image_3)
 
-
-
-
-
-    guidance_scale=cfg_scale,
-    width=width,
-    height=height,
-    generator=generator,
-    joint_attention_kwargs={"scale": lora_scale},
-).images[0]
+        with gr.Column():
+            weather_output = gr.HTML(value=fetch_local_weather())
+            news_output = gr.HTML(value=fetch_local_news())
+            events_output = gr.HTML(value=fetch_local_events())
 
-#
-progress(100, "Completed!")
+    # Rest of the Gradio interface setup
 
-
+demo.launch(show_error=True)
 
 
 
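Note on the new FLUX block: the hunk above removes the local `import gradio as gr` and never imports `tempfile`, so the added `generate_flux_image` and `gr.*` calls rely on imports made elsewhere in app.py. For reference, a minimal standalone sketch of the same call path; the bfloat16 dtype and the schnell-specific step/guidance settings are assumptions about reasonable usage, not part of this commit:

import tempfile

import torch
from diffusers import DiffusionPipeline

# Loading in bfloat16 keeps the FLUX transformer within typical GPU memory;
# the commit loads in the default float32, which needs roughly twice the RAM.
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
if torch.cuda.is_available():
    pipe = pipe.to("cuda")

# FLUX.1-schnell is timestep-distilled, so a few steps with guidance_scale=0.0
# is the intended usage.
image = pipe(
    "A high quality cinematic image for Taylor Swift concert in Birmingham skyline style of Michael Mann",
    num_inference_steps=4,
    guidance_scale=0.0,
).images[0]

# Same temporary-file hand-off that generate_flux_image() uses.
with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f:
    image.save(f.name)
    print(f.name)
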
@@ -1495,13 +1529,20 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
 
         with gr.Column():
 
-
-
-
-
+            # Image output section
+            gr.Markdown("<h2>Generated Images on Load</h2>")
+            image_output_1 = gr.Image(type="filepath", label="Generated Image 1", width=400, height=400)
+            image_output_2 = gr.Image(type="filepath", label="Generated Image 2", width=400, height=400)
+            image_output_3 = gr.Image(type="filepath", label="Generated Image 3", width=400, height=400)
+
+            # Automatically generate and display the three images on startup using the predefined prompts
+            startup_image_1 = generate_flux_image(default_prompts[0])
+            startup_image_2 = generate_flux_image(default_prompts[1])
+            startup_image_3 = generate_flux_image(default_prompts[2])
 
-
-
+            image_output_1.update(value=startup_image_1)
+            image_output_2.update(value=startup_image_2)
+            image_output_3.update(value=startup_image_3)
 
 
 
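Note on the startup images in both hunks: calling `image_output_1.update(value=...)` while the Blocks context is being built does not render anything; Gradio only applies component updates that are returned from an event listener. (Hunk 1 also adds a second module-level `with gr.Blocks(...) as demo:` ahead of the existing one visible in hunk 2's context, so the later `demo` rebinds the name.) A sketch of the idiomatic startup hook, reusing the names this commit defines (`generate_flux_image`, `default_prompts`, `image_output_*`); it would sit inside the `with gr.Blocks(...) as demo:` block:

def load_startup_images():
    # Runs on each page load; returns one filepath per gr.Image output.
    return [generate_flux_image(p) for p in default_prompts]

demo.load(
    fn=load_startup_images,
    inputs=None,
    outputs=[image_output_1, image_output_2, image_output_3],
)

Because `demo.load` fires on every page visit, generating the three files once at startup and returning the cached paths from `load_startup_images` would avoid re-running the diffusion model per visit.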