Pijush2023 committed
Commit 02ceb47 · verified · 1 Parent(s): ad46481

Update app.py

Files changed (1):
  1. app.py +41 -51
app.py CHANGED
@@ -692,41 +692,6 @@ def generate_map(location_names):
  from diffusers import DiffusionPipeline
  import torch
 
- # Load the FLUX model
- def load_flux_model():
-     pipeline = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell")
-     if torch.cuda.is_available():
-         pipeline = pipeline.to("cuda")
-     return pipeline
-
- flux_pipeline = load_flux_model()
-
- # Function to generate an image based on a selected prompt
- def generate_flux_image(selected_prompt):
-     prompt_map = {
-         "Toyota Truck in Birmingham": "A high quality cinematic image for Toyota Truck in Birmingham skyline shot in the style of Michael Mann",
-         "Alabama Quarterback": "A high quality cinematic image for Alabama Quarterback close up emotional shot in the style of Michael Mann",
-         "Taylor Swift Concert": "A high quality cinematic image for Taylor Swift concert in Birmingham skyline style of Michael Mann"
-     }
-     prompt = prompt_map.get(selected_prompt, "")
-     if not prompt:
-         return None  # In case no prompt is matched
-
-     # Generate the image using the FLUX model
-     image = flux_pipeline(prompt).images[0]
-
-     # Save the image to a temporary file
-     with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f:
-         image.save(f.name)
-     return f.name
-
- # Predefine the three prompts for automatic image generation
- default_prompts = [
-     "Toyota Truck in Birmingham",
-     "Alabama Quarterback",
-     "Taylor Swift Concert"
- ]
-
 
@@ -1160,7 +1125,44 @@ def handle_model_choice_change(selected_model):
      # Default case: allow interaction
      return gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)
 
+ import gradio as gr
+ import torch
+ from diffusers import FluxPipeline
+ from PIL import Image
+
+ # Flux Image Generation
+ # Load the Flux pipeline
+ flux_pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16)
+ flux_pipe.enable_model_cpu_offload()  # Save some VRAM by offloading to CPU if needed
+
+ # Function to generate image using Flux
+ def generate_flux_image(prompt: str):
+     generator = torch.Generator("cpu").manual_seed(0)  # For reproducibility
+     image = flux_pipe(
+         prompt,
+         guidance_scale=0.0,
+         num_inference_steps=4,
+         max_sequence_length=256,
+         generator=generator
+     ).images[0]
+
+     # Save image temporarily and return for display
+     temp_image_path = f"temp_flux_image_{hash(prompt)}.png"
+     image.save(temp_image_path)
+
+     return temp_image_path
+
+ # Hardcoded prompts for generating images
+ hardcoded_prompt_1 = "A high quality cinematic image for Toyota Truck in Birmingham skyline shot in the style of Michael Mann"
+ hardcoded_prompt_2 = "A high quality cinematic image for Alabama Quarterback close up emotional shot in the style of Michael Mann"
+ hardcoded_prompt_3 = "A high quality cinematic image for Taylor Swift concert in Birmingham skyline style of Michael Mann"
+
+ # Generate the images automatically upon launching the interface
+ def show_images_on_startup():
+     img1 = generate_flux_image(hardcoded_prompt_1)
+     img2 = generate_flux_image(hardcoded_prompt_2)
+     img3 = generate_flux_image(hardcoded_prompt_3)
+     return img1, img2, img3
 
@@ -1461,23 +1463,11 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
      news_output = gr.HTML(value=fetch_local_news())
      events_output = gr.HTML(value=fetch_local_events())
 
-
      with gr.Column():
-
-         # Image output section
-         gr.Markdown("<h2>Generated Images on Load</h2>")
-         image_output_1 = gr.Image(type="filepath", label="Generated Image 1", width=400, height=400)
-         image_output_2 = gr.Image(type="filepath", label="Generated Image 2", width=400, height=400)
-         image_output_3 = gr.Image(type="filepath", label="Generated Image 3", width=400, height=400)
-
-         # Automatically generate and display the three images on startup using the predefined prompts
-         startup_image_1 = generate_flux_image(default_prompts[0])
-         startup_image_2 = generate_flux_image(default_prompts[1])
-         startup_image_3 = generate_flux_image(default_prompts[2])
-
-         image_output_1.update(value=startup_image_1)
-         image_output_2.update(value=startup_image_2)
-         image_output_3.update(value=startup_image_3)
+         gr.Markdown("<h1 style='color: blue;'>Cinematic Images Generated with Flux</h1>")
+         image_output_1 = gr.Image()
+         image_output_2 = gr.Image()
+         image_output_3 = gr.Image()
 
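Note: the hunks above add generate_flux_image(), show_images_on_startup(), and three gr.Image components, but the event wiring that actually runs the startup function is outside the shown context. Below is a minimal sketch of how that hookup presumably looks using the Gradio Blocks load event; the demo.load(...) call and demo.launch() are illustrative assumptions, not lines from this commit.

import gradio as gr

# Sketch only: assumes show_images_on_startup() from app.py (defined in the
# second hunk above); the load-event wiring itself is hypothetical.
with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
    with gr.Column():
        gr.Markdown("<h1 style='color: blue;'>Cinematic Images Generated with Flux</h1>")
        image_output_1 = gr.Image()
        image_output_2 = gr.Image()
        image_output_3 = gr.Image()

    # Run the generator once when the app loads and route the three returned
    # file paths to the three image components.
    demo.load(
        fn=show_images_on_startup,
        inputs=None,
        outputs=[image_output_1, image_output_2, image_output_3],
    )

demo.launch()

Using the load event is the standard Blocks way to populate components once at startup, which matches the three-path return value of show_images_on_startup().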