Update app.py

app.py CHANGED
@@ -689,66 +689,66 @@ def generate_map(location_names):
     map_html = m._repr_html_()
     return map_html

-#
-# pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", torch_dtype=torch.float16)
-# pipe.to(device)
-
-# def generate_image(prompt):
-#     with torch.cuda.amp.autocast():
-#         image = pipe(
-#             prompt,
-#             num_inference_steps=28,
-#             guidance_scale=3.0,
-#         ).images[0]
-#     return image
-
-# hardcoded_prompt_1 = "A high quality cinematic image for Toyota Truck in Birmingham skyline shot in th style of Michael Mann"
-# hardcoded_prompt_2 = "A high quality cinematic image for Alabama Quarterback close up emotional shot in th style of Michael Mann"
-# hardcoded_prompt_3 = "A high quality cinematic image for Taylor Swift concert in Birmingham skyline style of Michael Mann"
-
-# def update_images():
-#     image_1 = generate_image(hardcoded_prompt_1)
-#     image_2 = generate_image(hardcoded_prompt_2)
-#     image_3 = generate_image(hardcoded_prompt_3)
-#     return image_1, image_2, image_3
-
+#Flux Code

+import spaces
+import gradio as gr
+import torch
+from PIL import Image
+from diffusers import DiffusionPipeline
+import random

+# Initialize the base model and specific LoRA
+base_model = "black-forest-labs/FLUX.1-dev"
+pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)

+lora_repo = "XLabs-AI/flux-RealismLora"
+trigger_word = ""  # Leave trigger_word blank if not used.
+pipe.load_lora_weights(lora_repo)

+pipe.to("cuda")

-
-from diffusers import FluxPipeline
-import os
+MAX_SEED = 2**32-1

-#
-
+# Hardcoded prompts for generating images
+hardcoded_prompt_1 = "A high quality cinematic image for Toyota Truck in Birmingham skyline shot in the style of Michael Mann"
+hardcoded_prompt_2 = "A high quality cinematic image for Alabama Quarterback close up emotional shot in the style of Michael Mann"
+hardcoded_prompt_3 = "A high quality cinematic image for Taylor Swift concert in Birmingham skyline style of Michael Mann"

-
-
-
+@spaces.GPU(duration=80)
+def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
+    # Set random seed for reproducibility
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+    generator = torch.Generator(device="cuda").manual_seed(seed)

+    # Update the progress bar (0% at start)
+    progress(0, "Starting image generation...")

+    # Generate image with progress updates
+    for i in range(1, steps + 1):
+        # Simulate the processing step (in a real scenario, you would integrate this with your image generation process)
+        if i % (steps // 10) == 0:  # Update every 10% of the steps
+            progress(i / steps * 100, f"Processing step {i} of {steps}...")

-#
-def generate_image_flux(prompt, width=400, height=400, num_inference_steps=4):
+    # Generate image using the pipeline
     image = pipe(
-        prompt=prompt,
+        prompt=f"{prompt} {trigger_word}",
+        num_inference_steps=steps,
+        guidance_scale=cfg_scale,
         width=width,
         height=height,
-
-
-        guidance_scale=0.0
+        generator=generator,
+        joint_attention_kwargs={"scale": lora_scale},
     ).images[0]
-
+
+    # Final update (100%)
+    progress(100, "Completed!")
+
+    yield image, seed


-# Existing image generation function, updated to use the new Flux pipeline

-# Hardcoded prompts for generating images
-hardcoded_prompt_1 = "A high quality cinematic image for Toyota Truck in Birmingham skyline shot in the style of Michael Mann"
-hardcoded_prompt_2 = "A high quality cinematic image for Alabama Quarterback close up emotional shot in the style of Michael Mann"
-hardcoded_prompt_3 = "A high quality cinematic image for Taylor Swift concert in Birmingham skyline style of Michael Mann"


@@ -1496,12 +1496,11 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:

     with gr.Column():

-
-        image_output_1 = gr.Image(value=
-        image_output_2 = gr.Image(value=
-        image_output_3 = gr.Image(value=
+        # Display images generated using Flux with LoRA integration
+        image_output_1 = gr.Image(value=next(run_lora(hardcoded_prompt_1, 7.5, 50, True, 42, 512, 512, 0.5)), width=400, height=400)
+        image_output_2 = gr.Image(value=next(run_lora(hardcoded_prompt_2, 7.5, 50, True, 42, 512, 512, 0.5)), width=400, height=400)
+        image_output_3 = gr.Image(value=next(run_lora(hardcoded_prompt_3, 7.5, 50, True, 42, 512, 512, 0.5)), width=400, height=400)

-        # Refresh button to update images
         refresh_button = gr.Button("Refresh Images")
         refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3])

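Note on the new run_lora: it ends in yield rather than return, so calling it produces a generator, which is why the gr.Image values above wrap it in next(...). A minimal consumption sketch, assuming the same parameter values the UI passes; the prompt string here is illustrative only:

    # Drive the generator; its single yield carries the (image, seed) pair.
    image, seed = next(run_lora("a cinematic Birmingham skyline at dusk",
                                cfg_scale=7.5, steps=50, randomize_seed=True,
                                seed=42, width=512, height=512, lora_scale=0.5))
    image.save("preview.png")  # the pipeline returns a PIL image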
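One wiring gap worth flagging: refresh_button.click still targets update_images, but the commit deletes the old commented-out definition without adding a new one, so unless update_images is defined elsewhere in app.py the refresh handler will raise a NameError. A minimal sketch of a compatible replacement, assuming the same hardcoded arguments used for the initial gr.Image values (the function name is fixed by the click wiring; everything else here is an assumption):

    # Hypothetical replacement for the removed update_images; not part of this commit.
    # run_lora yields (image, seed), so each result is unpacked before returning
    # the three PIL images to the gr.Image outputs.
    def update_images():
        image_1, _ = next(run_lora(hardcoded_prompt_1, 7.5, 50, True, 42, 512, 512, 0.5))
        image_2, _ = next(run_lora(hardcoded_prompt_2, 7.5, 50, True, 42, 512, 512, 0.5))
        image_3, _ = next(run_lora(hardcoded_prompt_3, 7.5, 50, True, 42, 512, 512, 0.5))
        return image_1, image_2, image_3

For the same reason, gr.Image(value=next(run_lora(...))) receives the full (image, seed) tuple rather than just the image, so the same unpacking applies there.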
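Separately, computing the three gr.Image values inline runs three 50-step Flux generations at import time, before the Space's UI can render. A hedged alternative, assuming the update_images sketch above and placed inside the existing with gr.Blocks(...) as demo: block, is to defer the work to Gradio's load event:

    # Leave the images empty when the layout is built...
    image_output_1 = gr.Image(width=400, height=400)
    image_output_2 = gr.Image(width=400, height=400)
    image_output_3 = gr.Image(width=400, height=400)

    # ...and populate them once the app has loaded.
    demo.load(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3])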