import gradio as gr
import numpy as np
import torch
import spaces
from diffusers import FluxPipeline, FluxTransformer2DModel
from huggingface_hub import hf_hub_download
from PIL import Image
from diffusers.utils import export_to_gif
import uuid
import random
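# Prefer bfloat16 on GPU for speed and memory; fall back to float32 on CPU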
device = "cuda" if torch.cuda.is_available() else "cpu"
if torch.cuda.is_available():
    torch_dtype = torch.bfloat16
else:
    torch_dtype = torch.float32
def split_image(input_image, num_splits=4):
    # Create a list to store the output images
    output_images = []

    # Split the 1152px-wide strip into four 288x288 frames
    # (each crop takes the top 288px of the 384px-tall image)
    for i in range(num_splits):
        left = i * 288
        right = (i + 1) * 288
        box = (left, 0, right, 288)
        output_images.append(input_image.crop(box))

    return output_images
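# Load FLUX.1-dev and fuse ByteDance's Hyper-SD 16-step LoRA so the
# model produces usable images in roughly 16 denoising steps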
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch_dtype)
pipe.load_lora_weights(hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-16steps-lora.safetensors"))
pipe.fuse_lora(lora_scale=0.125)
pipe.to(device=device, dtype=torch_dtype)
MAX_SEED = np.iinfo(np.int32).max
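# On Hugging Face Spaces, @spaces.GPU requests a ZeroGPU slot for each call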
@spaces.GPU
def infer(prompt, seed=1, randomize_seed=False, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
    # Wrap the user prompt in a template that asks for a 4-frame film strip
    prompt_template = f"A side by side 4 frame image showing high quality consecutive stills from a looped gif animation moving from left to right. The stills are of {prompt}"

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator(device).manual_seed(seed)

    # Generate one wide 1152x384 strip containing all four frames
    image = pipe(
        prompt=prompt_template,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=1,
        generator=generator,
        height=384,
        width=1152
    ).images[0]

    # Split the strip into frames and write them out as a looping gif
    gif_name = f"{uuid.uuid4().hex}-flux.gif"
    export_to_gif(split_image(image, 4), gif_name, fps=4)
    return gif_name, image, seed
examples = [
    "a cute cat holding a sign that reads \"Flux does Video?\"",
    "Will Smith eating pizza",
    "A flying saucer over the White House",
]
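# Center the UI column and keep the full gif-strip preview compact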
css="""
#col-container {
margin: 0 auto;
max-width: 640px;
}
#strip{
max-height: 160px
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""
        # FLUX Animations
        Generate gifs with FLUX [dev] + Hyper-SD 16 steps. Concept idea by [fofr](https://x.com/fofrAI). Diffusers implementation by [Dhruv](https://x.com/_DhruvNair_)
        """)
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)

        result = gr.Image(label="Result", show_label=False)
        result_full = gr.Image(label="Gif Strip", elem_id="strip")
        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            num_inference_steps = gr.Slider(
                label="Number of inference steps",
                minimum=1,
                maximum=32,
                step=1,
                value=16,
            )
        gr.Examples(
            examples=examples,
            inputs=[prompt],
            outputs=[result, result_full, seed],
            fn=infer,
            cache_examples="lazy"
        )

    # Run inference on button click or when the prompt is submitted
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[prompt, seed, randomize_seed, num_inference_steps],
        outputs=[result, result_full, seed]
    )
demo.queue().launch()