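"""FLUX.1-dev text-to-image demo for Hugging Face Spaces (ZeroGPU).

Loads black-forest-labs/FLUX.1-dev with ByteDance's Hyper-SD 8-step LoRA fused in
and serves a small Gradio UI for fast image generation.
"""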
import spaces
import argparse
import os
import time
from os import path
from safetensors.torch import load_file
from huggingface_hub import hf_hub_download
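
# Keep all Hugging Face downloads in a local "models" directory next to this file.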
cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
os.environ["TRANSFORMERS_CACHE"] = cache_path
os.environ["HF_HUB_CACHE"] = cache_path
os.environ["HF_HOME"] = cache_path
import gradio as gr
import torch
from diffusers import FluxPipeline
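
# Allow TF32 matmuls for faster inference on Ampere and newer GPUs.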
torch.backends.cuda.matmul.allow_tf32 = True
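
# Minimal context manager that logs how long a block of code takes.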
class timer:
    def __init__(self, method_name="timed process"):
        self.method = method_name

    def __enter__(self):
        self.start = time.time()
        print(f"{self.method} starts")

    def __exit__(self, exc_type, exc_val, exc_tb):
        end = time.time()
        print(f"{self.method} took {round(end - self.start, 2)}s")

if not path.exists(cache_path):
    os.makedirs(cache_path, exist_ok=True)
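
# Load FLUX.1-dev in bfloat16, fuse the Hyper-SD 8-step LoRA at a 0.125 scale,
# and move the pipeline to the GPU.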
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.load_lora_weights(hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"))
pipe.fuse_lora(lora_scale=0.125)
pipe.to(device="cuda", dtype=torch.bfloat16)
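
# Hide the default Gradio footer.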
css = """
footer {
visibility: hidden;
}
"""
with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
    with gr.Row():
        with gr.Column(scale=3):
            with gr.Group():
                prompt = gr.Textbox(
                    label="Your Image Description",
                    placeholder="E.g., A serene landscape with mountains and a lake at sunset",
                    lines=3
                )

                with gr.Accordion("Advanced Settings", open=False):
                    with gr.Group():
                        with gr.Row():
                            height = gr.Slider(label="Height", minimum=256, maximum=1152, step=64, value=1024)
                            width = gr.Slider(label="Width", minimum=256, maximum=1152, step=64, value=1024)

                        with gr.Row():
                            steps = gr.Slider(label="Inference Steps", minimum=6, maximum=25, step=1, value=8)
                            scales = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=5.0, step=0.1, value=3.5)

                        seed = gr.Number(label="Seed (for reproducibility)", value=3413, precision=0)

                generate_btn = gr.Button("Generate Image", variant="primary", scale=1)

        with gr.Column(scale=4):
            output = gr.Image(label="Your Generated Image")
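
    # spaces.GPU requests a ZeroGPU device for the duration of each call.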
    @spaces.GPU
    def process_image(height, width, steps, scales, prompt, seed):
        global pipe
        # Run inference without gradients, in bfloat16 autocast, and time it.
        with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
            return pipe(
                prompt=[prompt],
                generator=torch.Generator().manual_seed(int(seed)),
                num_inference_steps=int(steps),
                guidance_scale=float(scales),
                height=int(height),
                width=int(width),
                max_sequence_length=256
            ).images[0]

    # Wire the button to the inference function.
    generate_btn.click(
        process_image,
        inputs=[height, width, steps, scales, prompt, seed],
        outputs=output
    )
if __name__ == "__main__":
demo.launch()