import spaces
import torch
from diffusers import (
    FluxPipeline,
    StableDiffusion3Pipeline,
    PixArtSigmaPipeline,
    SanaPipeline,
    AuraFlowPipeline,
    Kandinsky3Pipeline,
    HunyuanDiTPipeline,
    LuminaText2ImgPipeline,
)
import gradio as gr

cache_dir = '/workspace/hf_cache'
# Registry of supported models: display name -> Hub repo id and diffusers pipeline class.
MODEL_CONFIGS = {
    "FLUX": {
        "repo_id": "black-forest-labs/FLUX.1-dev",
        "pipeline_class": FluxPipeline,
        "cache_dir": cache_dir,
    },
    "Stable Diffusion 3.5": {
        "repo_id": "stabilityai/stable-diffusion-3.5-large",
        "pipeline_class": StableDiffusion3Pipeline,
        "cache_dir": cache_dir,
    },
    "PixArt": {
        "repo_id": "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
        "pipeline_class": PixArtSigmaPipeline,
        "cache_dir": cache_dir,
    },
    "SANA": {
        "repo_id": "Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers",
        "pipeline_class": SanaPipeline,
        "cache_dir": cache_dir,
    },
    "AuraFlow": {
        "repo_id": "fal/AuraFlow",
        "pipeline_class": AuraFlowPipeline,
        "cache_dir": cache_dir,
    },
    "Kandinsky": {
        "repo_id": "kandinsky-community/kandinsky-3",
        "pipeline_class": Kandinsky3Pipeline,
        "cache_dir": cache_dir,
    },
    "Hunyuan": {
        "repo_id": "Tencent-Hunyuan/HunyuanDiT-Diffusers",
        "pipeline_class": HunyuanDiTPipeline,
        "cache_dir": cache_dir,
    },
    "Lumina": {
        "repo_id": "Alpha-VLLM/Lumina-Next-SFT-diffusers",
        "pipeline_class": LuminaText2ImgPipeline,
        "cache_dir": cache_dir,
    },
}
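# Optional sketch, not part of the original app: loading a pipeline from scratch on every
# button click re-reads many GB of weights. A minimal in-process cache, assuming enough CPU
# RAM to hold the loaded pipelines, could look like the helper below; `get_pipeline` and
# `_PIPELINE_CACHE` are hypothetical names, not an existing diffusers API. The caller would
# still move the result to the GPU (e.g. `.to("cuda")`) inside the GPU-scoped function.
_PIPELINE_CACHE = {}

def get_pipeline(model_name):
    # Build the pipeline once per process, then reuse it on subsequent clicks.
    if model_name not in _PIPELINE_CACHE:
        config = MODEL_CONFIGS[model_name]
        _PIPELINE_CACHE[model_name] = config["pipeline_class"].from_pretrained(
            config["repo_id"],
            torch_dtype=torch.bfloat16,
        )
    return _PIPELINE_CACHE[model_name]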
def generate_image_with_progress(pipe, prompt, num_steps, guidance_scale=None, seed=None, progress=gr.Progress()):
    generator = None
    if seed is not None:
        generator = torch.Generator("cuda").manual_seed(seed)

    # Report denoising progress to the Gradio UI after each scheduler step.
    def callback(pipe, step_index, timestep, callback_kwargs):
        if step_index is None:
            step_index = 0
        cur_prg = (step_index + 1) / num_steps  # step_index is 0-based, so +1 reaches 100% on the last step
        progress(cur_prg, desc=f"Step {step_index + 1}/{num_steps}")
        return callback_kwargs

    call_kwargs = dict(
        num_inference_steps=num_steps,
        generator=generator,  # previously dropped in the guidance branch, which silently ignored the seed
        output_type="pil",
        callback_on_step_end=callback,
    )
    # Heuristic kept from the original branching: only pass guidance_scale to pipelines that expose it.
    if guidance_scale is not None and hasattr(pipe, "guidance_scale"):
        call_kwargs["guidance_scale"] = guidance_scale
    image = pipe(prompt, **call_kwargs).images[0]
    return image
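# Example direct use of the helper above, outside the Gradio UI; a sketch assuming the
# weights are already downloaded, a CUDA device is active, and a FLUX-typical guidance of 3.5:
#   pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to("cuda")
#   img = generate_image_with_progress(pipe, "a red fox at dawn", num_steps=30, guidance_scale=3.5, seed=42)
#   img.save("fox.png")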
def create_pipeline_logic(model_name, config):
    # @spaces.GPU attaches a ZeroGPU device for the duration of the call; without it the
    # `spaces` import is unused and the handler has no GPU when running on Zero.
    @spaces.GPU
    def start_process(prompt_text, progress=gr.Progress()):
        # gr.Progress must be a default parameter (not constructed in the body)
        # for Gradio to inject the tracker wired to the UI.
        print(f"starting {model_name}")
        num_steps = 30
        guidance_scale = 7.5  # example guidance scale, can be adjusted per model
        seed = 42
        pipe_class = config["pipeline_class"]
        pipe = pipe_class.from_pretrained(
            config["repo_id"],
            # cache_dir=config["cache_dir"],
            torch_dtype=torch.bfloat16,
        ).to("cuda")
        image = generate_image_with_progress(
            pipe, prompt_text, num_steps=num_steps, guidance_scale=guidance_scale, seed=seed, progress=progress
        )
        return f"Seed: {seed}", image

    return start_process
def main():
    with gr.Blocks() as app:
        gr.Markdown("# Dynamic Multiple Model Image Generation")
        prompt_text = gr.Textbox(label="Enter prompt")
        # One tab per configured model, each wired to its own generation handler.
        for model_name, config in MODEL_CONFIGS.items():
            with gr.Tab(model_name):
                button = gr.Button(f"Run {model_name}")
                output = gr.Textbox(label="Status")
                img = gr.Image(label=model_name, height=300)
                start_process = create_pipeline_logic(model_name, config)
                button.click(fn=start_process, inputs=[prompt_text], outputs=[output, img])
    # Queue events so gr.Progress updates stream to the UI (required on older Gradio
    # versions; harmless on Gradio 4, where queueing is the default).
    app.queue().launch()

if __name__ == "__main__":
    main()
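# To run locally, assuming this file is saved as app.py, a CUDA GPU with enough memory for
# the selected model is available, and any gated repos (e.g. FLUX.1-dev) have been accepted
# on the Hugging Face Hub; the dependency list below is an approximation:
#   pip install spaces torch gradio diffusers transformers accelerate sentencepiece
#   python app.py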