import gradio as gr
from diffusers import DiffusionPipeline
import spaces
import torch
from huggingface_hub import hf_hub_download
from concurrent.futures import ProcessPoolExecutor
dev_model = "black-forest-labs/FLUX.1-dev"
schnell_model = "black-forest-labs/FLUX.1-schnell"
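# Both pipelines are loaded once at startup in bfloat16 and kept resident for reuse across requests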
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe_dev = DiffusionPipeline.from_pretrained(dev_model, torch_dtype=torch.bfloat16).to(device)
pipe_schnell = DiffusionPipeline.from_pretrained(schnell_model, torch_dtype=torch.bfloat16).to(device)
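# Each generation function is decorated with @spaces.GPU so ZeroGPU attaches a GPU only for the duration of the call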
@spaces.GPU
def run_dev_hyper(prompt):
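    # ByteDance Hyper-SD 8-step LoRA for FLUX.1-dev, applied at a LoRA scale of 0.125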
    repo_name = "ByteDance/Hyper-SD"
    ckpt_name = "Hyper-FLUX.1-dev-8steps-lora.safetensors"
    pipe_dev.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
    image = pipe_dev(prompt, num_inference_steps=8, joint_attention_kwargs={"scale": 0.125}).images[0]
    pipe_dev.unload_lora_weights()
    return image
@spaces.GPU
def run_dev_turbo(prompt):
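    # alimama-creative FLUX.1-Turbo-Alpha LoRA, also run at 8 inference steps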
    repo_name = "alimama-creative/FLUX.1-Turbo-Alpha"
    ckpt_name = "diffusion_pytorch_model.safetensors"
    pipe_dev.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
    image = pipe_dev(prompt, num_inference_steps=8).images[0]
    pipe_dev.unload_lora_weights()
    return image
@spaces.GPU
def run_schnell(prompt):
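    # FLUX.1-schnell needs no extra LoRA; the pipeline defaults are used (4 steps, per the UI label)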
    image = pipe_schnell(prompt).images[0]
    return image
def run_parallel_models(prompt):
    # Dispatch the three generators to separate worker processes so the
    # @spaces.GPU calls can run concurrently instead of one after another.
    with ProcessPoolExecutor(3) as e:
        future_hyper = e.submit(run_dev_hyper, prompt)
        future_turbo = e.submit(run_dev_turbo, prompt)
        future_schnell = e.submit(run_schnell, prompt)
    # Return the images in the same order as the outputs: schnell, hyper, turbo
    return future_schnell.result(), future_hyper.result(), future_turbo.result()
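# This flag appears to tell the ZeroGPU runtime to treat the orchestrator as a non-GPU
# function; the @spaces.GPU workers it dispatches to are what actually hold GPU time.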
run_parallel_models.zerogpu = True
with gr.Blocks() as demo:
    gr.Markdown("# Fast Flux Comparison")
    with gr.Row():
        prompt = gr.Textbox(label="Prompt")
        submit = gr.Button()
    with gr.Row():
        schnell = gr.Image(label="FLUX Schnell (4 steps)")
        hyper = gr.Image(label="FLUX.1[dev] HyperFLUX (8 steps)")
        turbo = gr.Image(label="FLUX.1[dev]-Turbo-Alpha (8 steps)")
    submit.click(
        fn=run_parallel_models,
        inputs=[prompt],
        outputs=[schnell, hyper, turbo]
    )
demo.launch()
