import gradio as gr
import numpy as np
import random
import spaces
import torch
from diffusers import FluxPriorReduxPipeline, FluxPipeline
from diffusers.utils import load_image
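# Bounds used by the seed and image-size sliders in the UI.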
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
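# Base FLUX.1-dev text-to-image pipeline, kept in bfloat16 on the GPU.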
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16
).to("cuda")
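# Redux prior pipeline; it reuses the base pipeline's text encoders and tokenizers
# so those weights are only loaded once.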
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Redux-dev",
    text_encoder=pipe.text_encoder,
    tokenizer=pipe.tokenizer,
    text_encoder_2=pipe.text_encoder_2,
    tokenizer_2=pipe.tokenizer_2,
    torch_dtype=torch.bfloat16
).to("cuda")
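# @spaces.GPU requests a ZeroGPU device for the duration of each call.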
@spaces.GPU
def infer(control_image, prompt, reference_scale=0.03, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
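    # Encode the reference image (plus the optional prompt) into FLUX conditioning embeddings.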
    pipe_prior_output = pipe_prior_redux(control_image, prompt=prompt)
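    # Reference-strength control: build an additive attention bias over the joint sequence.
    # The cond_size columns right after the text tokens correspond to the Redux image tokens;
    # adding log(reference_scale) (negative for scale < 1) reduces the attention they receive.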
    cond_size = 729
    hidden_size = 4096
    max_sequence_length = 512
    full_attention_size = max_sequence_length + hidden_size + cond_size
    attention_mask = torch.zeros(
        (full_attention_size, full_attention_size), device="cuda", dtype=torch.bfloat16
    )
    bias = torch.log(
        torch.tensor(reference_scale, dtype=torch.bfloat16, device="cuda").clamp(min=1e-5, max=1)
    )
    attention_mask[:, max_sequence_length : max_sequence_length + cond_size] = bias
    joint_attention_kwargs = dict(attention_mask=attention_mask)
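    # Run the base pipeline with the prior's embeddings and the attention bias.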
    image = pipe(
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=torch.Generator("cpu").manual_seed(seed),
        joint_attention_kwargs=joint_attention_kwargs,
        **pipe_prior_output,
    ).images[0]
    return image, seed
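# Center the interface column and cap its width.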
css = """
#col-container {
    margin: 0 auto;
    max-width: 960px;
}
"""
# Gradio UI: reference image and prompt on the left, result on the right, advanced settings below.
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""# FLUX.1 Redux [dev]
An adapter for FLUX [dev] to create image variations
[[non-commercial license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)] [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-dev)]
        """)
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Image to create variations", type="pil")
                prompt = gr.Text(
                    label="Prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter your prompt",
                    container=False,
                )
                reference_scale = gr.Slider(
                    label="Masking Scale",
                    minimum=0.01,
                    maximum=0.08,
                    step=0.001,
                    value=0.03,
                )
                run_button = gr.Button("Run")
            result = gr.Image(label="Result", show_label=False)
        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance Scale",
                    minimum=1,
                    maximum=15,
                    step=0.1,
                    value=3.5,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=28,
                )
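    # Wire the Run button to the inference function.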
    gr.on(
        triggers=[run_button.click],
        fn=infer,
        inputs=[input_image, prompt, reference_scale, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result, seed],
    )
demo.launch()