import gradio as gr
import numpy as np
import random
import torch
import spaces
from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_images
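# Note: live_preview_helpers is assumed to be the Space-local module that re-implements
# the FLUX pipeline call as a generator, decoding intermediate latents with the tiny
# VAE (taef1) for fast previews and only the final image with the full VAE (good_vae).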
# Check whether a GPU device is available
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load the models; if an API token is required, add use_auth_token=True
try:
    taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
    good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae",
                                             torch_dtype=dtype).to(device)
    pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device)
except Exception as e:
    print(f"Model loading error: {e}")
    torch.cuda.empty_cache()
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
# Bind flux_pipe_call_that_returns_an_iterable_of_images to the pipeline as a method
pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
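# __get__ turns the plain function into a bound method, so inside the helper `self`
# is the pipeline and it can access its components (text encoders, transformer, VAE).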
# Define the inference function (allocates ZeroGPU hardware for up to 75 s per call)
@spaces.GPU(duration=75)
def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024,
          guidance_scale=3.5, num_inference_steps=28,
          progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    # Generate the image step by step, streaming intermediate previews
    for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
        output_type="pil",
        good_vae=good_vae,
    ):
        yield img, seed
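# Local usage sketch (illustrative only, not executed by the Space): iterating the
# generator yields progressively refined PIL previews together with the seed used, e.g.
#   for preview, used_seed in infer("a tiny astronaut hatching from an egg on the moon"):
#       preview.save("preview.png")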
# Default example prompts
examples = [
"a tiny astronaut hatching from an egg on the moon",
"a cat holding a sign that says hello world",
"an anime illustration of a wiener schnitzel",
]
css = """
#col-container {
margin: 0 auto;
max-width: 520px;
}
"""
# Build the Gradio interface
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""# FLUX.1 [dev]
12B param rectified flow transformer guidance-distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/)
[[non-commercial license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)]
[[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)]
[[model](https://huggingface.co/black-forest-labs/FLUX.1-dev)]
""")
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Image(label="Result", show_label=False)
        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
                height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
            with gr.Row():
                guidance_scale = gr.Slider(label="Guidance Scale", minimum=1, maximum=15, step=0.1, value=3.5)
                num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=50, step=1, value=28)
        gr.Examples(examples=examples, fn=infer, inputs=[prompt], outputs=[result, seed], cache_examples="lazy")
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result, seed]
    )
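# Note: because infer is a generator, Gradio streams each yielded preview to the Image
# component; gr.on wires both the Run button and pressing Enter in the prompt box to
# the same handler.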
# Launch the Gradio app
demo.launch(share=True)