Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -26,7 +26,7 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
| 26 |
base_model = "John6666/real-flux-10b-schnell-fp8-flux"
|
| 27 |
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
|
| 28 |
good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
|
| 29 |
-
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
|
| 30 |
|
| 31 |
MAX_SEED = 2**32-1
|
| 32 |
|
|
@@ -167,7 +167,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
|
|
| 167 |
with gr.Accordion("Advanced Settings", open=True):
|
| 168 |
with gr.Column():
|
| 169 |
with gr.Row():
|
| 170 |
-
cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
|
| 171 |
steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=5)
|
| 172 |
|
| 173 |
with gr.Row():
|
|
|
|
| 26 |
base_model = "John6666/real-flux-10b-schnell-fp8-flux"
|
| 27 |
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
|
| 28 |
good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
|
| 29 |
+
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=good_vae).to(device)
|
| 30 |
|
| 31 |
MAX_SEED = 2**32-1
|
| 32 |
|
|
|
|
| 167 |
with gr.Accordion("Advanced Settings", open=True):
|
| 168 |
with gr.Column():
|
| 169 |
with gr.Row():
|
| 170 |
+
cfg_scale = gr.Slider(label="CFG Scale", minimum=0, maximum=20, step=0.5, value=0.0)
|
| 171 |
steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=5)
|
| 172 |
|
| 173 |
with gr.Row():
|