Update app.py
app.py
CHANGED
```diff
@@ -25,7 +25,7 @@ pipe = pipe.to(device)
 
 
 transformer2 = FluxTransformer2DModel.from_pretrained(
-
+    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=dtype
 )
 pipe2 = FluxPipeline.from_pretrained(bfl_repo, transformer=None, torch_dtype=dtype)
 pipe2.transformer = transformer2
```
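The added line loads the FLUX.1-dev transformer weights into `transformer2`, while `pipe2` is still built from `bfl_repo` with `transformer=None` and then given the dev transformer by assignment. A minimal sketch of that two-pipeline pattern, assuming `bfl_repo` is the FLUX.1-schnell checkpoint behind `pipe`; the component-sharing kwargs are an illustration for avoiding a second copy of the text encoders and VAE, not something this commit itself does:

```python
import torch
from diffusers import FluxPipeline, FluxTransformer2DModel

dtype = torch.bfloat16
bfl_repo = "black-forest-labs/FLUX.1-schnell"  # assumption: the base repo used for `pipe`

# Base pipeline with its own transformer.
pipe = FluxPipeline.from_pretrained(bfl_repo, torch_dtype=dtype)

# Second transformer taken from the dev checkpoint, as in the diff.
transformer2 = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=dtype
)

# Build the second pipeline without a transformer, reuse the heavy shared
# components from `pipe`, then attach the dev transformer.
pipe2 = FluxPipeline.from_pretrained(
    bfl_repo,
    transformer=None,
    text_encoder=pipe.text_encoder,
    text_encoder_2=pipe.text_encoder_2,
    tokenizer=pipe.tokenizer,
    tokenizer_2=pipe.tokenizer_2,
    vae=pipe.vae,
    torch_dtype=dtype,
)
pipe2.transformer = transformer2
```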
```diff
@@ -39,16 +39,16 @@ pipe.load_lora_weights(
     weight_name="urae_2k_adapter.safetensors",
     adapter_name="2k",
 )
-pipe.load_lora_weights(
-    "Huage001/URAE",
-    weight_name="urae_4k_adapter_lora_conversion_dev.safetensors",
-    adapter_name="4k_dev",
-)
-pipe.load_lora_weights(
-    "Huage001/URAE",
-    weight_name="urae_4k_adapter_lora_conversion_schnell.safetensors",
-    adapter_name="4k_schnell",
-)
+# pipe.load_lora_weights(
+#     "Huage001/URAE",
+#     weight_name="urae_4k_adapter_lora_conversion_dev.safetensors",
+#     adapter_name="4k_dev",
+# )
+# pipe.load_lora_weights(
+#     "Huage001/URAE",
+#     weight_name="urae_4k_adapter_lora_conversion_schnell.safetensors",
+#     adapter_name="4k_schnell",
+# )
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 4096
 USE_ZERO_GPU = True
```
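Only the 2k URAE adapter remains registered on `pipe`; the two 4k adapters are commented out rather than deleted. For reference, a minimal sketch of the multi-adapter pattern that code was using, should it be restored later; `set_adapters` (the same call that appears in the commented-out `infer` logic below) selects which registered adapter is active:

```python
# Register adapters under distinct names on the shared pipeline.
pipe.load_lora_weights(
    "Huage001/URAE",
    weight_name="urae_2k_adapter.safetensors",
    adapter_name="2k",
)
pipe.load_lora_weights(
    "Huage001/URAE",
    weight_name="urae_4k_adapter_lora_conversion_dev.safetensors",
    adapter_name="4k_dev",
)

# Switch the active adapter per request.
pipe.set_adapters("2k")
pipe.set_adapters(["4k_dev"], adapter_weights=[1.0])  # or with an explicit weight
```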
```diff
@@ -66,12 +66,13 @@ def infer(
     model='2k',
 ):
     print("Using model:", model)
-    if model == "2k":
-        pipe.vae.enable_tiling(True)
-        pipe.set_adapters("2k")
-    elif model == "4k":
-        pipe.vae.enable_tiling(True)
-        pipe.set_adapters(f"4k_{flux_model}")
+    # if model == "2k":
+    #     pipe.vae.enable_tiling(True)
+    #     pipe.set_adapters("2k")
+    # # elif model == "4k":
+    #     pipe.vae.enable_tiling(True)
+    #     pipe.set_adapters(f"4k_{flux_model}")
+    pipe = pipe if model == "schnell" else pipe2
 
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
```
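The new line picks the pipeline by model name instead of switching adapters. One caveat: unless `pipe` is declared `global` earlier in `infer` (not visible in this hunk), assigning to `pipe` makes it a local name, so the right-hand side reads an uninitialized local and raises `UnboundLocalError`. A minimal sketch that sidesteps the rebinding and shows how the selected pipeline is typically driven; the signature and call arguments here are assumptions for illustration, not lines from this commit:

```python
# Sketch only: relies on the module-level pipe, pipe2, device, MAX_SEED,
# random and torch already set up in app.py.
def infer(prompt, model, seed, randomize_seed, width, height):
    # Bind the chosen pipeline to a fresh local name instead of reusing `pipe`.
    active_pipe = pipe if model == "schnell" else pipe2

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)

    image = active_pipe(
        prompt=prompt,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image, seed
```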
```diff
@@ -135,14 +136,14 @@ with gr.Blocks(css=css) as demo:
 
         gr.Markdown("### Setting:")
 
-
-
-
-
-
-
-
-
+        model = gr.Radio(
+            label="Model",
+            choices=[
+                ("FLUX.1 dev", "dev"),
+                ("FLUX.1 schnell", "schnell"),
+            ],
+            value="2k",
+        )
 
         with gr.Row():
             width = gr.Slider(
```
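The radio maps display labels to the values "dev" and "schnell", but keeps `value="2k"`, which is not one of those values, so the control will likely start without a valid selection. A minimal sketch of the same component with a default that matches the choices; the `value="dev"` default is an assumption, not part of this commit:

```python
import gradio as gr

model = gr.Radio(
    label="Model",
    choices=[
        ("FLUX.1 dev", "dev"),          # (display label, value passed to infer)
        ("FLUX.1 schnell", "schnell"),
    ],
    value="dev",  # assumption: default to one of the actual choice values
)
```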
```diff
@@ -187,7 +188,7 @@ with gr.Blocks(css=css) as demo:
         fn=infer,
         inputs=[
             prompt,
-
+            model,
             seed,
             randomize_seed,
             width,
```
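Gradio passes `inputs` to `fn` positionally, so placing `model` right after `prompt` assumes `model` is also the second parameter of `infer`; the signature fragment above shows `model='2k'` as the last declared parameter, so the ordering is worth double-checking. A minimal sketch of the full event wiring, where `run_button`, `result` and `height` are hypothetical names not shown in this diff:

```python
run_button.click(  # hypothetical trigger; the actual trigger is outside this hunk
    fn=infer,
    inputs=[prompt, model, seed, randomize_seed, width, height],
    outputs=[result, seed],  # hypothetical outputs
)
```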