Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -12,16 +12,10 @@ token_hf = os.environ["HF_TOKEN"]
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-
-
-ckpt_name = "Hyper-FLUX.1-dev-8steps-lora.safetensors"
-hyper_lora = hf_hub_download(repo_name, ckpt_name)
-
-pipe = FluxPipeline.from_pretrained(base_model_id, token=token_hf)
-pipe.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
+pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype)
+pipe.load_lora_weights(hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"))
 pipe.fuse_lora(lora_scale=0.125)
-pipe.to("cuda", dtype=dtype)
-
+pipe.to(device="cuda", dtype=dtype)
 
 # pipe = FluxPipeline.from_pretrained("sayakpaul/FLUX.1-merged", torch_dtype=torch.bfloat16).to(device)