Update app.py
app.py CHANGED
@@ -21,7 +21,7 @@ import shutil
 import tempfile
 from functools import partial
 from optimum.quanto import quantize, qfloat8, freeze
-from diffusers import
+from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
 
 from src.utils.train_util import instantiate_from_config
 from src.utils.camera_util import (
@@ -72,10 +72,12 @@ else:
     print("CUDA installation not found")
 
 
-
+dtype = torch.bfloat16
+device = "cuda" if torch.cuda.is_available() else "cpu"
 
-
-pipe =
+taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
+pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1, token=huggingface_token).to(device)
+torch.cuda.empty_cache()
 
 # Load and fuse LoRA BEFORE quantizing
 print('Loading and fusing lora, please wait...')
@@ -83,7 +85,6 @@ lora_path = hf_hub_download("gokaygokay/Flux-Game-Assets-LoRA-v2", "game_asst.sa
 pipe.load_lora_weights(lora_path)
 pipe.fuse_lora(lora_scale=1.0)
 pipe.unload_lora_weights()
-pipe.to(device)
 pipe.enable_model_cpu_offload()
 
 
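The comment in the second hunk ("Load and fuse LoRA BEFORE quantizing") refers to a quantization step that is not part of this diff. Below is a minimal sketch of what that step usually looks like with the optimum.quanto imports from line 23; which modules app.py actually quantizes is an assumption here, not something shown in the change:

# Sketch only: quantize the large FLUX modules to float8 after the LoRA is fused,
# then freeze them so the original full-precision weights can be released.
from optimum.quanto import quantize, qfloat8, freeze

quantize(pipe.transformer, weights=qfloat8)
freeze(pipe.transformer)
quantize(pipe.text_encoder_2, weights=qfloat8)  # T5 encoder; assumed, not visible in this diff
freeze(pipe.text_encoder_2)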
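The new import on line 24 also pulls in AutoencoderKL, while only the tiny taef1 VAE is wired into the pipeline in the lines shown. A common pattern, and purely an assumption about the rest of app.py, is to keep the full VAE around for a higher-quality final decode:

# Assumption: load the full FLUX VAE alongside the tiny preview VAE (taef1).
good_vae = AutoencoderKL.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    subfolder="vae",
    torch_dtype=dtype,
    token=huggingface_token,
).to(device)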
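For reference, a minimal sketch of how the fused, CPU-offloaded pipeline would typically be called; the prompt and sampling parameters are illustrative and do not come from app.py:

# Illustrative call only; step count and guidance scale are assumed defaults for FLUX.1-dev.
image = pipe(
    prompt="wooden treasure chest, game asset",
    width=1024,
    height=1024,
    num_inference_steps=28,
    guidance_scale=3.5,
    generator=torch.Generator(device).manual_seed(0),
).images[0]
image.save("chest.png")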