Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -31,6 +31,7 @@ from src.utils.camera_util import (
|
|
31 |
)
|
32 |
from src.utils.mesh_util import save_obj, save_glb
|
33 |
from src.utils.infer_util import remove_background, resize_foreground, images_to_video
|
|
|
34 |
|
35 |
# Set up cache path
|
36 |
cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
|
@@ -75,7 +76,8 @@ else:
|
|
75 |
device = torch.device('cuda')
|
76 |
|
77 |
base_model = "black-forest-labs/FLUX.1-dev"
|
78 |
-
|
|
|
79 |
|
80 |
# Load and fuse LoRA BEFORE quantizing
|
81 |
print('Loading and fusing lora, please wait...')
|
|
|
31 |
)
|
32 |
from src.utils.mesh_util import save_obj, save_glb
|
33 |
from src.utils.infer_util import remove_background, resize_foreground, images_to_video
|
34 |
+
from huggingface_hub import hf_hub_download
|
35 |
|
36 |
# Set up cache path
|
37 |
cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
|
|
|
76 |
device = torch.device('cuda')
|
77 |
|
78 |
base_model = "black-forest-labs/FLUX.1-dev"
|
79 |
+
file_flux = hf_hub_download("marduk191/Flux.1_collection", "flux.1_dev_fp8_fp16t5-marduk191.safetensors")
|
80 |
+
pipe = FluxPipeline.from_single_file(file_flux, torch_dtype=torch.bfloat16, token=huggingface_token).to(device)
|
81 |
|
82 |
# Load and fuse LoRA BEFORE quantizing
|
83 |
print('Loading and fusing lora, please wait...')
|