gokaygokay commited on
Commit
53babaf
·
verified ·
1 Parent(s): 5eb4ef7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -1
app.py CHANGED
@@ -31,6 +31,7 @@ from src.utils.camera_util import (
31
  )
32
  from src.utils.mesh_util import save_obj, save_glb
33
  from src.utils.infer_util import remove_background, resize_foreground, images_to_video
 
34
 
35
  # Set up cache path
36
  cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
@@ -75,7 +76,8 @@ else:
75
  device = torch.device('cuda')
76
 
77
  base_model = "black-forest-labs/FLUX.1-dev"
78
- pipe = FluxPipeline.from_single_file("https://huggingface.co/marduk191/Flux.1_collection/resolve/main/flux.1_dev_fp8_fp16t5-marduk191.safetensors", torch_dtype=torch.bfloat16, token=huggingface_token).to(device)
 
79
 
80
  # Load and fuse LoRA BEFORE quantizing
81
  print('Loading and fusing lora, please wait...')
 
31
  )
32
  from src.utils.mesh_util import save_obj, save_glb
33
  from src.utils.infer_util import remove_background, resize_foreground, images_to_video
34
+ from huggingface_hub import hf_hub_download
35
 
36
  # Set up cache path
37
  cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
 
76
  device = torch.device('cuda')
77
 
78
  base_model = "black-forest-labs/FLUX.1-dev"
79
+ file_flux = hf_hub_download("marduk191/Flux.1_collection", "flux.1_dev_fp8_fp16t5-marduk191.safetensors")
80
+ pipe = FluxPipeline.from_single_file(file_flux, torch_dtype=torch.bfloat16, token=huggingface_token).to(device)
81
 
82
  # Load and fuse LoRA BEFORE quantizing
83
  print('Loading and fusing lora, please wait...')