Update app.py
app.py CHANGED
@@ -75,7 +75,7 @@ device = 'cuda'
 
 base_model = "black-forest-labs/FLUX.1-dev"
 file_flux = hf_hub_download("marduk191/Flux.1_collection", "flux.1_dev_8x8_e4m3fn-marduk191.safetensors")
-pipe = FluxPipeline.from_single_file(file_flux, torch_dtype=torch.bfloat16, token=huggingface_token)
+pipe = FluxPipeline.from_single_file(file_flux, torch_dtype=torch.bfloat16, token=huggingface_token)
 
 # Load and fuse LoRA BEFORE quantizing
 print('Loading and fusing lora, please wait...')
@@ -147,6 +147,7 @@ ts_cutoff = 2
 
 @spaces.GPU
 def generate_flux_image(prompt, height, width, steps, scales, seed):
+    pipe.to(device)
     return pipe(
         prompt=prompt,
         width=int(height),
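The second hunk follows the usual ZeroGPU pattern: at import time a ZeroGPU Space has no GPU attached, so the pipeline is built on CPU and only moved to CUDA inside the spaces.GPU-decorated handler, while that call is running. A minimal sketch of the pattern, assuming the spaces package available on ZeroGPU hardware; the model id and function name below are illustrative, not this Space's exact code, which loads a single-file checkpoint via hf_hub_download:

import spaces
import torch
from diffusers import FluxPipeline

device = 'cuda'

# Built on CPU at import time: on ZeroGPU no GPU is attached yet.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)

@spaces.GPU  # a GPU is attached only while this function runs
def generate(prompt):
    pipe.to(device)  # safe here: CUDA is available inside the decorated call
    return pipe(prompt=prompt).images[0]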