Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -77,7 +77,7 @@ def load_model(base_model_path, lora_path):
         base_model_path,
         transformer=transformer,
         torch_dtype=torch.bfloat16
-    )
+    )
     pipe.transformer.to(torch.bfloat16)
     gr.Info(str(f"Model loading: {int((80 / 100) * 100)}%"))
     gr.Info(str(f"Inject LoRA: {lora_path}"))
@@ -148,6 +148,7 @@ def predict(
         gray_image_pil = Image.fromarray(gray_image).convert('L')
     else:
         gray_image_pil = input_image["layers"][0]
+    pipe.to("cuda")
     result = pipe(
         prompt=prompt,
         control_image=input_image["background"].convert("RGB"),
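For context, the first hunk sits inside load_model, which builds the diffusion pipeline in bfloat16 around a separately loaded transformer and then injects a LoRA. The surrounding code is not shown in the diff, so the sketch below is only an assumption of the usual diffusers pattern: the class names (FluxTransformer2DModel, FluxControlPipeline) and the load_lora_weights call are guesses, and only the from_pretrained argument lines and the gr.Info messages come from the diff itself.

# Hedged sketch of a load_model like the one the first hunk modifies (assumed, not the author's exact code).
import torch
import gradio as gr
from diffusers import FluxControlPipeline, FluxTransformer2DModel  # assumed classes

def load_model(base_model_path, lora_path):
    transformer = FluxTransformer2DModel.from_pretrained(
        base_model_path, subfolder="transformer", torch_dtype=torch.bfloat16
    )
    pipe = FluxControlPipeline.from_pretrained(
        base_model_path,
        transformer=transformer,
        torch_dtype=torch.bfloat16,
    )
    pipe.transformer.to(torch.bfloat16)           # keep the transformer explicitly in bf16
    gr.Info(f"Model loading: {int((80 / 100) * 100)}%")  # progress toast in the Gradio UI
    gr.Info(f"Inject LoRA: {lora_path}")
    pipe.load_lora_weights(lora_path)             # assumed: standard diffusers LoRA injection
    return pipe                                   # note: left on CPU here; moved to CUDA in predict()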
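The substantive change is the new pipe.to("cuda") call inside predict. This Space runs on ZeroGPU ("Running on Zero"), where a GPU is only attached while a spaces.GPU-decorated function executes, so the pipeline is built on CPU at startup and moved to the GPU right before inference. The decorator, the predict signature, and the return handling below are assumptions beyond what the hunk shows; a minimal sketch:

# Minimal sketch of the ZeroGPU inference pattern the second hunk suggests.
# Only pipe.to("cuda"), the ImageEditor dict access, and the pipe() kwargs appear in the diff.
import spaces  # ZeroGPU helper package available in Spaces running on Zero

# pipe is assumed to be created once at import time by load_model(...) and kept on CPU.

@spaces.GPU  # a GPU is attached only while this function runs
def predict(input_image, prompt):
    pipe.to("cuda")  # move the CPU-resident pipeline onto the just-attached GPU
    result = pipe(
        prompt=prompt,
        control_image=input_image["background"].convert("RGB"),  # ImageEditor background layer
    )
    return result.images[0]  # assumed diffusers-style output object

Loading on CPU and moving to "cuda" inside the decorated function is the pattern ZeroGPU expects, since no GPU is available at import time.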