Update app.py
Browse files
app.py
CHANGED
|
@@ -22,9 +22,10 @@ import paramiko
|
|
| 22 |
import gc
|
| 23 |
import time
|
| 24 |
import datetime
|
| 25 |
-from diffusers.schedulers import AysSchedules
|
| 26 |
|
| 27 |
from gradio import themes
|
|
|
|
| 28 |
|
| 29 |
torch.backends.cuda.matmul.allow_tf32 = False
|
| 30 |
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
|
|
@@ -91,7 +92,7 @@ DEFAULT_STYLE_NAME = "Style Zero"
|
|
| 91 |
STYLE_NAMES = list(styles.keys())
|
| 92 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 93 |
|
| 94 |
-sampling_schedule = AysSchedules["StableDiffusionXLTimesteps"]
|
| 95 |
|
| 96 |
def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
|
| 97 |
if style_name in styles:
|
|
@@ -149,6 +150,8 @@ def load_and_prepare_model(model_id):
|
|
| 149 |
pipe.to(device)
|
| 150 |
pipe.to(torch.bfloat16)
|
| 151 |
|
|
|
|
|
|
|
| 152 |
pipe.unet.set_default_attn_processor()
|
| 153 |
pipe.vae.set_default_attn_processor()
|
| 154 |
|
|
@@ -229,7 +232,7 @@ def generate_30(
|
|
| 229 |
"guidance_scale": guidance_scale,
|
| 230 |
"num_inference_steps": num_inference_steps,
|
| 231 |
"generator": generator,
|
| 232 |
-
|
| 233 |
"output_type": "pil",
|
| 234 |
}
|
| 235 |
if use_resolution_binning:
|
|
@@ -296,7 +299,7 @@ def generate_60(
|
|
| 296 |
"guidance_scale": guidance_scale,
|
| 297 |
"num_inference_steps": num_inference_steps,
|
| 298 |
"generator": generator,
|
| 299 |
-
|
| 300 |
"output_type": "pil",
|
| 301 |
}
|
| 302 |
if use_resolution_binning:
|
|
@@ -363,7 +366,7 @@ def generate_90(
|
|
| 363 |
"guidance_scale": guidance_scale,
|
| 364 |
"num_inference_steps": num_inference_steps,
|
| 365 |
"generator": generator,
|
| 366 |
-
|
| 367 |
"output_type": "pil",
|
| 368 |
}
|
| 369 |
if use_resolution_binning:
|
|
|
|
| 22 |
import gc
|
| 23 |
import time
|
| 24 |
import datetime
|
| 25 |
+#from diffusers.schedulers import AysSchedules
|
| 26 |
|
| 27 |
from gradio import themes
|
| 28 |
+from hidiffusion import apply_hidiffusion, remove_hidiffusion
|
| 29 |
|
| 30 |
torch.backends.cuda.matmul.allow_tf32 = False
|
| 31 |
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
|
|
|
|
| 92 |
STYLE_NAMES = list(styles.keys())
|
| 93 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 94 |
|
| 95 |
+#sampling_schedule = AysSchedules["StableDiffusionXLTimesteps"]
|
| 96 |
|
| 97 |
def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
|
| 98 |
if style_name in styles:
|
|
|
|
| 150 |
pipe.to(device)
|
| 151 |
pipe.to(torch.bfloat16)
|
| 152 |
|
| 153 |
+apply_hidiffusion(pipe)
|
| 154 |
+
|
| 155 |
pipe.unet.set_default_attn_processor()
|
| 156 |
pipe.vae.set_default_attn_processor()
|
| 157 |
|
|
|
|
| 232 |
"guidance_scale": guidance_scale,
|
| 233 |
"num_inference_steps": num_inference_steps,
|
| 234 |
"generator": generator,
|
| 235 |
+# "timesteps": sampling_schedule,
|
| 236 |
"output_type": "pil",
|
| 237 |
}
|
| 238 |
if use_resolution_binning:
|
|
|
|
| 299 |
"guidance_scale": guidance_scale,
|
| 300 |
"num_inference_steps": num_inference_steps,
|
| 301 |
"generator": generator,
|
| 302 |
+# "timesteps": sampling_schedule,
|
| 303 |
"output_type": "pil",
|
| 304 |
}
|
| 305 |
if use_resolution_binning:
|
|
|
|
| 366 |
"guidance_scale": guidance_scale,
|
| 367 |
"num_inference_steps": num_inference_steps,
|
| 368 |
"generator": generator,
|
| 369 |
+# "timesteps": sampling_schedule,
|
| 370 |
"output_type": "pil",
|
| 371 |
}
|
| 372 |
if use_resolution_binning:
|