Update app.py
app.py CHANGED
@@ -27,6 +27,8 @@ import datetime
 from gradio import themes
 from hidiffusion import apply_hidiffusion, remove_hidiffusion
 
+import gc
+
 torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
 torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
@@ -161,7 +163,7 @@ def load_and_prepare_model(model_id):
     print(f'watermark: {pipe.watermark}')
     print(f'image_processor: {pipe.image_processor}')
     print(f'feature_extractor: {pipe.feature_extractor}')
-
+    print(f'init noise scale: {pipe.scheduler.init_noise_sigma}')
     #print(f'UNET: {pipe.unet}')
     pipe.watermark=None
     pipe.safety_checker=None
@@ -221,6 +223,8 @@ def generate_30(
     num_images: int = 1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
+    torch.cuda.empty_cache()
+    gc.collect()
     global models
     pipe = models[model_choice]
     seed = int(randomize_seed_fn(seed, randomize_seed))
@@ -288,6 +292,8 @@ def generate_60(
     num_images: int = 1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
+    torch.cuda.empty_cache()
+    gc.collect()
     global models
     pipe = models[model_choice]
     seed = int(randomize_seed_fn(seed, randomize_seed))
@@ -355,6 +361,8 @@ def generate_90(
     num_images: int = 1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
+    torch.cuda.empty_cache()
+    gc.collect()
     global models
     pipe = models[model_choice]
     seed = int(randomize_seed_fn(seed, randomize_seed))