Update app.py
app.py (CHANGED)
@@ -31,7 +31,7 @@ torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
 torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
 torch.backends.cudnn.allow_tf32 = False
 torch.backends.cudnn.deterministic = False
-
+torch.backends.cudnn.benchmark = False
 torch.backends.cuda.preferred_blas_library="cublas"
 # torch.backends.cuda.preferred_linalg_library="cusolver"
 torch.set_float32_matmul_precision("highest")
@@ -243,9 +243,9 @@ def generate_30(
     denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    torch.backends.cudnn.benchmark = False
-    torch.cuda.empty_cache()
-    gc.collect()
+    #torch.backends.cudnn.benchmark = False
+    #torch.cuda.empty_cache()
+    #gc.collect()
     global models
     pipe = models[model_choice]
     #if juggernaut == True:
@@ -302,9 +302,9 @@ def generate_60(
     denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    torch.backends.cudnn.benchmark = True
-    torch.cuda.empty_cache()
-    gc.collect()
+    #torch.backends.cudnn.benchmark = True
+    #torch.cuda.empty_cache()
+    #gc.collect()
     global models
     pipe = models[model_choice]
     #if juggernaut == True:
@@ -361,9 +361,9 @@ def generate_90(
     denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    torch.backends.cudnn.benchmark = True
-    torch.cuda.empty_cache()
-    gc.collect()
+    #torch.backends.cudnn.benchmark = True
+    #torch.cuda.empty_cache()
+    #gc.collect()
     global models
     pipe = models[model_choice]
     #if juggernaut == True:
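Net effect of the first hunk: torch.backends.cudnn.benchmark is now set once at import time instead of being toggled inside each generate function. The flag is global process state, and before this commit generate_30 set it to False while generate_60 and generate_90 set it to True, so whichever function ran last decided the behavior for the whole app. A sketch of the startup block as it stands after the commit, with glosses on what each flag controls (the comments are editorial, not from app.py):

import torch

# Keep bf16/fp16 matmul reductions in full precision.
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
# Do not substitute TF32 for fp32 inside cuDNN convolutions.
torch.backends.cudnn.allow_tf32 = False
# Still allow nondeterministic cuDNN algorithms (faster, not bit-reproducible).
torch.backends.cudnn.deterministic = False
# Disable the cuDNN autotuner; with varying image sizes it would
# re-benchmark convolution algorithms on every new input shape.
torch.backends.cudnn.benchmark = False
# Note: in recent PyTorch, preferred_blas_library is a callable; app.py
# assigns a string to the attribute instead, which likely leaves the
# BLAS backend preference unchanged.
torch.backends.cuda.preferred_blas_library("cublas")
# Use full fp32 precision (no TF32) for float32 matmuls.
torch.set_float32_matmul_precision("highest")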
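The remaining hunks comment out the per-call memory flushing in all three generate functions, presumably to cut per-request overhead: torch.cuda.empty_cache() returns the allocator's cached blocks to the driver, forcing fresh cudaMalloc calls on the next forward pass, and gc.collect() adds a full garbage-collection pause; neither frees memory that live tensors still hold. If cleanup is still wanted under genuine memory pressure, one hedged alternative is to flush conditionally; in this sketch, maybe_free_memory and the 0.9 threshold are hypothetical, not part of app.py:

import gc

import torch

def maybe_free_memory(threshold: float = 0.9) -> None:
    # Hypothetical helper: flush caches only when the device is nearly
    # full, instead of unconditionally before every generation.
    if not torch.cuda.is_available():
        return
    free, total = torch.cuda.mem_get_info()  # bytes free / total on current device
    if (total - free) / total > threshold:
        gc.collect()              # drop unreachable Python objects first
        torch.cuda.empty_cache()  # then return cached blocks to the driver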