Update app.py
app.py CHANGED
@@ -19,7 +19,7 @@ import paramiko
 import datetime
 import cyper
 from image_gen_aux import UpscaleWithModel
-
+'''
 torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
 torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
@@ -29,10 +29,10 @@ torch.backends.cudnn.benchmark = False
 # torch.backends.cuda.preferred_blas_library="cublas"
 # torch.backends.cuda.preferred_linalg_library="cusolver"
 torch.set_float32_matmul_precision("highest")
-
+'''
 
 DESCRIPTIONXX = """
-## ⚡⚡⚡⚡ REALVISXL V5.0 BF16 (Tester
+## ⚡⚡⚡⚡ REALVISXL V5.0 BF16 (Tester D) ⚡⚡⚡⚡
 """
 
 examples = [
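Wrapping the backend-flag block between the two new ''' markers turns it into an unassigned string literal, so those TF32/reduced-precision matmul settings and the float32 matmul precision now stay at PyTorch's defaults. A minimal sketch for inspecting those defaults; the attribute names are the real torch.backends settings touched above, while the helper itself is only illustrative:

```python
import torch

def report_matmul_flags() -> None:
    # Illustrative helper: print the knobs the now-disabled block used to force.
    # With the block wrapped in ''' they keep PyTorch's default values.
    print("allow_tf32:", torch.backends.cuda.matmul.allow_tf32)
    print("allow_bf16_reduced_precision_reduction:",
          torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction)
    print("allow_fp16_reduced_precision_reduction:",
          torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction)
    print("float32_matmul_precision:", torch.get_float32_matmul_precision())

if __name__ == "__main__":
    report_matmul_flags()
```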
@@ -141,7 +141,7 @@ def load_and_prepare_model():
     # pipe.to(device=device, dtype=torch.bfloat16)
     pipe.to(device)
     pipe.to(torch.bfloat16)
-    pipe.vae.set_default_attn_processor()
+    #pipe.vae.set_default_attn_processor()
     print(f'init noise scale: {pipe.scheduler.init_noise_sigma}')
     pipe.watermark=None
     pipe.safety_checker=None
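With pipe.vae.set_default_attn_processor() commented out, the VAE keeps whatever attention processors the checkpoint loader installed instead of being reset to diffusers' plain AttnProcessor. A standalone sketch of that effect; the VAE checkpoint below is an illustration, not the one app.py loads:

```python
import torch
from diffusers import AutoencoderKL

# Illustrative SDXL VAE; app.py's pipeline supplies its own VAE instead.
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.bfloat16
)

print("before:", {type(p).__name__ for p in vae.attn_processors.values()})
vae.set_default_attn_processor()  # reset every block to the default AttnProcessor
print("after: ", {type(p).__name__ for p in vae.attn_processors.values()})
```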
@@ -277,7 +277,7 @@ def generate_30(
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     seed = random.randint(0, MAX_SEED)
-
+    generator = torch.Generator(device='cuda').manual_seed(seed)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
@@ -286,7 +286,7 @@ def generate_30(
         "height": height,
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
-
+        "generator": generator,
         "output_type": "pil",
         "callback_on_step_end": pyx.scheduler_swap_callback
     }
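The functional change here is the seeded generator: the randomly drawn seed is now actually passed to the pipeline through the "generator" entry, so a given seed deterministically fixes the initial latents. The same change is repeated for generate_60 and generate_90 below. A minimal sketch of the pattern, assuming an SDXL pipeline like the one load_and_prepare_model() builds; the checkpoint id and MAX_SEED bound are placeholders, not taken from app.py:

```python
import random
import torch
from diffusers import StableDiffusionXLPipeline

MAX_SEED = 2**32 - 1  # placeholder bound; app.py defines its own MAX_SEED

# Placeholder checkpoint; app.py builds its pipeline in load_and_prepare_model().
pipe = StableDiffusionXLPipeline.from_pretrained(
    "SG161222/RealVisXL_V5.0", torch_dtype=torch.bfloat16
).to("cuda")

seed = random.randint(0, MAX_SEED)
generator = torch.Generator(device="cuda").manual_seed(seed)

# Passing the generator pins the initial latents, so rerunning with the same
# seed and settings reproduces the same image.
image = pipe(
    prompt="a photo of an astronaut riding a horse",
    num_inference_steps=30,
    guidance_scale=5.0,
    generator=generator,
    output_type="pil",
).images[0]
image.save(f"sample_{seed}.png")
```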
@@ -328,7 +328,7 @@ def generate_60(
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     seed = random.randint(0, MAX_SEED)
-
+    generator = torch.Generator(device='cuda').manual_seed(seed)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
@@ -337,7 +337,7 @@ def generate_60(
         "height": height,
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
-
+        "generator": generator,
         "output_type": "pil",
         "callback_on_step_end": scheduler_swap_callback
     }
@@ -369,7 +369,7 @@ def generate_90(
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     seed = random.randint(0, MAX_SEED)
-
+    generator = torch.Generator(device='cuda').manual_seed(seed)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
@@ -378,7 +378,7 @@ def generate_90(
         "height": height,
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
-
+        "generator": generator,
         "output_type": "pil",
         "callback_on_step_end": scheduler_swap_callback
     }
@@ -561,7 +561,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         outputs=[result],
     )
 
-    gr.Markdown("### REALVISXL V5.0")
+    gr.Markdown("### REALVISXL V5.0 Default Mode")
     predefined_gallery = gr.Gallery(label="REALVISXL V5.0", columns=3, show_label=False, value=load_predefined_images1())
 
     #gr.Markdown("### LIGHTNING V5.0")
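The last hunk only relabels the gallery heading to "REALVISXL V5.0 Default Mode". For context, a self-contained sketch of the Markdown-plus-Gallery pattern used here, with load_predefined_images1() replaced by a placeholder list:

```python
import gradio as gr

def load_placeholder_images():
    # Stand-in for app.py's load_predefined_images1(); the paths are placeholders.
    return ["assets/sample1.png", "assets/sample2.png", "assets/sample3.png"]

with gr.Blocks() as demo:
    gr.Markdown("### REALVISXL V5.0 Default Mode")
    predefined_gallery = gr.Gallery(
        label="REALVISXL V5.0",
        columns=3,
        show_label=False,
        value=load_placeholder_images(),
    )

if __name__ == "__main__":
    demo.launch()
```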