Update app.py
app.py (CHANGED)
@@ -6,6 +6,7 @@
 # copies of the Software, and to permit persons to whom the Software is
 import spaces
 import os
+os.environ["SAFETENSORS_FAST_GPU"] = "1"
 
 #import subprocess
 #subprocess.run(['sh', './conda.sh'])
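
The added line sets SAFETENSORS_FAST_GPU before any weights are loaded, which is the only point where it can take effect. A minimal sketch of that ordering, assuming a diffusers-style pipeline stored as safetensors; the pipeline class and checkpoint id below are placeholders for illustration, not taken from this repository:

import os
os.environ["SAFETENSORS_FAST_GPU"] = "1"   # must be set before the safetensors weights are read

import torch
from diffusers import StableDiffusionPipeline  # placeholder pipeline class for illustration

# Placeholder checkpoint id; the model actually loaded by app.py is not shown in this diff.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.bfloat16,
)
pipe.to("cuda")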
@@ -59,6 +60,7 @@ from image_gen_aux import UpscaleWithModel
 import torch
 #import torch._dynamo
 #torch._dynamo.list_backends()
+import time
 
 torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
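
This hunk only adds import time (used later in generate_60), but the surrounding context shows the script forcing full-precision CUDA matmuls. A short sketch of the related PyTorch backend flags; the two not visible in this diff are assumptions about companion settings, not something app.py is confirmed to set:

import torch

# Shown as unchanged context in the hunk above: disable TF32 and reduced-precision bf16 accumulation.
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False

# Assumed companions (not in this diff): the cuDNN TF32 switch and the fp16 analogue.
torch.backends.cudnn.allow_tf32 = False
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False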
@@ -380,8 +382,7 @@ def generate_30(
     filename = pyx.uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     pyx.upload_to_ftp(filename)
     batch_options = options.copy()
-
-    rv_image = pipe(**batch_options).images[0]
+    rv_image = pipe(**batch_options).images[0]
     sd_image_path = f"rv_C_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     pyx.upload_to_ftp(sd_image_path)
@@ -429,8 +430,8 @@ def generate_60(
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     batch_options = options.copy()
-
-    rv_image = pipe(**batch_options).images[0]
+    time.sleep(2)
+    rv_image = pipe(**batch_options).images[0]
     sd_image_path = f"rv_C_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
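
Pieced together from the context lines, the patched generate_60 body now pauses briefly before invoking the pipeline. A self-contained sketch of that flow; pipe, uploadNote and upload_to_ftp are passed in as parameters because their definitions sit outside this diff:

import datetime
import time

def run_generation(pipe, options, uploadNote, upload_to_ftp,
                   prompt, num_inference_steps, guidance_scale):
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    uploadNote(prompt, num_inference_steps, guidance_scale, timestamp)
    batch_options = options.copy()
    time.sleep(2)                                # new in this hunk: short pause before the heavy call
    rv_image = pipe(**batch_options).images[0]   # diffusers pipelines return a list of PIL images
    sd_image_path = f"rv_C_{timestamp}.png"
    rv_image.save(sd_image_path, optimize=False, compress_level=0)  # uncompressed PNG
    upload_to_ftp(sd_image_path)
    return sd_image_path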
@@ -471,8 +472,7 @@ def generate_90(
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     batch_options = options.copy()
-
-    rv_image = pipe(**batch_options).images[0]
+    rv_image = pipe(**batch_options).images[0]
     sd_image_path = f"rv_C_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)