Update app.py
app.py CHANGED
@@ -34,7 +34,7 @@ from diffusers import AutoencoderKL
 #from pipeline_stable_diffusion_3_ipa import StableDiffusion3Pipeline
 
 from PIL import Image
-
+
 torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
 torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
@@ -44,7 +44,7 @@ torch.backends.cudnn.benchmark = False
 torch.backends.cuda.preferred_blas_library="cublas"
 torch.backends.cuda.preferred_linalg_library="cusolver"
 torch.set_float32_matmul_precision("highest")
-
+
 hftoken = os.getenv("HF_AUTH_TOKEN")
 
 # code = r'''
@@ -81,7 +81,7 @@ pipe = StableDiffusion3Pipeline.from_single_file(
     "https://huggingface.co/1inkus/sd35-large-UltraReal-bf16-DDUF/blob/main/sd3-bf16-large.dduf",
     #tokenizer_3=T5TokenizerFast.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", add_prefix_space=False, use_fast=True, subfolder="tokenizer_3"),
     use_safetensors=True,
-    devive_map='
+    devive_map='auto',
 ) #.to(device=device)
 
 ### pipe = StableDiffusion3Pipeline.from_pretrained(
@@ -113,7 +113,7 @@ pipe = StableDiffusion3Pipeline.from_single_file(
 #pipe.to(device=device) #, dtype=torch.bfloat16)
 #pipe.to(device)
 #pipe.vae=vaeX.to('cpu')
-upscaler_2 = UpscaleWithModel.from_pretrained("Kim2091/ClearRealityV1").to(torch.device('
+upscaler_2 = UpscaleWithModel.from_pretrained("Kim2091/ClearRealityV1").to(torch.device('cuda'))
 
 MAX_SEED = np.iinfo(np.int32).max
 
@@ -159,7 +159,7 @@ def infer_30(
     #pyx.upload_to_ftp(sd35_path)
     upload_to_ftp(sd35_path)
     # pipe.unet.to('cpu')
-    upscaler_2.to(torch.device('cuda'))
+    #upscaler_2.to(torch.device('cuda'))
     with torch.no_grad():
         upscale2 = upscaler_2(sd_image, tiling=True, tile_width=256, tile_height=256)
     print('-- got upscaled image --')
@@ -210,7 +210,7 @@ def infer_60(
     #pyx.upload_to_ftp(sd35_path)
     upload_to_ftp(sd35_path)
     # pipe.unet.to('cpu')
-    upscaler_2.to(torch.device('cuda'))
+    #upscaler_2.to(torch.device('cuda'))
     with torch.no_grad():
         upscale2 = upscaler_2(sd_image, tiling=True, tile_width=256, tile_height=256)
     print('-- got upscaled image --')
@@ -261,7 +261,7 @@ def infer_90(
     #pyx.upload_to_ftp(sd35_path)
     upload_to_ftp(sd35_path)
     # pipe.unet.to('cpu')
-    upscaler_2.to(torch.device('cuda'))
+    #upscaler_2.to(torch.device('cuda'))
     with torch.no_grad():
         upscale2 = upscaler_2(sd_image, tiling=True, tile_width=256, tile_height=256)
     print('-- got upscaled image --')
@@ -312,7 +312,7 @@ def infer_100(
     #pyx.upload_to_ftp(sd35_path)
     upload_to_ftp(sd35_path)
     # pipe.unet.to('cpu')
-    upscaler_2.to(torch.device('cuda'))
+    #upscaler_2.to(torch.device('cuda'))
     with torch.no_grad():
         upscale2 = upscaler_2(sd_image, tiling=True, tile_width=256, tile_height=256)
     print('-- got upscaled image --')
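
One remark on the loader change at line 84: devive_map='auto' looks like a misspelling of diffusers' device_map keyword, so the argument may never reach the loader as intended. Below is a minimal sketch of what was presumably meant, assuming the standard device_map spelling and otherwise only what this hunk shows; whether from_single_file supports device mapping for this DDUF checkpoint is not verified here.

import torch
from diffusers import StableDiffusion3Pipeline

# Hedged sketch: 'device_map' is assumed to be the intended spelling of 'devive_map'.
pipe = StableDiffusion3Pipeline.from_single_file(
    "https://huggingface.co/1inkus/sd35-large-UltraReal-bf16-DDUF/blob/main/sd3-bf16-large.dduf",
    use_safetensors=True,
    device_map="auto",  # assumption; this value is carried over from the diff
)
# Fallback if device mapping is unavailable for this loader:
# pipe.to(torch.device("cuda"))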
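
The rest of the diff constructs upscaler_2 on CUDA once at module load (line 116) and comments out the per-call upscaler_2.to(torch.device('cuda')) inside infer_30, infer_60, infer_90 and infer_100. A minimal sketch of that pattern follows, assuming UpscaleWithModel is imported from the image_gen_aux package (its import is outside this diff) and that sd_image is the image produced earlier in each infer_* function.

import torch
from image_gen_aux import UpscaleWithModel  # assumption: the real import path is not shown in this diff

# Move the upscaler to the GPU once, at load time, matching the new line 116.
upscaler_2 = UpscaleWithModel.from_pretrained("Kim2091/ClearRealityV1").to(torch.device("cuda"))

def upscale_tiled(sd_image):
    # No per-call device transfer; only the tiled upscale itself runs under no_grad.
    with torch.no_grad():
        return upscaler_2(sd_image, tiling=True, tile_width=256, tile_height=256)

Keeping the transfer out of the per-request path avoids re-copying the upscaler weights to the GPU on every generation, at the cost of holding them in VRAM for the lifetime of the Space.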