Update app.py
app.py CHANGED
@@ -12,8 +12,7 @@ import gradio as gr
 import numpy as np
 from PIL import Image
 import torch
-from diffusers import AutoencoderKL, StableDiffusionXLPipeline
-from diffusers import EulerAncestralDiscreteScheduler
+from diffusers import AutoencoderKL, StableDiffusionXLPipeline, UNet2DConditionModel, EulerAncestralDiscreteScheduler
 from typing import Tuple
 import paramiko
 import gc
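
The two separate `diffusers` imports are consolidated into one line, with `UNet2DConditionModel` added for the module-level UNet load introduced further down. Note that the new loader code also calls `CLIPTextModel`, `CLIPTextModelWithProjection`, and `CLIPTokenizer`, which this commit does not import; presumably they are already imported from `transformers` elsewhere in app.py, along the lines of:

```python
# Assumed to exist elsewhere in app.py (this commit does not add it); the new
# module-level loaders below reference these classes directly.
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
```
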
@@ -101,9 +100,25 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
         negative = ""
     return p.replace("{prompt}", positive), n + negative
 
+model_repo='John6666/uber-realistic-porn-merge-xl-urpmxl-v6final-sdxl'
+#'John6666/uber-realistic-porn-merge-ponyxl-urpm-ponyxlhybridv1-sdxl'
+#'John6666/uber-realistic-porn-merge-xl-urpmxl-v3-sdxl'
+#'John6666/uber-realistic-porn-merge-xl-urpmxl-v6final-sdxl'
+#'John6666/pornworks-real-porn-ponyv04-sdxl'
+#'John6666/pornmaster-amateur-sdxlv1vae-sdxl'
+
+
+rv='ford442/RealVisXL_V5.0_BF16'
+
+text_encoder = CLIPTextModel.from_pretrained(rv, low_cpu_mem_usage=False, subfolder='text_encoder', token=True)#.to(device=device, dtype=torch.bfloat16)
+text_encoder_2 = CLIPTextModelWithProjection.from_pretrained(model_repo, low_cpu_mem_usage=False, subfolder='text_encoder_2', token=True)#.to(device=device, dtype=torch.bfloat16)
+tokenizer_1 = CLIPTokenizer.from_pretrained(rv, low_cpu_mem_usage=False, subfolder='tokenizer', token=True)
+tokenizer_2 = CLIPTokenizer.from_pretrained(model_repo, low_cpu_mem_usage=False, subfolder='tokenizer_2', token=True)
+scheduler = EulerAncestralDiscreteScheduler.from_pretrained(rv, low_cpu_mem_usage=False, subfolder='scheduler', token=True)
+vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", low_cpu_mem_usage=False, safety_checker=None, use_safetensors=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
+unet = UNet2DConditionModel.from_pretrained(model_repo, low_cpu_mem_usage=False, subfolder='unet', upcast_attention=True, attention_type='gated-text-image', token=True)
+
 def load_and_prepare_model():
-    vaeX = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None,use_safetensors=False,torch_dtype=torch.float32,token=HF_TOKEN).to(device)
-    sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16',token=HF_TOKEN, subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=False)
     pipe = StableDiffusionXLPipeline.from_pretrained(
         #'John6666/uber-realistic-porn-merge-ponyxl-urpm-ponyxlhybridv1-sdxl',
         #'John6666/uber-realistic-porn-merge-xl-urpmxl-v3-sdxl',
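
The scheduler and VAE, previously built inside `load_and_prepare_model()`, are now loaded once at module scope together with the tokenizers, text encoders, and UNet, mixing two repos: `tokenizer`/`text_encoder`/`scheduler` come from `ford442/RealVisXL_V5.0_BF16`, while `tokenizer_2`/`text_encoder_2`/`unet` come from the URPM merge. Below is a minimal sketch of the same mix-and-match idea; the repo IDs are taken from this commit, but passing components directly to `from_pretrained` (rather than loading with `None` stubs and patching afterwards, as the commit does) is an alternative the diffusers API also supports:

```python
import torch
from diffusers import (AutoencoderKL, EulerAncestralDiscreteScheduler,
                       StableDiffusionXLPipeline, UNet2DConditionModel)

rv = 'ford442/RealVisXL_V5.0_BF16'
model_repo = 'John6666/uber-realistic-porn-merge-xl-urpmxl-v6final-sdxl'

# Component kwargs override whatever the base repo would otherwise load.
pipe = StableDiffusionXLPipeline.from_pretrained(
    rv,  # base repo here is an assumption; the commit's positional argument is not visible in the diff
    unet=UNet2DConditionModel.from_pretrained(model_repo, subfolder='unet'),
    scheduler=EulerAncestralDiscreteScheduler.from_pretrained(rv, subfolder='scheduler'),
    vae=AutoencoderKL.from_pretrained('stabilityai/sdxl-vae', torch_dtype=torch.float32),
    add_watermarker=False,
)
```
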
@@ -112,10 +127,21 @@ def load_and_prepare_model():
         #'John6666/pornmaster-amateur-sdxlv1vae-sdxl',
         #torch_dtype=torch.bfloat16,
         add_watermarker=False,
-        token=
-
+        token=True,
+        add_watermarker=False,
+        text_encoder=None,
+        text_encoder_2=None,
+        tokenizer=None,
+        tokenizer_2=None,
+        scheduler=None,
+        unet=None,
+        vae=None,
     )
 
+    pipe.scheduler=scheduler
+    pipe.tokenizer=tokenizer_1
+    pipe.tokenizer_2=tokenizer_2
+    pipe.unet=unet
     '''
     scaling_factor (`float`, *optional*, defaults to 0.18215):
         The component-wise standard deviation of the trained latent space computed using the first batch of the
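
Two details in the rebuilt call are worth flagging. First, `add_watermarker=False` now appears twice in the same call (once as pre-existing context, once among the added lines); Python rejects a repeated keyword argument when it compiles the file, so app.py would fail to import at all, which plausibly explains this Space's runtime-error status:

```python
# Minimal reproduction with a hypothetical function f; this snippet
# deliberately does not compile, which is the point.
def f(**kwargs): ...
f(add_watermarker=False, token=True, add_watermarker=False)
# SyntaxError: keyword argument repeated
```

Second, every component is passed as `None` and patched onto the pipeline afterwards, but `vae` stays `None` here; it is only assigned later in `load_and_prepare_model()`.
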
@@ -136,10 +162,8 @@ def load_and_prepare_model():
     #**** NEVER TO BFLOAT BEFORE CUDA****#
 
     #pipe.vae.force_upcast=True
-    pipe.scheduler = sched
     # pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/Fantasy_World_XL.safetensors", adapter_name="fantasy")
     pipe.vae.set_default_attn_processor() # Set attention processor first
-
     pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/skin_texture_style_v4.safetensors", adapter_name="skin", low_cpu_mem_usage=False,token=HF_TOKEN)
 
     #pipe.unet = pipe.unet.to(memory_format=torch.contiguous_format)
@@ -153,7 +177,8 @@ def load_and_prepare_model():
     #**** BETTER WAY ****#
     pipe.to(device, torch.bfloat16)
     #**** BETTER WAY ****#
-    pipe.vae = vaeX #.to(torch.bfloat16)
+    pipe.vae = vaeX.to(device) #.to(torch.bfloat16)
+    pipe.vae.set_default_attn_processor()
     #pipe.to(device)
     #pipe.to(torch.bfloat16)
 
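
Note a likely name mismatch: the VAE loaded at module scope is bound to `vaeXL`, but this hunk keeps the old `vaeX` name, whose local definition was deleted earlier in this commit; unless `vaeX` still exists somewhere outside the diff, `pipe.vae = vaeX.to(device)` raises a `NameError`. (Similarly, the earlier `pipe.vae.set_default_attn_processor()` context line would run while `pipe.vae` is still the `None` stub.) The apparent intent is the common SDXL mixed-precision pattern: move the pipeline to CUDA in bfloat16, then swap in a float32 VAE, since the SDXL VAE is prone to artifacts at reduced precision. A corrected sketch under that reading:

```python
import torch

# Assumes `pipe` and the module-level `vaeXL` from the surrounding app.py.
device = torch.device('cuda')
pipe.to(device, torch.bfloat16)        # UNet and text encoders run in bf16 on the GPU
pipe.vae = vaeXL.to(device)            # vaeXL: the float32 VAE loaded at module scope
pipe.vae.set_default_attn_processor()  # reset attention processors after the swap
```
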
@@ -198,19 +223,13 @@ def randomize_seed_fn() -> int:
 def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
     filename= f'tst_A_{timestamp}.txt'
     with open(filename, "w") as f:
-        f.write(f"Realvis 5.0 (Tester
+        f.write(f"Realvis 5.0 (Tester E) \n")
         f.write(f"Date/time: {timestamp} \n")
         f.write(f"Prompt: {prompt} \n")
         f.write(f"Steps: {num_inference_steps} \n")
         f.write(f"Guidance Scale: {guidance_scale} \n")
         f.write(f"SPACE SETUP: \n")
-        f.write(f"Use Model Dtype: no \n")
-        f.write(f"Model Scheduler: Euler_a all_custom before cuda \n")
-        f.write(f"Model VAE: sdxl-vae to bfloat safetensor=false before cuda then attn_proc / scale factor 8 \n")
         f.write(f"Model UNET: realistic_porn \n")
-        f.write(f"Model HiDiffusion OFF \n")
-        f.write(f"Model do_resize ON \n")
-        f.write(f"added torch to prereq and changed accellerate \n")
         upload_to_ftp(filename)
 
 @spaces.GPU(duration=40)
@@ -252,7 +271,7 @@ def generate_30(
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     batch_options = options.copy()
     rv_image = pipe(**batch_options).images[0]
-    sd_image_path = f"
+    sd_image_path = f"rv50_E_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
     image_paths = save_image(rv_image)
@@ -262,7 +281,7 @@ def generate_30(
     with torch.no_grad():
         upscale = upscaler(rv_image, tiling=True, tile_width=256, tile_height=256)
     downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
-    downscale_path = f"
+    downscale_path = f"rv50E_upscale_{timestamp}.png"
     downscale1.save(downscale_path,optimize=False,compress_level=0)
     upload_to_ftp(downscale_path)
     image_paths = [save_image(downscale1)]
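
This block, repeated verbatim in `generate_30`, `generate_60`, and `generate_90`, runs the external `upscaler` in 256-pixel tiles under `torch.no_grad()` and then shrinks the result 4x with LANCZOS, so the net effect is a supersampled, anti-aliased image rather than a true enlargement. A self-contained sketch of the idea follows; the real `upscaler` is defined outside this diff, so its signature here is an assumption based on the call above:

```python
import torch
from PIL import Image

def supersample(image: Image.Image, upscaler, factor: int = 4) -> Image.Image:
    """Model-upscale, then LANCZOS-downscale for an anti-aliased result."""
    with torch.no_grad():  # inference only; no autograd graph needed
        big = upscaler(image, tiling=True, tile_width=256, tile_height=256)
    return big.resize((big.width // factor, big.height // factor), Image.LANCZOS)
```
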
@@ -306,7 +325,7 @@ def generate_60(
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     batch_options = options.copy()
     rv_image = pipe(**batch_options).images[0]
-    sd_image_path = f"
+    sd_image_path = f"rv50_E_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
     image_paths = save_image(rv_image)
@@ -314,7 +333,7 @@ def generate_60(
     with torch.no_grad():
         upscale = upscaler(rv_image, tiling=True, tile_width=256, tile_height=256)
     downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
-    downscale_path = f"
+    downscale_path = f"rv50E_upscale_{timestamp}.png"
     downscale1.save(downscale_path,optimize=False,compress_level=0)
     upload_to_ftp(downscale_path)
     image_paths = [save_image(downscale1)]
@@ -358,7 +377,7 @@ def generate_90(
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     batch_options = options.copy()
     rv_image = pipe(**batch_options).images[0]
-    sd_image_path = f"
+    sd_image_path = f"rv50_E_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
     image_paths = save_image(rv_image)
@@ -366,7 +385,7 @@ def generate_90(
     with torch.no_grad():
         upscale = upscaler(rv_image, tiling=True, tile_width=256, tile_height=256)
     downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
-    downscale_path = f"
+    downscale_path = f"rv50E_upscale_{timestamp}.png"
     downscale1.save(downscale_path,optimize=False,compress_level=0)
     upload_to_ftp(downscale_path)
     image_paths = [save_image(downscale1)]