Spaces: Runtime error
Update app.py

app.py CHANGED
@@ -21,6 +21,9 @@ import time
 import datetime
 from gradio import themes
 from image_gen_aux import UpscaleWithModel
+from ip_adapter import IPAdapterXL
+
+from huggingface_hub import snapshot_download
 
 torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
@@ -49,7 +52,7 @@ examples = [
 ]
 
 MODEL_OPTIONS = {
-    "REALVISXL V5.0 BF16": "
+    "REALVISXL V5.0 BF16": "ford442/RealVisXL_V5.0_BF16",
 }
 
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
@@ -87,6 +90,16 @@ DEFAULT_STYLE_NAME = "Style Zero"
 STYLE_NAMES = list(styles.keys())
 HF_TOKEN = os.getenv("HF_TOKEN")
 
+## load IP Adapter
+repo_id = "ford442/SDXL-IP_ADAPTER"
+subfolder = "image_encoder"
+subfolder2 = "ip_adapter"
+local_repo_path = snapshot_download(repo_id=repo_id, repo_type="model")
+local_folder = os.path.join(local_repo_path, subfolder)
+local_folder2 = os.path.join(local_repo_path, subfolder2) # Path to the ip_adapter dir
+ip_ckpt = os.path.join(local_folder2, "ip-adapter_sdxl_vit-h.bin") # Correct path
+ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
+
 #sampling_schedule = AysSchedules["StableDiffusionXLTimesteps"]
 upscaler = UpscaleWithModel.from_pretrained("Kim2091/ClearRealityV1").to(torch.device("cuda:0"))
 
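
Note on the block added above: mirroring the diff's own call, `IPAdapterXL(pipe, local_folder, ip_ckpt, device)` executes at import time, yet `pipe` is only created later inside `load_and_prepare_model()` and `device` is never assigned at module scope in the visible diff, so this line raises a NameError as soon as the file loads. A minimal sketch of a deferred construction (the helper name is hypothetical):

    import os
    from huggingface_hub import snapshot_download
    from ip_adapter import IPAdapterXL

    def build_ip_model(pipe, device="cuda"):
        # hypothetical helper: resolve the snapshot layout first, then build the
        # adapter only once the SDXL pipeline actually exists
        repo_path = snapshot_download(repo_id="ford442/SDXL-IP_ADAPTER", repo_type="model")
        image_encoder_path = os.path.join(repo_path, "image_encoder")
        ip_ckpt = os.path.join(repo_path, "ip_adapter", "ip-adapter_sdxl_vit-h.bin")
        return IPAdapterXL(pipe, image_encoder_path, ip_ckpt, device)

It would be called as, for example, `ip_model = build_ip_model(models["REALVISXL V5.0 BF16"])` after the pipelines are built.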
@@ -99,7 +112,7 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
         negative = ""
     return p.replace("{prompt}", positive), n + negative
 
-def load_and_prepare_model(model_id):
+def load_and_prepare_model():
     #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", safety_checker=None)
     vaeX = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None,use_safetensors=False)
     #vae = AutoencoderKL.from_pretrained('cross-attention/asymmetric-autoencoder-kl-x-2',use_safetensors=False)
@@ -116,10 +129,10 @@ def load_and_prepare_model(model_id):
     #pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0").to(torch.bfloat16)
     #pipeX = StableDiffusionXLPipeline.from_pretrained("ford442/Juggernaut-XI-v11-fp32",use_safetensors=True)
     pipe = StableDiffusionXLPipeline.from_pretrained(
-
+        'ford442/RealVisXL_V5.0_BF16',
         #'ford442/Juggernaut-XI-v11-fp32',
         # 'SG161222/RealVisXL_V5.0',
-        'John6666/uber-realistic-porn-merge-xl-urpmxl-v3-sdxl',
+        #'John6666/uber-realistic-porn-merge-xl-urpmxl-v3-sdxl',
         #torch_dtype=torch.bfloat16,
         add_watermarker=False,
         # custom_pipeline="lpw_stable_diffusion_xl",
@@ -202,7 +215,7 @@ def load_and_prepare_model(model_id):
     return pipe
 
 # Preload and compile both models
-models = {key: load_and_prepare_model(
+models = {key: load_and_prepare_model()}
 
 MAX_SEED = np.iinfo(np.int32).max
 
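
The replacement line above references `key`, which is undefined at module scope, so it raises a NameError when evaluated; a comprehension over MODEL_OPTIONS was presumably intended. A sketch:

    # one pipeline per MODEL_OPTIONS entry, keyed by display name
    models = {key: load_and_prepare_model() for key in MODEL_OPTIONS}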
@@ -252,7 +265,6 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise):
 
 @spaces.GPU(duration=40)
 def generate_30(
-    model_choice: str,
     prompt: str,
     negative_prompt: str = "",
     use_negative_prompt: bool = False,
@@ -264,57 +276,50 @@ def generate_30(
     num_inference_steps: int = 125,
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
-
+    latent_file, # Add latents file input
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     #torch.backends.cudnn.benchmark = False
     #torch.cuda.empty_cache()
     #gc.collect()
     global models
-    pipe = models
+    pipe = models
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
-    # … (30 deleted lines: content not rendered in the page extract)
-    upscale = upscaler(rv_image, tiling=True, tile_width=256, tile_height=256)
-    downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
-    downscale_path = f"rv50_upscale_{timestamp}.png"
-    downscale1.save(downscale_path,optimize=False,compress_level=0)
-    upload_to_ftp(downscale_path)
-    image_paths = [save_image(downscale1)]
+    if latent_file: # Check if a latent file is provided
+        #sd_image_a = torch.load(latent_file.name) # Load the latent
+        sd_image_a = Image.open(latent_file.name)
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        filename= f'rv_IP_{timestamp}.txt'
+        print("-- using image file --")
+        print('-- generating image --')
+        #with torch.no_grad():
+        sd_image = ip_model.generate(
+            pil_image=sd_image_a,
+            prompt=prompt,
+            num_samples=1,
+            num_inference_steps=num_inference_steps,
+            guidance_scale=guidance_scale,
+            seed=seed
+        )
+        sd_image[0].save(filename,optimize=False,compress_level=0)
+        upload_to_ftp(filename)
+        uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
+        image_paths = save_image(sd_image)
+        torch.setfloat32_matmul_precision("medium")
+        with torch.no_grad():
+            upscale = upscaler(sd_image, tiling=True, tile_width=256, tile_height=256)
+        downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
+        downscale_path = f"rv50_upscale_{timestamp}.png"
+        downscale1.save(downscale_path,optimize=False,compress_level=0)
+        upload_to_ftp(downscale_path)
+        image_paths = [save_image(downscale1)]
+    else:
+        print('-- IMAGE REQUIRED --')
     return image_paths, seed
 
 @spaces.GPU(duration=60)
 def generate_60(
-    model_choice: str,
     prompt: str,
     negative_prompt: str = "",
     use_negative_prompt: bool = False,
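
A few problems in the added handler body are worth flagging, since they likely account for the Space's Runtime error status. `latent_file` has no default but follows defaulted parameters, a SyntaxError that stops the module from importing at all; `torch.setfloat32_matmul_precision` is not a torch function (the API is `torch.set_float32_matmul_precision`); PIL infers the output format from the file extension, so saving under the `.txt` name raises "unknown file extension"; and `ip_model.generate()` returns a list of PIL images, so the upscaler and the first `save_image` call presumably want `sd_image[0]` rather than the list. A corrected sketch of the post-generation tail, under those assumptions:

    import datetime
    import torch
    from PIL import Image

    def postprocess(sd_image, upscaler):
        # hypothetical helper: sd_image is the list returned by ip_model.generate,
        # upscaler is the ClearRealityV1 model loaded above
        image = sd_image[0]
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        # .png instead of .txt so PIL can infer the output format
        image.save(f"rv_IP_{timestamp}.png", optimize=False, compress_level=0)
        torch.set_float32_matmul_precision("medium")  # note the underscore after "set"
        with torch.no_grad():
            upscale = upscaler(image, tiling=True, tile_width=256, tile_height=256)
        downscale = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
        return [downscale]

The same fixes apply to generate_60 and generate_90 below, which duplicate this block verbatim apart from the step count.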
@@ -326,57 +331,50 @@ def generate_60(
     num_inference_steps: int = 250,
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
-
+    latent_file, # Add latents file input
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     #torch.backends.cudnn.benchmark = True
     #torch.cuda.empty_cache()
     #gc.collect()
     global models
-    pipe = models
+    pipe = models
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
-    # … (30 deleted lines: content not rendered in the page extract)
-    upscale = upscaler(rv_image, tiling=True, tile_width=256, tile_height=256)
-    downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
-    downscale_path = f"rv50_upscale_{timestamp}.png"
-    downscale1.save(downscale_path,optimize=False,compress_level=0)
-    upload_to_ftp(downscale_path)
-    image_paths = [save_image(downscale1)]
+    if latent_file: # Check if a latent file is provided
+        #sd_image_a = torch.load(latent_file.name) # Load the latent
+        sd_image_a = Image.open(latent_file.name)
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        filename= f'rv_IP_{timestamp}.txt'
+        print("-- using image file --")
+        print('-- generating image --')
+        #with torch.no_grad():
+        sd_image = ip_model.generate(
+            pil_image=sd_image_a,
+            prompt=prompt,
+            num_samples=1,
+            num_inference_steps=num_inference_steps,
+            guidance_scale=guidance_scale,
+            seed=seed
+        )
+        sd_image[0].save(filename,optimize=False,compress_level=0)
+        upload_to_ftp(filename)
+        uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
+        image_paths = save_image(sd_image)
+        torch.setfloat32_matmul_precision("medium")
+        with torch.no_grad():
+            upscale = upscaler(sd_image, tiling=True, tile_width=256, tile_height=256)
+        downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
+        downscale_path = f"rv50_upscale_{timestamp}.png"
+        downscale1.save(downscale_path,optimize=False,compress_level=0)
+        upload_to_ftp(downscale_path)
+        image_paths = [save_image(downscale1)]
+    else:
+        print('-- IMAGE REQUIRED --')
     return image_paths, seed
 
 @spaces.GPU(duration=90)
 def generate_90(
-    model_choice: str,
     prompt: str,
     negative_prompt: str = "",
     use_negative_prompt: bool = False,
@@ -388,52 +386,46 @@ def generate_90(
     num_inference_steps: int = 250,
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
-
+    latent_file, # Add latents file input
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     #torch.backends.cudnn.benchmark = True
     #torch.cuda.empty_cache()
     #gc.collect()
     global models
-    pipe = models
+    pipe = models
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
-    # … (30 deleted lines: content not rendered in the page extract)
-    upscale = upscaler(rv_image, tiling=True, tile_width=256, tile_height=256)
-    downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
-    downscale_path = f"rv50_upscale_{timestamp}.png"
-    downscale1.save(downscale_path,optimize=False,compress_level=0)
-    upload_to_ftp(downscale_path)
-    image_paths = [save_image(downscale1)]
+    if latent_file: # Check if a latent file is provided
+        #sd_image_a = torch.load(latent_file.name) # Load the latent
+        sd_image_a = Image.open(latent_file.name)
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        filename= f'rv_IP_{timestamp}.txt'
+        print("-- using image file --")
+        print('-- generating image --')
+        #with torch.no_grad():
+        sd_image = ip_model.generate(
+            pil_image=sd_image_a,
+            prompt=prompt,
+            num_samples=1,
+            num_inference_steps=num_inference_steps,
+            guidance_scale=guidance_scale,
+            seed=seed
+        )
+        sd_image[0].save(filename,optimize=False,compress_level=0)
+        upload_to_ftp(filename)
+        uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
+        image_paths = save_image(sd_image)
+        torch.setfloat32_matmul_precision("medium")
+        with torch.no_grad():
+            upscale = upscaler(sd_image, tiling=True, tile_width=256, tile_height=256)
+        downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
+        downscale_path = f"rv50_upscale_{timestamp}.png"
+        downscale1.save(downscale_path,optimize=False,compress_level=0)
+        upload_to_ftp(downscale_path)
+        image_paths = [save_image(downscale1)]
+    else:
+        print('-- IMAGE REQUIRED --')
     return image_paths, seed
 
 def load_predefined_images1():
@@ -474,18 +466,13 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             placeholder="Enter your prompt",
             container=False,
         )
+        latent_file = gr.File(label="Image File (Required)")
         run_button_30 = gr.Button("Run 30 Seconds", scale=0)
         run_button_60 = gr.Button("Run 60 Seconds", scale=0)
         run_button_90 = gr.Button("Run 90 Seconds", scale=0)
     result = gr.Gallery(label="Result", columns=1, show_label=False)
 
     with gr.Row():
-        model_choice = gr.Dropdown(
-            label="Model Selection🔻",
-            choices=list(MODEL_OPTIONS.keys()),
-            value="REALVISXL V5.0 BF16"
-        )
-
         style_selection = gr.Radio(
             show_label=True,
             container=True,
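
The handlers read the upload via `Image.open(latent_file.name)`, which assumes `gr.File` hands them a tempfile-like object exposing `.name` (Gradio 3.x behavior; newer Gradio versions pass a plain path string by default, which has no `.name`). A sketch of a version-independent wiring that also previews the upload (the component swap is a suggestion, not what the commit does):

    # gr.Image with type="filepath" always delivers a plain path string
    latent_file = gr.Image(label="Image File (Required)", type="filepath")

    # and inside each handler, no .name indirection is needed:
    # sd_image_a = Image.open(latent_file)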
@@ -512,13 +499,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             step=1,
             value=0,
         )
-        denoise = gr.Slider(
-            label="Denoising Strength",
-            minimum=0.0,
-            maximum=1.0,
-            step=0.01,
-            value=0.3,
-        )
         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
     with gr.Row():
         width = gr.Slider(
@@ -548,7 +528,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             minimum=10,
             maximum=1000,
             step=10,
-            value=
+            value=170,
         )
 
     gr.Examples(
@@ -571,7 +551,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         # api_name="generate", # Add this line
         fn=generate_30,
         inputs=[
-            model_choice,
             prompt,
             negative_prompt,
             use_negative_prompt,
@@ -582,7 +561,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             guidance_scale,
             num_inference_steps,
             randomize_seed,
-
+            latent_file,
         ],
         outputs=[result, seed],
     )
@@ -594,7 +573,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         # api_name="generate", # Add this line
         fn=generate_60,
         inputs=[
-            model_choice,
             prompt,
             negative_prompt,
             use_negative_prompt,
@@ -605,7 +583,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             guidance_scale,
             num_inference_steps,
             randomize_seed,
-
+            latent_file,
         ],
         outputs=[result, seed],
     )
@@ -617,7 +595,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         # api_name="generate", # Add this line
         fn=generate_90,
         inputs=[
-            model_choice,
             prompt,
             negative_prompt,
             use_negative_prompt,
@@ -628,7 +605,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             guidance_scale,
             num_inference_steps,
             randomize_seed,
-
+            latent_file,
         ],
         outputs=[result, seed],
     )
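
One wiring caveat: Gradio binds `inputs` to handler parameters positionally. Each signature orders its trailing parameters `randomize_seed, use_resolution_binning, latent_file`, but the `inputs` lists jump straight from `randomize_seed` to `latent_file`, so the uploaded file would land in `use_resolution_binning`. A sketch of a signature whose tail matches the wiring (defaults for the parameters outside the visible hunks are placeholders):

    def generate_30(
        prompt: str,
        negative_prompt: str = "",
        use_negative_prompt: bool = False,
        # ... the remaining UI inputs, in the same order as the inputs list ...
        guidance_scale: float = 4.0,       # placeholder default
        num_inference_steps: int = 125,
        randomize_seed: bool = False,
        latent_file=None,                  # moved ahead of the non-UI flag
        *,                                 # keyword-only from here on, since Gradio passes inputs positionally
        use_resolution_binning: bool = True,
        progress=gr.Progress(track_tqdm=True),
    ):
        ...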