Update app.py
app.py
CHANGED
@@ -153,40 +153,8 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
         seed = random.randint(0, MAX_SEED)
     return seed
 
-GPU_DURATION_OPTIONS = {
-    "Short (25s)": 25,
-    "Short (45s)": 45,
-    "Medium (60s)": 60,
-    "Medium (80s)": 80,
-    "Long (100s)": 100,
-    "Long (120s)": 120,
-    "Long (140s)": 140,
-}
-
-def set_gpu_duration(duration_choice):
-    os.environ["GPU_DURATION"] = str(GPU_DURATION_OPTIONS[duration_choice])
-
-@spaces.GPU(duration=45)
-def generate_45s(*args, **kwargs):
-    progress = kwargs.pop('progress', gr.Progress(track_tqdm=True))
-    return generate(*args, **kwargs, progress=progress) # Pass progress to generate
-
 @spaces.GPU(duration=60)
-def generate_60s(*args, **kwargs):
-    progress = kwargs.pop('progress', gr.Progress(track_tqdm=True))
-    return generate(*args, **kwargs, progress=progress) # Pass progress to generate
-
-@spaces.GPU(duration=80)
-def generate_80s(*args, **kwargs):
-    progress = kwargs.pop('progress', gr.Progress(track_tqdm=True))
-    return generate(*args, **kwargs, progress=progress) # Pass progress to generate
-
-@spaces.GPU(duration=100)
-def generate_100s(*args, **kwargs):
-    progress = kwargs.pop('progress', gr.Progress(track_tqdm=True))
-    return generate(*args, **kwargs, progress=progress) # Pass progress to generate
-
-def generate(
+def generate_60(
     model_choice: str,
     prompt: str,
     negative_prompt: str = "",
@@ -239,7 +207,9 @@ def generate(
     gc.collect()
     return image_paths, seed
 
-def generate_cpu(
+
+@spaces.GPU(duration=90)
+def generate_90(
     model_choice: str,
     prompt: str,
     negative_prompt: str = "",
@@ -248,19 +218,17 @@ def generate_cpu(
     seed: int = 1,
     width: int = 768,
     height: int = 768,
-    guidance_scale: float = 3,
+    guidance_scale: float = 3.0,
     num_inference_steps: int = 250,
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
     num_images: int = 1,
-    progress=gr.Progress(track_tqdm=True)
+    progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     global models
     pipe = models[model_choice]
-    pipe.to("cpu")
-
     seed = int(randomize_seed_fn(seed, randomize_seed))
-    generator = torch.Generator(device='cpu').manual_seed(seed)
+    generator = torch.Generator(device='cuda').manual_seed(seed)
 
     prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
 
@@ -279,20 +247,21 @@ def generate_cpu(
     options["use_resolution_binning"] = True
 
     images = []
+    #with torch.no_grad():
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
         if "negative_prompt" in batch_options:
             batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
         images.extend(pipe(**batch_options).images)
-
+    sd_image_path = f"rv50_{seed}.png"
+    images[0].save(sd_image_path,optimize=False,compress_level=0)
+    upload_to_ftp(sd_image_path)
     image_paths = [save_image(img) for img in images]
+    torch.cuda.empty_cache()
+    gc.collect()
     return image_paths, seed
 
-def set_gpu_duration(duration_choice):
-    global global_gpu_duration
-    global_gpu_duration = GPU_DURATION_OPTIONS[duration_choice]
-
 def load_predefined_images1():
     predefined_images1 = [
         "assets/7.png",
@@ -333,11 +302,8 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
             placeholder="Enter your prompt",
            container=False,
         )
-        run_button_45 = gr.Button("Run_45", scale=0)
-        cpu_run_button = gr.Button("CPU Run", scale=0)
         run_button_60 = gr.Button("Run_60", scale=0)
-        run_button_80 = gr.Button("Run_80", scale=0)
-        run_button_100 = gr.Button("Run_100", scale=0)
+        run_button_80 = gr.Button("Run_90", scale=0)
     result = gr.Gallery(label="Result", columns=1, show_label=False)
 
     with gr.Row():
@@ -347,15 +313,6 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
             value="REALVISXL V5.0 BF16"
         )
 
-        with gr.Accordion("Advanced options", open=False, visible=True):
-            gpu_duration = gr.Dropdown(
-                label="GPU Duration",
-                choices=list(GPU_DURATION_OPTIONS.keys()),
-                value="Medium (60s)" # Default value
-            )
-
-            gpu_duration.change(fn=set_gpu_duration, inputs=gpu_duration, outputs=[])
-
         style_selection = gr.Radio(
             show_label=True,
             container=True,
@@ -434,58 +391,12 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         api_name=False,
     )
 
-    gr.on(
-        triggers=[
-            run_button_100.click,
-        ],
-        # api_name="generate", # Add this line
-        fn=generate_100s,
-        inputs=[
-            model_choice,
-            prompt,
-            negative_prompt,
-            use_negative_prompt,
-            style_selection,
-            seed,
-            width,
-            height,
-            guidance_scale,
-            num_inference_steps,
-            randomize_seed,
-            num_images,
-        ],
-        outputs=[result, seed],
-    )
-
-    gr.on(
-        triggers=[
-            run_button_80.click,
-        ],
-        # api_name="generate", # Add this line
-        fn=generate_80s,
-        inputs=[
-            model_choice,
-            prompt,
-            negative_prompt,
-            use_negative_prompt,
-            style_selection,
-            seed,
-            width,
-            height,
-            guidance_scale,
-            num_inference_steps,
-            randomize_seed,
-            num_images,
-        ],
-        outputs=[result, seed],
-    )
-
     gr.on(
         triggers=[
             run_button_60.click,
         ],
         # api_name="generate", # Add this line
-        fn=generate_60s,
+        fn=generate_60,
         inputs=[
             model_choice,
             prompt,
@@ -502,13 +413,13 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         ],
         outputs=[result, seed],
     )
-
+
     gr.on(
         triggers=[
-            cpu_run_button.click,
+            run_button_90.click,
         ],
         # api_name="generate", # Add this line
-        fn=generate_cpu,
+        fn=generate_90,
         inputs=[
             model_choice,
             prompt,