Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -239,7 +239,6 @@ def generate_30(
     num_inference_steps: int = 125,
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
-    num_images: int = 1,
     juggernaut: bool = False,
     denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
@@ -255,7 +254,7 @@ def generate_30(
     generator = torch.Generator(device='cuda').manual_seed(seed)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
     options = {
-        "prompt": [prompt]
+        "prompt": [prompt],
         "negative_prompt": [negative_prompt],
         "negative_prompt_2": [neg_prompt_2],
         "strength": denoise,
@@ -273,16 +272,12 @@ def generate_30(
     pipe.scheduler.set_timesteps(num_inference_steps,device)
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
-
-
-        batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
-        if "negative_prompt" in batch_options:
-            batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
-        images.extend(pipe(**batch_options).images)
+    batch_options = options.copy()
+    rv_image = pipe(**batch_options).images[0]
     sd_image_path = f"rv50_B_{seed}.png"
-
+    rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
-    image_paths =
+    image_paths = save_image(rv_image)
     torch.cuda.empty_cache()
     gc.collect()
     return image_paths, seed
@@ -301,7 +296,6 @@ def generate_60(
     num_inference_steps: int = 250,
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
-    num_images: int = 1,
     juggernaut: bool = False,
     denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
@@ -317,7 +311,7 @@ def generate_60(
     generator = torch.Generator(device='cuda').manual_seed(seed)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
     options = {
-        "prompt": [prompt]
+        "prompt": [prompt],
         "negative_prompt": [negative_prompt],
         "negative_prompt_2": [neg_prompt_2],
         "strength": denoise,
@@ -335,20 +329,16 @@ def generate_60(
     pipe.scheduler.set_timesteps(num_inference_steps,device)
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
-
-
-        batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
-        if "negative_prompt" in batch_options:
-            batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
-        images.extend(pipe(**batch_options).images)
+    batch_options = options.copy()
+    rv_image = pipe(**batch_options).images[0]
     sd_image_path = f"rv50_B_{seed}.png"
-
+    rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
-    image_paths =
+    image_paths = save_image(rv_image)
     torch.cuda.empty_cache()
     gc.collect()
     return image_paths, seed
-
+
 @spaces.GPU(duration=90)
 def generate_90(
     model_choice: str,
@@ -363,7 +353,6 @@ def generate_90(
     num_inference_steps: int = 250,
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
-    num_images: int = 1,
     juggernaut: bool = False,
     denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
@@ -379,7 +368,7 @@ def generate_90(
     generator = torch.Generator(device='cuda').manual_seed(seed)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
     options = {
-        "prompt": [prompt]
+        "prompt": [prompt],
         "negative_prompt": [negative_prompt],
         "negative_prompt_2": [neg_prompt_2],
         "strength": denoise,
@@ -397,16 +386,12 @@ def generate_90(
     pipe.scheduler.set_timesteps(num_inference_steps,device)
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
-
-
-        batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
-        if "negative_prompt" in batch_options:
-            batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
-        images.extend(pipe(**batch_options).images)
+    batch_options = options.copy()
+    rv_image = pipe(**batch_options).images[0]
     sd_image_path = f"rv50_B_{seed}.png"
-
+    rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
-    image_paths =
+    image_paths = save_image(rv_image)
     torch.cuda.empty_cache()
     gc.collect()
     return image_paths, seed
@@ -469,13 +454,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             value=DEFAULT_STYLE_NAME,
             label="Quality Style",
         )
-        num_images = gr.Slider(
-            label="Number of Images",
-            minimum=1,
-            maximum=5,
-            step=1,
-            value=1,
-        )
         with gr.Row():
             with gr.Column(scale=1):
                 use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
@@ -565,7 +543,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             guidance_scale,
             num_inference_steps,
             randomize_seed,
-            num_images,
             juggernaut,
             denoise
         ],
@@ -590,7 +567,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             guidance_scale,
            num_inference_steps,
             randomize_seed,
-            num_images,
             juggernaut,
             denoise
         ],
@@ -615,7 +591,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             guidance_scale,
             num_inference_steps,
             randomize_seed,
-            num_images,
             juggernaut,
             denoise
         ],
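Taken together, these hunks remove the num_images batching path (the slider, the keyword argument, and the BATCH_SIZE slicing loop) so each handler generates exactly one image per ZeroGPU call. Below is a minimal, self-contained Python sketch of the resulting single-image flow, assuming a Diffusers-style pipeline whose call returns an output with an `images` list; `save_image` and `upload_to_ftp` here are illustrative stand-ins for the Space's own helpers, not its real implementations.

import gc
import uuid

import torch
from PIL import Image

def save_image(img: Image.Image) -> str:
    # Stand-in for the Space's save_image helper: persist under a unique name.
    path = f"image_{uuid.uuid4()}.png"
    img.save(path)
    return path

def upload_to_ftp(path: str) -> None:
    # Stand-in for the Space's FTP upload helper.
    print(f"(stub) would upload {path}")

def generate_once(pipe, options: dict, seed: int):
    # One batch per call; the num_images slicing loop is gone.
    batch_options = options.copy()
    # A Diffusers pipeline call returns an output whose .images is a list
    # of PIL images; with a single prompt there is exactly one.
    rv_image = pipe(**batch_options).images[0]
    sd_image_path = f"rv50_B_{seed}.png"
    # compress_level=0 writes the PNG without compression (still lossless).
    rv_image.save(sd_image_path, optimize=False, compress_level=0)
    upload_to_ftp(sd_image_path)
    image_path = save_image(rv_image)
    # Release cached VRAM between ZeroGPU invocations.
    torch.cuda.empty_cache()
    gc.collect()
    return image_path, seed

With a single image per call, the explicit empty_cache/collect pair keeps VRAM flat across repeated invocations, and the saved PNG serves both the FTP upload and the path returned to the Gradio gallery.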