Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -267,6 +267,7 @@ def generate_30(
     num_inference_steps: int = 125,
     latent_file = gr.File(), # Add latents file input
     latent_file_2 = gr.File(), # Add latents file input
+    samples=1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
@@ -274,6 +275,10 @@ def generate_30(
     generator = torch.Generator(device='cuda').manual_seed(seed)
     if latent_file is not None: # Check if a latent file is provided
         sd_image_a = Image.open(latent_file.name)
+        if latent_file_2 is not None: # Check if a latent file is provided
+            sd_image_b = Image.open(latent_file_2.name)
+        else:
+            sd_image_b = None
         timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
         filename= f'rv_IP_{timestamp}.png'
         print("-- using image file --")
@@ -315,6 +320,7 @@ def generate_60(
     num_inference_steps: int = 125,
     latent_file = gr.File(), # Add latents file input
     latent_file_2 = gr.File(), # Add latents file input
+    samples=1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
@@ -322,6 +328,10 @@ def generate_60(
     generator = torch.Generator(device='cuda').manual_seed(seed)
     if latent_file is not None: # Check if a latent file is provided
         sd_image_a = Image.open(latent_file.name)
+        if latent_file_2 is not None: # Check if a latent file is provided
+            sd_image_b = Image.open(latent_file_2.name)
+        else:
+            sd_image_b = None
         timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
         filename= f'rv_IP_{timestamp}.png'
         print("-- using image file --")
@@ -363,6 +373,7 @@ def generate_90(
     num_inference_steps: int = 125,
     latent_file = gr.File(), # Add latents file input
     latent_file_2 = gr.File(), # Add latents file input
+    samples=1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
@@ -370,6 +381,10 @@ def generate_90(
     generator = torch.Generator(device='cuda').manual_seed(seed)
     if latent_file is not None: # Check if a latent file is provided
         sd_image_a = Image.open(latent_file.name)
+        if latent_file_2 is not None: # Check if a latent file is provided
+            sd_image_b = Image.open(latent_file_2.name)
+        else:
+            sd_image_b = None
         timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
         filename= f'rv_IP_{timestamp}.png'
         print("-- using image file --")
@@ -533,6 +548,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             num_inference_steps,
             latent_file,
             latent_file_2,
+            samples,
         ],
         outputs=[result],
     )
@@ -554,6 +570,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             num_inference_steps,
             latent_file,
             latent_file_2,
+            samples,
         ],
         outputs=[result],
     )
@@ -575,6 +592,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             num_inference_steps,
             latent_file,
             latent_file_2,
+            samples,
         ],
         outputs=[result],
     )
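
Taken together, the hunks in generate_30, generate_60, and generate_90 apply one repeated pattern: accept a samples count and an optional second reference image, open the second file only when one was uploaded, and otherwise keep sd_image_b as None. A minimal standalone sketch of that pattern follows; the helper load_optional_image and the shape of the ip_model.generate(...) call are illustrative assumptions, not the app's exact code.

import datetime

from PIL import Image


def load_optional_image(file_obj):
    # A gr.File input arrives as an object with a .name path, or None when nothing was uploaded.
    return Image.open(file_obj.name) if file_obj is not None else None


def generate_sketch(ip_model, latent_file, latent_file_2, samples=1, **gen_kwargs):
    sd_image_a = load_optional_image(latent_file)    # primary reference image
    sd_image_b = load_optional_image(latent_file_2)  # optional second image, may be None

    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"rv_IP_{timestamp}.png"

    # Assumed call shape: IP-Adapter's generate() takes a PIL image (or a list of them)
    # plus num_samples; the real app likely also passes prompt, scale, steps, and seed.
    pil_input = sd_image_a if sd_image_b is None else [sd_image_a, sd_image_b]
    images = ip_model.generate(pil_image=pil_input, num_samples=samples, **gen_kwargs)
    images[0].save(filename)
    return filename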
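
On the UI side, the last three hunks only append the new samples component to the inputs list of each button's .click() handler. Below is a hedged Gradio sketch of that wiring, with a stub callback and hypothetical component labels standing in for the app's real layout.

import gradio as gr


def run_stub(num_inference_steps, latent_file, latent_file_2, samples):
    # Placeholder for the real generate_30/60/90; it only reports what it received.
    return (f"steps={num_inference_steps}, "
            f"file_1={latent_file is not None}, file_2={latent_file_2 is not None}, "
            f"samples={samples}")


with gr.Blocks() as demo:
    latent_file = gr.File(label="Image file")            # primary reference image
    latent_file_2 = gr.File(label="Second image file")   # optional second reference
    samples = gr.Number(value=1, label="Samples")        # the input this commit adds
    num_inference_steps = gr.Slider(1, 250, value=125, label="Steps")
    result = gr.Textbox(label="Result")
    run_button = gr.Button("Generate")

    # Each of the three handlers in the diff now forwards samples alongside
    # num_inference_steps and the two file inputs.
    run_button.click(
        fn=run_stub,
        inputs=[num_inference_steps, latent_file, latent_file_2, samples],
        outputs=[result],
    )

# demo.launch()  # uncomment to serve the sketch locally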