Update app.py
app.py CHANGED
@@ -34,14 +34,10 @@ def run(image, src_style, src_prompt, prompts, shared_score_shift, shared_score_
     g_cpu.manual_seed(seed)
     latents = torch.randn(len(prompts), 4, d, d, device='cpu', generator=g_cpu, dtype=pipeline.unet.dtype,).to(device)
     latents[0] = zT
-
     images_a = pipeline(prompts, latents=latents, callback_on_step_end=inversion_callback, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).images
-
     handler.remove()
     torch.cuda.empty_cache()
-
-    return images_pil
-
+    return images_a
 
 with gr.Blocks() as demo:
     gr.Markdown('''# Welcome to Tonic's Stable Style Align
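The substantive change replaces return images_pil, which is never defined in the scope shown, with return images_a, the list of PIL images produced by the pipeline call; the remaining edits only drop stray blank lines. For context, below is a minimal, hedged sketch of the pattern this diff touches: a batched diffusers call with precomputed latents and a step-end callback, whose .images list is what run() now returns. The model id, d = 128, the example prompts, and the no-op callback are illustrative assumptions, not the Space's actual setup.

```python
# Sketch only: names outside the diff (model id, d, prompts, callback body)
# are assumptions, not the Space's real configuration.
import torch
from diffusers import StableDiffusionXLPipeline

device = 'cuda'
pipeline = StableDiffusionXLPipeline.from_pretrained(
    'stabilityai/stable-diffusion-xl-base-1.0', torch_dtype=torch.float16
).to(device)

def inversion_callback(pipe, step, timestep, callback_kwargs):
    # Placeholder: the Space's real callback manipulates latents at each step.
    return callback_kwargs

prompts = ['a cat in watercolor style', 'a dog in watercolor style']
d = 128  # latent spatial size for SDXL's default 1024x1024 output

# Sample per-prompt latents on CPU for reproducibility, then move to the GPU.
g_cpu = torch.Generator(device='cpu').manual_seed(42)
latents = torch.randn(
    len(prompts), 4, d, d,
    device='cpu', generator=g_cpu, dtype=pipeline.unet.dtype,
).to(device)

images_a = pipeline(
    prompts,
    latents=latents,
    callback_on_step_end=inversion_callback,
    num_inference_steps=50,
    guidance_scale=10.0,
).images  # list of PIL.Image objects, one per prompt -- this is what run() returns
```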