amos1088 committed
Commit e8dd799 · 1 Parent(s): 2fd610d

test gradio

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -3,7 +3,7 @@ from huggingface_hub import login
 import os
 import spaces,tempfile
 import torch
-from diffusers import AnimateDiffSparseControlNetPipeline
+from diffusers import AnimateDiffControlNetPipeline
 from diffusers.models import AutoencoderKL, MotionAdapter, SparseControlNetModel
 from diffusers.schedulers import DPMSolverMultistepScheduler
 from diffusers.utils import export_to_gif, load_image
@@ -68,7 +68,7 @@ scheduler = DPMSolverMultistepScheduler.from_pretrained(
     algorithm_type="dpmsolver++",
     use_karras_sigmas=True,
 )
-gif_pipe = AnimateDiffSparseControlNetPipeline.from_pretrained(
+gif_pipe = AnimateDiffControlNetPipeline.from_pretrained(
     model_id,
     motion_adapter=motion_adapter,
     controlnet=controlnet,
@@ -160,7 +160,7 @@ This way, each frame represents a distinct scene, and there’s no redundancy be
     ).frames[0]
     export_to_gif(video, "output.gif")
 
-    return "animation.gif"
+    return conditioning_frames,"animation.gif"
 
 # Set up Gradio interface
 interface = gr.Interface(
@@ -173,7 +173,7 @@ interface = gr.Interface(
         gr.Slider(label="Number of frames", minimum=0, maximum=100.0, step=1.0, value=10.0),
 
     ],
-    outputs="image",
+    outputs=["gallery","image"],
     title="Image Generation with Stable Diffusion 3 medium and ControlNet",
     description="Generates an image based on a text prompt and a reference image using Stable Diffusion 3 medium with ControlNet."
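Note: the commit swaps AnimateDiffSparseControlNetPipeline for AnimateDiffControlNetPipeline while keeping the same from_pretrained wiring. Below is a minimal sketch of how the renamed pipeline is typically assembled and invoked; the checkpoints, prompt, and frame count are placeholders and not taken from app.py, which only shows model_id, motion_adapter, and controlnet being passed in.

```python
import torch
from diffusers import AnimateDiffControlNetPipeline, ControlNetModel, MotionAdapter
from diffusers.schedulers import DPMSolverMultistepScheduler
from diffusers.utils import export_to_gif, load_image

# Placeholder checkpoints (assumptions, not values from app.py).
motion_adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
)
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16
)

pipe = AnimateDiffControlNetPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V5.1_noVAE",  # stand-in for the app's model_id
    motion_adapter=motion_adapter,
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config,
    algorithm_type="dpmsolver++",
    use_karras_sigmas=True,
)

# Unlike the sparse variant, this pipeline expects one conditioning frame per
# generated frame, so len(conditioning_frames) must equal num_frames.
num_frames = 16
conditioning_frames = [load_image("pose.png")] * num_frames  # placeholder control frames

video = pipe(
    prompt="a person dancing in the rain",
    num_frames=num_frames,
    conditioning_frames=conditioning_frames,
    num_inference_steps=25,
).frames[0]
export_to_gif(video, "output.gif")
```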
 
 
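The Gradio-side changes go together: with outputs=["gallery","image"], Gradio expects the handler to return one value per output component, in order, which is why the return statement now yields both the conditioning frames and the GIF path. A minimal sketch of that contract, using a hypothetical handler in place of the app's real one:

```python
import gradio as gr

def generate(prompt: str, num_frames: float):
    # Placeholder results; the real app runs the AnimateDiff pipeline here.
    conditioning_frames = ["frame_0.png", "frame_1.png"]  # list shown in the gallery
    gif_path = "animation.gif"                            # single file shown in the image slot
    return conditioning_frames, gif_path

interface = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(label="Number of frames", minimum=0, maximum=100.0, step=1.0, value=10.0),
    ],
    outputs=["gallery", "image"],  # one returned value per component, in this order
)

if __name__ == "__main__":
    interface.launch()
```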