vilarin committed (verified)
Commit bab7b15 · 1 Parent(s): a92a2f9

Update app.py

Files changed (1)
  1. app.py +9 -9
app.py CHANGED
@@ -104,8 +104,8 @@ def change_media(image_in, video_in, selected):
 @spaces.GPU(duration=120)
 def generate(
     media,
-    prompt: str = "best quality",
     selected,
+    prompt: str = "best quality",
     seed: Optional[int] = -1,
     num_inference_steps: int = 5,
     animatediff_batch_size: int = 32,
@@ -170,12 +170,12 @@ def generate(
 
 
 examples = [
-    ['./walking.mp4', "A woman walking on the street", "Diffutoon"],
-    ['./smilegirl.mp4', "A girl stand on the grass", "Diffutoon"],
-    ['./working.mp4', "A woman is doing the dishes", "Diffutoon"],
-    ["./train.jpg", "", "ExVideo"],
-    ["./girl.webp", "", "ExVideo"],
-    ["./robo.jpg", "", "ExVideo"],
+    ['./walking.mp4', "Diffutoon", "A woman walking on the street"],
+    ['./smilegirl.mp4', "Diffutoon", "A girl stand on the grass"],
+    ['./working.mp4', "Diffutoon", "A woman is doing the dishes"],
+    ["./train.jpg", "ExVideo", ""],
+    ["./girl.webp", "ExVideo", ""],
+    ["./robo.jpg", "ExVideo", ""],
 ]
 
 
@@ -259,14 +259,14 @@ with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
 
     gr.Examples(
         examples=examples,
-        inputs=[media, prompt, selected],
+        inputs=[media, selected, prompt],
         outputs=[video, seed],
         fn=generate,
         cache_examples="lazy",
         examples_per_page=4,
     )
     selected.change(change_media, inputs=[image_in, video_in, selected], outputs=[image_in, video_in, media, prompt])
-    submit_event = submit_btn.click(fn=generate, inputs=[media, prompt, selected, seed, num_inference_steps, animatediff_batch_size, animatediff_stride, motion_bucket_id, fps_id, num_frames], outputs=[video, seed], api_name="video")
+    submit_event = submit_btn.click(fn=generate, inputs=[media, selected, prompt, seed, num_inference_steps, animatediff_batch_size, animatediff_stride, motion_bucket_id, fps_id, num_frames], outputs=[video, seed], api_name="video")
     #stop_btn.click(fn=None, inputs=None, outputs=None, cancels=[submit_event])
 
 demo.queue().launch()
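
The reordering has to be applied everywhere at once because Gradio passes the values of the components listed in inputs to the callback positionally, in list order. A minimal sketch of that contract, using stand-in components (plain textboxes and a stub generate) rather than the Space's actual layout:

import gradio as gr

# Minimal sketch, not the Space's code: it only illustrates that the order in
# gr.Examples rows, the inputs lists, and the click handler must all match the
# new generate(media, selected, prompt, ...) parameter order.
def generate(media, selected, prompt: str = "best quality"):
    return f"{selected} | {prompt} | {media}"

with gr.Blocks() as demo:
    media = gr.Textbox(label="media")      # stand-in for the image/video input
    selected = gr.Radio(["Diffutoon", "ExVideo"], value="Diffutoon", label="selected")
    prompt = gr.Textbox(value="best quality", label="prompt")
    out = gr.Textbox(label="output")       # stand-in for the video output
    gr.Examples(
        examples=[["./walking.mp4", "Diffutoon", "A woman walking on the street"]],
        inputs=[media, selected, prompt],  # each example row follows this order
    )
    gr.Button("Generate").click(generate, inputs=[media, selected, prompt], outputs=out, api_name="video")

demo.launch()

Because the click handler keeps api_name="video", API callers (e.g. via gradio_client) likewise need to send selected before prompt from this commit on.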