multimodalart (HF Staff) committed on
Commit 8116465 · verified · 1 Parent(s): 8268b44

Update app.py

Files changed (1)
  1. app.py +17 -16
app.py CHANGED
@@ -39,10 +39,14 @@ FIXED_FPS = 24
 MIN_FRAMES_MODEL = 8
 MAX_FRAMES_MODEL = 81

-def _calculate_new_dimensions_wan(pil_image: Image.Image, mod_val: int, calculation_max_area: float,
-                                   min_slider_h: int, max_slider_h: int,
-                                   min_slider_w: int, max_slider_w: int,
-                                   default_h: int, default_w: int) -> tuple[int, int]:
+default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
+default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards, watermark, text, signature"
+
+
+def _calculate_new_dimensions_wan(pil_image, mod_val, calculation_max_area,
+                                   min_slider_h, max_slider_h,
+                                   min_slider_w, max_slider_w,
+                                   default_h, default_w):
     orig_w, orig_h = pil_image.size
     if orig_w <= 0 or orig_h <= 0:
         return default_h, default_w
@@ -60,7 +64,7 @@ def _calculate_new_dimensions_wan(pil_image: Image.Image, mod_val: int, calculat

     return new_h, new_w

-def handle_image_upload_for_dims_wan(uploaded_pil_image: Image.Image | None, current_h_val: int, current_w_val: int):
+def handle_image_upload_for_dims_wan(uploaded_pil_image, current_h_val, current_w_val):
     if uploaded_pil_image is None:
         return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
     try:
@@ -75,10 +79,10 @@ def handle_image_upload_for_dims_wan(uploaded_pil_image: Image.Image | None, cur
     return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)

 @spaces.GPU
-def generate_video(input_image: Image.Image, prompt: str, negative_prompt: str,
-                    height: int, width: int, duration_seconds: float,
-                    guidance_scale: float, steps: int,
-                    seed: int, randomize_seed: bool,
+def generate_video(input_image, prompt, height, width,
+                    negative_prompt=default_negative_prompt, duration_seconds = 2,
+                    guidance_scale = 1, steps = 4,
+                    seed = 42, randomize_seed = False,
                     progress=gr.Progress(track_tqdm=True)):

     if input_image is None:
@@ -106,9 +110,6 @@ def generate_video(input_image: Image.Image, prompt: str, negative_prompt: str,
     export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
     return video_path

-default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
-default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards, watermark, text, signature"
-
 with gr.Blocks() as demo:
     gr.Markdown("# Fast 4 steps Wan 2.1 I2V (14B) with CausVid LoRA")
     with gr.Row():
@@ -144,16 +145,16 @@ with gr.Blocks() as demo:
     )

     ui_inputs = [
-        input_image_component, prompt_input, negative_prompt_input,
-        height_input, width_input, duration_seconds_input,
+        input_image_component, prompt_input, height_input, width_input,
+        negative_prompt_input, duration_seconds_input,
         guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox
     ]
     generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=video_output)

     gr.Examples(
         examples=[
-            ["peng.png", "a penguin playfully dancing in the snow, Antarctica", default_negative_prompt, 896, 512, 2.0, 1.0, 4, 42, False],
-            ["forg.jpg", "the frog jumps around", default_negative_prompt, 448, 832, 2.0, 1.0, 4, 123, False],
+            ["peng.png", "a penguin playfully dancing in the snow, Antarctica", 896, 512],
+            ["forg.jpg", "the frog jumps around", 448, 832],
         ],
         inputs=ui_inputs, outputs=video_output, fn=generate_video, cache_examples="lazy"
     )
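
Note: the net effect of the signature change is that generate_video only needs an image, a prompt, and the output dimensions; negative_prompt, duration_seconds, guidance_scale, steps, seed, and randomize_seed all pick up keyword defaults. This is also why default_prompt_i2v and default_negative_prompt move above the function definitions: a default argument value is evaluated when the def statement runs, so default_negative_prompt has to exist at that point. Below is a minimal sketch of the resulting call shape, assuming generate_video from this app.py is in scope and the example asset peng.png is available locally; the direct call and the print are illustrative, not part of the commit.

    from PIL import Image

    # Illustrative sketch only: exercises the reordered signature introduced
    # by this commit. Everything after `width` falls back to the new keyword
    # defaults (negative_prompt=default_negative_prompt, duration_seconds=2,
    # guidance_scale=1, steps=4, seed=42, randomize_seed=False).
    image = Image.open("peng.png")  # example asset referenced in the diff
    video_path = generate_video(
        image,
        "a penguin playfully dancing in the snow, Antarctica",
        896,  # height
        512,  # width
    )
    print(video_path)  # path to the clip exported at FIXED_FPS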
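
ui_inputs is reordered for the same reason: Gradio hands the values of the inputs components to fn positionally, in list order, so the list has to mirror the new parameter order, and the trimmed gr.Examples rows then line up with its first four entries. A sketch of that correspondence, written as comments, with the component names taken from the diff:

    # Positional mapping implied by
    # generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=video_output):
    #   ui_inputs[0] input_image_component   -> input_image
    #   ui_inputs[1] prompt_input            -> prompt
    #   ui_inputs[2] height_input            -> height
    #   ui_inputs[3] width_input             -> width
    #   ui_inputs[4] negative_prompt_input   -> negative_prompt
    #   ui_inputs[5] duration_seconds_input  -> duration_seconds
    #   ui_inputs[6] guidance_scale_input    -> guidance_scale
    #   ui_inputs[7] steps_slider            -> steps
    #   ui_inputs[8] seed_input              -> seed
    #   ui_inputs[9] randomize_seed_checkbox -> randomize_seed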