linoyts HF Staff commited on
Commit
39627f8
·
verified ·
1 Parent(s): 07bfdfa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -4
app.py CHANGED
@@ -111,8 +111,9 @@ def generate_video(
111
  prompt,
112
  negative_prompt=default_negative_prompt,
113
  duration_seconds = MAX_DURATION,
114
- guidance_scale = 1,
115
- steps = 4,
 
116
  seed = 42,
117
  randomize_seed = False,
118
  progress=gr.Progress(track_tqdm=True),
@@ -133,6 +134,8 @@ def generate_video(
133
  Defaults to 2. Clamped between MIN_FRAMES_MODEL/FIXED_FPS and MAX_FRAMES_MODEL/FIXED_FPS.
134
  guidance_scale (float, optional): Controls adherence to the prompt. Higher values = more adherence.
135
  Defaults to 2.5. Range: 0.0-10.0.
 
 
136
  steps (int, optional): Number of inference steps. More steps = higher quality but slower.
137
  Defaults to 6. Range: 1-30.
138
  seed (int, optional): Random seed for reproducible results. Defaults to 42.
@@ -171,6 +174,7 @@ def generate_video(
171
  width=resized_image.width,
172
  num_frames=num_frames,
173
  guidance_scale=float(guidance_scale),
 
174
  num_inference_steps=int(steps),
175
  generator=torch.Generator(device="cuda").manual_seed(current_seed),
176
  ).frames[0]
@@ -196,7 +200,8 @@ with gr.Blocks() as demo:
196
  seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
197
  randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
198
  steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Inference Steps")
199
- guidance_scale_input = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=1.0, label="Guidance Scale", visible=False)
 
200
 
201
  generate_button = gr.Button("Generate Video", variant="primary")
202
  with gr.Column():
@@ -205,7 +210,7 @@ with gr.Blocks() as demo:
205
  ui_inputs = [
206
  input_image_component, prompt_input,
207
  negative_prompt_input, duration_seconds_input,
208
- guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox
209
  ]
210
  generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
211
 
 
111
  prompt,
112
  negative_prompt=default_negative_prompt,
113
  duration_seconds = MAX_DURATION,
114
+ guidance_scale = 2.5,
115
+ guidance_scale_2 = 3.5,
116
+ steps = 6,
117
  seed = 42,
118
  randomize_seed = False,
119
  progress=gr.Progress(track_tqdm=True),
 
134
  Defaults to 2. Clamped between MIN_FRAMES_MODEL/FIXED_FPS and MAX_FRAMES_MODEL/FIXED_FPS.
135
  guidance_scale (float, optional): Controls adherence to the prompt. Higher values = more adherence.
136
  Defaults to 2.5. Range: 0.0-10.0.
137
+ guidance_scale_2 (float, optional): Controls adherence to the prompt during the low-noise stage. Higher values = more adherence.
138
+ Defaults to 3.5. Range: 0.0-10.0.
139
  steps (int, optional): Number of inference steps. More steps = higher quality but slower.
140
  Defaults to 6. Range: 1-30.
141
  seed (int, optional): Random seed for reproducible results. Defaults to 42.
 
174
  width=resized_image.width,
175
  num_frames=num_frames,
176
  guidance_scale=float(guidance_scale),
177
+ guidance_scale_2=float(guidance_scale_2),
178
  num_inference_steps=int(steps),
179
  generator=torch.Generator(device="cuda").manual_seed(current_seed),
180
  ).frames[0]
 
200
  seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
201
  randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
202
  steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Inference Steps")
203
+ guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=2.5, label="Guidance Scale - high noise stage", visible=False)
204
+ guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=3.5, label="Guidance Scale 2 - low noise stage", visible=False)
205
 
206
  generate_button = gr.Button("Generate Video", variant="primary")
207
  with gr.Column():
 
210
  ui_inputs = [
211
  input_image_component, prompt_input,
212
  negative_prompt_input, duration_seconds_input,
213
+ guidance_scale_input, guidance_scale_2_input, steps_slider, seed_input, randomize_seed_checkbox
214
  ]
215
  generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
216