linoyts (HF Staff) committed
Commit aeb50f2 · verified · 1 Parent(s): e784d79

Update app.py

Files changed (1):
  1. app.py +34 -14
app.py CHANGED
@@ -192,7 +192,7 @@ def generate_video(
     reference_video,
     prompt,
     control_type,
-    current_lora_state,
+    # current_lora_state,
     duration=3.0,
     negative_prompt="worst quality, inconsistent motion, blurry, jittery, distorted",
     height=768,
@@ -229,14 +229,14 @@ def generate_video(
         # Load the appropriate control LoRA and update state
         # updated_lora_state = load_control_lora(control_type, current_lora_state)
 
-        # Loads video into a list of pil images
-        video = load_video(reference_video)
+        # # Loads video into a list of pil images
+        # video = load_video(reference_video)
         # progress(0.1, desc="Processing video for control...")
 
         # Process video based on control type
-        #processed_video = process_video_for_control(video, control_type)
+        processed_video = process_video_for_control(reference_video, control_type)
 
-        processed_video = read_video(video) # turns to tensor
+        processed_video = read_video(processed_video) # turns to tensor
 
         progress(0.2, desc="Preparing generation parameters...")
 
@@ -310,11 +310,11 @@ def generate_video(
 
         progress(1.0, desc="Complete!")
 
-        return output_path, updated_lora_state
+        return output_path, seed
 
     except Exception as e:
         print(e)
-        return None, current_lora_state
+        return None, seed
 
 # Create Gradio interface
 with gr.Blocks() as demo:
@@ -425,21 +425,41 @@ with gr.Blocks() as demo:
         )
         control_video = gr.Video(
             label="Control Video",
-            height=400
+            height=400,
+            visible=False
         )
 
-
+        gr.Examples(
+            examples=[
+                ["video_assets/vid_1.mp4", "", "canny", 3, "", 1024, 1024, 7, 1, 0, True],
+                ["video_assets/vid_2.mp4", "", "canny", 3, "", 1024, 1024, 7, 1, 0, True],
+                ["video_assets/vid_3.mp4", "", "canny", 3, "", 1024, 1024, 7, 1, 0, True],
+                ["video_assets/vid_4.mp4", "", "canny", 3, "", 1024, 1024, 7, 1, 0, True],
+            ],
+            inputs=[reference_video,
+                    prompt,
+                    control_type,
+                    # current_lora_state,
+                    duration,
+                    negative_prompt,
+                    height,
+                    width,
+                    num_inference_steps,
+                    guidance_scale,
+                    seed,
+                    randomize_seed],
+            outputs=[output_video, seed],
+            fn=generate_video, cache_examples="lazy"
+        )
 
     # Event handlers
     generate_btn.click(
-        fn = process_video_for_control,
-        inputs = [reference_video, control_type], outputs = [control_video]).then(
         fn=generate_video,
         inputs=[
-            control_video,
+            reference_video,
             prompt,
             control_type,
-            current_lora_state,
+            # current_lora_state,
             duration,
             negative_prompt,
             height,
@@ -449,7 +469,7 @@ with gr.Blocks() as demo:
             seed,
             randomize_seed
         ],
-        outputs=[output_video, current_lora_state],
+        outputs=[output_video, seed],
         show_progress=True
     )
 
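
Note: process_video_for_control, which the updated generate_video now calls directly on the uploaded reference video, is defined elsewhere in app.py and is not part of this commit. As a rough, hypothetical sketch of what such a helper could look like for the "canny" examples above (load_video/export_to_video from diffusers.utils, OpenCV, and the threshold values are assumptions here, not code from the app):

import cv2
import numpy as np
from PIL import Image
from diffusers.utils import export_to_video, load_video


def process_video_for_control(reference_video: str, control_type: str) -> str:
    """Placeholder for the helper called by the updated generate_video.

    Loads the uploaded clip as PIL frames, applies the selected control
    transform (only "canny" is sketched here), and writes the result back
    to an mp4 so a file path can be handed on to the next step.
    """
    frames = load_video(reference_video)
    processed = []
    for frame in frames:
        if control_type == "canny":
            gray = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2GRAY)
            edges = cv2.Canny(gray, 100, 200)  # thresholds are illustrative
            processed.append(Image.fromarray(cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)))
        else:
            processed.append(frame)  # other control types not sketched
    return export_to_video(processed, "control.mp4", fps=24)

Per the diff, generate_video then passes the processed result through read_video, which turns it into the tensor the pipeline consumes.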
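
Note: generate_video now returns seed instead of the old LoRA state (lines 313/317 above), so the UI can display whichever seed was actually used. The app's seed handling is not shown in these hunks; a minimal sketch of how a randomize_seed flag is typically resolved (helper name and range are hypothetical):

import random


def resolve_seed(seed: int, randomize_seed: bool) -> int:
    # Draw a fresh seed when the checkbox is set; returning the value from
    # generate_video lets outputs=[output_video, seed] show the seed that
    # actually produced the clip, so a good result can be reproduced.
    return random.randint(0, 2**32 - 1) if randomize_seed else int(seed)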
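
Note: gr.Examples matches each example row to inputs by position, which is why current_lora_state is commented out of both the inputs lists and the example rows stay eleven items long, and why outputs now mirrors the function's (output_path, seed) return in both the Examples block and the click handler. A self-contained sketch of that wiring with stub components (component types, ranges, and defaults are placeholders; only the names, the inputs/outputs lists, and cache_examples="lazy" follow the diff):

import gradio as gr


def generate_video_stub(reference_video, prompt, control_type, duration,
                        negative_prompt, height, width, num_inference_steps,
                        guidance_scale, seed, randomize_seed):
    # Stands in for generate_video: the returned pair must line up with
    # outputs=[output_video, seed] below (video path first, then the seed).
    return reference_video, seed


with gr.Blocks() as demo:
    reference_video = gr.Video(label="Reference Video")
    prompt = gr.Textbox(label="Prompt")
    control_type = gr.Dropdown(choices=["canny"], value="canny", label="Control Type")
    duration = gr.Slider(1, 10, value=3, label="Duration (s)")
    negative_prompt = gr.Textbox(label="Negative Prompt")
    height = gr.Slider(256, 1536, value=768, step=32, label="Height")
    width = gr.Slider(256, 1536, value=1152, step=32, label="Width")
    num_inference_steps = gr.Slider(1, 50, value=7, step=1, label="Steps")
    guidance_scale = gr.Slider(1, 15, value=1, label="Guidance Scale")
    seed = gr.Number(value=0, label="Seed", precision=0)
    randomize_seed = gr.Checkbox(value=True, label="Randomize Seed")
    output_video = gr.Video(label="Generated Video")
    generate_btn = gr.Button("Generate")

    shared_inputs = [reference_video, prompt, control_type, duration,
                     negative_prompt, height, width, num_inference_steps,
                     guidance_scale, seed, randomize_seed]

    # Each example row maps positionally onto shared_inputs, so dropping
    # current_lora_state from the rows and from inputs keeps them aligned.
    gr.Examples(
        # Path taken from the diff; point it at any local clip to run this sketch.
        examples=[["video_assets/vid_1.mp4", "", "canny", 3, "", 1024, 1024, 7, 1, 0, True]],
        inputs=shared_inputs,
        outputs=[output_video, seed],
        fn=generate_video_stub,
        cache_examples="lazy",
    )

    generate_btn.click(
        fn=generate_video_stub,
        inputs=shared_inputs,
        outputs=[output_video, seed],
        show_progress=True,
    )

if __name__ == "__main__":
    demo.launch()

With cache_examples="lazy", the example rows are shown immediately and fn only runs to cache an output the first time an example is selected.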