alexnasa committed on
Commit
dec181d
·
verified ·
1 Parent(s): f888cec

session state fixed

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -557,7 +557,7 @@ ADAPTIVE_PROMPT_TEMPLATES = [
557
  "A realistic video of a person speaking and sometimes looking directly to the camera and moving their eyes and pupils and head accordingly and turning and looking at the camera and looking away from the camera based on their movements with dynamic and rhythmic and extensive hand gestures that complement their speech. Their hands are clearly visible, independent, and unobstructed. Their facial expressions are expressive and full of emotion, enhancing the delivery. The camera remains steady, capturing sharp, clear movements and a focused, engaging presence."
558
  ]
559
 
560
- def slider_value_change(image_path, audio_path, text, num_steps, adaptive_text):
561
 
562
  if adaptive_text:
563
 
@@ -568,15 +568,15 @@ def slider_value_change(image_path, audio_path, text, num_steps, adaptive_text):
568
  else:
569
  text = ADAPTIVE_PROMPT_TEMPLATES[2]
570
 
571
- return update_generate_button(image_path, audio_path, text, num_steps), text
572
 
573
 
574
def update_generate_button(image_path, audio_path, text, num_steps, session_state=None):
    """Refresh the "Zero GPU Required" label with an estimated runtime.

    Args:
        image_path: Path to the uploaded reference image, or None if missing.
        audio_path: Path to the uploaded driving audio, or None if missing.
        text: Prompt text forwarded to the duration estimator.
        num_steps: Number of inference steps forwarded to the estimator.
        session_state: Per-user session state forwarded to ``get_duration`` so
            the estimate reflects this session's settings. Defaults to None
            for backward compatibility (previously a hard-coded None was
            passed, ignoring the session — the bug this change fixes).

    Returns:
        A ``gr.update(...)`` setting the label text: a placeholder while
        either input is missing, otherwise the estimate in seconds/minutes.
    """
    # Without both inputs there is nothing to estimate yet.
    if image_path is None or audio_path is None:
        return gr.update(value="⌚ Zero GPU Required: --")

    # get_duration returns an estimate in seconds; the last positional
    # argument is unused by this caller. TODO(review): confirm signature.
    duration_s = get_duration(image_path, audio_path, text, num_steps, session_state, None)
    duration_m = duration_s / 60

    return gr.update(value=f"⌚ Zero GPU Required: ~{duration_s}.0s ({duration_m:.1f} mins)")
@@ -930,9 +930,9 @@ with gr.Blocks(css=css) as demo:
930
  )
931
 
932
  image_input.upload(fn=preprocess_img, inputs=[image_input, session_state], outputs=[image_input])
933
- image_input.change(fn=update_generate_button, inputs=[image_input, audio_input, text_input, num_steps], outputs=[time_required])
934
- audio_input.change(fn=update_generate_button, inputs=[image_input, audio_input, text_input, num_steps], outputs=[time_required])
935
- num_steps.change(fn=slider_value_change, inputs=[image_input, audio_input, text_input, num_steps, adaptive_text], outputs=[time_required, text_input])
936
  adaptive_text.change(fn=check_box_clicked, inputs=[adaptive_text], outputs=[text_input])
937
  audio_input.upload(fn=apply, inputs=[audio_input], outputs=[audio_input]
938
  ).then(
 
557
  "A realistic video of a person speaking and sometimes looking directly to the camera and moving their eyes and pupils and head accordingly and turning and looking at the camera and looking away from the camera based on their movements with dynamic and rhythmic and extensive hand gestures that complement their speech. Their hands are clearly visible, independent, and unobstructed. Their facial expressions are expressive and full of emotion, enhancing the delivery. The camera remains steady, capturing sharp, clear movements and a focused, engaging presence."
558
  ]
559
 
560
+ def slider_value_change(image_path, audio_path, text, num_steps, session_state, adaptive_text):
561
 
562
  if adaptive_text:
563
 
 
568
  else:
569
  text = ADAPTIVE_PROMPT_TEMPLATES[2]
570
 
571
+ return update_generate_button(image_path, audio_path, text, num_steps, session_state), text
572
 
573
 
574
def update_generate_button(image_path, audio_path, text, num_steps, session_state):
    """Recompute and display the estimated Zero GPU time for one run.

    Returns a ``gr.update(...)`` for the time-required label: a placeholder
    while either the image or the audio is still missing, otherwise the
    estimate obtained from ``get_duration`` rendered as seconds and minutes.
    """
    # Both inputs must be present before an estimate makes sense.
    inputs_ready = image_path is not None and audio_path is not None
    if not inputs_ready:
        return gr.update(value="⌚ Zero GPU Required: --")

    # Estimate is in seconds; derive minutes for the human-readable suffix.
    duration_s = get_duration(image_path, audio_path, text, num_steps, session_state, None)
    duration_m = duration_s / 60
    return gr.update(value=f"⌚ Zero GPU Required: ~{duration_s}.0s ({duration_m:.1f} mins)")
 
930
  )
931
 
932
  image_input.upload(fn=preprocess_img, inputs=[image_input, session_state], outputs=[image_input])
933
+ image_input.change(fn=update_generate_button, inputs=[image_input, audio_input, text_input, num_steps, session_state], outputs=[time_required])
934
+ audio_input.change(fn=update_generate_button, inputs=[image_input, audio_input, text_input, num_steps, session_state], outputs=[time_required])
935
+ num_steps.change(fn=slider_value_change, inputs=[image_input, audio_input, text_input, num_steps, session_state, adaptive_text], outputs=[time_required, text_input])
936
  adaptive_text.change(fn=check_box_clicked, inputs=[adaptive_text], outputs=[text_input])
937
  audio_input.upload(fn=apply, inputs=[audio_input], outputs=[audio_input]
938
  ).then(