yerang committed on
Commit
e14b577
·
verified ·
1 Parent(s): dfc269a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -17
app.py CHANGED
@@ -56,7 +56,7 @@ print("PATH:", os.environ['PATH'])
56
  print("LD_LIBRARY_PATH:", os.environ['LD_LIBRARY_PATH'])
57
 
58
  from stf_utils import STFPipeline
59
- stf_pipeline = STFPipeline()
60
 
61
 
62
  # audio_path="assets/examples/driving/test_aud.mp3"
@@ -202,12 +202,12 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
202
 
203
 
204
  gr.Markdown("### 2. Audio to Driving-Video")
205
- with gr.Row():
206
- audio_path_component = gr.Textbox(label="Input", value="assets/examples/driving/test_aud.mp3")
207
- video_gen_button = gr.Button("Audio to Video generation", variant="primary")
208
- with gr.Row():
209
- #a2v_output = gr.Video()
210
- driving_video_path.render()
211
 
212
 
213
  gr.Markdown("### 3. Image to Talking Video")
@@ -217,7 +217,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
217
  image_input = gr.Image(type="filepath")
218
  gr.Examples(
219
  examples=[
220
- [osp.join(example_portrait_dir, "01.webp")],
221
  [osp.join(example_portrait_dir, "02.webp")],
222
  [osp.join(example_portrait_dir, "03.jpg")],
223
  [osp.join(example_portrait_dir, "04.jpg")],
@@ -310,15 +310,15 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
310
  show_progress=True
311
  )
312
 
313
- video_gen_button.click(
314
- fn=gpu_wrapped_stf_pipeline_execute,
315
- inputs=[
316
- #output_audio
317
- audio_path_component
318
- ],
319
- outputs=[driving_video_path],
320
- show_progress=True
321
- )
322
 
323
 
324
 
 
56
  print("LD_LIBRARY_PATH:", os.environ['LD_LIBRARY_PATH'])
57
 
58
  from stf_utils import STFPipeline
59
+
60
 
61
 
62
  # audio_path="assets/examples/driving/test_aud.mp3"
 
202
 
203
 
204
  gr.Markdown("### 2. Audio to Driving-Video")
205
+ # with gr.Row():
206
+ # audio_path_component = gr.Textbox(label="Input", value="assets/examples/driving/test_aud.mp3")
207
+ # video_gen_button = gr.Button("Audio to Video generation", variant="primary")
208
+ # with gr.Row():
209
+ # #a2v_output = gr.Video()
210
+ # driving_video_path.render()
211
 
212
 
213
  gr.Markdown("### 3. Image to Talking Video")
 
217
  image_input = gr.Image(type="filepath")
218
  gr.Examples(
219
  examples=[
220
+ #[osp.join(example_portrait_dir, "01.webp")],
221
  [osp.join(example_portrait_dir, "02.webp")],
222
  [osp.join(example_portrait_dir, "03.jpg")],
223
  [osp.join(example_portrait_dir, "04.jpg")],
 
310
  show_progress=True
311
  )
312
 
313
+ # video_gen_button.click(
314
+ # fn=gpu_wrapped_stf_pipeline_execute,
315
+ # inputs=[
316
+ # #output_audio
317
+ # audio_path_component
318
+ # ],
319
+ # outputs=[driving_video_path],
320
+ # show_progress=True
321
+ # )
322
 
323
 
324