yerang committed
Commit 11620ae · verified · Parent(s): 97856be

Update app.py

Files changed (1)
  1. app.py +24 -6
app.py CHANGED
@@ -126,11 +126,28 @@ gradio_pipeline = GradioPipeline(
 
 # Additional definitions
 elevenlabs_pipeline = ElevenLabsPipeline()
-stf_pipeline = STFPipeline()
+#stf_pipeline = STFPipeline()
+stf_pipeline_female = STFPipeline()
+stf_pipeline_male = STFPipeline(
+    template_video_path="TEMP/Cam2_2309071202_0012_Natural_Looped.mp4",
+    config_path="front_config_v3.json",
+    checkpoint_path="TEMP/0157.pth",
+    female_video=False
+)
 
 
-@spaces.GPU() #duration=240)
-def gpu_wrapped_stf_pipeline_execute(audio_path):
+
+# @spaces.GPU() #duration=240)
+# def gpu_wrapped_stf_pipeline_execute(audio_path):
+#     return stf_pipeline.execute(audio_path)
+
+@spaces.GPU()
+def gpu_wrapped_stf_pipeline_execute(audio_path, video_type):
+    if video_type == "Female video":
+        stf_pipeline = stf_pipeline_female
+    else:
+        stf_pipeline = stf_pipeline_male
+
     return stf_pipeline.execute(audio_path)
 
 
@@ -161,9 +178,9 @@ def is_square_video(video_path):
     return gr.update(visible=True)
 
 
-def txt_to_driving_video(input_text):
+def txt_to_driving_video(input_text, video_type):
     audio_outpath = gpu_wrapped_elevenlabs_pipeline_generate_voice(text=input_text, voice=None)
-    video_outpath = gpu_wrapped_stf_pipeline_execute(audio_outpath)
+    video_outpath = gpu_wrapped_stf_pipeline_execute(audio_outpath, video_type)
     return video_outpath
 
 
@@ -211,6 +228,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
             # audio_gen_button = gr.Button("Audio generation", variant="primary")
            # with gr.Column():
            #     txt2video_gen_button = gr.Button("txt2video generation", variant="primary")
+            video_type = gr.Radio(choices=["Female video", "Male video"], label="Select video type", value="Female video")
             txt2video_gen_button = gr.Button("txt2video generation", variant="primary")
 
             #with gr.Column():
@@ -314,7 +332,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     txt2video_gen_button.click(
         fn=txt_to_driving_video,
         inputs=[
-            script_txt
+            script_txt, video_type
         ],
         outputs=[video_input],
         show_progress=True
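
For context, the pattern this commit relies on is a Gradio Radio whose string value is forwarded as an extra input to the button's click callback and then used to pick one of two pipeline objects created at import time, so the @spaces.GPU-decorated wrapper only dispatches at call time. Below is a minimal, self-contained sketch of that wiring; DummyPipeline and run() are stand-ins invented for illustration only, not part of this Space's code — the real app uses STFPipeline, the ElevenLabs TTS step, and txt_to_driving_video as shown in the diff above.

import gradio as gr


class DummyPipeline:
    """Stand-in for STFPipeline; only reports which variant would render the video."""

    def __init__(self, label: str):
        self.label = label

    def execute(self, audio_path: str) -> str:
        return f"{self.label} pipeline would render a talking-head video for {audio_path}"


# Both variants are created once at import time, mirroring
# stf_pipeline_female / stf_pipeline_male in the commit.
pipeline_female = DummyPipeline("Female")
pipeline_male = DummyPipeline("Male")


def run(script_text: str, video_type: str) -> str:
    # Same dispatch as gpu_wrapped_stf_pipeline_execute: the Radio's string
    # value decides which preloaded pipeline handles the request.
    pipeline = pipeline_female if video_type == "Female video" else pipeline_male
    # Placeholder for the TTS step that produces the driving audio.
    return pipeline.execute(f"tts_output_for_{script_text[:10]}.wav")


with gr.Blocks() as demo:
    script_txt = gr.Textbox(label="Script")
    video_type = gr.Radio(
        choices=["Female video", "Male video"],
        label="Select video type",
        value="Female video",
    )
    result = gr.Textbox(label="Result")
    gr.Button("txt2video generation").click(
        fn=run, inputs=[script_txt, video_type], outputs=[result]
    )

if __name__ == "__main__":
    demo.launch()

Creating both STFPipeline variants at module import, rather than inside the GPU-decorated function, presumably keeps model initialization out of the per-request GPU call, so selecting the female or male template is just an attribute lookup at request time.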