roychao19477 committed
Commit a0b18ac · 1 Parent(s): fead3e6
Files changed (1):
  1. app.py  +4 -13
app.py CHANGED
@@ -60,7 +60,6 @@ import os
 import tempfile
 from ultralytics import YOLO
 from moviepy import ImageSequenceClip
-from moviepy.video import fx as vfx
 from scipy.io import wavfile
 from avse_code import run_avse
 
@@ -209,24 +208,16 @@ def extract_faces(video_file):
             vn=None  # no video
         ).run(overwrite_output=True)
 
+
+
+
         # ------------------------------- #
         # AVSE models
 
         enhanced_audio_path = run_avse_inference(output_path, audio_path)
 
 
-        flip_output_path = os.path.join(tmpdir, "flip_face_only_video.mp4")
-        clip_to_mirror = VideoFileClip(output_path)
-        mirrored_clip = clip_to_mirror.with_effects([MirrorY().copy()])
-        mirrored_clip.write_videofile(
-            flip_output_path,
-            codec='libx264',    # Common H.264 codec
-            audio_codec='aac'   # Common audio codec if your clip has audio
-            # You can also specify fps if needed: fps=mirrored_clip.fps
-        )
-
-        return flip_output_path, enhanced_audio_path
-        #return output_path, enhanced_audio_path
+        return output_path, enhanced_audio_path
         #return output_path, audio_path
 
 iface = gr.Interface(
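For reference, a minimal sketch of the mirror-and-write step that this commit drops from extract_faces. It assumes MoviePy >= 2.0, where effects such as MirrorY are applied through with_effects; the file names here are illustrative, not taken from the repository.

    from moviepy import VideoFileClip, vfx

    # Load the face-only clip (illustrative path, not the app's actual temp file).
    clip = VideoFileClip("face_only_video.mp4")

    # MirrorY flips each frame top-to-bottom; with_effects returns a new clip.
    mirrored = clip.with_effects([vfx.MirrorY()])

    # Encode with H.264 video and AAC audio, matching the codecs the removed code used.
    mirrored.write_videofile(
        "flip_face_only_video.mp4",
        codec="libx264",
        audio_codec="aac",
    )

With the flip removed, the function simply returns the cropped face video path alongside the enhanced audio path.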