qqwjq1981 committed on
Commit
82831f3
·
verified ·
1 Parent(s): 4e48d07

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -125,14 +125,14 @@ def handle_feedback(feedback):
125
  conn.commit()
126
  return "Thank you for your feedback!", None
127
 
128
- def segment_background_audio(audio_path, output_path="background_segments.wav"):
129
 
130
  """
131
  Detects and extracts non-speech (background) segments from audio using pyannote VAD.
132
 
133
  Parameters:
134
  - audio_path (str): Path to input audio (.wav).
135
- - output_path (str): Path to save the output non-speech audio.
136
  - hf_token (str): Hugging Face auth token for pyannote.
137
 
138
  Returns:
@@ -171,8 +171,8 @@ def segment_background_audio(audio_path, output_path="background_segments.wav"):
171
  non_speech_audio += segment
172
 
173
  # Step 6: Export the non-speech audio
174
- non_speech_audio.export(output_path, format="wav")
175
- print(f"🎵 Non-speech audio saved to: {output_path}")
176
 
177
  return background_segments
178
 
@@ -433,7 +433,7 @@ def process_entry(entry, i, tts_model, video_width, video_height, process_mode,
433
  else:
434
  generate_voiceover_OpenAI(entry['translated'], target_language, desired_speed, segment_audio_path)
435
 
436
- if not output_path or not os.path.exists(segment_audio_path):
437
  raise FileNotFoundError(f"Voiceover file not generated at: {segment_audio_path}")
438
 
439
  audio_clip = AudioFileClip(segment_audio_path)
 
125
  conn.commit()
126
  return "Thank you for your feedback!", None
127
 
128
+ def segment_background_audio(audio_path, segment_audio_path="background_segments.wav"):
129
 
130
  """
131
  Detects and extracts non-speech (background) segments from audio using pyannote VAD.
132
 
133
  Parameters:
134
  - audio_path (str): Path to input audio (.wav).
135
+ - segment_audio_path (str): Path to save the output non-speech audio.
136
  - hf_token (str): Hugging Face auth token for pyannote.
137
 
138
  Returns:
 
171
  non_speech_audio += segment
172
 
173
  # Step 6: Export the non-speech audio
174
+ non_speech_audio.export(segment_audio_path, format="wav")
175
+ print(f"🎵 Non-speech audio saved to: {segment_audio_path}")
176
 
177
  return background_segments
178
 
 
433
  else:
434
  generate_voiceover_OpenAI(entry['translated'], target_language, desired_speed, segment_audio_path)
435
 
436
+ if not segment_audio_path or not os.path.exists(segment_audio_path):
437
  raise FileNotFoundError(f"Voiceover file not generated at: {segment_audio_path}")
438
 
439
  audio_clip = AudioFileClip(segment_audio_path)