qqwjq1981 committed on
Commit
de6304d
·
verified ·
1 Parent(s): 373bc7b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -53
app.py CHANGED
@@ -189,7 +189,6 @@ def translate_text(transcription_json, source_language, target_language):
189
  # Return the translated timestamps as a JSON string
190
  return translated_json
191
 
192
-
193
  def update_translations(file, edited_table):
194
  """
195
  Update the translations based on user edits in the Gradio Dataframe.
@@ -260,7 +259,7 @@ def add_transcript_voiceover(video_path, translated_json, output_path, add_voice
260
  if add_voiceover:
261
  segment_audio_path = f"segment_{i}_voiceover.wav"
262
  generate_voiceover([entry], target_language, segment_audio_path)
263
- audio_segment = AudioFileClip(segment_audio_path).subclip(0, entry["end"] - entry["start"])
264
  audio_segments.append(audio_segment)
265
  else:
266
  raise ValueError(f"Invalid entry format: {entry}")
@@ -271,7 +270,7 @@ def add_transcript_voiceover(video_path, translated_json, output_path, add_voice
271
  # Concatenate all audio segments if voiceover was added
272
  if add_voiceover:
273
  final_audio = sum(audio_segments, AudioFileClip("silent_audio.wav")) # Mix all audio segments
274
- final_audio = final_audio.subclip(0, video.duration) # Ensure the final audio matches the video duration
275
  final_video = final_video.set_audio(final_audio)
276
 
277
  # Write the result to a file
@@ -291,56 +290,6 @@ def generate_voiceover(translated_json, language, output_audio_path):
291
  tts = gTTS(text=full_text, lang=language)
292
  tts.save(output_audio_path)
293
 
294
- def replace_audio_in_video(video_path: str, new_audio_path: str, final_video_path: str):
295
- """
296
- Replace the audio in the video with the provided new audio.
297
- """
298
- try:
299
- # Load the video file
300
- logger.info(f"Loading video from: {video_path}")
301
- video = VideoFileClip(video_path)
302
-
303
- # Load the new audio file
304
- logger.info(f"Loading audio from: {new_audio_path}")
305
- new_audio = AudioFileClip(new_audio_path)
306
-
307
- # Ensure the audio matches the video's duration
308
- audio_duration = new_audio.duration
309
- video_duration = video.duration
310
- if audio_duration < video_duration:
311
- logger.info(f"Audio is shorter than video. Looping audio to match video duration.")
312
- new_audio = new_audio.fx("audio_loop", duration=video_duration)
313
- elif audio_duration > video_duration:
314
- logger.info(f"Audio is longer than video. Truncating audio.")
315
- new_audio = new_audio.subclip(0, video_duration)
316
-
317
- # Set the new audio to the video
318
- logger.info("Replacing video audio...")
319
- video = video.with_audio(new_audio)
320
-
321
- # Save the final video
322
- logger.info(f"Saving the final video to: {final_video_path}")
323
- video.write_videofile(final_video_path, codec="libx264", audio_codec="aac")
324
-
325
- logger.info("Video processing completed successfully.")
326
- except Exception as e:
327
- logger.error(f"Error replacing audio in video: {e}")
328
-
329
- def check_for_time_gaps(translated_json):
330
- """
331
- Ensure there are no gaps in the timestamps, and adjust if necessary.
332
- """
333
- for i in range(1, len(translated_json)):
334
- prev_end = translated_json[i - 1]["end"]
335
- curr_start = translated_json[i]["start"]
336
-
337
- if prev_end > curr_start:
338
- logger.warning(f"Found gap between segments at {i}. Adjusting timestamps.")
339
- # Optionally, adjust the start time of the next segment
340
- translated_json[i]["start"] = prev_end # You can adjust this to smooth the transition
341
-
342
- return translated_json
343
-
344
  def upload_and_manage(file, target_language, mode="transcription"):
345
  if file is None:
346
  logger.info("No file uploaded. Please upload a video/audio file.")
 
189
  # Return the translated timestamps as a JSON string
190
  return translated_json
191
 
 
192
  def update_translations(file, edited_table):
193
  """
194
  Update the translations based on user edits in the Gradio Dataframe.
 
259
  if add_voiceover:
260
  segment_audio_path = f"segment_{i}_voiceover.wav"
261
  generate_voiceover([entry], target_language, segment_audio_path)
262
+ audio_segment = AudioFileClip(segment_audio_path).set_duration(entry["end"] - entry["start"])
263
  audio_segments.append(audio_segment)
264
  else:
265
  raise ValueError(f"Invalid entry format: {entry}")
 
270
  # Concatenate all audio segments if voiceover was added
271
  if add_voiceover:
272
  final_audio = sum(audio_segments, AudioFileClip("silent_audio.wav")) # Mix all audio segments
273
+ final_audio = final_audio.set_duration(video.duration) # Ensure the final audio matches the video duration
274
  final_video = final_video.set_audio(final_audio)
275
 
276
  # Write the result to a file
 
290
  tts = gTTS(text=full_text, lang=language)
291
  tts.save(output_audio_path)
292
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
293
  def upload_and_manage(file, target_language, mode="transcription"):
294
  if file is None:
295
  logger.info("No file uploaded. Please upload a video/audio file.")