Update app.py
app.py CHANGED
```diff
@@ -3,14 +3,21 @@ from datetime import datetime
 import random
 from transformers import pipeline
 from transformers.pipelines.audio_utils import ffmpeg_read
-
+from moviepy import (
+    ImageClip,
+    VideoFileClip,
+    TextClip,
+    CompositeVideoClip,
+    AudioFileClip,
+    concatenate_videoclips
+)
 import speech_recognition as sr
 import json
 from nltk.tokenize import sent_tokenize
 
 def transcribe_video(video_path):
     # Load the video file and extract audio
-    video =
+    video = VideoFileClip(video_path)
     audio_path = "audio.wav"
     video.audio.write_audiofile(audio_path)
 
```
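Note that `from moviepy import (...)` is the MoviePy 2.x import path; in 1.x these names lived under `moviepy.editor`. As a hedged, minimal sketch of the extract-then-transcribe flow this function sets up (the Whisper checkpoint and file names below are assumptions for illustration, not part of this commit):

```python
from moviepy import VideoFileClip          # MoviePy >= 2.0 import style
from transformers import pipeline

# Hypothetical ASR checkpoint; any automatic-speech-recognition model works.
asr = pipeline("automatic-speech-recognition", model="openai/whisper-small")

video = VideoFileClip("input.mp4")         # assumed input file
audio_path = "audio.wav"
video.audio.write_audiofile(audio_path)    # extract the soundtrack, as in the diff

# return_timestamps=True yields per-chunk timing alongside the text.
result = asr(audio_path, return_timestamps=True)
print(result["text"])
```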
```diff
@@ -79,14 +86,14 @@ def translate_text(timestamps_json, target_language):
 
 def add_transcript_to_video(video_path, timestamps, output_path):
     # Load the video file
-    video =
+    video = VideoFileClip(video_path)
 
     # Create text clips based on timestamps
     text_clips = []
 
     for entry in timestamps:
         # Create a text clip for each sentence
-        txt_clip =
+        txt_clip = TextClip(entry["text"], fontsize=24, color='white', bg_color='black', size=video.size)
 
         # Set the start time and duration for each text clip
         txt_clip = txt_clip.set_start(entry["start"]).set_duration(3).set_position(('bottom')).set_opacity(0.7)  # Display each sentence for 3 seconds
```
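One caveat worth flagging: the unchanged lines still use MoviePy 1.x method names (`set_start`, `set_duration`, `set_position`, `set_opacity`) and the 1.x `fontsize=` keyword, while the new top-level `from moviepy import ...` implies MoviePy 2.x, where those were renamed. A hedged sketch of a 2.x-compatible equivalent of this loop body, assuming MoviePy >= 2.0:

```python
# Assumes MoviePy >= 2.0: .set_* became .with_*, fontsize became font_size,
# and TextClip takes the text via the text= keyword.
# Depending on the 2.x minor version, a font= path may also be required.
txt_clip = TextClip(text=entry["text"], font_size=24, color="white",
                    bg_color="black", size=video.size)
txt_clip = (txt_clip
            .with_start(entry["start"])
            .with_duration(3)            # display each sentence for 3 seconds
            .with_position("bottom")
            .with_opacity(0.7))
```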
```diff
@@ -95,7 +102,7 @@ def add_transcript_to_video(video_path, timestamps, output_path):
         text_clips.append(txt_clip)
 
     # Overlay all text clips on the original video
-    final_video =
+    final_video = CompositeVideoClip([video] + text_clips)
 
     # Write the result to a file
     final_video.write_videofile(output_path, codec='libx264', audio_codec='aac')
```
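For completeness, a hypothetical end-to-end call of the patched function; the paths and timestamp entries below are illustrative only and do not come from the commit:

```python
# Illustrative data only; each entry supplies the text and start time
# consumed by the loop in add_transcript_to_video.
timestamps = [
    {"text": "Hello and welcome.", "start": 0.0},
    {"text": "Let's get started.", "start": 3.5},
]
add_transcript_to_video("input.mp4", timestamps, "subtitled.mp4")
```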