File size: 14,067 Bytes
e793ef5 eea784a c7ac97c 219f1d3 47f955a 6319910 9d68248 1acdf97 d5c151e 9c80aa8 cb60a18 82ed6ca e26face 1efc480 e26face 0c64b03 1247d9c 0c64b03 e26face 1acdf97 8216457 d5c151e 9d68248 219f1d3 9d68248 9c80aa8 9d68248 9c80aa8 9d68248 2b22edd 92533b0 a6cf7f8 9f53b30 9d68248 2b22edd 1acdf97 2b22edd 9d68248 9f53b30 9d68248 9f53b30 9d68248 9f53b30 9d68248 9c80aa8 9d68248 dd9a5ec 9d68248 07652a2 9d68248 bbd65e5 9d68248 219f1d3 9d68248 dd9a5ec 8ed8cba 50824cf 35e86e9 558e9b3 50824cf 9d68248 219f1d3 9d68248 eea784a 4831c39 88cf326 4831c39 3734b02 7d3e4d6 3734b02 4831c39 c9e6abe 88cf326 4278613 88cf326 4278613 88cf326 c9e6abe 4831c39 10fc6a1 a742df8 10fc6a1 94d7fe8 10fc6a1 eea784a c9e6abe eea784a c9e6abe 9d68248 c9e6abe 10fc6a1 c7ac97c c9e6abe eea784a 10fc6a1 eea784a 10fc6a1 c9e6abe 61572ab 10fc6a1 bc9d718 10fc6a1 c9e6abe 8f5743b c9e6abe cb60a18 c9e6abe eea784a 158d2de ffcb48f e5a54e6 65be1ca 486838c 65be1ca aefab09 b587d5e e5a54e6 3734b02 4831c39 2616c58 4831c39 b82606e 4831c39 158d2de 3734b02 cb60a18 3734b02 844f5c9 f4793ba cb60a18 f4793ba e26face 844f5c9 e26face cb60a18 e26face 3734b02 c9e6abe 4831c39 cb60a18 3734b02 b587d5e c9e6abe 830dab9 cb60a18 b587d5e 830dab9 82ed6ca e26face 82ed6ca e26face 82ed6ca ffcb48f eea784a e793ef5 9d68248 eea784a 9d68248 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 |
import gradio as gr
from datetime import datetime
import random
from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_read
from moviepy import (
ImageClip,
VideoFileClip,
TextClip,
CompositeVideoClip,
AudioFileClip,
concatenate_videoclips
)
from gtts import gTTS
import subprocess
import speech_recognition as sr
import json
from nltk.tokenize import sent_tokenize
import logging
from textblob import TextBlob
import whisper
import time
import sqlite3
# Secret passcode: typing this exact string into the feedback box returns
# the feedback database file for download instead of storing a comment
# (see handle_feedback).
PASSCODE = "show_feedback_db"
# Custom CSS for the Gradio Dataframe: fixes row height, enables text
# wrapping, sets column widths, and hides the 4th ("end") column.
css = """
/* Adjust row height */
.dataframe-container tr {
height: 50px !important;
}
/* Ensure text wrapping and prevent overflow */
.dataframe-container td {
white-space: normal !important;
word-break: break-word !important;
}
/* Set column widths */
[data-testid="block-container"] .scrolling-dataframe th:nth-child(1),
[data-testid="block-container"] .scrolling-dataframe td:nth-child(1) {
width: 6%; /* Start column */
}
[data-testid="block-container"] .scrolling-dataframe th:nth-child(2),
[data-testid="block-container"] .scrolling-dataframe td:nth-child(2) {
width: 47%; /* Original text */
}
[data-testid="block-container"] .scrolling-dataframe th:nth-child(3),
[data-testid="block-container"] .scrolling-dataframe td:nth-child(3) {
width: 47%; /* Translated text */
}
[data-testid="block-container"] .scrolling-dataframe th:nth-child(4),
[data-testid="block-container"] .scrolling-dataframe td:nth-child(4) {
display: none !important;
}
"""
# Store user feedback, or hand back the SQLite database when the passcode is entered
def handle_feedback(feedback):
    """Persist a feedback comment, or grant database access for the passcode.

    Returns:
        (message, file_path) tuple; file_path is "feedback.db" only when the
        secret passcode was entered, otherwise None.
    """
    cleaned = feedback.strip()  # Clean up leading/trailing whitespace
    if not cleaned:
        return "Feedback cannot be empty.", None
    if cleaned == PASSCODE:
        # Passcode entered: expose the database file for download
        return "Access granted! Download the database file below.", "feedback.db"
    # Normal path: append the comment to the studio_feedback table
    with sqlite3.connect("feedback.db") as conn:
        conn.execute("CREATE TABLE IF NOT EXISTS studio_feedback (id INTEGER PRIMARY KEY, comment TEXT)")
        conn.execute("INSERT INTO studio_feedback (comment) VALUES (?)", (cleaned,))
        conn.commit()
    return "Thank you for your feedback!", None
# Configure root logging once at import time. DEBUG is very verbose and
# suited to development; consider INFO for production deployments.
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s")
# Module-level logger shared by every function in this file
logger = logging.getLogger(__name__)
def list_available_fonts():
    """List the font file paths known to fontconfig via the `fc-list` tool.

    Returns:
        list[str]: one font file path per entry; an empty list when
        `fc-list` fails or is not installed on the host.
    """
    try:
        # fc-list interprets "\n" in its own format language, so the
        # literal backslash-n is intentional here.
        result = subprocess.run(
            ["fc-list", "--format", "%{file}\\n"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            check=True
        )
    except subprocess.CalledProcessError as e:
        logger.error(f"Error while listing fonts: {e.stderr}")
        return []
    except FileNotFoundError:
        # Fix: the original only caught CalledProcessError, so a host
        # without fontconfig crashed the whole pipeline with an uncaught
        # FileNotFoundError from subprocess.run.
        logger.error("fc-list executable not found; is fontconfig installed?")
        return []
    fonts = result.stdout.splitlines()
    logger.debug(f"Available fonts:\n{fonts}")
    return fonts
def split_into_sentences(text):
    """Split *text* into a list of sentence strings using TextBlob."""
    return [str(sentence) for sentence in TextBlob(text).sentences]
def transcribe_video(video_path):
    """Extract the audio track of *video_path* and transcribe it with Whisper.

    Returns:
        list[dict]: one {"start", "end", "text"} dict per Whisper segment.
    """
    # Dump the audio track to a WAV file that Whisper can consume
    audio_path = "audio.wav"
    video = VideoFileClip(video_path)
    video.audio.write_audiofile(audio_path)
    # "base" balances speed and accuracy; tiny/small/medium/large also exist
    model = whisper.load_model("base")
    result = model.transcribe(audio_path, word_timestamps=True)
    # Keep just the timing and text of each segment
    return [
        {"start": seg["start"], "end": seg["end"], "text": seg["text"]}
        for seg in result["segments"]
    ]
# Map a target-language code onto its Helsinki-NLP translation checkpoint
def get_translation_model(target_language):
    """Return the HuggingFace model id that translates English to *target_language*.

    Unknown language codes fall back to the English-to-Chinese model.
    """
    default_model = "Helsinki-NLP/opus-mt-en-zh"  # fallback: English to Chinese
    supported = {
        "es": "Helsinki-NLP/opus-mt-en-es",  # English to Spanish
        "fr": "Helsinki-NLP/opus-mt-en-fr",  # English to French
        "zh": "Helsinki-NLP/opus-mt-en-zh",  # English to Chinese
        # Add more languages as needed
    }
    return supported.get(target_language, default_model)
def translate_text(transcription_json, target_language):
    """Translate every transcribed segment into *target_language*.

    Args:
        transcription_json: list of {"start", "end", "text"} dicts
            (as produced by transcribe_video).
        target_language: language code understood by get_translation_model.

    Returns:
        list[dict]: {"start", "original", "translated", "end"} per segment.
    """
    model_id = get_translation_model(target_language)
    logger.debug(f"Translation model: {model_id}")
    translator = pipeline("translation", model=model_id)
    translated_json = []
    for segment in transcription_json:
        source_text = segment["text"]
        # The pipeline returns a list with one result dict per input string
        target_text = translator(source_text)[0]['translation_text']
        translated_json.append({
            "start": segment["start"],
            "original": source_text,
            "translated": target_text,
            "end": segment["end"],
        })
        # Log the components being added to translated_json
        logger.debug("Adding to translated_json: start=%s, original=%s, translated=%s, end=%s",
                     segment["start"], source_text, target_text, segment["end"])
    return translated_json
def add_transcript_to_video(video_path, translated_json, output_path):
    """Burn translated subtitles into *video_path* and write to *output_path*.

    Args:
        video_path: source video file path.
        translated_json: list of dicts carrying "start", "end", "translated"
            keys (as produced by translate_text).
        output_path: destination file for the subtitled video.

    Raises:
        ValueError: if an entry is not a dict containing "translated".
    """
    video = VideoFileClip(video_path)
    logger.debug("Full translated_json: %s", translated_json)
    # Noto Sans SC covers CJK glyphs that the default font may lack
    font_path = "./NotoSansSC-Regular.ttf"
    text_clips = []
    # Fix: the original iterated translated_json twice — a first loop that
    # only logged each entry (shadowing the second loop's variable) and a
    # second that did the work. One pass does both.
    for entry in translated_json:
        logger.debug("Processing entry: %s", entry)
        if not (isinstance(entry, dict) and "translated" in entry):
            raise ValueError(f"Invalid entry format: {entry}")
        txt_clip = TextClip(
            text=entry["translated"], font=font_path, method='caption', color='yellow', size=video.size
        ).with_start(entry["start"]).with_duration(entry["end"] - entry["start"]).with_position(('bottom')).with_opacity(0.7)
        text_clips.append(txt_clip)
    # Overlay all subtitle clips on the original footage
    final_video = CompositeVideoClip([video] + text_clips)
    try:
        final_video.write_videofile(output_path, codec='libx264', audio_codec='aac')
    finally:
        # Fix: release the ffmpeg readers so the source file is not left open
        final_video.close()
        video.close()
# Mock helper: pretend a piece of content was published on a platform
def mock_post_to_platform(platform, content_title):
    """Return a canned success message for posting *content_title* on *platform*."""
    return f"Content '{content_title}' successfully posted on {platform}!"
def mock_analytics():
    """Return fabricated per-platform view counts and engagement rates."""
    def fake_stats(views_lo, views_hi, rate_lo, rate_hi):
        # Random values stand in for real analytics; randint then uniform,
        # matching the original call order per platform.
        return {
            "Views": random.randint(views_lo, views_hi),
            "Engagement Rate": f"{random.uniform(rate_lo, rate_hi):.2f}%",
        }
    return {
        "YouTube": fake_stats(1000, 5000, 5, 15),
        "Instagram": fake_stats(500, 3000, 10, 20),
    }
def update_translations(file, edited_table):
    """
    Re-render the video using the user-edited translations from the UI table.

    Args:
        file: uploaded video file object (must expose .name, the path).
        edited_table: pandas DataFrame with columns
            "start", "original", "translated", "end".

    Returns:
        (output_video_path, status_message) tuple.

    Raises:
        ValueError: wrapping any error raised while rebuilding the video.
    """
    output_video_path = "output_video.mp4"
    logger.debug(f"Editable Table: {edited_table}")
    try:
        start_time = time.time()  # Time the re-render for the status display
        # Convert the edited DataFrame rows back into the dict format
        # expected by add_transcript_to_video
        updated_translations = [
            {
                "start": row["start"],  # Access by column name
                "original": row["original"],
                "translated": row["translated"],
                "end": row["end"]
            }
            for _, row in edited_table.iterrows()
        ]
        # Re-burn the subtitles with the updated translations
        add_transcript_to_video(file.name, updated_translations, output_video_path)
        elapsed_time = time.time() - start_time
        elapsed_time_display = f"Updates applied successfully in {elapsed_time:.2f} seconds."
        return output_video_path, elapsed_time_display
    except Exception as e:
        # Fix: chain with `from e` so the original traceback (the root
        # cause) is preserved instead of being discarded.
        raise ValueError(f"Error updating translations: {e}") from e
def generate_voiceover(translated_json, language, output_audio_path):
    """Synthesize a single gTTS voiceover covering all translated segments.

    NOTE(review): gTTS presumably writes MP3 data even when
    *output_audio_path* ends in .wav — verify downstream readers cope.
    """
    # Join every translated sentence into one continuous utterance
    script = " ".join(item["translated"] for item in translated_json)
    gTTS(text=script, lang=language).save(output_audio_path)
def replace_audio_in_video(video_path, new_audio_path, final_video_path):
    """Replace the audio track of *video_path* with *new_audio_path*.

    Writes the result to *final_video_path* (H.264 video, AAC audio).
    """
    video = VideoFileClip(video_path)
    new_audio = AudioFileClip(new_audio_path)
    # Fix: MoviePy v2 renamed set_audio -> with_audio. This file uses the
    # v2 import style and with_* API elsewhere, so the original
    # video.set_audio(...) call would raise AttributeError at runtime.
    video = video.with_audio(new_audio)
    try:
        # Save the final output
        video.write_videofile(final_video_path, codec="libx264", audio_codec="aac")
    finally:
        # Release ffmpeg readers held by the clips
        video.close()
        new_audio.close()
def upload_and_manage(file, language, mode="transcription"):
    """Full pipeline: transcribe, translate, subtitle, optionally re-voice.

    Args:
        file: uploaded media file object (must expose .name) or None.
        language: target language code for translation/voiceover.
        mode: "Transcription" or "Transcription with Voiceover".

    Returns:
        (translated_json, editable_table_rows, video_path, status_message);
        on failure the first three are None, [], None and the message
        describes the error.
    """
    if file is None:
        return None, [], None, "No file uploaded. Please upload a video/audio file."
    try:
        start_time = time.time()  # Time the full pipeline for the UI
        output_video_path = "output_video.mp4"
        voiceover_path = "voiceover.wav"
        final_video_path = output_video_path
        list_available_fonts()
        # Step 1: Transcribe the uploaded media with timestamps
        transcription_json = transcribe_video(file.name)
        # Step 2: Translate every transcribed segment
        translated_json = translate_text(transcription_json, language)
        # Step 3: Burn the translated subtitles into the video
        add_transcript_to_video(file.name, translated_json, output_video_path)
        # Step 4 (optional): swap the audio for a synthesized voiceover
        if mode == "Transcription with Voiceover":
            generate_voiceover(translated_json, language, voiceover_path)
            # Fix: the original wrote the voiceover result over its own
            # input file (same path for source and destination), which
            # corrupts the video being read during the write.
            final_video_path = "output_video_voiceover.mp4"
            replace_audio_in_video(output_video_path, voiceover_path, final_video_path)
        # Rows for the editable Gradio Dataframe
        editable_table = [
            [float(entry["start"]), entry["original"], entry["translated"], float(entry["end"])]
            for entry in translated_json
        ]
        elapsed_time = time.time() - start_time
        elapsed_time_display = f"Processing completed in {elapsed_time:.2f} seconds."
        return translated_json, editable_table, final_video_path, elapsed_time_display
    except Exception as e:
        return None, [], None, f"An error occurred: {str(e)}"
# Build the Gradio UI (single page: processing controls, translation editor,
# and a feedback column)
def build_interface():
    """Construct and return the Gradio Blocks app for video localization.

    Layout: an upload/controls column, an editable-translation column, and
    a feedback column. Click handlers wire upload_and_manage,
    update_translations, and handle_feedback into the UI.
    """
    with gr.Blocks(css=css) as demo:
        gr.Markdown("## Video Localization")
        with gr.Row():
            with gr.Column(scale=4):
                file_input = gr.File(label="Upload Video/Audio File")
                language_input = gr.Dropdown(["en", "es", "fr", "zh"], label="Select Language")  # Language codes
                process_mode = gr.Radio(choices=["Transcription", "Transcription with Voiceover"], label="Choose Processing Type", value="Transcription")
                submit_button = gr.Button("Post and Process")
                # Holds the raw translated JSON produced by upload_and_manage
                editable_translations = gr.State(value=[])
            with gr.Column(scale=8):
                gr.Markdown("## Edit Translations")
                # Editable table of translated segments; only the two text
                # columns are editable, the timestamp columns are locked
                editable_table = gr.Dataframe(
                    value=[],  # Default to an empty list to avoid undefined values
                    headers=["start", "original", "translated", "end"],
                    datatype=["number", "str", "str", "number"],
                    row_count=1,  # Initially empty
                    col_count=4,
                    interactive=[False, True, True, False],  # Control editability
                    label="Edit Translations",
                    wrap=True  # Enables text wrapping if supported
                )
                save_changes_button = gr.Button("Save Changes")
                processed_video_output = gr.File(label="Download Processed Video", interactive=True)  # Download button
                elapsed_time_display = gr.Textbox(label="Elapsed Time", lines=1, interactive=False)
            with gr.Column(scale=1):
                gr.Markdown("**Feedback**")
                feedback_input = gr.Textbox(
                    placeholder="Leave your feedback here...",
                    label=None,
                    lines=3,
                )
                feedback_btn = gr.Button("Submit Feedback")
                response_message = gr.Textbox(label=None, lines=1, interactive=False)
                db_download = gr.File(label="Download Database File", visible=False)
        # Wrap handle_feedback: reveal the DB download only when access was granted
        def feedback_submission(feedback):
            message, file_path = handle_feedback(feedback)
            if file_path:
                return message, gr.update(value=file_path, visible=True)
            return message, gr.update(visible=False)
        # Re-render the video with the user's edited translations
        save_changes_button.click(
            update_translations,
            inputs=[file_input, editable_table],
            outputs=[processed_video_output, elapsed_time_display]
        )
        # Run the full transcribe/translate/subtitle pipeline
        submit_button.click(
            upload_and_manage,
            inputs=[file_input, language_input, process_mode],
            outputs=[editable_translations, editable_table, processed_video_output, elapsed_time_display]
        )
        # Connect the feedback button to feedback_submission
        feedback_btn.click(
            feedback_submission,
            inputs=[feedback_input],
            outputs=[response_message, db_download]
        )
    return demo
# Launch the Gradio interface.
# NOTE(review): this runs at import time, so merely importing the module
# starts the server — consider an `if __name__ == "__main__":` guard.
demo = build_interface()
demo.launch()