import gradio as gr
import random
import subprocess
import logging
import sqlite3
from transformers import pipeline
from moviepy import (
    VideoFileClip,
    TextClip,
    CompositeVideoClip,
)
from textblob import TextBlob
import whisper
# Define the passcode
PASSCODE = "show_feedback_db"
# Function to save feedback or provide access to the database file
def handle_feedback(feedback):
    feedback = feedback.strip()  # Clean up leading/trailing whitespace
    if not feedback:
        return "Feedback cannot be empty.", None
    if feedback == PASSCODE:
        # Provide access to the feedback.db file
        return "Access granted! Download the database file below.", "feedback.db"
    else:
        # Save feedback to the database
        with sqlite3.connect("feedback.db") as conn:
            cursor = conn.cursor()
            cursor.execute("CREATE TABLE IF NOT EXISTS studio_feedback (id INTEGER PRIMARY KEY, comment TEXT)")
            cursor.execute("INSERT INTO studio_feedback (comment) VALUES (?)", (feedback,))
            conn.commit()
        return "Thank you for your feedback!", None
# Configure logging
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
def list_available_fonts():
    try:
        # Run the 'fc-list' command to list fonts
        result = subprocess.run(
            ["fc-list", "--format", "%{file}\\n"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            check=True
        )
        fonts = result.stdout.splitlines()
        logger.debug(f"Available fonts:\n{fonts}")
        return fonts
    except subprocess.CalledProcessError as e:
        logger.error(f"Error while listing fonts: {e.stderr}")
        return []
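# With the format string above, each line of fc-list's stdout is the path of
# one installed font file, e.g. "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"
# (path is illustrative; it depends on the fonts installed on the host).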
def split_into_sentences(text):
    blob = TextBlob(text)
    return [str(sentence) for sentence in blob.sentences]
def transcribe_video(video_path):
    # Load the video file and extract audio
    video = VideoFileClip(video_path)
    audio_path = "audio.wav"
    video.audio.write_audiofile(audio_path)
    # Load Whisper model
    model = whisper.load_model("base")  # Options: tiny, base, small, medium, large
    # Transcribe with Whisper
    result = model.transcribe(audio_path, word_timestamps=True)
    # Extract timestamps and text
    transcript_with_timestamps = [
        {
            "start": segment["start"],
            "end": segment["end"],
            "text": segment["text"]
        }
        for segment in result["segments"]
    ]
    return transcript_with_timestamps
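# Shape of the list returned above (timings and text are illustrative):
# [{"start": 0.0, "end": 2.6, "text": " Hello and welcome."},
#  {"start": 2.6, "end": 5.1, "text": " Today we talk about localization."}]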
# Function to get the appropriate translation model based on target language
def get_translation_model(target_language):
    # Map of target languages to their corresponding model names
    model_map = {
        "es": "Helsinki-NLP/opus-mt-en-es",  # English to Spanish
        "fr": "Helsinki-NLP/opus-mt-en-fr",  # English to French
        "zh": "Helsinki-NLP/opus-mt-en-zh",  # English to Chinese
        # Add more languages as needed
    }
    return model_map.get(target_language, "Helsinki-NLP/opus-mt-en-zh")  # Default to Chinese if not found
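# e.g. get_translation_model("fr") -> "Helsinki-NLP/opus-mt-en-fr";
# an unmapped code such as "de" falls back to the English-to-Chinese model.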
def translate_text(transcription_json, target_language):
    # Load the translation model for the specified target language
    translation_model_id = get_translation_model(target_language)
    logger.debug(f"Translation model: {translation_model_id}")
    translator = pipeline("translation", model=translation_model_id)
    # Prepare output structure
    translated_json = []
    # Translate each segment and store it with its start time
    for entry in transcription_json:
        original_text = entry["text"]
        translated_text = translator(original_text)[0]['translation_text']
        translated_json.append({
            "start": entry["start"],
            "original": original_text,
            "translated": translated_text,
            "end": entry["end"]
        })
        # Log the components being added to translated_json
        logger.debug("Adding to translated_json: start=%s, original=%s, translated=%s, end=%s",
                     entry["start"], original_text, translated_text, entry["end"])
    # Return the translated segments as a list of dicts (not a JSON string)
    return translated_json
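# Each returned entry pairs source and target text with the segment timing,
# e.g. (illustrative values):
# {"start": 0.0, "original": " Hello.", "translated": "Hola.", "end": 1.3}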
def add_transcript_to_video(video_path, translated_json, output_path):
    # Load the video file
    video = VideoFileClip(video_path)
    # Create text clips based on timestamps
    text_clips = []
    logger.debug("Full translated_json: %s", translated_json)
    font_path = "./NotoSansSC-Regular.ttf"
    for entry in translated_json:
        logger.debug("Processing entry: %s", entry)
        # Ensure `entry` is a dictionary with keys "start", "end", and "translated"
        if isinstance(entry, dict) and "translated" in entry:
            txt_clip = TextClip(
                text=entry["translated"], font=font_path, method='caption', color='yellow', size=video.size
            ).with_start(entry["start"]).with_duration(entry["end"] - entry["start"]).with_position('bottom').with_opacity(0.7)
            text_clips.append(txt_clip)
        else:
            raise ValueError(f"Invalid entry format: {entry}")
    # Overlay all text clips on the original video
    final_video = CompositeVideoClip([video] + text_clips)
    # Write the result to a file
    final_video.write_videofile(output_path, codec='libx264', audio_codec='aac')
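# Note: the chained .with_start/.with_duration/.with_position calls assume the
# MoviePy 2.x API (MoviePy 1.x used .set_start/.set_duration/.set_position).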
# Mock functions for platform actions and analytics
def mock_post_to_platform(platform, content_title):
    return f"Content '{content_title}' successfully posted on {platform}!"

def mock_analytics():
    return {
        "YouTube": {"Views": random.randint(1000, 5000), "Engagement Rate": f"{random.uniform(5, 15):.2f}%"},
        "Instagram": {"Views": random.randint(500, 3000), "Engagement Rate": f"{random.uniform(10, 20):.2f}%"},
    }
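# Example return value (numbers are randomized on every call):
# {"YouTube": {"Views": 3124, "Engagement Rate": "9.42%"},
#  "Instagram": {"Views": 1587, "Engagement Rate": "14.73%"}}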
def update_translations(file, edited_table):
    """
    Update the translations based on user edits in the Gradio Dataframe.
    """
    output_video_path = "output_video.mp4"
    logger.debug(f"Edited table: {edited_table}")
    try:
        # Convert the edited_table (list of lists) back to a list of dictionaries
        updated_translations = [
            {
                "start": row[0],       # First column
                "original": row[1],    # Second column
                "translated": row[2],  # Third column
                "end": row[3],         # Fourth column
            }
            for row in edited_table
        ]
        # Call the function to process the video with updated translations
        add_transcript_to_video(file.name, updated_translations, output_video_path)
        return output_video_path
    except Exception as e:
        raise ValueError(f"Error updating translations: {e}")
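# Expected shape of edited_table, one list per row in header order
# (values illustrative): [[0.0, " Hello.", "Hola.", 1.3], [1.3, " Bye.", "Adiós.", 2.1]]
# This assumes the Dataframe yields a list of lists; if it delivers a pandas
# DataFrame instead, it would need converting first (e.g. df.values.tolist()).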
# Core functionalities
def upload_and_manage(file, language):
    if file is None:
        # Match the three outputs wired up in build_interface
        return "Please upload a video/audio file.", [], None
    # Define the output path for the subtitled video
    output_video_path = "output_video.mp4"
    list_available_fonts()
    # Transcribe audio from uploaded media file and get timestamps
    transcription_json = transcribe_video(file.name)
    translated_json = translate_text(transcription_json, language)
    # Add transcript to video based on timestamps
    add_transcript_to_video(file.name, translated_json, output_video_path)
    # Mock posting action (you can implement this as needed)
    # post_message = mock_post_to_platform(platform, file.name)
    # Convert the translated JSON into a format for the editable table
    editable_table = [
        [float(entry["start"]), entry["original"], entry["translated"], float(entry["end"])]
        for entry in translated_json
    ]
    return translated_json, editable_table, output_video_path
# def generate_dashboard():
#     # Mock analytics generation
#     analytics = mock_analytics()
#     if not analytics:
#         return "No analytics available."
#     dashboard = "Platform Analytics:\n"
#     for platform, data in analytics.items():
#         dashboard += f"\n{platform}:\n"
#         for metric, value in data.items():
#             dashboard += f" {metric}: {value}\n"
#     return dashboard
# Gradio Interface with Tabs
def build_interface():
    with gr.Blocks() as demo:
        # with gr.Tab("Content Management"):
        gr.Markdown("## Video Localization")
        with gr.Row():
            with gr.Column(scale=4):
                file_input = gr.File(label="Upload Video/Audio File")
                # platform_input = gr.Dropdown(["YouTube", "Instagram"], label="Select Platform")
                language_input = gr.Dropdown(["en", "es", "fr", "zh"], label="Select Language")  # Language codes
                submit_button = gr.Button("Post and Process")
                editable_translations = gr.State(value=[])
            with gr.Column(scale=8):
                gr.Markdown("## Edit Translations")
                # Editable JSON Data
                editable_table = gr.Dataframe(
                    value=[],  # Default to an empty list to avoid undefined values
                    headers=["start", "original", "translated", "end"],
                    datatype=["number", "str", "str", "number"],
                    row_count=2,  # Start with two (empty) rows
                    col_count=4,
                    interactive=[False, True, True, False],  # Control per-column editability
                    label="Edit Translations",
                )
                save_changes_button = gr.Button("Save Changes")
                processed_video_output = gr.File(label="Download Processed Video", interactive=True)  # Download button
            with gr.Column(scale=1):
                gr.Markdown("**Feedback**")
                feedback_input = gr.Textbox(
                    placeholder="Leave your feedback here...",
                    label=None,
                    lines=3,
                )
                feedback_btn = gr.Button("Submit Feedback")
                response_message = gr.Textbox(label=None, lines=1, interactive=False)
                db_download = gr.File(label="Download Database File", visible=False)

        # Link the feedback handling
        def feedback_submission(feedback):
            message, file_path = handle_feedback(feedback)
            if file_path:
                return message, gr.update(value=file_path, visible=True)
            return message, gr.update(visible=False)

        save_changes_button.click(
            update_translations,
            inputs=[file_input, editable_table],
            outputs=[processed_video_output]
        )
        submit_button.click(
            upload_and_manage,
            inputs=[file_input, language_input],
            outputs=[editable_translations, editable_table, processed_video_output]
        )
        # Connect the feedback button to the feedback handler
        feedback_btn.click(
            feedback_submission,
            inputs=[feedback_input],
            outputs=[response_message, db_download]
        )

        # with gr.Tab("Analytics Dashboard"):
        #     gr.Markdown("## Content Performance Analytics")
        #     analytics_output = gr.Textbox(label="Dashboard", interactive=False)
        #     generate_dashboard_button = gr.Button("Generate Dashboard")
        #     generate_dashboard_button.click(generate_dashboard, outputs=[analytics_output])
    return demo
# Launch the Gradio interface
demo = build_interface()
demo.launch() |