import logging
import random
from datetime import datetime

import gradio as gr
from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_read

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load the Whisper ASR pipeline once at startup so every request reuses the same model.
whisper_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-medium")

def transcribe_audio_from_file(file_path):
    """
    Transcribes audio from a local file using the Whisper pipeline.

    Args:
        file_path (str): Path to the local media file.

    Returns:
        str: Transcription text if successful, otherwise None.
    """
    try:
        # return_timestamps=True lets Whisper chunk and transcribe audio longer than 30 seconds.
        transcription = whisper_pipeline(file_path, return_timestamps=True)
        logger.debug(f"Transcription: {transcription['text']}")
        return transcription["text"]
    except Exception as e:
        logger.error(f"An error occurred during transcription: {e}")
        return None

# Translation pipelines are created per request in translate_text(),
# using the checkpoint that matches the selected target language.

def get_translation_model(target_language):
    # Map a language code to its MarianMT checkpoint; fall back to English→French.
    model_map = {
        "es": "Helsinki-NLP/opus-mt-en-es",
        "fr": "Helsinki-NLP/opus-mt-en-fr",
        "zh": "Helsinki-NLP/opus-mt-en-zh",
    }
    return model_map.get(target_language, "Helsinki-NLP/opus-mt-en-fr")


def translate_text(text, target_language):
    translation_model_id = get_translation_model(target_language)
    translator = pipeline("translation", model=translation_model_id)
    return translator(text)[0]["translation_text"]

def mock_post_to_platform(platform, content_title):
    return f"Content '{content_title}' successfully posted on {platform}!"


def mock_analytics():
    return {
        "YouTube": {"Views": random.randint(1000, 5000), "Engagement Rate": f"{random.uniform(5, 15):.2f}%"},
        "Instagram": {"Views": random.randint(500, 3000), "Engagement Rate": f"{random.uniform(10, 20):.2f}%"},
    }

def upload_and_manage(file, platform, language):
    if file is None:
        return "Please upload a video/audio file.", None, None, None

    # Step 1: Transcribe the uploaded media with Whisper.
    transcription = transcribe_audio_from_file(file.name)

    # Step 2: Translate the transcription into the selected language.
    translation = translate_text(transcription, language)

    # Step 3: Simulate posting the content to the chosen platform.
    post_message = mock_post_to_platform(platform, file.name)

    # Step 4: Simulate analytics for the dashboard tab.
    analytics = mock_analytics()

    return post_message, transcription, translation, analytics

def generate_dashboard(analytics):
    if not analytics:
        return "No analytics available."

    dashboard = "Platform Analytics:\n"
    for platform, data in analytics.items():
        dashboard += f"\n{platform}:\n"
        for metric, value in data.items():
            dashboard += f"  {metric}: {value}\n"
    return dashboard

def build_interface():
    with gr.Blocks() as demo:
        # Shared state so the analytics produced in one tab can be read from the other.
        analytics_state = gr.State()

        with gr.Tab("Content Management"):
            gr.Markdown("## Integrated Content Management")
            with gr.Row():
                file_input = gr.File(label="Upload Video/Audio File")
                platform_input = gr.Dropdown(["YouTube", "Instagram"], label="Select Platform")
                language_input = gr.Dropdown(["en", "es", "fr", "zh"], label="Select Language")

            submit_button = gr.Button("Post and Process")

            with gr.Row():
                post_output = gr.Textbox(label="Posting Status", interactive=False)
                transcription_output = gr.Textbox(label="Transcription", interactive=False)
                translation_output = gr.Textbox(label="Translation", interactive=False)

            submit_button.click(
                upload_and_manage,
                inputs=[file_input, platform_input, language_input],
                outputs=[post_output, transcription_output, translation_output, analytics_state],
            )

        with gr.Tab("Analytics Dashboard"):
            gr.Markdown("## Content Performance Analytics")
            analytics_output = gr.Textbox(label="Dashboard", interactive=False)
            generate_dashboard_button = gr.Button("Generate Dashboard")

            generate_dashboard_button.click(generate_dashboard, inputs=[analytics_state], outputs=[analytics_output])

    return demo


demo = build_interface()
demo.launch()