# Studio_V0 / app.py
import logging
import random

import gradio as gr
from transformers import pipeline

# Module-level logger used by the helpers below.
logger = logging.getLogger(__name__)

# Initialize the Whisper pipeline
whisper_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-medium")
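
# A minimal sketch of an alternative initialization (an assumption, not part
# of the original app): with a CUDA GPU available, device=0 places the model
# on the GPU and chunk_length_s=30 enables chunked long-form transcription.
# whisper_pipeline = pipeline(
#     "automatic-speech-recognition",
#     model="openai/whisper-medium",
#     chunk_length_s=30,
#     device=0,
# )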

def transcribe_audio_from_file(file_path):
    """
    Transcribes audio from a local file using the Whisper pipeline.

    Args:
        file_path (str): Path to the local media file.

    Returns:
        str: Transcription text if successful, otherwise None.
    """
    try:
        # Transcribe the audio using Whisper
        transcription = whisper_pipeline(file_path, return_timestamps=True)
        logger.debug(f"Transcription: {transcription['text']}")
        return transcription["text"]
    except Exception as e:
        logger.error(f"An error occurred during transcription: {e}")
        return None
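
# Example usage (the file path is hypothetical):
# text = transcribe_audio_from_file("samples/interview.mp3")
# if text:
#     print(text)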

# Function to get the appropriate translation model based on target language
def get_translation_model(target_language):
    # Map of target languages to their corresponding model names
    model_map = {
        "es": "Helsinki-NLP/opus-mt-en-es",  # English to Spanish
        "fr": "Helsinki-NLP/opus-mt-en-fr",  # English to French
        "zh": "Helsinki-NLP/opus-mt-en-zh",  # English to Chinese
        # Add more languages as needed
    }
    return model_map.get(target_language, "Helsinki-NLP/opus-mt-en-fr")  # Default to French if not found

def translate_text(text, target_language):
    """Translate English text into the selected target language."""
    translation_model_id = get_translation_model(target_language)
    # Note: this loads a new translation pipeline on every call; caching one
    # pipeline per language would avoid repeated model loads.
    translator = pipeline("translation", model=translation_model_id)
    return translator(text)[0]["translation_text"]
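
# Example usage (hypothetical input; the exact output depends on the model):
# translate_text("Hello, world!", "es")  # -> e.g. "Hola, mundo!"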

# Mock functions for platform actions and analytics
def mock_post_to_platform(platform, content_title):
    return f"Content '{content_title}' successfully posted on {platform}!"


def mock_analytics():
    return {
        "YouTube": {"Views": random.randint(1000, 5000), "Engagement Rate": f"{random.uniform(5, 15):.2f}%"},
        "Instagram": {"Views": random.randint(500, 3000), "Engagement Rate": f"{random.uniform(10, 20):.2f}%"},
    }

# Core functionalities
def upload_and_manage(file, platform, language):
    if file is None:
        return "Please upload a video/audio file.", None, None, None

    # Transcribe audio from the uploaded media file
    transcription = transcribe_audio_from_file(file.name)
    if transcription is None:
        return "Transcription failed. Please try another file.", None, None, None

    # Translate transcription to the selected language
    translation = translate_text(transcription, language)

    # Mock posting action
    post_message = mock_post_to_platform(platform, file.name)

    # Mock analytics generation
    analytics = mock_analytics()

    return post_message, transcription, translation, analytics

def generate_dashboard(analytics):
    if not analytics:
        return "No analytics available."

    dashboard = "Platform Analytics:\n"
    for platform, data in analytics.items():
        dashboard += f"\n{platform}:\n"
        for metric, value in data.items():
            dashboard += f"  {metric}: {value}\n"
    return dashboard
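
# Illustrative output (the metrics are randomly generated, so the numbers
# below will differ on every run):
#
#   Platform Analytics:
#
#   YouTube:
#     Views: 3412
#     Engagement Rate: 9.87%
#
#   Instagram:
#     Views: 1875
#     Engagement Rate: 14.21%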

# Gradio Interface with Tabs
def build_interface():
    with gr.Blocks() as demo:
        # Shared state carrying analytics from the "Content Management" tab
        # to the "Analytics Dashboard" tab.
        analytics_state = gr.State()

        with gr.Tab("Content Management"):
            gr.Markdown("## Integrated Content Management")
            with gr.Row():
                file_input = gr.File(label="Upload Video/Audio File")
                platform_input = gr.Dropdown(["YouTube", "Instagram"], label="Select Platform")
                language_input = gr.Dropdown(["es", "fr", "zh"], label="Select Language")  # Target language codes
            submit_button = gr.Button("Post and Process")
            with gr.Row():
                post_output = gr.Textbox(label="Posting Status", interactive=False)
                transcription_output = gr.Textbox(label="Transcription", interactive=False)
                translation_output = gr.Textbox(label="Translation", interactive=False)
            submit_button.click(
                upload_and_manage,
                inputs=[file_input, platform_input, language_input],
                outputs=[post_output, transcription_output, translation_output, analytics_state],
            )

        with gr.Tab("Analytics Dashboard"):
            gr.Markdown("## Content Performance Analytics")
            analytics_output = gr.Textbox(label="Dashboard", interactive=False)
            generate_dashboard_button = gr.Button("Generate Dashboard")
            generate_dashboard_button.click(
                generate_dashboard,
                inputs=[analytics_state],
                outputs=[analytics_output],
            )

    return demo

if __name__ == "__main__":
    demo = build_interface()
    demo.launch()
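
# Deployment note (an assumption, not confirmed by the source): on Hugging
# Face Spaces the app is served automatically; when running locally you could
# pass share=True to demo.launch() to get a temporary public URL.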