# transart / app.py
# Source: Hugging Face Space "transart" by pravin0077 (commit 1b7a9d8, "Update app.py")
import os
from huggingface_hub import login
from transformers import MarianMTModel, MarianTokenizer, pipeline
import requests
import io
from PIL import Image
import gradio as gr
# Authenticate with the Hugging Face Hub.  The token is read from the
# HUGGINGFACE_API_KEY environment variable (typically a Space secret) and is
# reused below as the Bearer token for the FLUX and Mistral inference calls.
hf_token = os.getenv("HUGGINGFACE_API_KEY")
if not hf_token:
    # Fail fast at startup rather than on the first API call.
    raise ValueError("Hugging Face token not found in environment variables.")
login(token=hf_token, add_to_git_credential=True)
# Source-language display name -> Helsinki-NLP MarianMT model that translates
# that language into English.  The keys also populate the Gradio dropdown, so
# insertion order here is the order shown in the UI.
language_models = {
    "Arabic": "Helsinki-NLP/opus-mt-ar-en",
    "Bengali": "Helsinki-NLP/opus-mt-bn-en",
    "French": "Helsinki-NLP/opus-mt-fr-en",
    "Hindi": "Helsinki-NLP/opus-mt-hi-en",
    "Russian": "Helsinki-NLP/opus-mt-ru-en",
    "German": "Helsinki-NLP/opus-mt-de-en",
    "Spanish": "Helsinki-NLP/opus-mt-es-en",
    "Tamil": "Helsinki-NLP/opus-mt-mul-en"  # no dedicated ta-en model; use the multilingual one
}
# Cache of constructed translation pipelines, keyed by language display name.
# Without this, every translation request re-downloaded / re-instantiated the
# Marian model and tokenizer, which is very slow and memory-churning.
_translation_pipelines = {}

def load_translation_pipeline(language):
    """Return a cached translation pipeline for *language* -> English.

    Parameters
    ----------
    language : str
        A key of ``language_models`` (e.g. "French").  An unknown language
        raises ``KeyError``, same as before.

    Returns
    -------
    transformers.Pipeline
        A "translation" pipeline; built on first use, reused afterwards.
    """
    if language not in _translation_pipelines:
        model_name = language_models[language]
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)
        _translation_pipelines[language] = pipeline(
            "translation", model=model, tokenizer=tokenizer
        )
    return _translation_pipelines[language]
# Inference-API endpoint for image generation (FLUX.1-dev), called with the
# same Hub token obtained at startup.
flux_API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
flux_headers = {"Authorization": f"Bearer {hf_token}"}
# Function for translation
def translate_text(text, language, max_length=40):
    """Translate *text* from *language* into English.

    Parameters
    ----------
    text : str
        Source-language text to translate.
    language : str
        Key into ``language_models`` selecting the MarianMT model.
    max_length : int, optional
        Maximum token length of the generated translation.  Defaults to 40,
        preserving the previous hard-coded limit; callers may raise it for
        longer inputs.

    Returns
    -------
    str
        The English translation, or a human-readable error message if the
        pipeline call fails (best-effort so the Gradio UI shows the error
        instead of crashing the request).
    """
    translator = load_translation_pipeline(language)
    try:
        translation = translator(text, max_length=max_length)
        return translation[0]['translation_text']
    except Exception as e:
        return f"An error occurred: {str(e)}"
# Function to send payload and generate an image
def generate_image(prompt):
    """POST *prompt* to the FLUX inference endpoint and decode the result.

    Returns a ``PIL.Image.Image`` on success, or ``None`` when the endpoint
    responds with a non-200 status or any exception is raised (the error is
    printed, never propagated).
    """
    try:
        response = requests.post(
            flux_API_URL, headers=flux_headers, json={"inputs": prompt}
        )
        if response.status_code != 200:
            return None
        # The endpoint returns raw image bytes in the response body.
        return Image.open(io.BytesIO(response.content))
    except Exception as e:
        print(f"An error occurred: {e}")
        return None
# Inference-API endpoint for creative-text generation (Mistral-7B), using the
# same Hub token as the other calls.
mistral_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1"
mistral_headers = {"Authorization": f"Bearer {hf_token}"}
def generate_creative_text(translated_text):
    """Generate creative text from *translated_text* via the Mistral endpoint.

    Parameters
    ----------
    translated_text : str
        English prompt (normally the output of ``translate_text``).

    Returns
    -------
    str
        The generated text, or an error message on failure.  Previously the
        exception path returned ``None``, which rendered as an empty textbox
        with no hint of what went wrong; it now reports the error the same
        way ``translate_text`` does.
    """
    try:
        response = requests.post(
            mistral_API_URL, headers=mistral_headers, json={"inputs": translated_text}
        )
        if response.status_code == 200:
            return response.json()[0]['generated_text']
        return "Error generating creative text"
    except Exception as e:
        return f"An error occurred: {str(e)}"
# Function to handle the full workflow
def translate_generate_image_and_text(input_text, language):
    """Run the full pipeline for one Gradio request.

    Translates *input_text* into English, then uses the translation both as
    an image-generation prompt and as a seed for creative text.  Returns
    ``(translation, creative_text, image)`` in the order the Gradio outputs
    expect.
    """
    english_text = translate_text(input_text, language)
    generated_image = generate_image(english_text)
    creative_caption = generate_creative_text(english_text)
    return english_text, creative_caption, generated_image
# Gradio UI: a textbox for the source text plus a dropdown built from the
# keys of ``language_models``.  The three outputs (text, text, image) match
# the return tuple of ``translate_generate_image_and_text`` positionally:
# translation, creative text, generated image.
interface = gr.Interface(
    fn=translate_generate_image_and_text,
    inputs=[
        gr.Textbox(label="Input Text in Source Language"),
        gr.Dropdown(choices=list(language_models.keys()), label="Source Language")
    ],
    outputs=["text", "text", "image"],
    title="Multilingual Translation, Image Generation & Creative Text",
    description="Enter text to translate to English, generate an image, and create creative content based on the translation."
)
# Launch Gradio app (blocks and serves the web UI).
interface.launch()