# Hugging Face Space: multilingual translation -> image generation -> creative-text demo.
import io
import os
from functools import lru_cache

import gradio as gr
import requests
from huggingface_hub import login
from PIL import Image
from transformers import MarianMTModel, MarianTokenizer, pipeline
# Retrieve the Hugging Face API token from the environment; fail fast at import
# time if it is missing, since every downstream call (model download, FLUX,
# Mistral) needs it.
hf_token = os.getenv("HUGGINGFACE_API_KEY")
if not hf_token:
raise ValueError("Hugging Face token not found in environment variables.")
# Authenticate this process with the Hub and store the token as a git credential.
login(token=hf_token, add_to_git_credential=True)
# Define available languages with their respective Helsinki model names.
# Keys are the display names shown in the Gradio dropdown (insertion order is
# the dropdown order); values are Hub model IDs for X->English translation.
language_models = {
"Arabic": "Helsinki-NLP/opus-mt-ar-en",
"Bengali": "Helsinki-NLP/opus-mt-bn-en",
"French": "Helsinki-NLP/opus-mt-fr-en",
"Hindi": "Helsinki-NLP/opus-mt-hi-en",
"Russian": "Helsinki-NLP/opus-mt-ru-en",
"German": "Helsinki-NLP/opus-mt-de-en",
"Spanish": "Helsinki-NLP/opus-mt-es-en",
"Tamil": "Helsinki-NLP/opus-mt-mul-en" # Using multilingual model for Tamil
}
# Function to load a translation model dynamically.
@lru_cache(maxsize=None)  # FIX: previously reloaded tokenizer+model on every
# translation request; memoizing per language makes repeat calls instant.
# maxsize=None is safe here: the key space is bounded by language_models.
def load_translation_pipeline(language):
    """Return a cached Hugging Face translation pipeline for *language*.

    Args:
        language: A key of ``language_models`` (e.g. ``"French"``).
            Raises ``KeyError`` for unknown languages.

    Returns:
        A ``transformers`` translation pipeline translating into English.
    """
    model_name = language_models[language]
    tokenizer = MarianTokenizer.from_pretrained(model_name)
    model = MarianMTModel.from_pretrained(model_name)
    return pipeline("translation", model=model, tokenizer=tokenizer)
# API credentials and endpoint for FLUX (image generation via the Hub's
# hosted inference API); reuses the same token validated at startup.
flux_API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
flux_headers = {"Authorization": f"Bearer {hf_token}"}
# Function for translation.
def translate_text(text, language):
    """Translate *text* from *language* into English.

    Returns the translated string on success; on any pipeline failure,
    returns a human-readable "An error occurred: ..." message instead of
    raising, so the Gradio UI always has text to display.
    """
    pipe = load_translation_pipeline(language)
    try:
        output = pipe(text, max_length=40)
        return output[0]['translation_text']
    except Exception as e:
        return f"An error occurred: {str(e)}"
# Function to send payload and generate an image.
def generate_image(prompt):
    """POST *prompt* to the FLUX inference endpoint and decode the result.

    Returns a ``PIL.Image`` on HTTP 200; returns ``None`` when the endpoint
    answers with any other status or the request/decoding raises (the error
    is printed, not propagated, so the UI degrades gracefully).
    """
    try:
        resp = requests.post(flux_API_URL, headers=flux_headers, json={"inputs": prompt})
        # Guard clause: anything but 200 means no usable image bytes.
        if resp.status_code != 200:
            return None
        return Image.open(io.BytesIO(resp.content))
    except Exception as e:
        print(f"An error occurred: {e}")
        return None
# Endpoint and headers for the Mistral hosted-inference call used to
# generate creative text from the English translation.
mistral_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1"
mistral_headers = {"Authorization": f"Bearer {hf_token}"}

def generate_creative_text(translated_text):
    """Ask the Mistral inference endpoint to continue *translated_text*.

    Returns the generated text on success. On any failure (non-200 status,
    network error, unexpected response shape) returns an error-message
    string — never ``None`` — so the Gradio text output always shows
    something meaningful.
    """
    try:
        response = requests.post(mistral_API_URL, headers=mistral_headers, json={"inputs": translated_text})
        if response.status_code == 200:
            return response.json()[0]['generated_text']
        return "Error generating creative text"
    except Exception as e:
        # FIX: previously returned None here while the non-200 branch
        # returned a string — inconsistent error signaling that blanked the
        # UI output. Report the failure as text, matching translate_text.
        return f"An error occurred: {str(e)}"
# Function to handle the full workflow.
def translate_generate_image_and_text(input_text, language):
    """Run the full pipeline: translate to English, then derive an image
    and creative text from that translation.

    Returns a 3-tuple ``(translated_text, creative_text, image)`` matching
    the Gradio outputs ``["text", "text", "image"]``.
    """
    english_text = translate_text(input_text, language)
    picture = generate_image(english_text)
    story = generate_creative_text(english_text)
    return english_text, story, picture
# Build the Gradio interface: free-text input plus a source-language picker
# whose choices mirror the keys of language_models.
input_widgets = [
    gr.Textbox(label="Input Text in Source Language"),
    gr.Dropdown(choices=list(language_models.keys()), label="Source Language"),
]
interface = gr.Interface(
    fn=translate_generate_image_and_text,
    inputs=input_widgets,
    outputs=["text", "text", "image"],
    title="Multilingual Translation, Image Generation & Creative Text",
    description="Enter text to translate to English, generate an image, and create creative content based on the translation.",
)
# Start the Gradio app server.
interface.launch()