import os
from huggingface_hub import login
from transformers import MarianMTModel, MarianTokenizer, pipeline
import requests
import io
from PIL import Image
import gradio as gr

# Set Hugging Face API key
hf_token = os.getenv("HUGGINGFACE_API_KEY")
if hf_token is None:
    raise ValueError("Hugging Face API key not found in environment variables.")

# Login to Hugging Face
login(token=hf_token)

# Supported source languages and their language codes
language_codes = {
    "French": "fr",
    "Spanish": "es",
    "German": "de",
    "Tamil": "ta",
    "Hindi": "hi",
    "Chinese": "zh",
    "Russian": "ru",
    "Japanese": "ja",
    "Korean": "ko",
    "Arabic": "ar",
    "Portuguese": "pt",
    "Italian": "it"
}

# Multilingual-to-English translation model
model_name = "Helsinki-NLP/opus-mt-mul-en"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)
translator = pipeline("translation", model=model, tokenizer=tokenizer)

# Function for translation
def translate_text(input_text, src_lang):
    try:
        src_prefix = f">>{src_lang}<< " + input_text
        translation = translator(src_prefix, max_length=40)
        translated_text = translation[0]["translation_text"]
        return translated_text
    except Exception as e:
        return f"An error occurred: {str(e)}"

# API credentials and endpoint for FLUX
flux_API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
flux_headers = {"Authorization": f"Bearer {hf_token}"}

# Function to generate an image based on the prompt
def generate_image(prompt):
    try:
        response = requests.post(flux_API_URL, headers=flux_headers, json={"inputs": prompt})
        if response.status_code == 200:
            image_bytes = response.content
            image = Image.open(io.BytesIO(image_bytes))
            return image
        else:
            print(f"Failed to get image: Status code {response.status_code}")
            return None
    except Exception as e:
        print(f"An error occurred: {e}")
        return None

# API setup for the Mistral model
mistral_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1"
mistral_headers = {"Authorization": f"Bearer {hf_token}"}

# Function to generate creative text from the translated prompt
def generate_creative_text(translated_text):
    try:
        response = requests.post(mistral_API_URL, headers=mistral_headers, json={"inputs": translated_text})
        if response.status_code == 200:
            creative_text = response.json()[0]["generated_text"]
            return creative_text
        else:
            return "Error generating creative text"
    except Exception as e:
        return f"An error occurred: {str(e)}"

# Main function to handle the full workflow
def translate_generate_image_and_text(input_text, src_lang):
    # Step 1: Translate the input text to English
    translated_text = translate_text(input_text, language_codes[src_lang])

    # Step 2: Generate an image from the translated text
    image = generate_image(translated_text)

    # Step 3: Generate creative text based on the translation
    creative_text = generate_creative_text(translated_text)

    return translated_text, creative_text, image

# Gradio interface
interface = gr.Interface(
    fn=translate_generate_image_and_text,
    inputs=[
        gr.Textbox(label="Enter text for translation"),
        gr.Dropdown(choices=list(language_codes.keys()), label="Source Language")
    ],
    outputs=[
        gr.Textbox(label="Translated Text"),
        gr.Textbox(label="Creative Text"),
        gr.Image(label="Generated Image")
    ],
    title="Multilingual Translation, Image, and Creative Text Generator",
    description="Translates text from multiple languages to English, generates an image, and creates creative text."
)

# Launch the Gradio app
interface.launch()
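
# Optional, hypothetical smoke test: interface.launch() above blocks, so to
# exercise the pipeline without the UI, comment out launch() and uncomment
# the lines below (the sample prompt and output filename are placeholders).
# translated, creative, img = translate_generate_image_and_text("Bonjour tout le monde", "French")
# print(translated)
# print(creative)
# if img is not None:
#     img.save("sample_output.png")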