from transformers import MBartForConditionalGeneration, MBart50Tokenizer, AutoModelForCausalLM, AutoTokenizer, pipeline
import gradio as gr
import requests
import io
from PIL import Image
import os
# --- One-time model and API setup (runs at import time) ---

# Tamil-to-English translation: mBART-50 "many-to-one" multilingual MT model.
_translation_model_name = "facebook/mbart-large-50-many-to-one-mmt"
tokenizer = MBart50Tokenizer.from_pretrained(_translation_model_name)
model = MBartForConditionalGeneration.from_pretrained(_translation_model_name)

# The hosted text-to-image endpoint is authorized via the "full_token"
# environment variable; fail fast at startup if it is missing.
_hf_token = os.getenv("full_token")
if _hf_token is None:
    raise ValueError("Hugging Face API key not found! Please set 'full_token' environment variable.")
headers = {"Authorization": f"Bearer {_hf_token}"}

# Hosted Stable Diffusion v1.4 inference endpoint used for image generation.
API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"

# Local GPT-Neo 1.3B for short-paragraph generation — a smaller model chosen
# to keep generation latency down.
_text_model_name = "EleutherAI/gpt-neo-1.3B"
_gen_tokenizer = AutoTokenizer.from_pretrained(_text_model_name)
_gen_model = AutoModelForCausalLM.from_pretrained(_text_model_name)

# Text-generation pipeline shared by the paragraph-generation helper below.
text_generator = pipeline("text-generation", model=_gen_model, tokenizer=_gen_tokenizer)
# Function to generate an image using Hugging Face's text-to-image model
def generate_image_from_text(translated_text):
    """Request an image for *translated_text* from the hosted text-to-image API.

    Returns a ``(image, error)`` pair: ``(PIL.Image, None)`` on success, or
    ``(None, error_message)`` when the request fails or the API responds with
    a non-200 status.
    """
    try:
        print(f"Generating image from translated text: {translated_text}")
        resp = requests.post(API_URL, headers=headers, json={"inputs": translated_text})

        # A non-200 status means the endpoint rejected the request (e.g. model
        # loading, rate limit); surface its message to the caller.
        if resp.status_code != 200:
            print(f"Error generating image: {resp.text}")
            return None, f"Error generating image: {resp.text}"

        # Successful responses carry the raw image bytes in the body.
        picture = Image.open(io.BytesIO(resp.content))
        print("Image generation completed.")
        return picture, None
    except Exception as exc:
        # Network failures, decode errors, etc. — report instead of raising.
        print(f"Error during image generation: {exc}")
        return None, f"Error during image generation: {exc}"
# Function to generate a shorter paragraph based on the translated text
def generate_short_paragraph_from_text(translated_text):
    """Generate a short English paragraph continuing *translated_text*.

    Returns the generated text on success; on failure returns an error string
    beginning with ``"Error during paragraph generation:"``.
    """
    try:
        print(f"Generating a short paragraph from translated text: {translated_text}")
        outputs = text_generator(
            translated_text,
            max_length=80,           # short cap keeps generation time low
            num_return_sequences=1,
            temperature=0.6,
            top_p=0.8,
            truncation=True,         # guard against over-long input sequences
        )
        paragraph = outputs[0]["generated_text"]
        print(f"Paragraph generation completed: {paragraph}")
        return paragraph
    except Exception as exc:
        print(f"Error during paragraph generation: {exc}")
        return f"Error during paragraph generation: {exc}"
# Define the function to translate Tamil text, generate a short paragraph, and create an image
def translate_generate_paragraph_and_image(tamil_text):
    """Translate Tamil text to English, then generate a short paragraph and
    an image from the translation.

    Returns a 4-tuple ``(translated_text, paragraph, image, error_message)``;
    slots that do not apply are ``""`` or ``None``.
    """
    # Step 1: Translate Tamil text to English using mbart-large-50.
    try:
        print("Translating Tamil text to English...")
        tokenizer.src_lang = "ta_IN"
        inputs = tokenizer(tamil_text, return_tensors="pt")
        # Force English as the generation target language.
        translated_tokens = model.generate(
            **inputs, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"]
        )
        translated_text = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
        print(f"Translation completed: {translated_text}")
    except Exception as e:
        return f"Error during translation: {e}", "", None, None

    # Step 2: Generate a shorter paragraph based on the translated text.
    # BUGFIX: the original tested `"Error" in paragraph`, which misfires
    # whenever the generated text merely contains the word "Error".  Match
    # the exact error prefix emitted by generate_short_paragraph_from_text.
    paragraph = generate_short_paragraph_from_text(translated_text)
    if paragraph.startswith("Error during paragraph generation:"):
        return translated_text, paragraph, None, None

    # Step 3: Generate an image using the translated English text.
    image, error_message = generate_image_from_text(translated_text)
    if error_message:
        return translated_text, paragraph, None, error_message
    return translated_text, paragraph, image, None
# Gradio interface setup.
# BUGFIX: the handler returns a 4-tuple (translation, paragraph, image,
# error message), but the original interface declared only THREE outputs —
# an output-arity mismatch that loses the error message.  A fourth Textbox
# now receives the error slot.
iface = gr.Interface(
    fn=translate_generate_paragraph_and_image,
    inputs=gr.Textbox(lines=2, placeholder="Enter Tamil text here..."),
    outputs=[
        gr.Textbox(label="Translated English Text"),
        gr.Textbox(label="Generated Short Paragraph"),
        gr.Image(label="Generated Image"),
        gr.Textbox(label="Error Message"),
    ],
    title="Tamil to English Translation, Short Paragraph Generation, and Image Creation",
    description="Translate Tamil text to English, generate a short paragraph, and create an image using the translated text.",
)

# Launch the app with a public share link.
iface.launch(share=True)