from transformers import MBartForConditionalGeneration, MBart50Tokenizer, pipeline
import gradio as gr
import requests
import io
from PIL import Image
import os
import torch  # needed for the bfloat16 dtype used by the text-generation pipeline

# Load the translation model and tokenizer
model_name = "facebook/mbart-large-50-many-to-one-mmt"
tokenizer = MBart50Tokenizer.from_pretrained(model_name)
model = MBartForConditionalGeneration.from_pretrained(model_name)

# Use GPT-2 for text generation instead of restricted models
text_gen_model = "gpt2"
pipe = pipeline(
    "text-generation", 
    model=text_gen_model, 
    torch_dtype=torch.bfloat16, 
    device_map="auto"
)
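# Note: device_map="auto" requires the `accelerate` package to be installed;
# on hardware without a GPU the model simply runs on CPU.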

# Use the Hugging Face API token from the environment for the text-to-image model
API_URL = "https://api-inference.huggingface.co/models/ZB-Tech/Text-to-Image"
headers = {"Authorization": f"Bearer {os.getenv('full_token')}"}
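# The `full_token` environment variable must hold a valid Hugging Face access token
# (e.g. configured as a Space secret); otherwise requests to the Inference API may
# be rejected or rate-limited.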

# Define the translation, text generation, and image generation function
def translate_and_generate_image(tamil_text):
    # Step 1: Translate Tamil text to English using mbart-large-50
    tokenizer.src_lang = "ta_IN"
    inputs = tokenizer(tamil_text, return_tensors="pt")
    translated_tokens = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
    translated_text = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]

    # Step 2: Generate descriptive English text using GPT-2
    # Cap new tokens (rather than total length) so longer prompts are not rejected
    generated_text = pipe(translated_text, max_new_tokens=50, num_return_sequences=1, truncation=True)[0]['generated_text']
    
    # Step 3: Use the generated English text to create an image
    def query(payload):
        response = requests.post(API_URL, headers=headers, json=payload)
        return response.content
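    # A minimal sketch (assumption, not part of the original app): the Inference API
    # can return a JSON error payload instead of image bytes, e.g. while the model is
    # still loading, which would make Image.open() below fail. An optional check,
    # using a hypothetical helper name `query_checked`:
    #
    #     def query_checked(payload):
    #         response = requests.post(API_URL, headers=headers, json=payload)
    #         response.raise_for_status()
    #         if response.headers.get("content-type", "").startswith("application/json"):
    #             raise RuntimeError(f"Image API error: {response.json()}")
    #         return response.content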

    # Generate image using the generated text
    image_bytes = query({"inputs": generated_text})
    image = Image.open(io.BytesIO(image_bytes))

    return translated_text, generated_text, image

# Gradio interface setup
iface = gr.Interface(
    fn=translate_and_generate_image,
    inputs=gr.Textbox(lines=2, placeholder="Enter Tamil text here..."),
    outputs=[gr.Textbox(label="Translated English Text"), 
             gr.Textbox(label="Generated Descriptive Text"),
             gr.Image(label="Generated Image")],
    title="Tamil to English Translation, Text Generation, and Image Creation",
    description="Translate Tamil text to English using Facebook's mbart-large-50 model, generate descriptive text using GPT-2, and create an image using the generated text.",
)

# Launch Gradio app without `share=True` (Hugging Face Spaces already handles sharing)
iface.launch()