Update app.py
app.py
CHANGED
@@ -1,4 +1,4 @@
-from transformers import MBartForConditionalGeneration, MBart50Tokenizer
+from transformers import MBartForConditionalGeneration, MBart50Tokenizer, AutoModelForCausalLM, AutoTokenizer, pipeline
 import gradio as gr
 import requests
 import io
@@ -21,6 +21,14 @@ else:
 # Define the text-to-image model URL (using a stable diffusion model)
 API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
 
+# Load the text generation model for generating detailed paragraphs
+text_generation_model_name = "EleutherAI/gpt-neo-2.7B"
+text_tokenizer = AutoTokenizer.from_pretrained(text_generation_model_name)
+text_model = AutoModelForCausalLM.from_pretrained(text_generation_model_name)
+
+# Create a pipeline for text generation
+text_generator = pipeline("text-generation", model=text_model, tokenizer=text_tokenizer)
+
 # Function to generate an image using Hugging Face's text-to-image model
 def generate_image_from_text(translated_text):
     try:
@@ -41,8 +49,20 @@ def generate_image_from_text(translated_text):
         print(f"Error during image generation: {e}")
         return None, f"Error during image generation: {e}"
 
-# Define the function to translate Tamil text and generate an image
-def translate_and_generate_image(tamil_text):
+# Function to generate a detailed paragraph from the translated text
+def generate_paragraph_from_text(translated_text):
+    try:
+        print(f"Generating paragraph from translated text: {translated_text}")
+        # Generate detailed text from translated text using the text generation model
+        paragraph = text_generator(translated_text, max_length=250, num_return_sequences=1, temperature=0.7, top_p=0.9)[0]['generated_text']
+        print(f"Paragraph generation completed: {paragraph}")
+        return paragraph
+    except Exception as e:
+        print(f"Error during paragraph generation: {e}")
+        return f"Error during paragraph generation: {e}"
+
+# Define the function to translate Tamil text, generate a paragraph, and create an image
+def translate_generate_paragraph_and_image(tamil_text):
     # Step 1: Translate Tamil text to English using mbart-large-50
     try:
         print("Translating Tamil text to English...")
@@ -52,24 +72,30 @@ def translate_and_generate_image(tamil_text):
         translated_text = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
         print(f"Translation completed: {translated_text}")
     except Exception as e:
-        return f"Error during translation: {e}", None
+        return f"Error during translation: {e}", "", None, None
+
+    # Step 2: Generate a detailed paragraph based on the translated English text
+    paragraph = generate_paragraph_from_text(translated_text)
+    if "Error" in paragraph:
+        return translated_text, paragraph, None, None
 
-    # Step 2: Generate an image using the translated English text
+    # Step 3: Generate an image using the translated English text
     image, error_message = generate_image_from_text(translated_text)
     if error_message:
-        return translated_text, error_message
+        return translated_text, paragraph, None, error_message
 
-    return translated_text, image
+    return translated_text, paragraph, image, None
 
 # Gradio interface setup
 iface = gr.Interface(
-    fn=translate_and_generate_image,
+    fn=translate_generate_paragraph_and_image,
     inputs=gr.Textbox(lines=2, placeholder="Enter Tamil text here..."),
     outputs=[gr.Textbox(label="Translated English Text"),
+             gr.Textbox(label="Generated Descriptive Paragraph"),
              gr.Image(label="Generated Image")],
-    title="Tamil to English Translation and Image Creation",
-    description="Translate Tamil text to English using Facebook's mbart-large-50 model and create an image using the translated text.",
+    title="Tamil to English Translation, Paragraph Generation, and Image Creation",
+    description="Translate Tamil text to English using Facebook's mbart-large-50 model, generate a detailed paragraph, and create an image using the translated text.",
 )
 
 # Launch Gradio app without `share=True`
-iface.launch()
+iface.launch()
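
As a rough local check of the new flow, the updated handler can be called directly, outside the Gradio interface. This is a minimal sketch, not part of the commit: the sample input is a placeholder, and it assumes the mbart translation model and tokenizer from the unchanged part of app.py are already loaded.

# Hypothetical smoke test for the new four-value return (translation, paragraph, image, error)
sample_tamil = "..."  # any Tamil sentence; placeholder, not a value from the commit
translated, paragraph, image, error = translate_generate_paragraph_and_image(sample_tamil)
print("Translation:", translated)
print("Paragraph:", paragraph)
print("Image returned:", image is not None, "| Error:", error)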