Update app.py
app.py CHANGED
@@ -1,5 +1,5 @@
-import torch
-from transformers import MBartForConditionalGeneration,
+import torch
+from transformers import MBartForConditionalGeneration, AutoTokenizer, AutoModelForCausalLM, pipeline
 import gradio as gr
 import requests
 import io
@@ -15,18 +15,18 @@ headers = {"Authorization": f"Bearer {hf_api_key}"}
 # Define the text-to-image model URL
 API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
 
-#
+# Use AutoTokenizer to avoid tokenizer mismatch warnings
 translation_model_name = "facebook/mbart-large-50-many-to-one-mmt"
-tokenizer =
+tokenizer = AutoTokenizer.from_pretrained(translation_model_name)  # Use AutoTokenizer to avoid warnings
 translation_model = MBartForConditionalGeneration.from_pretrained(translation_model_name)
 
 # Load a text generation model from Hugging Face using accelerate for memory optimization
-text_generation_model_name = "EleutherAI/gpt-neo-2.7B"
+text_generation_model_name = "EleutherAI/gpt-neo-2.7B"
 text_tokenizer = AutoTokenizer.from_pretrained(text_generation_model_name)
 text_model = AutoModelForCausalLM.from_pretrained(
     text_generation_model_name,
-    device_map="auto",
-    torch_dtype=torch.float32
+    device_map="auto",
+    torch_dtype=torch.float32
 )
 
 # Create a pipeline for text generation
@@ -35,14 +35,10 @@ text_generator = pipeline("text-generation", model=text_model, tokenizer=text_to
 # Function to generate an image using Hugging Face's text-to-image model
 def generate_image_from_text(translated_text):
     try:
-        # Send the translated text to the text-to-image model
         response = requests.post(API_URL, headers=headers, json={"inputs": translated_text})
-
-        # Check if the response is successful
         if response.status_code != 200:
             return None, f"Error generating image: {response.text}"
 
-        # Read and return the generated image
         image_bytes = response.content
         image = Image.open(io.BytesIO(image_bytes))
         return image, None
@@ -52,7 +48,6 @@ def generate_image_from_text(translated_text):
 # Define the function to translate Tamil text, generate an image, and create a descriptive text
 def translate_generate_image_and_text(tamil_text):
     try:
-        # Step 1: Translate Tamil text to English
         tokenizer.src_lang = "ta_IN"
         inputs = tokenizer(tamil_text, return_tensors="pt")
         translated_tokens = translation_model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
@@ -61,7 +56,6 @@ def translate_generate_image_and_text(tamil_text):
         return f"Error during translation: {e}", None, None
 
     try:
-        # Step 2: Use the translated English text to generate an image directly
         image, error_message = generate_image_from_text(translated_text)
         if error_message:
             return translated_text, None, error_message
@@ -69,7 +63,6 @@ def translate_generate_image_and_text(tamil_text):
         return translated_text, None, f"Error during image generation: {e}"
 
     try:
-        # Step 3: Generate a descriptive English text using GPT-Neo based on the translated text
         descriptive_text = text_generator(translated_text, max_length=100, num_return_sequences=1, temperature=0.7, top_p=0.9)[0]['generated_text']
     except Exception as e:
         return translated_text, image, f"Error during text generation: {e}"
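
Note: the translation path that the import fix unblocks follows the usual mbart-50 pattern. Below is a minimal standalone sketch of that step; the sample Tamil input and the decode step (which sits between the hunks above) are illustrative, not part of this commit:

    from transformers import MBartForConditionalGeneration, AutoTokenizer

    model_name = "facebook/mbart-large-50-many-to-one-mmt"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = MBartForConditionalGeneration.from_pretrained(model_name)

    # Tamil source text, as in the Space; this sentence is only a sample.
    tokenizer.src_lang = "ta_IN"
    inputs = tokenizer("வணக்கம், உலகம்!", return_tensors="pt")

    # Force the decoder to start with the English language code.
    translated_tokens = model.generate(
        **inputs,
        forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"],
    )
    print(tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0])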
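The image step posts the translated text to the hosted Inference API and decodes the raw response bytes with Pillow. Note that `Image` must come from `from PIL import Image`, an import that does not appear in the hunks above and is assumed to exist elsewhere in app.py. A standalone sketch; the token and the prompt are placeholders:

    import io
    import requests
    from PIL import Image

    API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
    headers = {"Authorization": "Bearer hf_..."}  # placeholder; app.py builds this header from hf_api_key

    response = requests.post(API_URL, headers=headers, json={"inputs": "a fishing boat at sunrise"})
    if response.status_code != 200:
        print(f"Error generating image: {response.text}")
    else:
        image = Image.open(io.BytesIO(response.content))
        image.save("generated.png")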
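One caveat on the GPT-Neo step: `temperature` and `top_p` are sampling parameters, and the call in app.py does not pass `do_sample=True`, so recent transformers versions decode greedily and ignore them. A hedged sketch of the same call with sampling actually enabled (the prompt is a placeholder; `device_map="auto"` requires accelerate to be installed):

    import torch
    from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

    name = "EleutherAI/gpt-neo-2.7B"
    text_tokenizer = AutoTokenizer.from_pretrained(name)
    text_model = AutoModelForCausalLM.from_pretrained(
        name,
        device_map="auto",        # needs accelerate, as the comment in app.py suggests
        torch_dtype=torch.float32,
    )
    text_generator = pipeline("text-generation", model=text_model, tokenizer=text_tokenizer)

    out = text_generator(
        "A fishing boat at sunrise",  # placeholder prompt
        max_length=100,
        num_return_sequences=1,
        do_sample=True,               # without this, temperature/top_p have no effect
        temperature=0.7,
        top_p=0.9,
    )
    print(out[0]["generated_text"])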