import openai
from transformers import MBartForConditionalGeneration, MBart50Tokenizer
import gradio as gr
import requests
import io
from PIL import Image
import os
import time

# Set up your OpenAI API key (make sure it's stored as an environment variable)
openai_api_key = os.getenv("OPENAI_API_KEY")
if openai_api_key is None:
    raise ValueError("OpenAI API key not found! Please set the 'OPENAI_API_KEY' environment variable.")
else:
    openai.api_key = openai_api_key
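# Note: `openai.api_key` and `openai.ChatCompletion` (used below) follow the legacy pre-1.0
# `openai` Python SDK; openai>=1.0 replaced them with a client object (`openai.OpenAI`),
# so this script assumes an older SDK version is installed.
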
# Load the translation model and tokenizer
model_name = "facebook/mbart-large-50-many-to-one-mmt"
tokenizer = MBart50Tokenizer.from_pretrained(model_name)
model = MBartForConditionalGeneration.from_pretrained(model_name)
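# Note: the mbart-large-50 checkpoint is large (roughly 2.4 GB) and is downloaded and cached
# on the first run, so the initial startup of the Space can take a while.
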
# Use the Hugging Face API key from environment variables for the text-to-image model
hf_api_key = os.getenv("full_token")
if hf_api_key is None:
    raise ValueError("Hugging Face API key not found! Please set the 'full_token' environment variable.")
else:
    headers = {"Authorization": f"Bearer {hf_api_key}"}
API_URL = "https://api-inference.huggingface.co/models/ZB-Tech/Text-to-Image"
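# Note: the hosted Inference API may return a 503 while the model warms up from a cold start;
# retrying after a short delay, or adding "options": {"wait_for_model": True} to the request
# payload, is a common workaround.
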
# Define the OpenAI ChatCompletion function using `gpt-3.5-turbo`
def generate_with_gpt3(prompt):
    try:
        print("Generating text with OpenAI ChatCompletion...")
        # Use ChatCompletion with gpt-3.5-turbo
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",  # Use "gpt-4" if you have access
            messages=[{"role": "system", "content": "You are a helpful assistant."},
                      {"role": "user", "content": prompt}],
            max_tokens=150,
            temperature=0.2,
            top_p=0.9,
        )
        generated_text = response['choices'][0]['message']['content'].strip()
        print("Text generation completed.")
        return generated_text
    except Exception as e:
        print(f"OpenAI API Error: {e}")
        return "Error generating text with GPT-3.5. Check the OpenAI API settings."
# Define the translation, GPT-3.5 text generation, and image generation function
def translate_and_generate_image(tamil_text):
    # Step 1: Translate Tamil text to English using mbart-large-50
    try:
        print("Translating Tamil text to English...")
        tokenizer.src_lang = "ta_IN"
        inputs = tokenizer(tamil_text, return_tensors="pt")
        translated_tokens = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
        translated_text = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
        print(f"Translation completed: {translated_text}")
    except Exception as e:
        return "Error during translation: " + str(e), "", None

    time.sleep(1)  # Optional: small delay to keep the steps strictly sequential

    # Step 2: Generate high-quality descriptive text using OpenAI's ChatCompletion
    try:
        print("Generating descriptive text from the translated English text...")
        prompt = f"Create a detailed and creative description based on the following text: {translated_text}"
        generated_text = generate_with_gpt3(prompt)
        print(f"Text generation completed: {generated_text}")
    except Exception as e:
        return translated_text, f"Error during text generation: {e}", None

    time.sleep(1)  # Optional: small delay to keep the steps strictly sequential

    # Step 3: Use the generated English text to create an image
    try:
        print("Generating image from the generated descriptive text...")

        def query(payload):
            response = requests.post(API_URL, headers=headers, json=payload)
            response.raise_for_status()  # Raise an error if the request fails
            return response.content

        # Generate the image using the descriptive text
        image_bytes = query({"inputs": generated_text})
        image = Image.open(io.BytesIO(image_bytes))
        print("Image generation completed.")
    except Exception as e:
        # Return None for the image slot so gr.Image does not receive an error string
        return translated_text, f"{generated_text}\n\nError during image generation: {e}", None

    return translated_text, generated_text, image
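# Example usage outside Gradio (hypothetical input; all three steps run sequentially):
#   translated, description, img = translate_and_generate_image("<Tamil text here>")
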
# Gradio interface setup
iface = gr.Interface(
    fn=translate_and_generate_image,
    inputs=gr.Textbox(lines=2, placeholder="Enter Tamil text here..."),
    outputs=[gr.Textbox(label="Translated English Text"),
             gr.Textbox(label="Generated Descriptive Text"),
             gr.Image(label="Generated Image")],
    title="Tamil to English Translation, GPT-3.5 Text Generation, and Image Creation",
    description="Translate Tamil text to English using Facebook's mbart-large-50 model, generate high-quality descriptive text with GPT-3.5-turbo, and create an image from the generated text.",
)

# Launch the Gradio app without `share=True`
iface.launch()
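# Optional: if the Space receives concurrent traffic, Gradio's request queue can be enabled
# before launching, e.g. `iface.queue().launch()` (a suggestion, not required for the app to run).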