Spaces:
Runtime error
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr

# Load the model and tokenizer.
# NOTE: "dalle-mini/dalle-mega" is a Flax DALL-E Mini (BART-style) checkpoint,
# not a causal language model, so AutoModelForCausalLM.from_pretrained will
# likely fail to load it; this is the most probable cause of the runtime error.
tokenizer = AutoTokenizer.from_pretrained("dalle-mini/dalle-mega")
model = AutoModelForCausalLM.from_pretrained("dalle-mini/dalle-mega")

# Define the function for the Gradio interface
def generate_image(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    # Generate output with the model
    with torch.no_grad():
        outputs = model.generate(**inputs)
    # NOTE: even if loading succeeded, `outputs` would be a tensor of token IDs,
    # not a PIL image, so the gr.Image(type="pil") component below could not
    # render it. The return value needs to be converted to an actual image.
    return outputs

# Set up the Gradio interface
iface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(label="Enter prompt"),
    outputs=gr.Image(type="pil", label="Generated Image"),
    live=True,  # re-runs on every keystroke; expensive for a large model
)

# Launch the app
iface.launch()
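
If the goal is a working text-to-image Space, one option is to replace the transformers causal-LM path with a diffusers text-to-image pipeline, which returns PIL images that gr.Image(type="pil") can display directly. The sketch below is an assumption-laden alternative, not the original app's method: it swaps in the runwayml/stable-diffusion-v1-5 checkpoint (a stand-in, not dalle-mega) and assumes the Space has enough memory to load it.

from diffusers import StableDiffusionPipeline
import torch
import gradio as gr

# Pick GPU if available; Stable Diffusion is very slow on CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load a text-to-image pipeline (stand-in checkpoint, not dalle-mega).
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

def generate_image(prompt):
    # The pipeline returns a list of PIL images; return the first one,
    # which gr.Image(type="pil") can render as-is.
    return pipe(prompt, num_inference_steps=25).images[0]

iface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(label="Enter prompt"),
    outputs=gr.Image(type="pil", label="Generated Image"),
)

iface.launch()

Dropping live=True here is deliberate: generating an image per keystroke would queue a full diffusion run on every character typed, which is another easy way to exhaust a Space's resources.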