My-AI-Projects committed on
Commit 289c4e6 · verified · 1 Parent(s): 5e69d3c

Update app.py

Files changed (1)
  1. app.py +16 -23
app.py CHANGED
@@ -1,30 +1,23 @@
-from transformers import AutoTokenizer, AutoModelForCausalLM
-import torch
 import gradio as gr
+from transformers import eBart
 
-# Load the model and tokenizer
-tokenizer = AutoTokenizer.from_pretrained("dalle-mini/dalle-mega")
-model = AutoModelForCausalLM.from_pretrained("dalle-mini/dalle-mega")
+# Load the DALL-E Mega model
+model = eBart.from_pretrained("dalle-mini/dalle-mega")
 
-# Define the function for Gradio interface
-def generate_image(prompt):
-    inputs = tokenizer(prompt, return_tensors="pt")
-
-    # Generate image (or output) using the model
-    with torch.no_grad():
-        outputs = model.generate(**inputs)
-
-    # Convert output to a format suitable for Gradio
-    # This part may need to be adapted based on actual output format
-    return outputs
+# Define the function to generate images
+def generate_image(text):
+    inputs = model.tokenizer(text, return_tensors="pt")
+    outputs = model.generate(**inputs)
+    image_url = model.tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return image_url
 
-# Set up Gradio interface
-iface = gr.Interface(
+# Create the Gradio interface
+ui = gr.Interface(
     fn=generate_image,
-    inputs=gr.Textbox(label="Enter prompt"),
-    outputs=gr.Image(type="pil", label="Generated Image"),
-    live=True
+    inputs="text",
+    outputs="image",
+    title="DALL-E Mega Image Generator"
 )
 
-# Launch the app
-iface.launch()
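Note: transformers does not expose an eBart class, so the committed version will fail at import time, and dalle-mini/dalle-mega does not return an image URL from a text decode. Below is a minimal sketch of the same Gradio wiring, keeping the Textbox/Image components from the previous version of app.py; the stub backend and its placeholder image are assumptions for illustration only, to be replaced with a real text-to-image call (for example the dalle-mini package or a diffusers pipeline).

import gradio as gr
from PIL import Image


def generate_image(prompt):
    # Placeholder backend (assumption, not part of the commit): swap this stub
    # for a real text-to-image call. Returning a solid-color PIL image keeps
    # the interface runnable end to end for wiring tests.
    return Image.new("RGB", (256, 256), color="lightgray")


ui = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(label="Enter prompt"),
    outputs=gr.Image(type="pil", label="Generated Image"),
    title="DALL-E Mega Image Generator",
)

if __name__ == "__main__":
    ui.launch()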