File size: 920 Bytes
c82c2e4
bb3e285
 
5d2696c
2cec9ee
 
c82c2e4
5d2696c
bfe9080
5d2696c
68040ae
d10fb3e
ab690cb
 
 
 
 
 
 
 
 
 
 
d10fb3e
ab690cb
68040ae
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
import gradio as gr
import torch
import torchvision
from diffusers import DiffusionPipeline
import PIL.Image
import numpy as np

# Load the pretrained latent-diffusion text-to-image pipeline from the Hugging Face Hub.
# NOTE(review): this runs at import time and downloads model weights on first use —
# confirm that blocking module import on the download is intended.
ldm = DiffusionPipeline.from_pretrained("fusing/latent-diffusion-text2im-large")

# Seed the global torch RNG; the returned Generator is passed to the pipeline
# below so image generation is reproducible across runs.
generator = torch.manual_seed(42)
    
def greet(name):
    """Generate an image from a text prompt using the latent-diffusion pipeline.

    Args:
        name: Text typed by the user in the Gradio textbox. Used as the
            generation prompt; when empty/blank, falls back to the original
            hard-coded demo prompt so existing behavior is preserved.

    Returns:
        A greeting string echoing the prompt that was used. The generated
        image is saved to ``test.png`` as a side effect.
    """
    # Bug fix: the original ignored `name` entirely and always generated the
    # same hard-coded prompt, making the text input useless.
    prompt = name.strip() if name and name.strip() else "A painting of a squirrel eating a burger"
    # NOTE(review): assumes this diffusers version returns a tensor directly
    # (batch, C, H, W) — newer versions return an output object; confirm.
    image = ldm([prompt], generator=generator, eta=0.3, guidance_scale=6.0, num_inference_steps=50)

    # Convert (batch, C, H, W) float tensor in [0, 1] to uint8 HWC for PIL.
    image_processed = image.cpu().permute(0, 2, 3, 1)
    image_processed = image_processed * 255.
    image_processed = image_processed.numpy().astype(np.uint8)
    image_pil = PIL.Image.fromarray(image_processed[0])

    # Persist the generated image next to the script.
    image_pil.save("test.png")
    return "Hello " + prompt + "!!"

# Bug fixes: the original `outputs=[image, ...]` referenced an undefined name
# `image` (NameError at import), declared three output components while `greet`
# returns a single string, and passed the string "image_pil" to
# gr.outputs.Carousel where component specs are expected. A single text output
# matches what `greet` actually returns; the generated image is written to
# test.png by `greet` itself.
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()