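# Gradio demo for UI-Diffuser: generates mobile app GUI mock-ups with
# Stable Diffusion 2 plus the Jl-wei/ui-diffuser-v2 LoRA weights.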
import os

import gradio as gr
import spaces
import torch
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
from huggingface_hub import login


# Authenticate with the Hugging Face Hub to access the model weights
token = os.getenv("HF_TOKEN")
login(token=token)

# Base Stable Diffusion 2 model with an Euler discrete scheduler
model_id = "stabilityai/stable-diffusion-2-base"
scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16)

# LoRA weights fine-tuned for mobile GUI generation
lora_path = "Jl-wei/ui-diffuser-v2"
pipe.load_lora_weights(lora_path)
pipe.to("cuda")


@spaces.GPU  # request GPU time on ZeroGPU Spaces; a no-op on other hardware
def gui_generation(text, num_imgs):
    # Prefix the user prompt to match the LoRA's training format, then generate a batch of GUI images
    prompt = f"Mobile app: {text}"
    images = pipe(prompt, num_inference_steps=30, guidance_scale=7.5, height=512, width=288, num_images_per_prompt=int(num_imgs)).images
    yield images

# Minimal UI: prompt and batch-size slider in, image gallery out
with gr.Blocks() as demo:
    gallery = gr.Gallery(columns=[3], rows=[1], object_fit="contain", height="auto")
    number_slider = gr.Slider(1, 30, value=2, step=1, label="Batch size")
    prompt_box = gr.Textbox(label="Prompt", placeholder="Health monitoring report")
    gr.Interface(gui_generation, inputs=[prompt_box, number_slider], outputs=gallery)

if __name__ == "__main__":
    demo.launch()