# Imports
import gradio as gr
import random
import spaces
import torch
import numpy
import uuid
import json
import os
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
from PIL import Image

# Pre-Initialize
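# Resolve the compute device: use CUDA when a GPU is available, otherwise fall back to CPU.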
DEVICE = "auto"
if DEVICE == "auto":
    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
print(f"[SYSTEM] | Using {DEVICE} type compute device.")

# Variables
MAX_SEED = 9007199254740991
DEFAULT_INPUT = ""
DEFAULT_NEGATIVE_INPUT = ""
DEFAULT_HEIGHT = 1024
DEFAULT_WIDTH = 1024

REPO = "sd-community/sdxl-flash"
REPO_WEIGHT = "ehristoforu/dalle-3-xl-v2"
WEIGHT = "dalle-3-xl-lora-v2.safetensors"
ADAPTER = "dalle"

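# Load the SDXL-Flash base pipeline in fp16, swap in the Euler Ancestral scheduler,
# and attach the dalle-3-xl-v2 LoRA adapter at 0.7 strength.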
model = StableDiffusionXLPipeline.from_pretrained(REPO, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
model.scheduler = EulerAncestralDiscreteScheduler.from_config(model.scheduler.config)
model.load_lora_weights(REPO_WEIGHT, weight_name=WEIGHT, adapter_name=ADAPTER)
model.set_adapters(ADAPTER, adapter_weights=[0.7])
model.to(DEVICE)

# Functions
def get_seed(seed):
    # Use the provided seed when it is a valid integer; otherwise fall back to a random seed.
    seed = str(seed).strip() if seed is not None else ""
    if seed.isdigit():
        return int(seed)
    else:
        return random.randint(0, MAX_SEED)

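# On Hugging Face ZeroGPU Spaces, @spaces.GPU allocates a GPU for the duration of each call (30 s limit here).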
@spaces.GPU(duration=30)
def generate(input=DEFAULT_INPUT, negative_input=DEFAULT_NEGATIVE_INPUT, height=DEFAULT_HEIGHT, width=DEFAULT_WIDTH, steps=1, guidance=0, seed=None, progress=gr.Progress(track_tqdm=True)):
    
    print(input, negative_input, height, width, steps, guidance, seed)
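    # Ensure the pipeline is on the active device (on ZeroGPU, CUDA is only attached inside @spaces.GPU calls).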
    model.to(DEVICE)

    parameters = {
        "prompt": input,
        "negative_prompt": negative_input,
        "height": height,
        "width": width,
        "num_inference_steps": steps,
        "guidance_scale": guidance,
        "generator": torch.Generator().manual_seed(get_seed(seed)),
        "use_resolution_binning": True,
        "output_type": "pil",
    }
    
    image = model(**parameters).images[0]
    return image

# Initialize
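# Build the Gradio Blocks UI: prompt and sampling controls on top, the generated image below.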
with gr.Blocks() as main:
    with gr.Row():
        with gr.Column():
            input = gr.Textbox(lines=1, value=DEFAULT_INPUT, label="Input")
            negative_input = gr.Textbox(lines=1, value=DEFAULT_NEGATIVE_INPUT, label="Negative Input")
            height = gr.Slider(minimum=1, maximum=2160, step=1, value=DEFAULT_HEIGHT, label="Height")
            width = gr.Slider(minimum=1, maximum=2160, step=1, value=DEFAULT_WIDTH, label="Width")
            steps = gr.Slider(minimum=1, maximum=100, step=1, value=8, label="Steps")
            guidance = gr.Slider(minimum=0, maximum=100, step=0.001, value=3, label="Guidance")
            seed = gr.Textbox(lines=1, value="", label="Seed (Blank for random)")
            submit = gr.Button("▶")

    with gr.Row():
        with gr.Column():
            image = gr.Image(label="Image")
            
    submit.click(generate, inputs=[input, negative_input, height, width, steps, guidance, seed], outputs=[image])

main.launch(show_api=True)