from diffusers import StableDiffusionXLPipeline, AutoencoderKL
import torch
import random
import os
import gradio as gr

# Which checkpoint to load: an index into model_url_list below, set through the "Model" environment variable
model_id = int(os.getenv("Model"))

#stable-diffusion-xl-base-1.0  0 - base model
#Colossus_Project_XL           1 - better people
#Sevenof9_v3_sdxl              2 - nsfw
#JuggernautXL_version5         3 - better faces
#JuggernautXL_version6         4 - better faces
#RealVisXL_V2.0                5 - realistic
#AlbedoBaseXL_v11              6 - realistic
#BetterThanWords_v20_sdxl      7 - nsfw
#AcornIsSpinning_acornXLV1     8 - nsfw
#PyrosNSFWSDXL_v04             9 - nsfw
#AltXL_v60                    10 - realistic
#SDXXXL_v10                   11 - nsfw
#DAC_PhotoEcstacy_XL_SE_V2    12 - doll people
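# For example, setting the environment variable Model=5 (e.g. in the Space settings) loads RealVisXL_V2.0 from the list above.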

model_url_list = ["stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors",
                 "Krebzonide/Colossus_Project_XL/blob/main/colossusProjectXLSFW_v202BakedVAE.safetensors",
                 "Krebzonide/Sevenof9_v3_sdxl/blob/main/nsfwSevenof9V3_nsfwSevenof9V3.safetensors",
                 "Krebzonide/JuggernautXL_version5/blob/main/juggernautXL_version5.safetensors",
                 "Krebzonide/JuggernautXL_version5/blob/main/juggernautXL_version6Rundiffusion.safetensors",
                 "SG161222/RealVisXL_V2.0/blob/main/RealVisXL_V2.0.safetensors",
                 "Krebzonide/AlbedoBaseXL_v11/blob/main/albedobaseXL_v11.safetensors",
                 "Krebzonide/BetterThanWords_v20_sdxl/blob/main/betterThanWords_v20.safetensors",
                 "Krebzonide/AcornIsSpinning_acornXLV1/blob/main/acornIsSpinning_acornxlV1.safetensors",
                 "Krebzonide/PyrosNSFWSDXL_v04/blob/main/pyrosNSFWSDXL_v04.safetensors",
                 "Krebzonide/AltXL_v60/blob/main/altxl_v60.safetensors",
                 "Krebzonide/SDXXXL_v10/blob/main/sdxxxl_v10.safetensors",
                 "Krebzonide/DAC_PhotoEcstacy_XL_SE_V2/blob/main/dacPhotoecstasyXLSE_v20.safetensors"]

css = """
.btn-green {
  background-image: linear-gradient(to bottom right, #6dd178, #00a613) !important;
  border-color: #22c55e !important;
  color: #166534 !important;
}
.btn-green:hover {
  background-image: linear-gradient(to bottom right, #6dd178, #6dd178) !important;
}
"""

def generate(prompt, neg_prompt, samp_steps, cfg_scale, batch_size, seed, height, width, progress=gr.Progress(track_tqdm=True)):
    # A negative seed means "pick one at random", so the seed actually used can be reported back to the UI
    if seed < 0:
        seed = random.randint(1, 999999)
    images = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_inference_steps=samp_steps,
        guidance_scale=cfg_scale,
        num_images_per_prompt=batch_size,
        height=height,
        width=width,
        generator=torch.manual_seed(seed),
    ).images
    # Return the captioned gallery images and the seed that was used
    return [(img, f"Image {i+1}") for i, img in enumerate(images)], seed
        
def set_base_model(base_model_id):
    # The fp16-fix VAE avoids the black-image/NaN issues the stock SDXL VAE has in float16
    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
    global model_url_list
    model_url = "https://huggingface.co/" + model_url_list[base_model_id]
    pipe = StableDiffusionXLPipeline.from_single_file(
        model_url,
        torch_dtype=torch.float16,
        variant="fp16",
        vae=vae,
        use_safetensors=True,
        use_auth_token=os.getenv("HF_TOKEN")  # assumes the Hugging Face token is provided via the environment rather than hardcoded
    )
    pipe.to("cuda")
    return pipe

def update_pixel_ratio(num1, num2):
    # Image area in megapixels (1024*1024 = 1048576, so 1024x1024 reads as 1.0) and num1 snapped down to a multiple of 8
    return [round(num1 * num2 / 1048576, 3), num1 - (num1 % 8)]

def round_to_8(num):
    # Round a dimension down to the nearest multiple of 8, since SDXL expects sizes divisible by 8
    return num - (num % 8)

examples = [
    ['A serious capybara at work, wearing a suit',
    'low quality'],
    ['a graffiti of a robot serving meals to people',
    'low quality'],
    ['photo of a small cozy modern house in red woods on a mountain, solar panels, garage, driveway, great view, sunshine',
    'red house'],
    ['cinematic photo of a woman sitting at a cafe, 35mm photograph, film, bokeh, professional, 4k, highly detailed',
    'drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly'],
    ['analog film photo of old woman on the streets of london, faded film, desaturated, 35mm photo, grainy, vignette, vintage, Kodachrome, Lomography, stained, highly detailed, found footage',
    'painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured'],
    ['nude photo of a 20 year old model in the back seat of a car, detailed face',
    'big boobs'],
    ['nude photo of a 20 year old man, penis and testicles, dick and balls, erection',
    'woman']
]

with gr.Blocks(css=css) as demo:
    with gr.Column():
        prompt = gr.Textbox(label="Prompt")
        negative_prompt = gr.Textbox(label="Negative Prompt")
        submit_btn = gr.Button("Generate", elem_classes="btn-green")
        with gr.Row():
            samp_steps = gr.Slider(1, 50, value=20, step=1, label="Sampling steps")
            cfg_scale = gr.Slider(1, 10, value=3, step=0.5, label="Guidance scale")
            batch_size = gr.Slider(1, 4, value=1, step=1, label="Batch size")
        with gr.Row():
            height = gr.Slider(label="Height", value=1024, minimum=8, maximum=1536, step=8)
            width = gr.Slider(label="Width", value=1024, minimum=8, maximum=2560, step=8)
        with gr.Row():
            pixels = gr.Number(label="Pixel Ratio", value=1, interactive=False)
            seed = gr.Number(label="Seed", value=-1, minimum=-1, precision=0)
        gallery = gr.Gallery(label="Generated images")
        with gr.Row():
            lastSeed = gr.Number(label="Last Seed", value=-1, interactive=False)
            
        ex = gr.Examples(examples=examples, inputs=[prompt, negative_prompt])
    submit_btn.click(generate, [prompt, negative_prompt, samp_steps, cfg_scale, batch_size, seed, height, width], [gallery, lastSeed], queue=True)
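    # Releasing either slider refreshes the megapixel readout and snaps the released slider back to a multiple of 8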
    height.release(update_pixel_ratio, [height, width], [pixels, height], queue=False)
    width.release(update_pixel_ratio, [width, height], [pixels, width], queue=False)

pipe = set_base_model(model_id)
demo.launch(debug=True)