import os

import torch
from diffusers import StableDiffusionPipeline
import gradio as gr
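
# Gradio demo: text-to-image with Stable Diffusion plus a LoRA loaded on top of
# a swappable base checkpoint. Uncomment exactly one model_base and one
# lora_model_path below.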

#model_base = "SG161222/Realistic_Vision_V5.1_noVAE"  # fantasy people
#model_base = "Justin-Choo/epiCRealism-Natural_Sin_RC1_VAE"  # cartoon people
#model_base = "Lykon/DreamShaper"  # unrealistic people
#model_base = "runwayml/stable-diffusion-v1-5"  # base
model_base = "Krebzonide/LazyMixPlus"  # nsfw people
#model_base = "Krebzonide/Humans"  # boring people
#model_base = "aufahr/unofficial_aom3"  # anime people

#lora_model_path = "Krebzonide/LoRA-CH-0"  # mecjh - Corey H, trained on epiCRealism
#lora_model_path = "Krebzonide/LoRA-CH-1"  # mecjh - Corey H, trained on epiCRealism
lora_model_path = "Krebzonide/LoRA-EM1"  # exgfem - Emily M, trained on LazyMixPlus
#lora_model_path = "Krebzonide/LoRA-EM-2-0"  # exgfem - Emily M, trained on Humans
#lora_model_path = "Krebzonide/LoRA-YX1"  # uwspyx - Professor Xing, trained on Realistic_Vision

# Authenticate via the HF_TOKEN environment variable rather than hard-coding an
# access token in source.
hf_token = os.environ.get("HF_TOKEN")

pipe = StableDiffusionPipeline.from_pretrained(model_base, torch_dtype=torch.float16, use_safetensors=True, use_auth_token=hf_token)
pipe.unet.load_attn_procs(lora_model_path, use_auth_token=hf_token)
pipe.to("cuda")
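
# Note: generation is non-deterministic by default; passing a seeded generator,
# e.g. pipe(prompt, generator=torch.Generator("cuda").manual_seed(42), ...),
# would make outputs reproducible.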

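# CSS for the Generate button: green gradient with a lighter hover state.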
css = """
.btn-green {
  background-image: linear-gradient(to bottom right, #6dd178, #00a613) !important;
  border-color: #22c55e !important;
  color: #166534 !important;
}
.btn-green:hover {
  background-image: linear-gradient(to bottom right, #6dd178, #6dd178) !important;
}
"""

def generate(prompt, neg_prompt, samp_steps, guide_scale, lora_scale, progress=gr.Progress(track_tqdm=True)):
    # Run the pipeline and return (image, caption) pairs for the gallery.
    images = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_inference_steps=int(samp_steps),  # sliders can deliver floats
        guidance_scale=guide_scale,
        cross_attention_kwargs={"scale": lora_scale},  # LoRA strength (0-1)
        num_images_per_prompt=6,
    ).images
    return [(img, f"Image {i+1}") for i, img in enumerate(images)]

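# Build the interface: prompt inputs, a styled Generate button, an output
# gallery, and sliders for sampling steps, guidance scale, and LoRA strength.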
with gr.Blocks(css=css) as demo:
    with gr.Column():
        prompt = gr.Textbox(label="Prompt")
        negative_prompt = gr.Textbox(label="Negative Prompt", value="lowres, bad anatomy, bad hands, cropped, worst quality, disfigured, deformed, extra limbs, asian, filter, render")
        submit_btn = gr.Button("Generate", elem_classes="btn-green")
        gallery = gr.Gallery(label="Generated images", height=700)
        with gr.Row():
            samp_steps = gr.Slider(1, 100, value=25, step=1, label="Sampling steps")
            guide_scale = gr.Slider(1, 10, value=6, step=0.5, label="Guidance scale")
            lora_scale = gr.Slider(0, 1, value=0.5, step=0.01, label="LoRA power")

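    # Run generate on click and route the images to the gallery.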
    submit_btn.click(generate, [prompt, negative_prompt, samp_steps, guide_scale, lora_scale], [gallery], queue=True)

demo.queue(concurrency_count=1)  # process one generation request at a time
demo.launch(debug=True)