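# Gradio demo: text-to-image generation with Stable Diffusion XL, loading the
# checkpoint from a single .safetensors file on the Hugging Face Hub.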
from diffusers import StableDiffusionXLPipeline, AutoencoderKL
import torch
import random
#from controlnet_aux import OpenposeDetector
#from diffusers.utils import load_image
import gradio as gr
import gc
import os  # used below to read the Hugging Face token from the environment
#model_base = "stabilityai/stable-diffusion-xl-base-1.0"
#model_url = "https://huggingface.co/Krebzonide/Colossus_Project_XL/blob/main/colossusProjectXLSFW_v202BakedVAE.safetensors"
model_url = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors"
#model_url = "https://huggingface.co/Krebzonide/Sevenof9_v3_sdxl/blob/main/nsfwSevenof9V3_nsfwSevenof9V3.safetensors"
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe = None  # created lazily by set_base_model() at startup
#pipe = StableDiffusionXLPipeline.from_pretrained(
# model_base, vae=vae, torch_dtype=torch.float16, variant="fp16", use_safetensors=True
#)
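# Custom CSS for the green "Generate" button (applied via elem_classes below).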
css = """
.btn-green {
background-image: linear-gradient(to bottom right, #6dd178, #00a613) !important;
border-color: #22c55e !important;
color: #166534 !important;
}
.btn-green:hover {
background-image: linear-gradient(to bottom right, #6dd178, #6dd178) !important;
}
"""
def generate(prompt, neg_prompt, samp_steps, guide_scale, batch_size, seed, height, width, progress=gr.Progress(track_tqdm=True)):
    if seed < 0:
        seed = random.randint(1, 999999)
    images = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_inference_steps=int(samp_steps),  # sliders deliver floats; the pipeline expects ints
        guidance_scale=guide_scale,
        #cross_attention_kwargs={"scale": lora_scale},
        num_images_per_prompt=int(batch_size),
        height=int(height),
        width=int(width),
        generator=torch.manual_seed(seed),
    ).images
    return [(img, f"Image {i+1}") for i, img in enumerate(images)]
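# Replace the global pipeline: drop the old one and release GPU memory before
# loading the requested single-file checkpoint.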
def set_base_model(base_model_id):
    global pipe
    del pipe
    torch.cuda.empty_cache()
    gc.collect()
    pipe = StableDiffusionXLPipeline.from_single_file(
        base_model_id,  # was the global model_url; the argument was silently ignored
        torch_dtype=torch.float16,
        variant="fp16",
        vae=vae,
        use_safetensors=True,
        # Never hardcode a token in source; HF_TOKEN is an assumed env-var name here.
        use_auth_token=os.environ.get("HF_TOKEN"),
    )
    #pipe = load_model(base_model_id)
    pipe.to("cuda")
    return pipe
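# Unused helper, referenced only by the commented-out call in set_base_model above.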
def load_model(base_model_id):
    pipe = StableDiffusionXLPipeline.from_single_file(
        base_model_id,
        torch_dtype=torch.float16,
        variant="fp16",
        vae=vae,
        use_safetensors=True,
        use_auth_token=os.environ.get("HF_TOKEN"),
    )
    return pipe  # was missing; callers would otherwise receive None
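# Build the Gradio UI: prompt inputs, sampling controls, and a results gallery.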
with gr.Blocks(css=css) as demo:
    with gr.Column():
        prompt = gr.Textbox(label="Prompt")
        negative_prompt = gr.Textbox(label="Negative Prompt")
        submit_btn = gr.Button("Generate", elem_classes="btn-green")
        with gr.Row():
            samp_steps = gr.Slider(1, 50, value=20, step=1, label="Sampling steps")
            guide_scale = gr.Slider(1, 6, value=3, step=0.5, label="Guidance scale")
            batch_size = gr.Slider(1, 6, value=1, step=1, label="Batch size")
        with gr.Row():
            seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=999999, step=1)
            height = gr.Slider(label="Height", value=1024, minimum=512, maximum=2048, step=16)
            width = gr.Slider(label="Width", value=1024, minimum=512, maximum=2048, step=16)
        gallery = gr.Gallery(label="Generated images", height=800)
        submit_btn.click(generate, [prompt, negative_prompt, samp_steps, guide_scale, batch_size, seed, height, width], [gallery], queue=True)

pipe = set_base_model(model_url)
demo.queue(1)
demo.launch(debug=True)