Update app.py
Browse files
app.py
CHANGED
|
@@ -19,19 +19,10 @@ print(f"[SYSTEM] | Using {DEVICE} type compute device.")
|
|
| 19 |
MAX_SEED = 9007199254740991
|
| 20 |
DEFAULT_INPUT = ""
|
| 21 |
DEFAULT_NEGATIVE_INPUT = "EasyNegative, deformed, distorted, disfigured, disconnected, disgusting, mutation, mutated, blur, blurry, scribble, abstract, watermark, ugly, amputation, limb, limbs, leg, legs, foot, feet, toe, toes, arm, arms, hand, hands, finger, fingers, head, heads, exposed, porn, nude, nudity, naked, nsfw"
|
|
|
|
| 22 |
DEFAULT_HEIGHT = 1024
|
| 23 |
DEFAULT_WIDTH = 1024
|
| 24 |
|
| 25 |
-
REPO = "hsalf-lxds/ytinummoc-ds"[::-1]
|
| 26 |
-
|
| 27 |
-
vae = AutoencoderKL.from_pretrained("xif-61pf-eav-lxds/nilloybedam"[::-1], torch_dtype=torch.float16)
|
| 28 |
-
controlnet = ControlNetModel.from_pretrained("k031-sdnah-dedocne-tenlortnoc/naPikaM"[::-1], torch_dtype=torch.float16)
|
| 29 |
-
|
| 30 |
-
model = StableDiffusionXLPipeline.from_pretrained(REPO, vae=vae, controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
|
| 31 |
-
model.load_lora_weights("2v-lx-3-ellad/urofotsirhe"[::-1], adapter_name="base")
|
| 32 |
-
model.set_adapters(["base"], adapter_weights=[0.7])
|
| 33 |
-
model.to(DEVICE)
|
| 34 |
-
|
| 35 |
css = '''
|
| 36 |
.gradio-container{max-width: 560px !important}
|
| 37 |
h1{text-align:center}
|
|
@@ -54,13 +45,30 @@ def get_seed(seed):
|
|
| 54 |
return random.randint(0, MAX_SEED)
|
| 55 |
|
| 56 |
@spaces.GPU(duration=30)
|
| 57 |
-
def generate(input=DEFAULT_INPUT, negative_input=DEFAULT_NEGATIVE_INPUT, height=DEFAULT_HEIGHT, width=DEFAULT_WIDTH, steps=1, guidance=0, number=1, seed=None):
|
| 58 |
-
|
|
|
|
| 59 |
seed = get_seed(seed)
|
| 60 |
|
| 61 |
-
print(input, negative_input, height, width, steps, guidance, number, seed)
|
| 62 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
model.to(DEVICE)
|
|
|
|
| 64 |
parameters = {
|
| 65 |
"prompt": input,
|
| 66 |
"negative_prompt": negative_input,
|
|
@@ -93,6 +101,7 @@ with gr.Blocks(css=css) as main:
|
|
| 93 |
with gr.Column():
|
| 94 |
input = gr.Textbox(lines=1, value=DEFAULT_INPUT, label="Input")
|
| 95 |
negative_input = gr.Textbox(lines=1, value=DEFAULT_NEGATIVE_INPUT, label="Input Negative")
|
|
|
|
| 96 |
height = gr.Slider(minimum=1, maximum=2160, step=1, value=DEFAULT_HEIGHT, label="Height")
|
| 97 |
width = gr.Slider(minimum=1, maximum=2160, step=1, value=DEFAULT_WIDTH, label="Width")
|
| 98 |
steps = gr.Slider(minimum=0, maximum=100, step=1, value=16, label="Steps")
|
|
@@ -105,7 +114,7 @@ with gr.Blocks(css=css) as main:
|
|
| 105 |
with gr.Column():
|
| 106 |
images = gr.Gallery(columns=1, label="Image")
|
| 107 |
|
| 108 |
-
submit.click(generate, inputs=[input, negative_input, height, width, steps, guidance, number, seed], outputs=[images], queue=False)
|
| 109 |
maintain.click(cloud, inputs=[], outputs=[], queue=False)
|
| 110 |
|
| 111 |
main.launch(show_api=True)
|
|
|
|
| 19 |
MAX_SEED = 9007199254740991
|
| 20 |
DEFAULT_INPUT = ""
|
| 21 |
DEFAULT_NEGATIVE_INPUT = "EasyNegative, deformed, distorted, disfigured, disconnected, disgusting, mutation, mutated, blur, blurry, scribble, abstract, watermark, ugly, amputation, limb, limbs, leg, legs, foot, feet, toe, toes, arm, arms, hand, hands, finger, fingers, head, heads, exposed, porn, nude, nudity, naked, nsfw"
|
| 22 |
+
DEFAULT_MODEL = "Default"
|
| 23 |
DEFAULT_HEIGHT = 1024
|
| 24 |
DEFAULT_WIDTH = 1024
|
| 25 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
css = '''
|
| 27 |
.gradio-container{max-width: 560px !important}
|
| 28 |
h1{text-align:center}
|
|
|
|
| 45 |
return random.randint(0, MAX_SEED)
|
| 46 |
|
| 47 |
@spaces.GPU(duration=30)
|
| 48 |
+
def generate(input=DEFAULT_INPUT, negative_input=DEFAULT_NEGATIVE_INPUT, model=DEFAULT_MODEL, height=DEFAULT_HEIGHT, width=DEFAULT_WIDTH, steps=1, guidance=0, number=1, seed=None):
|
| 49 |
+
|
| 50 |
+
repo = None
|
| 51 |
seed = get_seed(seed)
|
| 52 |
|
| 53 |
+
print(input, negative_input, model, height, width, steps, guidance, number, seed)
|
| 54 |
|
| 55 |
+
if model == "Anime":
|
| 56 |
+
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
|
| 57 |
+
controlnet = ControlNetModel.from_pretrained("MakiPan/controlnet-encoded-hands-130k", torch_dtype=torch.float16)
|
| 58 |
+
repo = StableDiffusionXLPipeline.from_pretrained("cagliostrolab/animagine-xl-3.1", vae=vae, controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
|
| 59 |
+
steps = steps or 16
|
| 60 |
+
guidance = guidance or 7
|
| 61 |
+
else:
|
| 62 |
+
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
|
| 63 |
+
controlnet = ControlNetModel.from_pretrained("MakiPan/controlnet-encoded-hands-130k", torch_dtype=torch.float16)
|
| 64 |
+
repo = StableDiffusionXLPipeline.from_pretrained("sd-community/sdxl-flash", vae=vae, controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
|
| 65 |
+
repo.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="base")
|
| 66 |
+
repo.set_adapters(["base"], adapter_weights=[0.7])
|
| 67 |
+
steps = steps or 16
|
| 68 |
+
guidance = guidance or 3
|
| 69 |
+
|
| 70 |
repo.to(DEVICE)
|
| 71 |
+
|
| 72 |
parameters = {
|
| 73 |
"prompt": input,
|
| 74 |
"negative_prompt": negative_input,
|
|
|
|
| 101 |
with gr.Column():
|
| 102 |
input = gr.Textbox(lines=1, value=DEFAULT_INPUT, label="Input")
|
| 103 |
negative_input = gr.Textbox(lines=1, value=DEFAULT_NEGATIVE_INPUT, label="Input Negative")
|
| 104 |
+
model = gr.Dropdown(label="Models", choices=["Default", "Anime"], value="Default")
|
| 105 |
height = gr.Slider(minimum=1, maximum=2160, step=1, value=DEFAULT_HEIGHT, label="Height")
|
| 106 |
width = gr.Slider(minimum=1, maximum=2160, step=1, value=DEFAULT_WIDTH, label="Width")
|
| 107 |
steps = gr.Slider(minimum=0, maximum=100, step=1, value=16, label="Steps")
|
|
|
|
| 114 |
with gr.Column():
|
| 115 |
images = gr.Gallery(columns=1, label="Image")
|
| 116 |
|
| 117 |
+
submit.click(generate, inputs=[input, negative_input, model, height, width, steps, guidance, number, seed], outputs=[images], queue=False)
|
| 118 |
maintain.click(cloud, inputs=[], outputs=[], queue=False)
|
| 119 |
|
| 120 |
main.launch(show_api=True)
|