Update app.py
app.py CHANGED
@@ -31,6 +31,18 @@ footer {
 }
 '''
 
+vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+controlnet = ControlNetModel.from_pretrained("MakiPan/controlnet-encoded-hands-130k", torch_dtype=torch.float16)
+
+repo_default = StableDiffusionXLPipeline.from_pretrained("sd-community/sdxl-flash", vae=vae, controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
+repo_default.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="base")
+repo_default.set_adapters(["base"], adapter_weights=[0.7])
+
+repo_customs = {
+    "Default": repo_default,
+    "Anime": StableDiffusionXLPipeline.from_pretrained("cagliostrolab/animagine-xl-3.1", vae=vae, controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
+}
+
 # Functions
 def save_image(img, seed):
     name = f"{seed}-{uuid.uuid4()}.png"
@@ -47,7 +59,7 @@ def get_seed(seed):
 @spaces.GPU(duration=30)
 def generate(input=DEFAULT_INPUT, filter_input="", negative_input=DEFAULT_NEGATIVE_INPUT, model=DEFAULT_MODEL, height=DEFAULT_HEIGHT, width=DEFAULT_WIDTH, steps=1, guidance=0, number=1, seed=None):
 
-    repo =
+    repo = repo_customs[model or "Default"]
     filter_input = filter_input or ""
     negative_input = negative_input or DEFAULT_NEGATIVE_INPUT
     seed = get_seed(seed)
@@ -55,17 +67,9 @@ def generate(input=DEFAULT_INPUT, filter_input="", negative_input=DEFAULT_NEGATI
     print(input, filter_input, negative_input, model, height, width, steps, guidance, number, seed)
 
     if model == "Anime":
-        vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-        controlnet = ControlNetModel.from_pretrained("MakiPan/controlnet-encoded-hands-130k", torch_dtype=torch.float16)
-        repo = StableDiffusionXLPipeline.from_pretrained("cagliostrolab/animagine-xl-3.1", vae=vae, controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
         steps = (not steps or steps < 0 and 24) or steps
         guidance = (not guidance or guidance < 0 and 7) or guidance
     else:
-        vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-        controlnet = ControlNetModel.from_pretrained("MakiPan/controlnet-encoded-hands-130k", torch_dtype=torch.float16)
-        repo = StableDiffusionXLPipeline.from_pretrained("sd-community/sdxl-flash", vae=vae, controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
-        repo.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="base")
-        repo.set_adapters(["base"], adapter_weights=[0.7])
         steps = (not steps or steps < 0 and 16) or steps
         guidance = (not guidance or guidance < 0 and 3) or guidance
 
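In short, this commit hoists VAE, ControlNet, pipeline, and LoRA construction out of generate() to module scope, so weights load once at process startup instead of on every call, and generate() now just selects a pre-built pipeline from the repo_customs dict. Below is a minimal, self-contained sketch of that load-once, select-per-call pattern; the helper names _load and get_pipeline are illustrative and not part of app.py.

# Sketch of the pattern adopted here (illustrative names, not app.py's):
# construct each pipeline once at import time, then select by model name
# per request so the GPU-decorated handler does no loading.
import torch
from diffusers import StableDiffusionXLPipeline

def _load(repo_id):
    # Heavy download/initialization happens once, at process startup.
    return StableDiffusionXLPipeline.from_pretrained(
        repo_id, torch_dtype=torch.float16, use_safetensors=True
    )

PIPELINES = {
    "Default": _load("sd-community/sdxl-flash"),
    "Anime": _load("cagliostrolab/animagine-xl-3.1"),
}

def get_pipeline(model=None):
    # Falsy model names fall back to "Default", mirroring
    # repo_customs[model or "Default"] in the diff; .get() additionally
    # guards against unknown names instead of raising KeyError.
    return PIPELINES.get(model or "Default", PIPELINES["Default"])

One trade-off worth noting: the diff's repo_customs[model or "Default"] raises KeyError for an unrecognized model name, whereas the .get() fallback above degrades to the default pipeline. The move also matters on ZeroGPU Spaces, where the @spaces.GPU(duration=30) budget applies only to the decorated call, so keeping from_pretrained out of generate() leaves that window for inference alone.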