Update app.py
app.py
CHANGED
@@ -11,7 +11,6 @@ import os
 
 from diffusers import DiffusionPipeline
 from transformers import pipeline
-# from peft import PeftModel
 from PIL import Image
 
 # Pre-Initialize
@@ -43,6 +42,8 @@ footer {
 repo_nsfw_classifier = pipeline("image-classification", model="Falconsai/nsfw_image_detection")
 
 repo_default = DiffusionPipeline.from_pretrained("fluently/Fluently-XL-Final", device=DEVICE, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
+repo_default.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="base")
+repo_default.set_adapters(["base"], adapter_weights=[0.7])
 
 # repo_large = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16, add_watermarker=False, revision="refs/pr/1")
 # repo_large.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="base")
@@ -50,9 +51,9 @@ repo_default = DiffusionPipeline.from_pretrained("fluently/Fluently-XL-Final", d
 
 repo_customs = {
     "Default": repo_default,
-    "Realistic": DiffusionPipeline.from_pretrained("ehristoforu/Visionix-alpha", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
-    "Anime": DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.1", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
-    "Pixel": repo_default,
+    # "Realistic": DiffusionPipeline.from_pretrained("ehristoforu/Visionix-alpha", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
+    # "Anime": DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.1", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
+    # "Pixel": repo_default,
     # "Large": repo_neo,
 }
 
@@ -100,9 +101,6 @@ def generate(input=DEFAULT_INPUT, filter_input="", negative_input=DEFAULT_NEGATI
     else:
         steps_set = 25
         guidance_set = 7
-
-        repo.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="base")
-        repo.set_adapters(["base"], adapter_weights=[0.7])
 
     if not steps:
         steps = steps_set
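Net effect of the change: the "base" LoRA is attached to repo_default once at startup instead of being re-loaded inside every generate() call, and the extra Realistic/Anime/Pixel pipelines are commented out so only the default checkpoint stays in memory. Below is a minimal sketch of the resulting startup path, assuming the peft package is installed for diffusers' LoRA support; the model IDs and adapter calls are taken from the diff, while the device handling, prompt, and output filename are illustrative assumptions, not part of the commit.

import torch
from diffusers import DiffusionPipeline

# Load the base SDXL checkpoint once at startup (fp16, safetensors, no watermarker),
# mirroring the repo_default setup in the diff above.
pipe = DiffusionPipeline.from_pretrained(
    "fluently/Fluently-XL-Final",
    torch_dtype=torch.float16,
    use_safetensors=True,
    add_watermarker=False,
)

# Attach the LoRA a single time and fix its blend weight at 0.7,
# rather than re-loading it on every generation request.
pipe.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="base")
pipe.set_adapters(["base"], adapter_weights=[0.7])

# Illustrative usage (not from the commit): pick a device and run one prompt
# with the same fallback defaults the handler uses (25 steps, guidance 7).
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe("a lighthouse at dusk", num_inference_steps=25, guidance_scale=7.0).images[0]
image.save("output.png")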