Adjust project structure
Files changed:
- __pycache__/config.cpython-310.pyc +0 -0
- __pycache__/utils.cpython-310.pyc +0 -0
- app.py +14 -15
- config.py +13 -0
__pycache__/config.cpython-310.pyc
ADDED
Binary file (551 Bytes).

__pycache__/utils.cpython-310.pyc
CHANGED
Binary files a/__pycache__/utils.cpython-310.pyc and b/__pycache__/utils.cpython-310.pyc differ
app.py
CHANGED
@@ -6,10 +6,17 @@ import random
 import logging
 import utils
 from diffusers.models import AutoencoderKL
+from config import (
+    MODEL,
+    MIN_IMAGE_SIZE,
+    MAX_IMAGE_SIZE,
+    DEFAULT_NEGATIVE_PROMPT,
+    scheduler_list,
+)
+

 MAX_SEED = np.iinfo(np.int32).max
-MIN_IMAGE_SIZE = 512
-MAX_IMAGE_SIZE = 2048
+

 # Enhanced logging configuration
 logging.basicConfig(
@@ -35,11 +42,11 @@ if torch.cuda.is_available():
             "madebyollin/sdxl-vae-fp16-fix",
             torch_dtype=torch.float16,
         )
-        pipe = utils.load_pipeline(
+        pipe = utils.load_pipeline(MODEL, device, vae=vae)
         logger.info("Pipeline loaded successfully on GPU!")
     except Exception as e:
         logger.error(f"Error loading VAE, falling back to default: {e}")
-        pipe = utils.load_pipeline(
+        pipe = utils.load_pipeline(MODEL, device)
 else:
     logger.warning("CUDA not available, running on CPU")
     pipe = None
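Note: utils.py is not changed by this commit (only its stale __pycache__ entry differs), so the body of utils.load_pipeline is not visible here. For orientation, a minimal sketch of a loader matching the two call sites above, assuming the standard diffusers API; the function body and keyword handling are assumptions, not the repository's actual code:

```python
import torch
from diffusers import StableDiffusionXLPipeline

def load_pipeline(model_name: str, device: str, vae=None):
    """Hypothetical loader matching utils.load_pipeline's call sites."""
    # Pass the fp16-fixed VAE through when it loaded successfully;
    # otherwise fall back to the VAE bundled with the checkpoint.
    kwargs = {"vae": vae} if vae is not None else {}
    pipe = StableDiffusionXLPipeline.from_pretrained(
        model_name,
        torch_dtype=torch.float16,
        **kwargs,
    )
    return pipe.to(device)
```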
@@ -82,15 +89,6 @@ def generate(



-scheduler_list = [
-    "DPM++ 2M Karras",
-    "DPM++ SDE Karras",
-    "DPM++ 2M SDE Karras",
-    "Euler",
-    "Euler a",
-    "DDIM"
-]
-

 title = "# Animagine XL 4.0 Demo"

@@ -120,13 +118,14 @@ with gr.Blocks(css=custom_css).queue() as demo:
         with gr.Column():
             prompt = gr.Text(
                 label="Prompt",
-                max_lines=
+                max_lines=5,
                 placeholder="Enter your prompt",
             )
             negative_prompt = gr.Text(
                 label="Negative prompt",
-                max_lines=
+                max_lines=5,
                 placeholder="Enter a negative prompt",
+                value=DEFAULT_NEGATIVE_PROMPT,
             )
             with gr.Row():
                 width = gr.Slider(
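The hunk stops right where the resolution sliders begin, but the MIN_IMAGE_SIZE and MAX_IMAGE_SIZE imports added above suggest those constants bound the width and height sliders. A hypothetical sketch; the step and default values are assumptions, and the actual slider arguments are outside this diff:

```python
import gradio as gr
from config import MIN_IMAGE_SIZE, MAX_IMAGE_SIZE

width = gr.Slider(
    label="Width",
    minimum=MIN_IMAGE_SIZE,   # 512, from config.py
    maximum=MAX_IMAGE_SIZE,   # 2048, from config.py
    step=32,                  # assumed step size
    value=1024,               # assumed SDXL-native default
)
# height would presumably follow the same pattern.
```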
config.py
ADDED
@@ -0,0 +1,13 @@
+MODEL = "cagliostrolab/animagine-xl-4.0"
+DEFAULT_NEGATIVE_PROMPT = "lowres, bad anatomy, bad hands, text, error, missing finger, extra digits, fewer digits, cropped, worst quality, low quality, low score, bad score, average score, signature, watermark, username, blurry"
+MIN_IMAGE_SIZE = 512
+MAX_IMAGE_SIZE = 2048
+
+scheduler_list = [
+    "DPM++ 2M Karras",
+    "DPM++ SDE Karras",
+    "DPM++ 2M SDE Karras",
+    "Euler",
+    "Euler a",
+    "DDIM"
+]
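config.py stores only the scheduler names; the mapping from these strings to diffusers scheduler classes presumably lives in utils.py, which this commit does not touch. A plausible sketch of such a mapping using documented diffusers classes (the function name and fallback behavior are assumptions):

```python
from diffusers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSDEScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
)

def get_scheduler(name, config):
    # config is the loaded pipeline's pipe.scheduler.config
    if name == "DPM++ 2M Karras":
        return DPMSolverMultistepScheduler.from_config(config, use_karras_sigmas=True)
    if name == "DPM++ SDE Karras":
        return DPMSolverSDEScheduler.from_config(config, use_karras_sigmas=True)
    if name == "DPM++ 2M SDE Karras":
        return DPMSolverMultistepScheduler.from_config(
            config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++"
        )
    if name == "Euler":
        return EulerDiscreteScheduler.from_config(config)
    if name == "Euler a":
        return EulerAncestralDiscreteScheduler.from_config(config)
    return DDIMScheduler.from_config(config)  # "DDIM" and fallback

# Usage at generation time would then be something like:
# pipe.scheduler = get_scheduler(scheduler_name, pipe.scheduler.config)
```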