Update app.py
app.py CHANGED
@@ -7,9 +7,6 @@ import os
 from huggingface_hub import hf_hub_download
 from diffusers import StableDiffusionXLPipeline, AutoencoderKL, EulerAncestralDiscreteScheduler
 from compel import Compel, ReturnedEmbeddingsType
-from PIL import Image, PngImagePlugin
-import json
-import io
 
 # =====================================
 # Prompt weights
@@ -215,25 +212,6 @@ def get_embed_new(prompt, pipeline, compel, only_convert_string=False, compel_pr
 
     return merge_embeds([prompt_attention_to_invoke_prompt(i) for i in global_prompt_chanks], compel)
 
-# Add metadata to the image
-def add_metadata_to_image(image, metadata):
-    metadata_str = json.dumps(metadata)
-
-    # Convert PIL Image to PNG with metadata
-    img_with_metadata = image.copy()
-
-    # Create a PngInfo object and add metadata
-    png_info = PngImagePlugin.PngInfo()
-    png_info.add_text("parameters", metadata_str)
-
-    # Save to a byte buffer with metadata
-    buffer = io.BytesIO()
-    img_with_metadata.save(buffer, format="PNG", pnginfo=png_info)
-
-    # Reopen from buffer to get the image with metadata
-    buffer.seek(0)
-    return Image.open(buffer)
-
 def add_comma_after_pattern_ti(text):
     pattern = re.compile(r'\b\w+_\d+\b')
     modified_text = pattern.sub(lambda x: x.group() + ',', text)
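Side note on this removal: outputs from the Space no longer carry the "parameters" tEXt chunk that the deleted helper wrote (the same key A1111-style tools conventionally read generation settings from). A minimal check on a saved output, where out.png is a placeholder path:

from PIL import Image

img = Image.open("out.png")  # placeholder path for a saved output
# Pillow surfaces PNG tEXt chunks through the info dict; expect None after this change
print(img.info.get("parameters"))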
@@ -246,6 +224,7 @@ MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
 
 if torch.cuda.is_available():
+    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
     token = os.environ.get("HF_TOKEN")  # read the access token from an environment variable
     model_path = hf_hub_download(
         repo_id="Menyu/Pixel",  # model repository name (not the full URL)
@@ -254,6 +233,7 @@ if torch.cuda.is_available():
     )
     pipe = StableDiffusionXLPipeline.from_single_file(
         model_path,
+        vae=vae,
         use_safetensors=True,
         torch_dtype=torch.float16,
     )
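The added VAE is the usual fix for SDXL's stock autoencoder, which can overflow to NaNs (black images) when decoding in float16; madebyollin/sdxl-vae-fp16-fix is a drop-in replacement finetuned to stay numerically stable in fp16. A minimal sketch of the new load path; the filename argument and the device placement are assumptions, since the diff elides those lines:

import os
import torch
from huggingface_hub import hf_hub_download
from diffusers import StableDiffusionXLPipeline, AutoencoderKL

# fp16-safe VAE, replacing the one bundled in the checkpoint
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

model_path = hf_hub_download(
    repo_id="Menyu/Pixel",
    filename="model.safetensors",  # assumption: the real filename is elided in the diff
    token=os.environ.get("HF_TOKEN"),
)
pipe = StableDiffusionXLPipeline.from_single_file(
    model_path,
    vae=vae,  # overrides the checkpoint's own VAE
    use_safetensors=True,
    torch_dtype=torch.float16,
).to("cuda")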
@@ -292,8 +272,6 @@ def infer(
     # call get_embed_new inside infer
     if not use_negative_prompt:
         negative_prompt = ""
-
-    original_prompt = prompt  # Store original prompt for metadata
     prompt = get_embed_new(prompt, pipe, compel, only_convert_string=True)
     negative_prompt = get_embed_new(negative_prompt, pipe, compel, only_convert_string=True)
     conditioning, pooled = compel([prompt, negative_prompt])  # must be processed together so the lengths stay equal
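The translated comment explains why prompt and negative prompt go through compel as one batch: a single call pads both conditioning tensors to the same token length, which the SDXL pipeline requires when embeddings are passed explicitly. A sketch of how the batch is typically sliced and fed back in, following compel's documented SDXL usage rather than code shown in this diff (pipe, prompt, and negative_prompt come from the surrounding app):

from compel import Compel, ReturnedEmbeddingsType

compel = Compel(
    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True],
)

# one batched call pads both prompts to equal length
conditioning, pooled = compel([prompt, negative_prompt])

image = pipe(
    prompt_embeds=conditioning[0:1],
    pooled_prompt_embeds=pooled[0:1],
    negative_prompt_embeds=conditioning[1:2],
    negative_pooled_prompt_embeds=pooled[1:2],
).images[0]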
@@ -311,25 +289,7 @@
         generator=generator,
         use_resolution_binning=use_resolution_binning,
     ).images[0]
-
-    # Create metadata dictionary
-    metadata = {
-        "prompt": original_prompt,
-        "processed_prompt": prompt,
-        "negative_prompt": negative_prompt,
-        "seed": seed,
-        "width": width,
-        "height": height,
-        "guidance_scale": guidance_scale,
-        "num_inference_steps": num_inference_steps,
-        "model": "MiaoMiaoPixel_V1.0",
-        "use_resolution_binning": use_resolution_binning,
-        "PreUrl": "https://huggingface.co/spaces/Menyu/MiaoPixel"
-    }
-    # Add metadata to the image
-    image_with_metadata = add_metadata_to_image(image, metadata)
-
-    return image_with_metadata, seed
+    return image, seed
 
 examples = [
     "nahida (genshin impact)",
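With the metadata path gone, infer now hands back the raw PIL image straight from the pipeline. A deployment that still wants embedded parameters can attach them once at save time instead of round-tripping through an in-memory buffer the way the deleted helper did; save_with_parameters below is a hypothetical helper reusing the removed code's "parameters" key:

import json
from PIL import PngImagePlugin

def save_with_parameters(image, path, metadata):
    # hypothetical helper: write generation settings as a PNG tEXt chunk at save time,
    # under the same "parameters" key the removed add_metadata_to_image used
    png_info = PngImagePlugin.PngInfo()
    png_info.add_text("parameters", json.dumps(metadata))
    image.save(path, format="PNG", pnginfo=png_info)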