Update app.py
app.py
CHANGED
@@ -49,7 +49,7 @@ def infer(
     progress=gr.Progress(track_tqdm=True),
 ):
     seed = int(randomize_seed_fn(seed, randomize_seed))
-    generator = torch.Generator(
+    generator = torch.Generator().manual_seed(seed)
 
     # Use get_weighted_text_embeddings_sdxl to obtain the text embeddings; do not pass a device argument
     if use_negative_prompt and negative_prompt:
@@ -79,14 +79,11 @@ def infer(
     image = pipe(
         prompt_embeds=prompt_embeds,
         negative_prompt_embeds=prompt_neg_embeds,
-        pooled_prompt_embeds = pooled_prompt_embeds,
-        negative_pooled_prompt_embeds = negative_pooled_prompt_embeds,
         width=width,
         height=height,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         generator=generator,
-        use_resolution_binning=use_resolution_binning,
     ).images[0]
     return image, seed
 
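For reference, below is a minimal standalone sketch of the seeded-generator pattern this commit switches to: a CPU torch.Generator seeded with manual_seed(seed) and handed to a diffusers pipeline call. The model id, prompt, and use of the stock StableDiffusionXLPipeline are illustrative assumptions; the Space's actual pipe object and its get_weighted_text_embeddings_sdxl embedding path are not reproduced here.

# Minimal sketch of the seeded-generator pattern adopted in this commit.
# Assumptions for illustration: the stock StableDiffusionXLPipeline and the
# model id below; the Space's actual `pipe` and its weighted-embedding path differ.
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # assumed model id
    torch_dtype=torch.float16,
).to("cuda")

seed = 42
# CPU generator seeded explicitly, as in the added line: the noise drawn from it
# is reproducible for a given seed and does not depend on the GPU's RNG state.
generator = torch.Generator().manual_seed(seed)

image = pipe(
    prompt="a photo of an astronaut riding a horse on the moon",
    guidance_scale=7.0,
    num_inference_steps=25,
    generator=generator,
).images[0]
image.save("output.png")

Creating the generator without a device argument keeps it on the CPU, which is one common way to make a given seed reproduce the same latents regardless of which GPU runs the pipeline.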