Spaces: Build error

Commit · 8eae9e7
1 Parent(s): 61a91f2
Update app.py

app.py CHANGED
@@ -58,22 +58,27 @@ models = [
 ]
 
 
-
+custom_model = None
+if is_colab:
+    models.insert(0, Model("Custom model"))
+    custom_model = models[0]
+
+last_mode = "txt2img"
 current_model = models[1] if is_colab else models[0]
 current_model_path = current_model.path
 
 if is_colab:
-
+    pipe = StableDiffusionPipeline.from_pretrained(
         current_model.path,
-        torch_dtype=torch.
+        torch_dtype=torch.float32,
-        scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
+        scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
+        safety_checker=lambda images, clip_input: (images, False)
         )
 
-
 else:
-
+    pipe = StableDiffusionPipeline.from_pretrained(
         current_model.path,
-        torch_dtype=torch.
+        torch_dtype=torch.float32,
         scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
         )
 
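The added lines load the pipeline in a CPU-friendly configuration: float32 weights, a DPM-Solver++ scheduler, and a bypassed safety checker. A minimal standalone sketch of that pattern with diffusers follows; the model id and the use of safety_checker=None are illustrative assumptions, not taken from this commit, which uses current_model.path and a lambda instead.

import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

# Illustrative model id; the Space selects current_model.path from its own model list.
model_id = "runwayml/stable-diffusion-v1-5"

pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float32,  # float16 generally needs a CUDA device; float32 is safe on CPU
    scheduler=DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler"),
    safety_checker=None,  # assumption for brevity; the commit instead passes a lambda returning (images, False)
)
pipe = pipe.to("cpu")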
@@ -129,7 +134,7 @@ def inference(model_name, prompt, guidance, steps, n_images=1, width=512, height
     if seed == 0:
         seed = random.randint(0, 2147483647)
 
-    generator = torch.Generator('
+    generator = torch.Generator('cpu').manual_seed(seed)
 
     try:
         if img is not None:
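The new generator line pins seeding to the CPU so results stay reproducible for a given seed. A minimal sketch of how such a seeded torch.Generator would be used with a pipeline like the one above; the prompt and step count are illustrative.

import random
import torch

seed = 0  # 0 means "pick a random seed", mirroring the logic in the diff
if seed == 0:
    seed = random.randint(0, 2147483647)

# A CPU generator works regardless of where the pipeline runs.
generator = torch.Generator('cpu').manual_seed(seed)
# images = pipe("an astronaut riding a horse", num_inference_steps=25, generator=generator).images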
@@ -154,14 +159,14 @@ def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width,
     if is_colab or current_model == custom_model:
         pipe = StableDiffusionPipeline.from_pretrained(
             current_model_path,
-            torch_dtype=torch.
+            torch_dtype=torch.float32,
             scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
             safety_checker=lambda images, clip_input: (images, False)
             )
     else:
         pipe = StableDiffusionPipeline.from_pretrained(
             current_model_path,
-            torch_dtype=torch.
+            torch_dtype=torch.float32,
             scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
             )
     # pipe = pipe.to("cpu")
@@ -185,7 +190,7 @@ def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width,
         callback=pipe_callback)
 
     # update_state(f"Done. Seed: {seed}")
-
+
     return replace_nsfw_images(result)
 
 def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed):
@@ -203,14 +208,14 @@ def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance
     if is_colab or current_model == custom_model:
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
             current_model_path,
-            torch_dtype=torch.
+            torch_dtype=torch.float32,
             scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
             safety_checker=lambda images, clip_input: (images, False)
             )
     else:
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
             current_model_path,
-            torch_dtype=torch.
+            torch_dtype=torch.float32,
             scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
             )
     # pipe = pipe.to("cpu")
@@ -238,6 +243,8 @@ def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance
         callback=pipe_callback)
 
     # update_state(f"Done. Seed: {seed}")
+
+    return replace_nsfw_images(result)
 
 def replace_nsfw_images(results):
 