Update app.py
app.py CHANGED
@@ -148,7 +148,7 @@ def infer(
     outputs = model.generate(
         input_ids=input_ids,
         attention_mask=attention_mask,
-        max_new_tokens=
+        max_new_tokens=1024,
         temperature=0.2,
         top_p=0.9,
         do_sample=True,
@@ -201,9 +201,9 @@ def infer(
     print('-- generating image --')
     #with torch.no_grad():
     sd_image = pipe(
-        prompt=
+        prompt=prompt, # This conversion is fine
         prompt_2=enhanced_prompt_2,
-        prompt_3=
+        prompt_3=enhanced_prompt,
         negative_prompt=negative_prompt,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
@@ -211,6 +211,7 @@ def infer(
         height=height,
         # latents=None,
         generator=generator,
+        max_sequence_length=1024
     ).images[0]
     print('-- got image --')
     image_path = f"sd35m_{seed}.png"
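For context, here is a minimal sketch of how the two edited call sites fit together, assuming the Space first rewrites the user prompt with a causal LM (model/tokenizer) and then renders it with a diffusers StableDiffusion3Pipeline (pipe). Everything outside the diff, including the model IDs, the signature of infer, and the wiring of enhanced_prompt and enhanced_prompt_2, is an assumption for illustration, not the Space's actual code.

    # Sketch only: reconstructs the regions touched by this commit under the
    # assumptions stated above; names not present in the diff are hypothetical.
    import torch
    from diffusers import StableDiffusion3Pipeline
    from transformers import AutoModelForCausalLM, AutoTokenizer

    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Hypothetical prompt-enhancer LM; the actual Space may use a different model.
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-1.5B-Instruct")
    model = AutoModelForCausalLM.from_pretrained(
        "Qwen/Qwen2.5-1.5B-Instruct", torch_dtype=torch.bfloat16
    ).to(device)

    pipe = StableDiffusion3Pipeline.from_pretrained(
        "stabilityai/stable-diffusion-3.5-medium", torch_dtype=torch.bfloat16
    ).to(device)

    def infer(prompt, negative_prompt="", seed=0, guidance_scale=4.5,
              num_inference_steps=28, width=1024, height=1024):
        # Hunk @@ -148: enhance the user prompt with the LM.
        enc = tokenizer(prompt, return_tensors="pt").to(device)
        outputs = model.generate(
            input_ids=enc.input_ids,
            attention_mask=enc.attention_mask,
            max_new_tokens=1024,   # value set by this commit
            temperature=0.2,
            top_p=0.9,
            do_sample=True,
        )
        enhanced_prompt = tokenizer.decode(
            outputs[0][enc.input_ids.shape[-1]:], skip_special_tokens=True
        )
        enhanced_prompt_2 = enhanced_prompt  # placeholder wiring

        # Hunks @@ -201 and @@ -211: generate the image.
        generator = torch.Generator(device=device).manual_seed(seed)
        print('-- generating image --')
        sd_image = pipe(
            prompt=prompt,               # CLIP-L prompt: raw user text
            prompt_2=enhanced_prompt_2,  # CLIP-G prompt
            prompt_3=enhanced_prompt,    # T5 prompt: LLM-enhanced text
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            generator=generator,
            max_sequence_length=1024,    # added by this commit; see note below
        ).images[0]
        print('-- got image --')
        image_path = f"sd35m_{seed}.png"
        sd_image.save(image_path)
        return image_path

One caveat worth flagging: in the diffusers SD3 pipeline releases I'm aware of, input validation rejects max_sequence_length values above 512 (the cap applied to the T5 encoder), so max_sequence_length=1024 may raise a ValueError at runtime depending on the installed diffusers version; if it does, 512 is the largest accepted value.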