1inkusFace committed on
Commit
02ab915
·
verified ·
1 Parent(s): ee0d019

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -0
app.py CHANGED
@@ -118,8 +118,10 @@ def infer_30(
118
  torch.set_float32_matmul_precision("highest")
119
  seed = random.randint(0, MAX_SEED)
120
  generator = torch.Generator(device='cuda').manual_seed(seed)
 
121
  print('-- generating image --')
122
  sd_image = pipe(
 
123
  prompt=prompt,
124
  prompt_2=prompt,
125
  prompt_3=prompt,
@@ -168,8 +170,10 @@ def infer_60(
168
  torch.set_float32_matmul_precision("highest")
169
  seed = random.randint(0, MAX_SEED)
170
  generator = torch.Generator(device='cuda').manual_seed(seed)
 
171
  print('-- generating image --')
172
  sd_image = pipe(
 
173
  prompt=prompt,
174
  prompt_2=prompt,
175
  prompt_3=prompt,
@@ -180,6 +184,7 @@ def infer_60(
180
  num_inference_steps=num_inference_steps,
181
  width=width,
182
  height=height,
 
183
  generator=generator,
184
  max_sequence_length=512
185
  ).images[0]
@@ -217,8 +222,10 @@ def infer_90(
217
  torch.set_float32_matmul_precision("highest")
218
  seed = random.randint(0, MAX_SEED)
219
  generator = torch.Generator(device='cuda').manual_seed(seed)
 
220
  print('-- generating image --')
221
  sd_image = pipe(
 
222
  prompt=prompt,
223
  prompt_2=prompt,
224
  prompt_3=prompt,
@@ -229,6 +236,7 @@ def infer_90(
229
  num_inference_steps=num_inference_steps,
230
  width=width,
231
  height=height,
 
232
  generator=generator,
233
  max_sequence_length=512
234
  ).images[0]
 
118
  torch.set_float32_matmul_precision("highest")
119
  seed = random.randint(0, MAX_SEED)
120
  generator = torch.Generator(device='cuda').manual_seed(seed)
121
+ input_ids = pipe.tokenizer(prompt, return_tensors="pt").input_ids.to(device)
122
  print('-- generating image --')
123
  sd_image = pipe(
124
+ prompt_embeds = pipe.text_encoder(input_ids)[0], #ensure that the input_ids are on the correct device.
125
  prompt=prompt,
126
  prompt_2=prompt,
127
  prompt_3=prompt,
 
170
  torch.set_float32_matmul_precision("highest")
171
  seed = random.randint(0, MAX_SEED)
172
  generator = torch.Generator(device='cuda').manual_seed(seed)
173
+ input_ids = pipe.tokenizer(prompt, return_tensors="pt").input_ids.to(device)
174
  print('-- generating image --')
175
  sd_image = pipe(
176
+ prompt_embeds = pipe.text_encoder(input_ids)[0], #ensure that the input_ids are on the correct device.
177
  prompt=prompt,
178
  prompt_2=prompt,
179
  prompt_3=prompt,
 
184
  num_inference_steps=num_inference_steps,
185
  width=width,
186
  height=height,
187
+ # cross_attention_kwargs={"scale": 0.75},
188
  generator=generator,
189
  max_sequence_length=512
190
  ).images[0]
 
222
  torch.set_float32_matmul_precision("highest")
223
  seed = random.randint(0, MAX_SEED)
224
  generator = torch.Generator(device='cuda').manual_seed(seed)
225
+ input_ids = pipe.tokenizer(prompt, return_tensors="pt").input_ids.to(device)
226
  print('-- generating image --')
227
  sd_image = pipe(
228
+ prompt_embeds = pipe.text_encoder(input_ids)[0], #ensure that the input_ids are on the correct device.
229
  prompt=prompt,
230
  prompt_2=prompt,
231
  prompt_3=prompt,
 
236
  num_inference_steps=num_inference_steps,
237
  width=width,
238
  height=height,
239
+ # cross_attention_kwargs={"scale": 0.75},
240
  generator=generator,
241
  max_sequence_length=512
242
  ).images[0]