1inkusFace committed on
Commit
cea1531
·
verified ·
1 Parent(s): 1573efb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -8
app.py CHANGED
@@ -256,22 +256,22 @@ def generate_60(
256
  pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
257
  pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
258
 
259
- text_inputs1 = tokenizer(
260
  prompt,
261
  padding="max_length",
262
  max_length=77,
263
  truncation=True,
264
  return_tensors="pt",
265
  )
266
- text_inputs2 = tokenizer(
267
  prompt2,
268
  padding="max_length",
269
  max_length=77,
270
  truncation=True,
271
  return_tensors="pt",
272
  )
273
- prompt_embedsa = text_encoder(text_input_ids.to(device), output_hidden_states=True)
274
- prompt_embedsb = text_encoder(text_input_ids.to(device), output_hidden_states=True)
275
  prompt_embeds = torch.cat([prompt_embedsa,prompt_embedsb]).mean(dim=-1)
276
 
277
  options = {
@@ -319,22 +319,22 @@ def generate_90(
319
  pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
320
  pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
321
 
322
- text_inputs1 = tokenizer(
323
  prompt,
324
  padding="max_length",
325
  max_length=77,
326
  truncation=True,
327
  return_tensors="pt",
328
  )
329
- text_inputs2 = tokenizer(
330
  prompt2,
331
  padding="max_length",
332
  max_length=77,
333
  truncation=True,
334
  return_tensors="pt",
335
  )
336
- prompt_embedsa = text_encoder(text_input_ids.to(device), output_hidden_states=True)
337
- prompt_embedsb = text_encoder(text_input_ids.to(device), output_hidden_states=True)
338
  prompt_embeds = torch.cat([prompt_embedsa,prompt_embedsb]).mean(dim=-1)
339
 
340
  options = {
 
256
  pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
257
  pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
258
 
259
+ text_inputs1 = pipe.tokenizer(
260
  prompt,
261
  padding="max_length",
262
  max_length=77,
263
  truncation=True,
264
  return_tensors="pt",
265
  )
266
+ text_inputs2 = pipe.tokenizer(
267
  prompt2,
268
  padding="max_length",
269
  max_length=77,
270
  truncation=True,
271
  return_tensors="pt",
272
  )
273
+ prompt_embedsa = pipe.text_encoder(text_input_ids.to(device), output_hidden_states=True)
274
+ prompt_embedsb = pipe.text_encoder(text_input_ids.to(device), output_hidden_states=True)
275
  prompt_embeds = torch.cat([prompt_embedsa,prompt_embedsb]).mean(dim=-1)
276
 
277
  options = {
 
319
  pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
320
  pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
321
 
322
+ text_inputs1 = pipe.tokenizer(
323
  prompt,
324
  padding="max_length",
325
  max_length=77,
326
  truncation=True,
327
  return_tensors="pt",
328
  )
329
+ text_inputs2 = pipe.tokenizer(
330
  prompt2,
331
  padding="max_length",
332
  max_length=77,
333
  truncation=True,
334
  return_tensors="pt",
335
  )
336
+ prompt_embedsa = pipe.text_encoder(text_input_ids.to(device), output_hidden_states=True)
337
+ prompt_embedsb = pipe.text_encoder(text_input_ids.to(device), output_hidden_states=True)
338
  prompt_embeds = torch.cat([prompt_embedsa,prompt_embedsb]).mean(dim=-1)
339
 
340
  options = {