1inkusFace committed · verified
Commit a051af2 · 1 Parent(s): 86d6411

Update app.py
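
The commit tightens the generation length bounds in captioning(): the unprompted initial caption and the 'So therefore' continuation drop from min_length=32 / max_length=64 to 24 / 42, and each per-prompt caption drops to 16 / 32.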

Files changed (1):
  1. app.py +3 -3
app.py CHANGED
@@ -242,20 +242,20 @@ def captioning(img):
     output_prompt=[]
     # Initial caption generation without a prompt:
     inputsa = processor5(images=img, return_tensors="pt").to('cuda')
-    generated_ids = model5.generate(**inputsa, min_length=32, max_length=64)
+    generated_ids = model5.generate(**inputsa, min_length=24, max_length=42)
     generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
     print(generated_text)
     # Loop through prompts array:
     for prompt in prompts_array:
         inputs = processor5(images=img, text=prompt, return_tensors="pt").to('cuda')
-        generated_ids = model5.generate(**inputs, min_length=32, max_length=64) # Adjust max_length if needed
+        generated_ids = model5.generate(**inputs, min_length=16, max_length=32) # Adjust max_length if needed
         generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
         response_text = generated_text.replace(prompt, "").strip() # Or could try .split(prompt, 1)[-1].strip()
         output_prompt.append(response_text)
         print(f"{response_text}\n") # Print only the response text
     # Continue conversation:
     inputf = processor5(images=img, text=generated_text + 'So therefore', return_tensors="pt").to('cuda')
-    generated_ids = model5.generate(**inputf, min_length=32, max_length=64)
+    generated_ids = model5.generate(**inputf, min_length=24, max_length=42)
     generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
     response_text = generated_text.replace(generated_text, "").strip() # Remove the previous text plus 'So therefore'
     print(response_text)
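
For context, below is a minimal, self-contained sketch of the updated flow as it reads after this commit. It assumes model5/processor5 are a BLIP-style captioning pair from transformers and that prompts_array is defined elsewhere in app.py; the checkpoint name and the example prompts are placeholders, not taken from the repo. Two caveats in the committed code that the sketch works around: generated_text.replace(generated_text, "") on the final pass always produces an empty string, and 'So therefore' is concatenated without a leading space. The sketch strips the conditioning text by locating it in the decoded output instead, which is what the code's own comments appear to intend.

import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"

# Hypothetical checkpoint; app.py does not show which weights model5 uses.
processor5 = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model5 = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large").to(device)

# Placeholder prompts; the real prompts_array lives elsewhere in app.py.
prompts_array = ["the style of this image is", "the mood of this image is"]

def captioning(img: Image.Image) -> list[str]:
    output_prompt = []
    # Initial caption without a text prompt, using the new 24..42 token bounds:
    inputsa = processor5(images=img, return_tensors="pt").to(device)
    generated_ids = model5.generate(**inputsa, min_length=24, max_length=42)
    generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
    # One shorter pass (16..32 tokens) per conditioning prompt:
    for prompt in prompts_array:
        inputs = processor5(images=img, text=prompt, return_tensors="pt").to(device)
        ids = model5.generate(**inputs, min_length=16, max_length=32)
        text = processor5.batch_decode(ids, skip_special_tokens=True)[0].strip()
        # BLIP echoes the conditioning text, so keep only what follows the prompt
        # (the .split(prompt, 1)[-1] variant suggested in app.py's own comment):
        output_prompt.append(text.split(prompt, 1)[-1].strip())
    # Continuation pass: unlike app.py's replace(generated_text, ""), which
    # always yields "", keep only the text generated after the marker.
    marker = "so therefore"
    inputf = processor5(images=img, text=generated_text + " So therefore",  # app.py omits the space
                        return_tensors="pt").to(device)
    ids = model5.generate(**inputf, min_length=24, max_length=42)
    text = processor5.batch_decode(ids, skip_special_tokens=True)[0].strip()
    low = text.lower()
    response = text[low.index(marker) + len(marker):].strip() if marker in low else text
    output_prompt.append(response)
    return output_prompt

The tighter min_length/max_length values only change how many tokens generate() may emit per pass; min_length still enforces a floor, so the shorter settings trade caption detail for speed rather than risking one-word outputs.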