1inkusFace commited on
Commit
cb1fed0
·
verified ·
1 Parent(s): 70e7ee7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -242,13 +242,14 @@ def captioning(img):
242
  output_prompt=[]
243
  # Initial caption generation without a prompt:
244
  inputsa = processor5(images=img, return_tensors="pt").to('cuda')
245
- generated_ids = model5.generate(**inputsa, min_length=32, max_length=64)
246
  generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
 
247
  print(generated_text)
248
  # Loop through prompts array:
249
  for prompt in prompts_array:
250
  inputs = processor5(images=img, text=prompt, return_tensors="pt").to('cuda')
251
- generated_ids = model5.generate(**inputs, max_length=64) # Adjust max_length if needed
252
  generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
253
  response_text = generated_text.replace(prompt, "").strip() #Or could try .split(prompt, 1)[-1].strip()
254
  output_prompt.append(response_text)
@@ -259,7 +260,7 @@ def captioning(img):
259
  # generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
260
  # response_text = generated_text.replace(generated_text, "").strip() # Remove the previous text plus 'So therefore'
261
  # print(response_text)
262
- output_prompt.append(response_text)
263
  print(output_prompt)
264
  return output_prompt
265
  '''
@@ -394,6 +395,7 @@ def generate_30(
394
  filename= f'rv_IP_{timestamp}.png'
395
  print("-- using image file --")
396
  print(caption)
 
397
  print("-- generating further caption --")
398
 
399
  #expand_prompt(prompt)
 
242
  output_prompt=[]
243
  # Initial caption generation without a prompt:
244
  inputsa = processor5(images=img, return_tensors="pt").to('cuda')
245
+ generated_ids = model5.generate(**inputsa, min_length=24, max_length=42)
246
  generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
247
+ output_prompt.append(generated_text)
248
  print(generated_text)
249
  # Loop through prompts array:
250
  for prompt in prompts_array:
251
  inputs = processor5(images=img, text=prompt, return_tensors="pt").to('cuda')
252
+ generated_ids = model5.generate(**inputs, max_length=42) # Adjust max_length if needed
253
  generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
254
  response_text = generated_text.replace(prompt, "").strip() #Or could try .split(prompt, 1)[-1].strip()
255
  output_prompt.append(response_text)
 
260
  # generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
261
  # response_text = generated_text.replace(generated_text, "").strip() # Remove the previous text plus 'So therefore'
262
  # print(response_text)
263
+ #output_prompt.append(response_text)
264
  print(output_prompt)
265
  return output_prompt
266
  '''
 
395
  filename= f'rv_IP_{timestamp}.png'
396
  print("-- using image file --")
397
  print(caption)
398
+ print(caption_2)
399
  print("-- generating further caption --")
400
 
401
  #expand_prompt(prompt)