1inkusFace committed
Commit 805bb63 · verified · 1 Parent(s): a77e94c

Update app.py

Files changed (1): app.py (+3 -3)
app.py CHANGED
@@ -252,14 +252,14 @@ def captioning(img):
     output_prompt=[]
     # Initial caption generation without a prompt:
     inputsa = processor5(images=img, return_tensors="pt").to('cuda')
-    generated_ids = model5.generate(**inputsa, min_length=64, max_length=256)
+    generated_ids = model5.generate(**inputsa, min_length=64, max_length=96)
     generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
     output_prompt.append(generated_text)
     print(generated_text)
     # Loop through prompts array:
     for prompt in prompts_array:
         inputs = processor5(images=img, text=prompt, return_tensors="pt").to('cuda')
-        generated_ids = model5.generate(**inputs, min_length=32, max_length=96)  # Adjust max_length if needed
+        generated_ids = model5.generate(**inputs, min_length=42, max_length=64)  # Adjust max_length if needed
         generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
         response_text = generated_text.replace(prompt, "").strip()  # Or could try .split(prompt, 1)[-1].strip()
         output_prompt.append(response_text)
@@ -300,7 +300,7 @@ def expand_prompt(prompt):
     outputs = model.generate(
         input_ids=input_ids,
         attention_mask=attention_mask,
-        max_new_tokens=1024,
+        max_new_tokens=128,
         temperature=0.2,
         top_p=0.9,
         do_sample=True,
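The first hunk tightens both caption lengths: the unconditional caption drops from max_length=256 to 96 tokens, and each prompt-conditioned caption is bounded to min_length=42 / max_length=64. model5 and processor5 are defined elsewhere in app.py; below is a minimal, self-contained sketch of the same generate() pattern, assuming a BLIP-style captioning checkpoint (the Salesforce/blip-image-captioning-base model, the image path, and the sample prompt are stand-ins, not taken from this repo):

# Hedged sketch of the captioning pattern above. Assumptions: model5/processor5
# are a BLIP-style captioner; the checkpoint, image path, and prompt below are
# placeholders, not necessarily what app.py actually loads.
import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"
processor5 = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model5 = BlipForConditionalGeneration.from_pretrained(
    "Salesforce/blip-image-captioning-base"
).to(device)

img = Image.open("example.jpg").convert("RGB")  # hypothetical input image

# Unconditional caption, now capped at 96 tokens:
inputsa = processor5(images=img, return_tensors="pt").to(device)
generated_ids = model5.generate(**inputsa, min_length=64, max_length=96)
caption = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()

# Prompt-conditioned caption, now bounded to 42..64 tokens:
prompt = "a photograph of"  # hypothetical entry from prompts_array
inputs = processor5(images=img, text=prompt, return_tensors="pt").to(device)
generated_ids = model5.generate(**inputs, min_length=42, max_length=64)
generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
response_text = generated_text.replace(prompt, "").strip()  # drop the echoed prompt
print(caption, response_text, sep="\n")

With BLIP-style conditioning the decoded text starts with the prompt itself, which is why the diff pairs the 64-token cap with stripping the prompt from the output.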
 
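The second hunk only shortens expand_prompt's output budget: max_new_tokens counts freshly generated tokens (the prompt is excluded), so lowering it from 1024 to 128 caps the expansion at 128 tokens while leaving the sampling settings (do_sample with temperature=0.2, top_p=0.9) untouched. A minimal sketch of that call, assuming model and tokenizer are a Hugging Face causal LM (gpt2 here is only a small stand-in, not the checkpoint app.py uses):

# Hedged sketch of the expand_prompt generation call with the new 128-token cap.
# Assumption: model/tokenizer are some causal LM; "gpt2" is a placeholder.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2").to(device)

prompt = "a cat sitting on a windowsill"  # hypothetical user prompt
enc = tokenizer(prompt, return_tensors="pt").to(device)

outputs = model.generate(
    input_ids=enc.input_ids,
    attention_mask=enc.attention_mask,
    max_new_tokens=128,   # was 1024; bounds only newly generated tokens
    temperature=0.2,      # low temperature keeps sampling near-greedy
    top_p=0.9,            # nucleus sampling over the top-90% probability mass
    do_sample=True,
    pad_token_id=tokenizer.eos_token_id,  # gpt2 defines no pad token
)
expanded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(expanded)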