Update app.py
app.py CHANGED
@@ -246,14 +246,14 @@ def captioning(img):
     output_prompt=[]
     # Initial caption generation without a prompt:
     inputsa = processor5(images=img, return_tensors="pt").to('cuda')
-    generated_ids = model5.generate(**inputsa, min_length=
+    generated_ids = model5.generate(**inputsa, min_length=42, max_length=64)
     generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
     output_prompt.append(generated_text)
     print(generated_text)
     # Loop through prompts array:
     for prompt in prompts_array:
         inputs = processor5(images=img, text=prompt, return_tensors="pt").to('cuda')
-        generated_ids = model5.generate(**inputs) # Adjust max_length if needed
+        generated_ids = model5.generate(**inputs, min_length=16, max_length=64) # Adjust max_length if needed
         generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
         response_text = generated_text.replace(prompt, "").strip() #Or could try .split(prompt, 1)[-1].strip()
         output_prompt.append(response_text)
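For context, a minimal standalone sketch of the captioning flow this hunk patches. The diff only shows the call sites, so the checkpoint and the BlipProcessor/BlipForConditionalGeneration pair behind processor5/model5 are assumptions, as are the prompts_array entries; the min_length/max_length values are the ones the commit introduces.

import torch
from transformers import BlipProcessor, BlipForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"

# Assumed checkpoint -- the diff does not reveal which captioner backs model5.
processor5 = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model5 = BlipForConditionalGeneration.from_pretrained(
    "Salesforce/blip-image-captioning-large"
).to(device)

prompts_array = ["the style of this image is"]  # hypothetical prompt list

def captioning(img):
    output_prompt = []
    # Unconditional caption; min_length pushes the model past a one-liner.
    inputsa = processor5(images=img, return_tensors="pt").to(device)
    generated_ids = model5.generate(**inputsa, min_length=42, max_length=64)
    output_prompt.append(
        processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
    )
    # Conditional captions: the model echoes the text prompt, so strip it.
    # str.replace drops every occurrence; .split(prompt, 1)[-1] would drop
    # only the leading echo, which is safer if the prompt text can recur.
    for prompt in prompts_array:
        inputs = processor5(images=img, text=prompt, return_tensors="pt").to(device)
        generated_ids = model5.generate(**inputs, min_length=16, max_length=64)
        generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
        output_prompt.append(generated_text.replace(prompt, "").strip())
    return output_prompt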
@@ -291,7 +291,7 @@ def expand_prompt(prompt):
     outputs = model.generate(
         input_ids=input_ids,
         attention_mask=attention_mask,
-        max_new_tokens=
+        max_new_tokens=512,
         temperature=0.2,
         top_p=0.9,
         do_sample=True,
@@ -304,7 +304,7 @@ def expand_prompt(prompt):
     outputs_2 = model.generate(
         input_ids=input_ids_2,
         attention_mask=attention_mask_2,
-        max_new_tokens=
+        max_new_tokens=512,
         temperature=0.2,
         top_p=0.9,
         do_sample=True,
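Both expand_prompt hunks restore the same truncated keyword argument, max_new_tokens=512, which as written on the old lines would be a syntax error. A hedged sketch of that call, assuming model/tokenizer are an ordinary transformers causal LM pair (the checkpoint is a placeholder; only the 512/0.2/0.9/do_sample values come from the diff):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"

# Placeholder checkpoint -- the diff does not show which LLM backs `model`.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2").to(device)

def expand_prompt(prompt):
    enc = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(
        input_ids=enc.input_ids,
        attention_mask=enc.attention_mask,
        max_new_tokens=512,   # caps generated tokens only, unlike max_length
        temperature=0.2,      # low temperature keeps the expansion on-topic
        top_p=0.9,
        do_sample=True,       # required for temperature/top_p to take effect
    )
    # Decode only the newly generated tokens, not the echoed prompt.
    new_tokens = outputs[0, enc.input_ids.shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()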