1inkusFace committed on
Commit
494b48a
·
verified ·
1 Parent(s): 952d077

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -4
app.py CHANGED
@@ -253,14 +253,36 @@ def captioning(img):
253
  output_prompt=[]
254
  # Initial caption generation without a prompt:
255
  inputsa = processor5(images=img, return_tensors="pt").to('cuda')
256
- generated_ids = model5.generate(**inputsa, min_length=42, max_length=64)
 
 
 
 
 
 
 
 
 
 
 
257
  generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
258
  output_prompt.append(generated_text)
259
  print(generated_text)
260
  # Loop through prompts array:
261
  for prompt in prompts_array:
262
  inputs = processor5(images=img, text=prompt, return_tensors="pt").to('cuda')
263
- generated_ids = model5.generate(**inputs, min_length=32, max_length=42) # Adjust max_length if needed
 
 
 
 
 
 
 
 
 
 
 
264
  generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
265
  response_text = generated_text.replace(prompt, "").strip() #Or could try .split(prompt, 1)[-1].strip()
266
  output_prompt.append(response_text)
@@ -364,7 +386,6 @@ def generate_30(
364
  latent_file_5_scale: float = 1.0,
365
  samples=1,
366
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
367
-
368
  ):
369
  global captioner_2
370
  captioner2=captioner_2
@@ -443,7 +464,6 @@ def generate_30(
443
  print(new_prompt)
444
  print("-- FINAL PROMPT --")
445
  print("-- ------------ --")
446
-
447
  #global model
448
  #global txt_tokenizer
449
  #del model
 
253
  output_prompt=[]
254
  # Initial caption generation without a prompt:
255
  inputsa = processor5(images=img, return_tensors="pt").to('cuda')
256
+ generated_ids = model5.generate(
257
+ **inputsa,
258
+ do_sample=False,
259
+ num_beams=5,
260
+ max_length=256,
261
+ min_length=1,
262
+ top_p=0.9,
263
+ repetition_penalty=1.5,
264
+ length_penalty=1.0,
265
+ temperature=1,
266
+ )
267
+
268
  generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
269
  output_prompt.append(generated_text)
270
  print(generated_text)
271
  # Loop through prompts array:
272
  for prompt in prompts_array:
273
  inputs = processor5(images=img, text=prompt, return_tensors="pt").to('cuda')
274
+ generated_ids = model5.generate(
275
+ **inputs,
276
+ do_sample=False,
277
+ num_beams=5,
278
+ max_length=256,
279
+ min_length=1,
280
+ top_p=0.9,
281
+ repetition_penalty=1.5,
282
+ length_penalty=1.0,
283
+ temperature=1,
284
+ )
285
+ # Adjust max_length if needed
286
  generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
287
  response_text = generated_text.replace(prompt, "").strip() #Or could try .split(prompt, 1)[-1].strip()
288
  output_prompt.append(response_text)
 
386
  latent_file_5_scale: float = 1.0,
387
  samples=1,
388
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 
389
  ):
390
  global captioner_2
391
  captioner2=captioner_2
 
464
  print(new_prompt)
465
  print("-- FINAL PROMPT --")
466
  print("-- ------------ --")
 
467
  #global model
468
  #global txt_tokenizer
469
  #del model