rjiang12 committed
Commit a2b1833 · Parent: fe6ca74

Update app.py

Files changed (1): app.py +4 -2
app.py CHANGED
@@ -38,8 +38,9 @@ def generate_answer_git(processor, model, image, question):
     input_ids = [processor.tokenizer.cls_token_id] + input_ids
     input_ids = torch.tensor(input_ids).unsqueeze(0)
 
-    generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50, output_scores=True, return_dict_in_generate=True)
+    generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50, output_scores=True)
     print(generated_ids)
+    print(generated_ids.scores)
     generated_answer = processor.batch_decode(generated_ids, skip_special_tokens=True)
 
     return generated_answer
@@ -49,8 +50,9 @@ def generate_answer_blip(processor, model, image, question):
     # prepare image + question
     inputs = processor(images=image, text=question, return_tensors="pt")
 
-    generated_ids = model.generate(**inputs, max_length=50, output_scores=True, return_dict_in_generate=True)
+    generated_ids = model.generate(**inputs, max_length=50, output_scores=True)
     print(generated_ids)
+    print(generated_ids.scores)
     generated_answer = processor.batch_decode(generated_ids, skip_special_tokens=True)
 
     return generated_answer
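
For reference, in Hugging Face transformers the output_scores=True flag only surfaces per-step logits when return_dict_in_generate=True is also passed; otherwise generate() returns a plain tensor of token ids, which has no .scores attribute. The helper below is a minimal illustrative sketch (not part of this commit) of how the GIT path could both decode the answer and read the scores, assuming processor and model are the GIT processor/model already loaded in app.py; the function name generate_answer_git_with_scores is hypothetical.

import torch

def generate_answer_git_with_scores(processor, model, image, question):
    # Prepare image and question the same way as generate_answer_git.
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    input_ids = processor.tokenizer(question, add_special_tokens=False).input_ids
    input_ids = [processor.tokenizer.cls_token_id] + input_ids
    input_ids = torch.tensor(input_ids).unsqueeze(0)

    # return_dict_in_generate=True makes generate() return an output object
    # with .sequences (token ids) and .scores (one logits tensor per step).
    outputs = model.generate(
        pixel_values=pixel_values,
        input_ids=input_ids,
        max_length=50,
        output_scores=True,
        return_dict_in_generate=True,
    )
    print(outputs.scores)  # tuple of per-step logits

    # Decode the token ids from .sequences rather than the output object itself.
    generated_answer = processor.batch_decode(outputs.sequences, skip_special_tokens=True)
    return generated_answer

The same pattern applies to the BLIP path: keep return_dict_in_generate=True alongside output_scores=True and decode outputs.sequences.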