rjiang12 committed on
Commit 2997525 · 1 Parent(s): 832c797

Update app.py

Files changed (1)
  1. app.py +20 -11
app.py CHANGED
@@ -54,17 +54,26 @@ def generate_answer_git(processor, model, image, question):
     # print(generated_ids)
     # generated_answer = processor.batch_decode(generated_ids.sequences, skip_special_tokens=True)
     # print(generated_answer)
-
-    encoding = processor(images=image, text=question, return_tensors="pt")
-
-    with torch.no_grad():
-        outputs = model(**encoding)
-    print(outputs.logits)
-    predicted_class_idx = outputs.logits[0].argmax(-1).item
-    # return model.config.id2label[predicted_class_idx]
-    print(predicted_class_idx)
-    print(model.config.id2label)
-    print(model.config.id2label[predicted_class_idx])
+    inputs = processor(images=image, text=question, add_special_tokens=False, return_tensors="pt")
+    inputs["max_length"] = 20
+    inputs["num_beams"] = 1
+    inputs["do_sample"] = True
+    inputs["top_k"] = 50
+    inputs["top_p"] = 0.95
+
+    out = model.generate(**inputs)
+    print(processor.batch_decode(out, skip_special_tokens=True)[0])
+
+    # encoding = processor(images=image, text=question, return_tensors="pt")
+
+    # with torch.no_grad():
+    #     outputs = model(**encoding)
+    # print(outputs.logits)
+    # predicted_class_idx = outputs.logits[0].argmax(-1).item
+    # # return model.config.id2label[predicted_class_idx]
+    # print(predicted_class_idx)
+    # print(model.config.id2label)
+    # print(model.config.id2label[predicted_class_idx])
 
 
     return 'haha'
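
The commit replaces a classification-style forward pass (logits + id2label lookup) with autoregressive decoding via model.generate(), with sampling parameters (do_sample, top_k, top_p) stored in the inputs dict. A minimal standalone sketch of that generation path is below; the checkpoint name "microsoft/git-base-textvqa", the sample image URL, and the question string are assumptions for illustration and are not named in the diff. Unlike the committed function, which still returns the placeholder 'haha', this sketch returns the decoded answer, and it passes the generation options as keyword arguments to generate() instead of mixing them into the processor output.

# Minimal sketch of the generate()-based path this commit moves toward.
# Checkpoint, image URL, and question below are assumptions, not from the diff.
import requests
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

processor = AutoProcessor.from_pretrained("microsoft/git-base-textvqa")
model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-textvqa")


def generate_answer_git(processor, model, image, question):
    # The processor encodes the image and question together, returning
    # pixel_values plus input_ids/attention_mask for the text prompt.
    inputs = processor(images=image, text=question, return_tensors="pt")

    # Sampling parameters mirror the values set in the commit, but are passed
    # as keyword arguments to generate() rather than stored in the inputs dict.
    with torch.no_grad():
        generated_ids = model.generate(
            **inputs,
            max_length=20,
            num_beams=1,
            do_sample=True,
            top_k=50,
            top_p=0.95,
        )
    # Decode the full generated sequence (question prefix plus answer).
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]


# Example call with a hypothetical image/question pair.
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
print(generate_answer_git(processor, model, image, "what animals are shown?"))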