nornorr committed
Commit edc1f86 · Parent: e7e09b0

Update app.py

Files changed (1)
  1. app.py +33 -0
app.py CHANGED
@@ -12,6 +12,39 @@ def main():
     if clicked:
         results = classifier([image])
         st.json(results)
+
+from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer
+from PIL import Image
+import torch
+
+model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model.to(device)
+
+max_length = 16
+num_beams = 4
+gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
+
+def predict_step(image_paths):
+    # Load each image and normalize to RGB before feature extraction
+    images = []
+    for image_path in image_paths:
+        i_image = Image.open(image_path)
+        if i_image.mode != "RGB":
+            i_image = i_image.convert(mode="RGB")
+        images.append(i_image)
+    # Batch-encode the images and generate captions with beam search
+    pixel_values = feature_extractor(images=images, return_tensors="pt").pixel_values
+    pixel_values = pixel_values.to(device)
+    output_ids = model.generate(pixel_values, **gen_kwargs)
+    preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
+    preds = [pred.strip() for pred in preds]
+    return preds
+
+predict_step(['doctor.e16ba4e4.jpg'])
 
 if __name__ == "__main__":
     main()
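
For context, a minimal sketch of how the new predict_step helper could surface a caption in the existing Streamlit flow. This wiring is an assumption, not part of the commit: it reuses the clicked/image names already visible in main() and assumes image is a local file path, matching the predict_step call above (st.write and st.json are standard Streamlit APIs).

# Hypothetical wiring inside main(); assumes `image` is a local file path
if clicked:
    results = classifier([image])     # existing classifier output
    st.json(results)
    captions = predict_step([image])  # caption the same image with the new helper
    st.write(captions[0])             # show the first generated caption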