MsChabane committed on
Commit
84e97d7
·
verified ·
1 Parent(s): f6e1b75

add model caption

Browse files
Files changed (1) hide show
  1. main.py +6 -4
main.py CHANGED
@@ -32,11 +32,13 @@ app.add_middleware(
32
  allow_methods=["*"],
33
  allow_headers=["*"],
34
  )
 
 
35
  try:
36
- interpreter = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
37
- #interpreter_model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
38
- #interpreter_processor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
39
- #interpreter_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
40
  except Exception as exp:
41
  print("[ERROR] Can't load nlpconnect/vit-gpt2-image-captioning")
42
  print(str(exp))
 
32
  allow_methods=["*"],
33
  allow_headers=["*"],
34
  )
35
+
36
+ from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
37
  try:
38
+ interpreter =1 #pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
39
+ interpreter_model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
40
+ interpreter_processor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
41
+ interpreter_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
42
  except Exception as exp:
43
  print("[ERROR] Can't load nlpconnect/vit-gpt2-image-captioning")
44
  print(str(exp))