Update README.md
README.md

```diff
@@ -26,9 +26,9 @@ from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
 import torch
 from PIL import Image
 
-model = VisionEncoderDecoderModel.from_pretrained("
-feature_extractor = ViTImageProcessor.from_pretrained("
-tokenizer = AutoTokenizer.from_pretrained("
+model = VisionEncoderDecoderModel.from_pretrained("Ayansk11/Image_Caption_using_ViT_GPT2")
+feature_extractor = ViTImageProcessor.from_pretrained("Ayansk11/Image_Caption_using_ViT_GPT2")
+tokenizer = AutoTokenizer.from_pretrained("Ayansk11/Image_Caption_using_ViT_GPT2")
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)
@@ -67,7 +67,7 @@ predict_step(['doctor.e16ba4e4.jpg']) # ['a woman in a hospital bed with a woman
 
 from transformers import pipeline
 
-image_to_text = pipeline("image-to-text", model="
+image_to_text = pipeline("image-to-text", model="Ayansk11/Image_Caption_using_ViT_GPT2")
 
 image_to_text("https://ankur3107.github.io/assets/images/image-captioning-example.png")
 
```
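The second hunk's context line references a `predict_step` helper that is defined earlier in the README but is not part of this diff. As a rough sketch of how the three objects loaded in the first hunk are typically combined into that kind of function (the `max_length`/`num_beams` generation settings here are illustrative assumptions, not values taken from this diff):

```python
import torch
from PIL import Image
from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer

model = VisionEncoderDecoderModel.from_pretrained("Ayansk11/Image_Caption_using_ViT_GPT2")
feature_extractor = ViTImageProcessor.from_pretrained("Ayansk11/Image_Caption_using_ViT_GPT2")
tokenizer = AutoTokenizer.from_pretrained("Ayansk11/Image_Caption_using_ViT_GPT2")

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_step(image_paths):
    # Load the images and drop any alpha channel
    images = [Image.open(path).convert("RGB") for path in image_paths]
    # The processor resizes/normalizes the images into a pixel_values tensor
    pixel_values = feature_extractor(images=images, return_tensors="pt").pixel_values.to(device)
    # Beam-search decoding; these generation settings are assumed for illustration
    output_ids = model.generate(pixel_values, max_length=16, num_beams=4)
    captions = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
    return [caption.strip() for caption in captions]

predict_step(["doctor.e16ba4e4.jpg"])
```

The `pipeline("image-to-text", ...)` call in the second hunk is the higher-level route to the same model; it returns a list of dicts with a `generated_text` key for each input image.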