Update app.py
app.py CHANGED
@@ -9,14 +9,17 @@ torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/0000000397
 torch.hub.download_url_to_file('https://huggingface.co/datasets/nielsr/textcaps-sample/resolve/main/stop_sign.png', 'stop_sign.png')
 torch.hub.download_url_to_file('https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg', 'astronaut.jpg')
 
-git_processor_base = AutoProcessor.from_pretrained("microsoft/git-base-coco")
-git_model_base = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")
+# git_processor_base = AutoProcessor.from_pretrained("microsoft/git-base-coco")
+# git_model_base = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")
 
-git_processor_large = AutoProcessor.from_pretrained(…)
-git_model_large = AutoModelForCausalLM.from_pretrained(…)
+git_processor_large_coco = AutoProcessor.from_pretrained("microsoft/git-large-coco")
+git_model_large_coco = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
 
-blip_processor_base = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-blip_model_base = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+git_processor_large_textcaps = AutoProcessor.from_pretrained("microsoft/git-large-r-textcaps")
+git_model_large_textcaps = AutoModelForCausalLM.from_pretrained("microsoft/git-large-r-textcaps")
+
+# blip_processor_base = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+# blip_model_base = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
 
 blip_processor_large = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
 blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
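This hunk only touches the model-loading block; the import block sits above it and is not part of the diff. Judging from the identifiers used, the top of app.py presumably contains something like the following (a sketch inferred from the diff, not the committed code):

```python
# Assumed import block, inferred from the names used in the hunks.
import torch
import open_clip
import gradio as gr
from transformers import (
    AutoProcessor,                 # processors for the GIT and BLIP checkpoints
    AutoModelForCausalLM,          # GIT models
    BlipForConditionalGeneration,  # BLIP models
    VisionEncoderDecoderModel,     # presumably how vitgpt_model is loaded
)
```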
@@ -32,9 +35,10 @@ coca_model, _, coca_transform = open_clip.create_model_and_transforms(
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-git_model_base.to(device)
-blip_model_base.to(device)
-git_model_large.to(device)
+# git_model_base.to(device)
+# blip_model_base.to(device)
+git_model_large_coco.to(device)
+git_model_large_textcaps.to(device)
 blip_model_large.to(device)
 vitgpt_model.to(device)
 coca_model.to(device)
@@ -60,11 +64,13 @@ def generate_caption_coca(model, transform, image):
 
 
 def generate_captions(image):
-    caption_git_base = generate_caption(git_processor_base, git_model_base, image)
+    # caption_git_base = generate_caption(git_processor_base, git_model_base, image)
+
+    caption_git_large_coco = generate_caption(git_processor_large_coco, git_model_large_coco, image)
 
-    caption_git_large = generate_caption(git_processor_large, git_model_large, image)
+    caption_git_large_textcaps = generate_caption(git_processor_large_textcaps, git_model_large_textcaps, image)
 
-    caption_blip_base = generate_caption(blip_processor_base, blip_model_base, image)
+    # caption_blip_base = generate_caption(blip_processor_base, blip_model_base, image)
 
     caption_blip_large = generate_caption(blip_processor_large, blip_model_large, image)
 
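All of these lines delegate to a generate_caption helper that is defined outside the changed lines, so its body does not appear in the diff. With the transformers API, such a helper would plausibly look like this (a sketch; only the (processor, model, image) signature is confirmed by the diff, and max_length is an assumption):

```python
def generate_caption(processor, model, image):
    # Preprocess the PIL image into pixel values and move them to the model's device.
    inputs = processor(images=image, return_tensors="pt").to(device)
    # Autoregressively generate caption token ids.
    generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50)
    # Decode the first (only) sequence back into a plain string.
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```

One helper can serve both GIT and BLIP here because their processors and generate methods share a uniform interface in transformers.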
@@ -72,11 +78,11 @@ def generate_captions(image):
 
     caption_coca = generate_caption_coca(coca_model, coca_transform, image)
 
-    return …
+    return caption_git_large_coco, caption_git_large_textcaps, caption_blip_large, caption_vitgpt, caption_coca
 
 
 examples = [["cats.jpg"], ["stop_sign.png"], ["astronaut.jpg"]]
-outputs = [gr.outputs.Textbox(label="Caption generated by GIT-base"), gr.outputs.Textbox(label="Caption generated by GIT-large…
+outputs = [gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on COCO"), gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on TextCaps"), gr.outputs.Textbox(label="Caption generated by BLIP-large"), gr.outputs.Textbox(label="Caption generated by ViT+GPT-2"), gr.outputs.Textbox(label="Caption generated by CoCa")]
 
 title = "Interactive demo: comparing image captioning models"
 description = "Gradio Demo to compare GIT, BLIP, ViT+GPT2 and CoCa, 4 state-of-the-art vision+language models. To use it, simply upload your image and click 'submit', or click one of the examples to load them. Read more at the links below."
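generate_caption_coca, visible here only as hunk context, is likewise defined outside the diff. Given that the earlier hunk header shows coca_model and coca_transform coming from open_clip.create_model_and_transforms, a plausible sketch is (seq_len and the exact decode cleanup are assumptions):

```python
def generate_caption_coca(model, transform, image):
    # Apply the open_clip preprocessing transform and add a batch dimension.
    im = transform(image).unsqueeze(0).to(device)
    with torch.no_grad():
        # CoCa models in open_clip expose a generate() method for captioning.
        generated = model.generate(im, seq_len=20)
    # Decode token ids and strip the special start/end markers.
    return (
        open_clip.decode(generated[0])
        .split("<end_of_text>")[0]
        .replace("<start_of_text>", "")
    )
```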
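The outputs, examples, title, and description variables from the last hunk are presumably consumed by a gr.Interface call further down app.py, outside the changed lines. Using the same legacy gr.inputs/gr.outputs namespaces the file already uses, the wiring would look roughly like this (a hypothetical sketch):

```python
# Hypothetical interface wiring; the actual call sits below the shown hunks.
interface = gr.Interface(
    fn=generate_captions,                # returns one caption per output Textbox
    inputs=gr.inputs.Image(type="pil"),  # hand the callback a PIL image
    outputs=outputs,
    examples=examples,
    title=title,
    description=description,
)
interface.launch()
```

Note that the callback must return exactly as many values as there are output components, which is why the new return statement lists five captions to match the five Textbox labels.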