Update app.py
app.py CHANGED
@@ -16,7 +16,7 @@ git_processor_large_coco = AutoProcessor.from_pretrained("microsoft/git-large-coco")
 git_model_large_coco = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")

 git_processor_large_textcaps = AutoProcessor.from_pretrained("microsoft/git-large-r-textcaps")
-git_model_large_textcaps = AutoModelForCausalLM.from_pretrained("microsoft/git-large-r-textcaps")
+# git_model_large_textcaps = AutoModelForCausalLM.from_pretrained("microsoft/git-large-r-textcaps")

 # blip_processor_base = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
 # blip_model_base = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
@@ -44,7 +44,7 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 # git_model_base.to(device)
 # blip_model_base.to(device)
 git_model_large_coco.to(device)
-git_model_large_textcaps.to(device)
+# git_model_large_textcaps.to(device)
 blip_model_large.to(device)
 # vitgpt_model.to(device)
 coca_model.to(device)
@@ -78,7 +78,7 @@ def generate_captions(image):

     caption_git_large_coco = generate_caption(git_processor_large_coco, git_model_large_coco, image)

-    caption_git_large_textcaps = generate_caption(git_processor_large_textcaps, git_model_large_textcaps, image)
+    # caption_git_large_textcaps = generate_caption(git_processor_large_textcaps, git_model_large_textcaps, image)

     # caption_blip_base = generate_caption(blip_processor_base, blip_model_base, image)

@@ -92,7 +92,7 @@ def generate_captions(image):

     caption_blip2_8_bit = generate_caption(blip2_processor_8_bit, blip2_model_8_bit, image, use_float_16=True).strip()

-    return caption_git_large_coco,
+    return caption_git_large_coco, caption_blip_large, caption_coca, caption_blip2_8_bit


 examples = [["cats.jpg"], ["stop_sign.png"], ["astronaut.jpg"]]
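The commit comments out the microsoft/git-large-r-textcaps processor/model pair and drops its caption from the tuple returned by generate_captions, leaving the GIT-large-COCO, BLIP-large, CoCa, and 8-bit BLIP-2 captions. For context, below is a minimal sketch of what a helper like generate_caption typically does for the GIT checkpoints using the standard transformers captioning flow; the helper name, its exact signature (the real one also takes a use_float_16 flag), and the generation settings are assumptions for illustration, not the Space's actual implementation.

import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the GIT-large-COCO captioner that remains active after this change.
git_processor_large_coco = AutoProcessor.from_pretrained("microsoft/git-large-coco")
git_model_large_coco = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco").to(device)

def generate_caption(processor, model, image):
    # Convert the PIL image to pixel values, generate token ids, and decode them to text.
    inputs = processor(images=image, return_tensors="pt").to(device)
    generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

# Example usage with one of the Space's sample images.
print(generate_caption(git_processor_large_coco, git_model_large_coco, Image.open("cats.jpg")))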