Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -167,8 +167,8 @@ pipe = load_and_prepare_model()
 # text models
 #checkpoint = "microsoft/Phi-3.5-mini-instruct"
 checkpoint = "ford442/Phi-3.5-mini-instruct-bf16"
-
-
+captioner_2 = pipeline(model="ydshieh/vit-gpt2-coco-en",device='cuda:0', task="image-to-text")
+#captioner = pipeline(model="Salesforce/blip-image-captioning-base",device='cuda', task="image-to-text")
 #captioner_3 = pipeline(model="ford442/blip-image-to-text-large-bf16",device='cuda', task="image-to-text")
 model5 = Blip2ForConditionalGeneration.from_pretrained("ford442/blip2-image-to-text-bf16").to('cuda')
 processor5 = Blip2Processor.from_pretrained("ford442/blip2-image-to-text-bf16", device_map='cuda')
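
For context, a minimal sketch (not part of the commit) of how the two captioning paths in this hunk are typically invoked: the image-to-text pipeline added here, and the BLIP-2 processor/model pair kept below it. The example image path and the max_new_tokens value are illustrative assumptions, not taken from app.py.

from PIL import Image
from transformers import pipeline, Blip2Processor, Blip2ForConditionalGeneration

image = Image.open("example.jpg").convert("RGB")  # hypothetical input image, not from the Space

# Path 1: ViT-GPT2 captioner through the high-level pipeline API (the line added in this commit)
captioner_2 = pipeline(model="ydshieh/vit-gpt2-coco-en", device="cuda:0", task="image-to-text")
caption = captioner_2(image)[0]["generated_text"]

# Path 2: BLIP-2 captioning via explicit processor + generate, mirroring model5/processor5 above
processor5 = Blip2Processor.from_pretrained("ford442/blip2-image-to-text-bf16")
model5 = Blip2ForConditionalGeneration.from_pretrained("ford442/blip2-image-to-text-bf16").to("cuda")
inputs = processor5(images=image, return_tensors="pt").to("cuda")
generated_ids = model5.generate(**inputs, max_new_tokens=32)  # max_new_tokens is an assumed setting
blip2_caption = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()

print(caption, blip2_caption)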