# NOTE(review): this header replaces scraped Hugging Face Spaces page chrome
# ("Spaces: / Runtime error", file size, commit hashes, and a line-number
# gutter) that was captured with the source and is not valid Python.
import gradio as gr
import os
import skimage
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
from collections import OrderedDict
import torch
from imagebind import data
from imagebind.models import imagebind_model
from imagebind.models.imagebind_model import ModalityType
import torch.nn as nn
# Prefer the first CUDA device when available; ImageBind-Huge is a large
# model and CPU inference will be slow.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# Load the pretrained ImageBind-Huge checkpoint (weights are downloaded on
# first run by the imagebind package).
model = imagebind_model.imagebind_huge(pretrained=True)
model.eval()  # inference mode: disables dropout/batch-norm updates
model.to(device)
def image_text_zeroshot(image, text_list):
    """Zero-shot classify *image* against '|'-separated candidate labels.

    Args:
        image: Path to the input image file.
        text_list: Candidate labels joined by '|', e.g. "a dog|a cat".

    Returns:
        Dict mapping each candidate label to its softmax similarity score
        against the image embedding.
    """
    candidate_labels = [part.strip(" ") for part in text_list.strip(" ").split("|")]
    model_inputs = {
        ModalityType.TEXT: data.load_and_transform_text(candidate_labels, device),
        ModalityType.VISION: data.load_and_transform_vision_data([image], device),
    }
    with torch.no_grad():
        embeddings = model(model_inputs)
    # Cosine-style similarity between the single image embedding and each
    # text embedding, normalized into a probability distribution.
    similarity = embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T
    probabilities = torch.softmax(similarity, dim=-1).squeeze(0).tolist()
    return dict(zip(candidate_labels, probabilities))
def main():
    """Build and launch the Gradio demo for zero-shot classification."""
    # Component order must match the signature image_text_zeroshot(image,
    # text_list): image component first, then the label textbox.  (The
    # original order was reversed, which would have swapped the arguments.)
    inputs = [
        gr.inputs.Image(type="filepath", label="Input image"),  # was mislabeled "Output image"
        gr.inputs.Textbox(lines=1, label="texts"),
    ]
    iface = gr.Interface(
        # Pass the function object itself; the original called
        # image_text_zeroshot(image, text_list) with undefined names,
        # raising NameError at startup.
        image_text_zeroshot,
        inputs,
        "label",
        description="""...""",
        title="ImageBind",
    )
    iface.launch()