import gradio as gr
from transformers import CLIPProcessor, CLIPModel
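# Load the pretrained CLIP model and its paired processor (text tokenizer + image preprocessor)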
clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
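# Score an input image against a comma-separated string of candidate captions
# and return a {caption: probability} mapping for the gr.Label output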
def inference(input_img, captions):
    captions_list = captions.split(",")
    inputs = processor(text=captions_list, images=input_img, return_tensors="pt", padding=True)
    outputs = clip(**inputs)
    # image-text similarity score for each caption
    logits_per_image = outputs.logits_per_image
    # softmax over the captions turns the scores into a probability per caption
    probs = logits_per_image.softmax(dim=1).tolist()[0]
    # keys are truncated to 30 characters so the labels stay readable in the UI
    confidences = {captions_list[i][:30]: probs[i] for i in range(len(probs))}
    return confidences
title = "CLIP Inference: Application using a pretrained CLIP model"
description = "An application to predict the appropriate caption for an image"
examples = [
    ["examples/woman_standing.jpg", "woman standing inside a house, a photo of dog, running water, cupboard, home interiors"],
    ["examples/city.jpg", "long shot of a city, sunsetting on a urban place, river with animals"],
    ["examples/dinning_tables.jpg", "a bunch of dinning tables, cricket ground with players, movie theater, plants with music"],
    ["examples/giraffe.jpg", "tall giraffe standing and turning back, luxurious car on a road, a bunch of people standing"],
    ["examples/dogs.jpg", "a couple of dogs standing, woman standing inside a house, a photo of MJ"],
]
demo = gr.Interface(
    inference,
    inputs=[
        gr.Image(shape=(416, 416), label="Input Image"),
        gr.Textbox(placeholder="List of captions"),
    ],
    outputs=[gr.Label()],
    title=title,
    description=description,
    examples=examples,
)
demo.launch()
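# To try the app locally, assuming this file is saved as app.py (the Hugging Face Spaces
# convention) and gradio, transformers, and torch are installed:
#   python app.py
# then open the local URL that Gradio prints.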