Update app.py
app.py CHANGED

@@ -1,5 +1,7 @@
 import gradio as gr
 from transformers import CLIPModel, AutoTokenizer, RawImage
+import torch
+import torch.nn.functional as F
 
 # Load the CLIP model and tokenizer
 model = CLIPModel.from_pretrained("Xenova/mobileclip_blt")
@@ -18,11 +20,9 @@ def compute_probability(image):
     text_embeds = model(text_inputs)
     normalized_text_embeds = text_embeds.normalize().tolist()
 
-    probabilities =
-        x => softmax(normalized_text_embeds.map(y => 100 * dot(x, y)))
-    )
+    probabilities = [F.softmax(torch.tensor([100 * torch.dot(torch.tensor(x), torch.tensor(y)) for y in normalized_text_embeds])).tolist()[0] for x in normalized_image_embeds]
 
-    return {"probability": probabilities[0]
+    return {"probability": probabilities[0]}
 
 # Create the Gradio interface
 iface = gr.Interface(
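Note on the added line: the committed comprehension converts each embedding to a tensor pair by pair and keeps only the first softmax entry per image. A roughly equivalent vectorized sketch is shown below; it assumes normalized_image_embeds and normalized_text_embeds are lists of unit-norm vectors of the same dimension (the variable names otherwise follow the diff, and this is not the committed code).

import torch
import torch.nn.functional as F

# Stack the normalized embeddings into 2-D tensors.
image_embeds = torch.tensor(normalized_image_embeds)   # (num_images, dim)
text_embeds = torch.tensor(normalized_text_embeds)     # (num_texts, dim)

# Scaled cosine similarities, softmax over the text axis.
logits = 100 * image_embeds @ text_embeds.T             # (num_images, num_texts)
probs = F.softmax(logits, dim=-1)                        # each row sums to 1

# Mirrors the committed comprehension, which keeps the probability of the
# first text prompt for each image.
probabilities = probs[:, 0].tolist()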