gschurck committed
Commit a50a7f8 · verified · 1 Parent(s): 5491ced

Update app.py

Files changed (1)
app.py +36 -0
app.py CHANGED
@@ -1,6 +1,42 @@
  import gradio as gr
+ from sentence_transformers import SentenceTransformer
+ import requests

  def greet(name):
+     # Load the model
+     model = SentenceTransformer("intfloat/mmE5-mllama-11b-instruct", trust_remote_code=True)
+
+     # Download an example image of a cat and a dog
+     dog_cat_image_bytes = requests.get('https://github.com/haon-chen/mmE5/blob/main/figures/example.jpg?raw=true', stream=True).raw.read()
+     with open("cat_dog_example.jpg", "wb") as f:
+         f.write(dog_cat_image_bytes)
+
+     # Image + Text -> Text
+     image_embeddings = model.encode([{
+         "image": "cat_dog_example.jpg",
+         "text": "Represent the given image with the following question: What is in the image",
+     }])
+     text_embeddings = model.encode([
+         {"text": "A cat and a dog"},
+         {"text": "A cat and a tiger"},
+     ])
+
+     similarity = model.similarity(image_embeddings, text_embeddings)
+     print(similarity)
+     # tensor([[0.3967, 0.3090]])
+     # ✅ The first text is most similar to the image
+
+     # Text -> Image
+     image_embeddings = model.encode([
+         {"image": dog_cat_image_bytes, "text": "Represent the given image."},
+     ])
+     text_embeddings = model.encode([
+         {"text": "Find me an everyday image that matches the given caption: A cat and a dog."},
+         {"text": "Find me an everyday image that matches the given caption: A cat and a tiger."},
+     ])
+
+     similarity = model.similarity(image_embeddings, text_embeddings)
+     print(similarity)
      return "Hello " + name + "!!"

  demo = gr.Interface(fn=greet, inputs="text", outputs="text")
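As committed, greet() loads the 11B mmE5 checkpoint and re-downloads the example image on every request to the Gradio interface. The following is a minimal sketch of an alternative layout, not part of this commit, that moves the one-time setup to module level so each call only computes embeddings (it assumes the Space has enough memory to hold the model):

# Sketch only: same calls as the committed code, but the model and example
# image are prepared once at import time instead of inside greet().
import gradio as gr
import requests
from sentence_transformers import SentenceTransformer

# Assumption: the runtime can hold the 11B mmE5 checkpoint in memory.
model = SentenceTransformer("intfloat/mmE5-mllama-11b-instruct", trust_remote_code=True)

# Fetch the cat-and-dog example image once, at startup.
dog_cat_image_bytes = requests.get(
    "https://github.com/haon-chen/mmE5/blob/main/figures/example.jpg?raw=true",
    stream=True,
).raw.read()
with open("cat_dog_example.jpg", "wb") as f:
    f.write(dog_cat_image_bytes)


def greet(name):
    # Image + Text -> Text: rank two candidate captions against the example image.
    image_embeddings = model.encode([{
        "image": "cat_dog_example.jpg",
        "text": "Represent the given image with the following question: What is in the image",
    }])
    text_embeddings = model.encode([
        {"text": "A cat and a dog"},
        {"text": "A cat and a tiger"},
    ])
    similarity = model.similarity(image_embeddings, text_embeddings)
    print(similarity)  # the first caption should score highest
    return "Hello " + name + "!!"


demo = gr.Interface(fn=greet, inputs="text", outputs="text")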