Geraldine committed (verified)
Commit 2ba0248 · 1 Parent(s): 4e2506f

Create app.py

Files changed (1)
  1. app.py +91 -0
app.py ADDED
@@ -0,0 +1,91 @@
+ import gradio as gr
+ from sentence_transformers import SentenceTransformer
+ from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
+ from PIL import Image
+ import torch
+ from datetime import datetime
+ import numpy as np
+ import base64
+ import os
+
+ # Load the vision-language model in half-precision on the available device(s)
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
+     "./Qwen2-VL-7B-Instruct", torch_dtype="auto", device_map="auto"
+ )
+ processor = AutoProcessor.from_pretrained("./Qwen2-VL-7B-Instruct")
+
+ # Load the sentence-embedding model once at startup
+ embedder = SentenceTransformer("./all-MiniLM-L6-v2")
+
+ def array_to_image_path(image_array):
+     if image_array is None:
+         raise ValueError("No image provided. Please upload an image before submitting.")
+     # Convert numpy array to PIL Image
+     img = Image.fromarray(np.uint8(image_array))
+
+     # Generate a unique filename using a timestamp
+     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+     filename = f"image_{timestamp}.png"
+
+     # Save the image
+     img.save(filename)
+
+     # Return the full path of the saved image
+     return os.path.abspath(filename)
+
+ def generate_embeddings(text):
+     # Encode the text and return a plain list so gr.JSON can serialize it
+     embeddings = embedder.encode(text)
+     return embeddings.tolist()
+
+ def describe_image(image):
+     # Save the uploaded array to disk and keep a PIL copy for the processor
+     image_path = array_to_image_path(image)
+     pil_image = Image.open(image_path).convert("RGB")
+     with open(image_path, "rb") as f:
+         image_b64 = base64.b64encode(f.read()).decode("utf-8")
+
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 {"type": "image", "image": f"data:image/png;base64,{image_b64}"},
+                 {"type": "text", "text": "Make a very detailed description of the image."},
+             ],
+         }
+     ]
+
+     text_prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
+     # Expected output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Make a very detailed description of the image.<|im_end|>\n<|im_start|>assistant\n'
+
+     inputs = processor(
+         text=[text_prompt], images=[pil_image], padding=True, return_tensors="pt"
+     )
+     inputs = inputs.to(model.device)
+
+     # Inference: generate the description
+     output_ids = model.generate(**inputs, max_new_tokens=128)
+     generated_ids = [
+         output_ids[len(input_ids):]
+         for input_ids, output_ids in zip(inputs.input_ids, output_ids)
+     ]
+     output_text = processor.batch_decode(
+         generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
+     )
+     description = output_text[0]
+
+     # Remove the temporary image file
+     os.remove(image_path)
+
+     # Return the description and its sentence embedding
+     return description, generate_embeddings(description)
+
+ # Create a Gradio interface
+ iface = gr.Interface(
+     fn=describe_image,
+     inputs=gr.Image(),
+     outputs=[gr.Textbox(label="Description"), gr.JSON(label="Embeddings")],
+     title="Image Description with Qwen Model",
+     description="Upload an image to get a detailed description using the Qwen2-VL-7B-Instruct model.",
+ )
+
+ # Launch the app
+ # iface.launch(share=True)
+ iface.launch()
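
For reference, a minimal sketch of calling the running app from Python with gradio_client, assuming the app is up locally on Gradio's default port, a recent gradio_client that provides handle_file, and an illustrative test image named sample.png:

    from gradio_client import Client, handle_file

    # Connect to the locally running Gradio app (default URL assumed)
    client = Client("http://127.0.0.1:7860/")

    # Single gr.Interface apps expose their endpoint as "/predict"
    description, embedding = client.predict(
        handle_file("sample.png"),  # hypothetical local test image
        api_name="/predict",
    )

    print(description)
    print(len(embedding))  # all-MiniLM-L6-v2 embeddings are 384-dimensional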