import gradio as gr
from sentence_transformers import SentenceTransformer
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from PIL import Image
from datetime import datetime
import numpy as np
import os

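# Load the Qwen2-VL model and its processor once at startup from the local checkpoint directory.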
model = Qwen2VLForConditionalGeneration.from_pretrained(
    "./Qwen2-VL-7B-Instruct", torch_dtype="auto", device_map="auto"
)
processor = AutoProcessor.from_pretrained("./Qwen2-VL-7B-Instruct")

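# Save the uploaded NumPy image array to a timestamped PNG file and return its absolute path.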
def array_to_image_path(image_array):
    if image_array is None:
        raise ValueError("No image provided. Please upload an image before submitting.")

    img = Image.fromarray(np.uint8(image_array))

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"image_{timestamp}.png"
    img.save(filename)

    full_path = os.path.abspath(filename)
    return full_path

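# Encode text with the local all-MiniLM-L6-v2 sentence-transformer.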
def generate_embeddings(text):
    # Use a distinct name so the global Qwen `model` is not shadowed inside this function.
    embed_model = SentenceTransformer("./all-MiniLM-L6-v2")
    embeddings = embed_model.encode(text)
    # Return a plain list so Gradio's JSON component can serialize it.
    return embeddings.tolist()

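# Gradio callback: describe the uploaded image with Qwen2-VL, then embed the description.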
def describe_image(image_array):
    # Persist the uploaded array to disk and reopen it as a PIL image for the processor.
    image_path = array_to_image_path(image_array)
    image = Image.open(image_path)

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image"},
                {"type": "text", "text": "Make a very detailed description of the image."},
            ],
        }
    ]

    text_prompt = processor.apply_chat_template(messages, add_generation_prompt=True)

    inputs = processor(
        text=[text_prompt], images=[image], padding=True, return_tensors="pt"
    )
    # Move inputs to the device the model was dispatched to by device_map="auto".
    inputs = inputs.to(model.device)

    output_ids = model.generate(**inputs, max_new_tokens=128)
    # Strip the prompt tokens so only the newly generated text is decoded.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(inputs.input_ids, output_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
    )[0]

    # Remove the temporary image file once generation is done.
    os.remove(image_path)

    return output_text, generate_embeddings(output_text)

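# Wire the callback into a simple Gradio interface.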
iface = gr.Interface(
    fn=describe_image,
    inputs=gr.Image(),
    outputs=[gr.Textbox(label="Description"), gr.JSON(label="Embeddings")],
    title="Image Description with Qwen Model",
    description="Upload an image to get a detailed description using the Qwen2-VL-7B-Instruct model.",
)

iface.launch()