Update app.py
app.py
CHANGED
@@ -16,23 +16,13 @@ model = Qwen2VLForConditionalGeneration.from_pretrained(
 )
 processor = AutoProcessor.from_pretrained("./Qwen2-VL-7B-Instruct")
 
-def
+def array_to_image(image_array):
     if image_array is None:
         raise ValueError("No image provided. Please upload an image before submitting.")
     # Convert numpy array to PIL Image
-
+    image = Image.fromarray(np.uint8(image_array)).convert("RGB")
 
-
-    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-    filename = f"image_{timestamp}.png"
-
-    # Save the image
-    img.save(filename)
-
-    # Get the full path of the saved image
-    full_path = os.path.abspath(filename)
-
-    return full_path
+    return image
 
 def generate_embeddings(text):
     model = SentenceTransformer('./all-MiniLM-L6-v2')
@@ -40,9 +30,7 @@ def generate_embeddings(text):
     return embeddings
 
 def describe_image(image_array):
-
-    os.chmod(image_path, stat.S_IROTH)
-    image = Image.open(image_path)
+    image = array_to_image(image_array)
 
     messages = [
         {