abc-valera commited on
Commit
0156fe0
·
1 Parent(s): be4b8bf

Added application file

Browse files
Files changed (4) hide show
  1. Dockerfile +58 -0
  2. main.py +9 -0
  3. requirements.txt +11 -0
  4. superpoint.py +142 -0
Dockerfile ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# you will also find guides on how best to write your Dockerfile

FROM python:3.9

# The two following lines are requirements for the Dev Mode to be functional
# Learn more about the Dev Mode at https://huggingface.co/dev-mode-explorers
RUN useradd -m -u 1000 user
WORKDIR /app

# Install dependencies first so Docker layer caching skips the pip step
# when only application code changes.
COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

COPY --chown=user . /app
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
main.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI


# ASGI application served by uvicorn (see Dockerfile CMD "main:app").
app = FastAPI()


@app.get("/")
def read_root():
    # Root endpoint: returns a static greeting payload.
    payload = {"Hello": "World!"}
    return payload
requirements.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # requests
2
+ # pillow
3
+ # transformers[torch]
4
+ # opencv-python
5
+
6
+ fastapi==0.74.*
7
+ requests==2.27.*
8
+ sentencepiece==0.1.*
9
+ torch==1.11.*
10
+ transformers==4.*
11
+ uvicorn[standard]==0.17.*
superpoint.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import (
2
+ SuperPointImageProcessor,
3
+ SuperPointForKeypointDetection as SuperPointKeypointDetection,
4
+ )
5
+ import torch
6
+ import cv2
7
+ import numpy as np
8
+ import os
9
+
10
+
11
### DATA LOADING AND PREPROCESSING ###

# Create the directory for output files
os.makedirs("output", exist_ok=True)

images = ["data/image0.jpg", "data/image1.jpg", "data/image2.jpg", "data/image3.jpg"]
original_images = []
resized_images = []

for image_path in images:
    image = cv2.imread(image_path)
    # cv2.imread returns None (no exception) when a file is missing or
    # unreadable; fail fast here instead of crashing later in cv2.resize
    # with a cryptic error.
    if image is None:
        raise FileNotFoundError(f"Could not read image: {image_path}")
    original_images.append(image)
    # Resize to a fixed 640x480 so all images can be batched for the model
    resized_image = cv2.resize(image, (640, 480))
    resized_images.append(resized_image)
25
+
26
+
27
### APPLYING THE SUPERPOINT FEATURE DETECTOR ###


# Load the pretrained SuperPoint processor and model
processor = SuperPointImageProcessor.from_pretrained("magic-leap-community/superpoint")
model = SuperPointKeypointDetection.from_pretrained("magic-leap-community/superpoint")

# Preprocess all resized images into one batch and run a single forward pass
inputs = processor(resized_images, return_tensors="pt")
outputs = model(**inputs)
35
+
36
+
37
+ ### RESULT VISUALIZATION ###
38
+
39
+
40
def draw_keypoints(image, keypoints, color=(0, 255, 0), radius=2):
    """Draw each keypoint as a filled circle on *image* (modified in place) and return it."""
    for point in keypoints:
        center = (int(point[0]), int(point[1]))
        cv2.circle(image, center, radius, color, -1)
    return image
45
+
46
+
47
def create_blank_image(shape):
    """Return an all-black 3-channel uint8 image of height shape[0] x width shape[1]."""
    height, width = shape[0], shape[1]
    return np.zeros((height, width, 3), dtype=np.uint8)
49
+
50
+
51
all_keypoints = []

for i, (original_image, resized_image) in enumerate(
    zip(original_images, resized_images)
):
    # Keep only this image's entries flagged by the mask — presumably the
    # valid detections within the padded batch output; confirm against the
    # transformers SuperPoint docs.
    image_mask = outputs.mask[i]
    image_indices = torch.nonzero(image_mask).squeeze()
    image_keypoints = outputs.keypoints[i][image_indices]

    # Scale the keypoints back to the original image size.
    # NOTE(review): this assumes keypoints are pixel coordinates in the
    # resized (640x480) frame, not normalized [0, 1] coords — TODO confirm.
    scale_x = original_image.shape[1] / resized_image.shape[1]
    scale_y = original_image.shape[0] / resized_image.shape[0]
    scaled_keypoints = image_keypoints.clone()
    scaled_keypoints[:, 0] *= scale_x
    scaled_keypoints[:, 1] *= scale_y

    all_keypoints.append(scaled_keypoints)

    # Save the original image with the keypoints drawn on top
    keypoints_image = draw_keypoints(original_image.copy(), scaled_keypoints)
    cv2.imwrite(f"output/image{i}.png", keypoints_image)

    # Save a second image showing only the keypoints on a black background
    blank_image = create_blank_image(original_image.shape[:2])
    just_keypoints_image = draw_keypoints(blank_image, scaled_keypoints)
    cv2.imwrite(f"output/image{i}_just_keypoints.png", just_keypoints_image)
77
+
78
+
79
+ ### FEATURE MATCHING ###
80
+
81
+
82
def match_keypoints(img1, kp1, img2, kp2, method="flann"):
    """Match keypoints between two images using SIFT descriptors.

    Args:
        img1, img2: BGR images (uint8 arrays).
        kp1, kp2: iterables of (x, y) keypoint coordinates.
        method: "flann" for FLANN-based matching; anything else uses the
            brute-force matcher.

    Returns:
        An image with the ratio-test-filtered matches drawn.
    """
    # Convert raw (x, y) coordinates to cv2.KeyPoint objects
    kp1 = [cv2.KeyPoint(x=float(kp[0]), y=float(kp[1]), size=1) for kp in kp1]
    kp2 = [cv2.KeyPoint(x=float(kp[0]), y=float(kp[1]), size=1) for kp in kp2]

    # Compute SIFT descriptors at the provided keypoint locations
    sift = cv2.SIFT_create()
    _, des1 = sift.compute(img1, kp1)
    _, des2 = sift.compute(img2, kp2)

    if method == "flann":
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        matcher = cv2.FlannBasedMatcher(index_params, search_params)
    else:  # BF Matcher
        matcher = cv2.BFMatcher()
    matches = matcher.knnMatch(des1, des2, k=2)

    # Apply Lowe's ratio test. knnMatch can return fewer than 2 neighbors
    # for a query (e.g. very few keypoints), so guard the unpacking instead
    # of blindly destructuring `m, n` — that would raise a ValueError.
    good_matches = []
    for pair in matches:
        if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance:
            good_matches.append(pair[0])

    # Draw matches side by side
    img_matches = cv2.drawMatches(
        img1,
        kp1,
        img2,
        kp2,
        good_matches,
        None,
        flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS,
    )
    return img_matches
119
+
120
+
121
# Run feature matching against the first image when more than one is available
if len(images) > 1:
    for i in range(1, len(images)):
        # Produce one result per matching backend (FLANN and brute-force)
        for method in ("flann", "bf"):
            matched = match_keypoints(
                original_images[0],
                all_keypoints[0],
                original_images[i],
                all_keypoints[i],
                method=method,
            )
            cv2.imwrite(f"output/image0_image{i}_{method}.png", matched)