Commit 4a2e688
Parent(s): 39d7d0c

Update main.py
main.py CHANGED
@@ -7,7 +7,7 @@ import torch.nn.functional as F
 
 # mm libs
 from mmdet.registry import MODELS
-from mmengine import Config
+from mmengine import Config, print_log
 from mmengine.structures import InstanceData
 
 from ext.class_names.lvis_list import LVIS_CLASSES
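The only change in this hunk is the extra import, print_log. A minimal sketch of how it is used, assuming mmengine is installed; logger='current' routes the message to whatever MMLogger instance is currently active:

from mmengine import print_log

# Send a message through MMEngine's logging machinery rather than bare print().
print_log("hello from the demo", logger='current')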
@@ -89,7 +89,7 @@ def get_points_with_draw(image, img_state, evt: gr.SelectData):
     label = 'Add Mask'
 
     x, y = evt.index[0], evt.index[1]
-
+    print_log(f"Point: {x}_{y}", logger='current')
     point_radius, point_color = 10, (97, 217, 54) if label == "Add Mask" else (237, 34, 13)
 
     img_state.selected_points.append([x, y])
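For context, the handler above receives its coordinates from Gradio's Select event. A hedged, self-contained sketch of that wiring; the component names and layout here are illustrative, not taken from the app:

import gradio as gr

def on_click(image, evt: gr.SelectData):
    # For an Image component, SelectData.index holds the clicked pixel as (x, y).
    x, y = evt.index[0], evt.index[1]
    return f"Point: {x}_{y}"

with gr.Blocks() as demo:
    img = gr.Image(type="pil")
    out = gr.Textbox()
    img.select(on_click, inputs=img, outputs=out)

# demo.launch()  # uncomment to run locally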
@@ -116,7 +116,7 @@ def get_bbox_with_draw(image, img_state, evt: gr.SelectData):
     else:
         raise ValueError(f"Cannot be {len(img_state.selected_bboxes)}")
 
-
+    print_log(f"box_list: {img_state.selected_bboxes}", logger='current')
 
     draw = ImageDraw.Draw(image)
     draw.ellipse(
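The new log line lands just before the drawing code visible in the context. A small sketch of that Pillow call, with the radius and colour values mirroring the diff:

from PIL import Image, ImageDraw

image = Image.new("RGB", (256, 256), "white")
x, y = 128, 128
point_radius, point_color = 10, (97, 217, 54)

# Mark the clicked point as a filled circle centred on (x, y).
draw = ImageDraw.Draw(image)
draw.ellipse(
    [(x - point_radius, y - point_radius), (x + point_radius, y + point_radius)],
    fill=point_color,
)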
@@ -233,14 +233,22 @@ def extract_img_feat(img, img_state):
     new_h = int(h * scale)
     img = img.resize((new_w, new_h), resample=Image.Resampling.BILINEAR)
     img_numpy = np.array(img)
-
-
-
-
-
-
-
-
+    print_log(f"Successfully loaded an image with size {new_w} x {new_h}", logger='current')
+
+    try:
+        img_tensor = torch.tensor(img_numpy, device=device, dtype=torch.float32).permute((2, 0, 1))[None]
+        img_tensor = (img_tensor - mean) / std
+        img_tensor = F.pad(img_tensor, (0, IMG_SIZE - new_w, 0, IMG_SIZE - new_h), 'constant', 0)
+        feat_dict = model.extract_feat(img_tensor)
+        img_state.set_img(img_numpy, feat_dict)
+        print_log(f"Successfully generated the image feats.", logger='current')
+    except RuntimeError as e:
+        if "CUDA out of memory" in str(e):
+            img_state.clear()
+            print_log(f"CUDA OOM! please try again later", logger='current')
+            return None, None, "CUDA OOM, please try again later."
+        else:
+            raise
     return img, None, "Please try to click something."
 
 
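The added block turns the resized image into a model-ready input: an HWC uint8 array becomes a normalized CHW float tensor, padded on the right and bottom to a fixed square. A hedged, standalone sketch of that preprocessing; IMG_SIZE, mean and std below are stand-ins for the app's globals, which are not shown in this diff:

import numpy as np
import torch
import torch.nn.functional as F

IMG_SIZE = 1024                                                   # assumed padded size
mean = torch.tensor([123.675, 116.28, 103.53]).view(1, 3, 1, 1)   # assumed normalization stats
std = torch.tensor([58.395, 57.12, 57.375]).view(1, 3, 1, 1)

img_numpy = np.zeros((683, 1024, 3), dtype=np.uint8)              # stand-in for the resized image
new_h, new_w = img_numpy.shape[:2]

img_tensor = torch.tensor(img_numpy, dtype=torch.float32).permute((2, 0, 1))[None]  # 1 x 3 x H x W
img_tensor = (img_tensor - mean) / std                                               # channel-wise normalize
img_tensor = F.pad(img_tensor, (0, IMG_SIZE - new_w, 0, IMG_SIZE - new_h), 'constant', 0)  # pad right/bottom
assert img_tensor.shape[-2:] == (IMG_SIZE, IMG_SIZE)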
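The try/except exists so a CUDA out-of-memory error degrades into a readable message instead of crashing the Space. A generic sketch of the same pattern, with torch.cuda.empty_cache() added as one common (assumed) cleanup step:

import torch

def run_safely(fn, *args):
    try:
        return fn(*args)
    except RuntimeError as e:
        if "CUDA out of memory" in str(e):
            # Release whatever we can and let the caller report the failure.
            torch.cuda.empty_cache()
            return None
        raise  # anything else is a real bug: re-raise it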