Update app.py
app.py
CHANGED
@@ -11,7 +11,7 @@ from pathlib import Path
 
 transformer = transforms.Compose([
     transforms.ToTensor(),
-    transforms.Resize([640,640]),
+    #transforms.Resize([640,640]),
 
 ])
 
@@ -68,10 +68,9 @@ model = Model(confg='./configs/rtdetr/rtdetr_r101vd_6x_coco.yml',ckpt="./checkpo
 #img = Image.open('./a.jpg').convert('RGB').resize((640,640))
 
 
-
 def detect(img):
-    print(img)
-
+    #print(img) #ndarray
+    img = Image.fromarray(img).resize((640,640))
     t_img = transformer(img).unsqueeze(0)#.unsqueeze(0) #[1,1,640,640]
     size = torch.tensor([[t_img.shape[2], t_img.shape[3]]])
     #print(t_img.shape)