cheng-hust committed on
Commit
672536c
·
verified ·
1 Parent(s): 2b88463

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -66,20 +66,20 @@ class Model(nn.Module):
66
 
67
 
68
  model = Model(confg='./configs/rtdetr/rtdetr_r101vd_6x_coco.yml',ckpt="./checkpoint_init.pth")
69
- model2 = Model(confg='./configs/rtdetr/rtdetr_r101vd_6x_coco_cope12.yml',ckpt="./checkpointcope12.pth",cope=False)
70
- model3 = Model(confg='./configs/rtdetr/rtdetr_r101vd_6x_coco_cope24.yml',ckpt="./checkpointcope24.pth",cope=False)
71
 
72
  #img = cv2.imread('./j.jpg',cv2.IMREAD_GRAYSCALE)
73
  #img = Image.open('./a.jpg').convert('RGB').resize((640,640))
74
 
75
 
76
- def detect(img,thr=0.2,cope='none'):
77
  #print(img) #ndarray
78
  img = Image.fromarray(img).resize((640,640))
79
  t_img = transformer(img).unsqueeze(0)#.unsqueeze(0) #[1,1,640,640]
80
  size = torch.tensor([[t_img.shape[2], t_img.shape[3]]])
81
  #print(t_img.shape)
82
- if cope == 'none':
83
  labels, boxes, scores=model(t_img,size)
84
  elif cope == 'cope12':
85
  labels, boxes, scores=model2(t_img,size)
@@ -105,6 +105,6 @@ def detect(img,thr=0.2,cope='none'):
105
  #save_path = Path('./output') / img_path.name
106
  return img
107
 
108
- interface = gr.Interface(fn=detect,inputs=["image",gr.Slider(label="thr", value=0.2, maximum=1, minimum=0),gr.inputs.Radio(['none','cope12','cope24'])],outputs="image",title="rt-cope detect")
109
 
110
  interface.launch()
 
66
 
67
 
68
  model = Model(confg='./configs/rtdetr/rtdetr_r101vd_6x_coco.yml',ckpt="./checkpoint_init.pth")
69
+ model2 = Model(confg='./configs/rtdetr/rtdetr_r101vd_6x_cococope12.yml',ckpt="./checkpointcope12.pth",cope=False)
70
+ model3 = Model(confg='./configs/rtdetr/rtdetr_r101vd_6x_coco.yml',ckpt="./rtdetrCOCO.pth",cope=False)
71
 
72
  #img = cv2.imread('./j.jpg',cv2.IMREAD_GRAYSCALE)
73
  #img = Image.open('./a.jpg').convert('RGB').resize((640,640))
74
 
75
 
76
+ def detect(img,thr=0.2,cope='aitod'):
77
  #print(img) #ndarray
78
  img = Image.fromarray(img).resize((640,640))
79
  t_img = transformer(img).unsqueeze(0)#.unsqueeze(0) #[1,1,640,640]
80
  size = torch.tensor([[t_img.shape[2], t_img.shape[3]]])
81
  #print(t_img.shape)
82
+ if cope == 'aitod':
83
  labels, boxes, scores=model(t_img,size)
84
  elif cope == 'cope12':
85
  labels, boxes, scores=model2(t_img,size)
 
105
  #save_path = Path('./output') / img_path.name
106
  return img
107
 
108
+ interface = gr.Interface(fn=detect,inputs=["image",gr.Slider(label="thr", value=0.2, maximum=1, minimum=0),gr.inputs.Radio(['aitod','cope12','COCO'])],outputs="image",title="rt-cope detect")
109
 
110
  interface.launch()