AI-Naga committed
Commit 4059946 · 1 Parent(s): 6602745

Update app.py
Files changed (1):
  1. app.py +15 -49
app.py CHANGED
@@ -10,9 +10,9 @@ from ultralytics import YOLO
 model = torch.hub.load('ultralytics/yolov5', 'yolov5x', pretrained=True)
 path = [['image_0.jpg'], ['image_1.jpg']]
 video_path = [['TresPass_Detection_1.mp4']]
-# area = [(215, 180), (120, 75), (370, 55), (520, 140), (215, 180) ]
-area = [(215, 180), (110, 75), (370, 55), (520, 140), (215, 180) ]
-
+# area = [(215, 180), (110, 75), (370, 55), (520, 140), (215, 180) ]
+# area = [(190, 180), (100, 75), (360, 55), (510, 140), (190, 180) ]
+area = [(215, 180), (110, 80), (360, 55), (510, 140), (215, 180) ]
 # def show_preds_video(video_path):
 def show_preds_video():
     cap = cv2.VideoCapture('TresPass_Detection_1.mp4')
@@ -22,12 +22,14 @@ def show_preds_video():
         if not ret:
             break
         count += 1
-        if count % 10 != 0:
+        if count % 8 != 0:
             continue
         # frame = cv2.imread(video_path)
+
         frame=cv2.resize(frame,(1020,600))
         frame_copy = frame.copy()
-        frame=cv2.resize(frame,(1020,600))
+
+        cv2.polylines(frame_copy, [np.array(area, np.int32)], True, (0,255,0), 2)
 
         results=model(frame)
         for index, row in results.pandas().xyxy[0].iterrows():
@@ -42,12 +44,14 @@ def show_preds_video():
 
             if ('person') in d:
                 results = cv2.pointPolygonTest(np.array(area, np.int32), ((cx,cy)), False)
+                # results = cv2.pointPolygonTest(np.array(area, np.int32), ((x2,y1)), False)
+                # results = cv2.pointPolygonTest(np.array(area, np.int32), ((x2,y2)), False)
                 if results >0:
                     cv2.rectangle(frame_copy,(x1,y1),(x2,y2),(0,0,255),2)
                     cv2.putText(frame_copy,str(d),(x1,y1),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),1)
-                    cv2.putText(frame_copy,str("Alert !!! Trespasser detected !!!"),(50,400),cv2.FONT_HERSHEY_PLAIN,2,(0,0,255),3)
+                    cv2.putText(frame_copy,str("Alert !!! Trespasser detected !!!"),(50,300),cv2.FONT_HERSHEY_PLAIN,2,(0,0,255),3)
 
-        cv2.polylines(frame_copy, [np.array(area, np.int32)], True, (0,255,0), 2)
+
 
         yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
 
@@ -62,52 +66,14 @@ interface_video = gr.Interface(
     fn=show_preds_video,
     inputs=inputs_video,
     outputs=outputs_video,
-    title="Intrusion Detection",
+    title="Security - Trespasser monitoring ",
     examples=video_path,
-    cache_examples=False,
+    cache_examples=True,
+
 )
 
 gr.TabbedInterface(
     [interface_video],
     # [interface_image, interface_video],
     tab_names=['Video inference']
-).queue().launch(width=200, height = 200)
-
-
-# def show_preds_image(image_path):
-#     frame = cv2.imread(image_path)
-#     frame=cv2.resize(frame,(1020,600))
-#     results=model(frame)
-#     for index, row in results.pandas().xyxy[0].iterrows():
-#         x1 = int(row['xmin'])
-#         y1 = int(row['ymin'])
-#         x2 = int(row['xmax'])
-#         y2 = int(row['ymax'])
-#         d=(row['name'])
-
-#         cx=int(x1+x2)//2
-#         cy=int(y1+y2)//2
-
-#         if ('person') in d:
-#             results = cv2.pointPolygonTest(np.array(area, np.int32), ((cx,cy)), False)
-#             if results >0:
-#                 cv2.rectangle(frame,(x1,y1),(x2,y2),(0,0,255),2)
-#                 cv2.putText(frame,str(d),(x1,y1),cv2.FONT_HERSHEY_PLAIN,1,(255,0,0),2)
-
-#     cv2.polylines(frame, [np.array(area, np.int32)], True, (0,255,0), 2)
-#     return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-
-# inputs_image = [
-#     gr.components.Image(type="filepath", label="Input Image"),
-# ]
-# outputs_image = [
-#     gr.components.Image(type="numpy", label="Output Image"),
-# ]
-# interface_image = gr.Interface(
-#     fn=show_preds_image,
-#     inputs=inputs_image,
-#     outputs=outputs_image,
-#     title="Parking space counter",
-#     examples=path,
-#     cache_examples=False,
-# )
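Note on the zone test this commit tunes: the reshaped `area` polygon and the commented-out `pointPolygonTest` variants decide when a detected person counts as a trespasser. Below is a minimal, illustrative sketch of that check (not part of app.py; `point_in_zone` and the sample box coordinates are made up for the example), assuming the coordinates from the new `area` constant.

import cv2
import numpy as np

# Restricted zone from the updated `area` constant (pixel coordinates in the
# 1020x600 resized frame); the polygon is closed by repeating the first vertex.
area = [(215, 180), (110, 80), (360, 55), (510, 140), (215, 180)]

def point_in_zone(point, polygon=area):
    # With measureDist=False, cv2.pointPolygonTest returns +1 inside, 0 on the
    # edge and -1 outside, which is what the `results > 0` check in app.py uses.
    return cv2.pointPolygonTest(np.array(polygon, np.int32), point, False) > 0

# Hypothetical person box: app.py tests the box centre (cx, cy); the commented
# lines added in this commit try a corner such as (x2, y1) or (x2, y2) instead.
x1, y1, x2, y2 = 200, 60, 260, 160
cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
print(point_in_zone((cx, cy)))   # True -> this box would raise the trespass alert

Testing a box corner instead of the centre changes when the alert fires for people standing at the zone boundary, which appears to be what the commented alternatives are probing.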
 
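For context on the Gradio wiring these hunks rely on: `show_preds_video` is a generator, and with `queue()` enabled Gradio streams each yielded frame to the `Image` output instead of waiting for a single return value. The sketch below shows that pattern in a self-contained form; the `stream_frames` name, the input parameter and the `gr.Video`/`gr.Image` component choices are illustrative assumptions, not the exact code in this Space.

import cv2
import gradio as gr

def stream_frames(video_file):
    # Yield RGB frames one at a time; Gradio pushes each yielded value to the
    # Image output while the generator is still running.
    cap = cv2.VideoCapture(video_file)
    count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        count += 1
        if count % 8 != 0:   # same frame-skip rate this commit switches to
            continue
        frame = cv2.resize(frame, (1020, 600))
        yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    cap.release()

demo = gr.Interface(
    fn=stream_frames,
    inputs=gr.Video(label="Input video"),
    outputs=gr.Image(type="numpy", label="Output"),
)
demo.queue().launch()

Processing every 8th frame (instead of every 10th) pushes slightly more frames through YOLOv5, trading a bit of compute for a smoother output stream.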