Commit 43a30ac · Marcus Vinicius Zerbini Canhaço
Parent: 346e896

feat: atualização do detector com otimizações para GPU T4
(feat: update the detector with optimizations for the T4 GPU)
src/domain/detectors/gpu.py  CHANGED

@@ -143,7 +143,14 @@ class WeaponDetectorGPU(BaseDetector):
             "frames_analyzed": 0,
             "video_duration": 0,
             "device_type": "GPU",
-            "detections": []
+            "detections": [],
+            "technical": {
+                "model": "owlv2-base-patch16",
+                "input_size": f"{resolution}x{resolution}",
+                "nms_threshold": 0.5,
+                "preprocessing": "optimized",
+                "early_stop": False
+            }
         }
 
         try:
@@ -166,7 +173,7 @@ class WeaponDetectorGPU(BaseDetector):
             # Calculate the video duration
             metrics["video_duration"] = len(frames) / (fps or 2)
 
-            # Process frames
+            # Process frames
             t0 = time.time()
             detections_by_frame = []
 
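Note on the new "technical" block: the nms_threshold of 0.5 presumably refers to the IoU cutoff used when merging overlapping boxes before they are reported. A minimal sketch of that kind of step, using torchvision's standard non-maximum suppression; the helper name and tensor layout are illustrative assumptions, not code from this repository:

    import torch
    from torchvision.ops import nms  # IoU-based non-maximum suppression

    def filter_overlapping_boxes(boxes, scores, labels, nms_threshold=0.5):
        # Hypothetical helper: keep the highest-scoring box among any group of
        # boxes whose IoU exceeds nms_threshold, which is what a 0.5 setting implies.
        boxes_t = torch.tensor(boxes, dtype=torch.float32)    # [N, 4] as (x1, y1, x2, y2)
        scores_t = torch.tensor(scores, dtype=torch.float32)  # [N]
        keep = nms(boxes_t, scores_t, iou_threshold=nms_threshold)
        return [
            {"box": boxes[i], "confidence": scores[i], "label": labels[i]}
            for i in keep.tolist()
        ]

The recorded model name, owlv2-base-patch16, matches the OWLv2 base checkpoint family, and input_size documents the square resolution frames are resized to before inference.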
src/infrastructure/services/weapon_detector.py  CHANGED

@@ -70,20 +70,19 @@ class WeaponDetectorService(DetectorInterface):
 
         # Convert detections to domain entities
         detections = []
-        for …
-        except Exception as e:
-            logger.error(f"Erro ao processar detecção: {str(e)}")
+        for detection in metrics.get('detections', []):
+            try:
+                detections.append(Detection(
+                    frame=detection.get('frame', 0),
+                    confidence=detection.get('confidence', 0.0),
+                    label=detection.get('label', 'objeto perigoso'),
+                    box=detection.get('box', [0, 0, 0, 0]),
+                    timestamp=detection.get('frame', 0) / fps if fps else 0
+                ))
+            except Exception as e:
+                logger.error(f"Erro ao processar detecção: {str(e)}")
 
+        # Create the result with technical information
         result = DetectionResult(
             video_path=output_path or video_path,
             detections=detections,
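The new loop builds domain Detection entities from the raw metrics dictionary. The entity itself is not part of this diff; a minimal dataclass consistent with the keyword arguments used above (field names and defaults inferred, the real class under src/domain may differ) could look like:

    from dataclasses import dataclass, field
    from typing import List

    @dataclass
    class Detection:
        # Fields inferred from the append() call in the service; illustrative only.
        frame: int = 0
        confidence: float = 0.0
        label: str = "objeto perigoso"   # default label used by the service
        box: List[int] = field(default_factory=lambda: [0, 0, 0, 0])
        timestamp: float = 0.0           # seconds, computed as frame / fps when fps is known

Wrapping each append in try/except keeps one malformed detection from discarding the whole batch, at the cost of silently dropping it after logging.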
src/main.py  CHANGED

@@ -138,7 +138,7 @@ def main():
             api_open=False,
             max_size=queue_size,
             status_update_rate="auto",
-
+            concurrency_count=max_concurrent
         )
 
         # Launch
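The only change here passes max_concurrent into Gradio's request queue. A minimal sketch of the surrounding call, assuming Gradio 3.x (where queue() accepts concurrency_count) and placeholder values for queue_size and max_concurrent:

    import gradio as gr

    queue_size = 10      # assumed value; the real one comes from the app's configuration
    max_concurrent = 2   # assumed value

    with gr.Blocks() as demo:
        gr.Markdown("placeholder UI")

    # Mirrors the queue settings in the diff above. In Gradio 4.x, concurrency_count
    # was replaced by default_concurrency_limit on queue().
    demo.queue(
        api_open=False,
        max_size=queue_size,
        status_update_rate="auto",
        concurrency_count=max_concurrent,
    )
    demo.launch()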
src/presentation/web/gradio_interface.py  CHANGED

@@ -295,18 +295,21 @@ class GradioInterface:
                 "frames_analyzed": response.detection_result.frames_analyzed,
                 "total_time": round(response.detection_result.total_time, 2),
                 "frame_extraction_time": round(response.detection_result.frame_extraction_time, 2),
-                "analysis_time": round(response.detection_result.analysis_time, 2)
+                "analysis_time": round(response.detection_result.analysis_time, 2),
+                "fps": fps,
+                "resolution": resolution
             },
-            "detections": []
+            "detections": [],
+            "cache_stats": response.cache_stats if hasattr(response, 'cache_stats') else {}
         }
 
-        # Add detections to the JSON
-        for det in response.detection_result.detections[:10]:
+        # Add detections to the JSON with temporal information
+        for det in response.detection_result.detections[:10]:
            technical_data["detections"].append({
                "label": det.label,
                "confidence": round(det.confidence * 100 if det.confidence <= 1.0 else det.confidence, 2),
                "frame": det.frame,
-               "timestamp":
+               "timestamp": f"{int(det.timestamp // 60):02d}:{int(det.timestamp % 60):02d}",
                "box": det.box if hasattr(det, "box") else None
            })
 
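The new timestamp field formats det.timestamp (seconds) as MM:SS directly in the f-string. An equivalent standalone helper, shown only to make the rounding behavior explicit (the function name is not from the repository):

    def format_timestamp(seconds: float) -> str:
        # Same arithmetic as the inline f-string above: whole minutes, then whole seconds.
        return f"{int(seconds // 60):02d}:{int(seconds % 60):02d}"

    assert format_timestamp(0) == "00:00"
    assert format_timestamp(75.4) == "01:15"

For videos longer than an hour the minutes simply keep counting past 59 (3700 s renders as "61:40"), so an HH:MM:SS variant would be needed in that case.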