Merge branch 'develop'
Browse files- .gitattributes +22 -0
- .gitignore +51 -0
- predict.py +36 -0
- processed-video/ny-traffic-processed.mp4 +3 -0
- processed-video/ny-walking-processed.mp4 +3 -0
- raw-video/ny-traffic.mp4 +3 -0
- raw-video/ny-walking.mp4 +3 -0
- requirements.txt +1 -0
- runs/val/best.pt +3 -0
- runs/val/last.pt +3 -0
- runs/val/val_coco8/F1_curve.png +0 -0
- runs/val/val_coco8/PR_curve.png +0 -0
- runs/val/val_coco8/P_curve.png +0 -0
- runs/val/val_coco8/R_curve.png +0 -0
- runs/val/val_coco8/confusion_matrix.png +0 -0
- runs/val/val_coco8/confusion_matrix_normalized.png +0 -0
- runs/val/val_coco8/val_batch0_labels.jpg +0 -0
- runs/val/val_coco8/val_batch0_pred.jpg +0 -0
- train_yolov8n.py +24 -0
- train_yolov8s.py +20 -0
- validate.py +15 -0
.gitattributes
CHANGED
@@ -1,3 +1,4 @@
|
|
|
|
1 |
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
*.bin filter=lfs diff=lfs merge=lfs -text
|
@@ -33,3 +34,24 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# (merge conflict resolved: rules from both HEAD and yolov8-model are kept below)
|
2 |
*.7z filter=lfs diff=lfs merge=lfs -text
|
3 |
*.arrow filter=lfs diff=lfs merge=lfs -text
|
4 |
*.bin filter=lfs diff=lfs merge=lfs -text
|
|
|
34 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
35 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
36 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
37 |
+
# --- rules merged from branch yolov8-model ---
|
38 |
+
# Tratar datasets y runs como binarios para evitar conversiones de texto
|
39 |
+
datasets/combined/** filter=lfs diff=lfs merge=lfs -text
|
40 |
+
runs/detect/** filter=lfs diff=lfs merge=lfs -text
|
41 |
+
# Asegurar formato de texto en archivos clave
|
42 |
+
*.yaml text eol=lf
|
43 |
+
*.py text eol=lf
|
44 |
+
*.md text eol=lf
|
45 |
+
# Forzar archivos de configuración y modelos a usar Git LFS
|
46 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
47 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
48 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
49 |
+
*.engine filter=lfs diff=lfs merge=lfs -text
|
50 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
51 |
+
# Archivos de vídeo y datos grandes
|
52 |
+
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
53 |
+
*.csv filter=lfs diff=lfs merge=lfs -text
|
54 |
+
*.json filter=lfs diff=lfs merge=lfs -text
|
55 |
+
processed-video/*.mp4 filter=lfs diff=lfs merge=lfs -text
|
56 |
+
raw-video/*.mp4 filter=lfs diff=lfs merge=lfs -text
|
57 |
+
# (end of rules merged from branch yolov8-model)
|
.gitignore
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Ignorar la carpeta de entornos virtuales
|
2 |
+
.venv/
|
3 |
+
venv/
|
4 |
+
|
5 |
+
# Archivos de caché de Python
|
6 |
+
__pycache__/
|
7 |
+
*.py[cod]
|
8 |
+
*$py.class
|
9 |
+
|
10 |
+
# Archivos temporales del sistema
|
11 |
+
.DS_Store
|
12 |
+
Thumbs.db
|
13 |
+
|
14 |
+
# Archivos de configuración de usuario
|
15 |
+
*.log
|
16 |
+
*.tmp
|
17 |
+
*.swp
|
18 |
+
*.swo
|
19 |
+
|
20 |
+
# Claves API o credenciales privadas (⚠️ ignorado porque este archivo contiene datos sensibles)
|
21 |
+
download-roboflow.py
|
22 |
+
|
23 |
+
# PERMITIR subir `runs/val/`
|
24 |
+
!runs/val/
|
25 |
+
|
26 |
+
# Ignorar archivos de ejecución y checkpoints de modelos
|
27 |
+
runs/detect
|
28 |
+
|
29 |
+
weights/
|
30 |
+
*.pt
|
31 |
+
*.onnx
|
32 |
+
*.tflite
|
33 |
+
*.engine
|
34 |
+
*.ckpt
|
35 |
+
|
36 |
+
# EXCEPCIÓN: No ignorar los .pt dentro de runs/val
|
37 |
+
!runs/val/*.pt
|
38 |
+
|
39 |
+
# Ignorar dataset original de COCO para evitar archivos innecesarios
|
40 |
+
datasets/
|
41 |
+
download-coco.py
|
42 |
+
datasets-download/
|
43 |
+
|
44 |
+
# Ignorar archivos con la terminación .Zone.Identifier (propio de Windows)
|
45 |
+
*.Zone.Identifier
|
46 |
+
|
47 |
+
# PERMITIR subir `datasets/combined/`
|
48 |
+
# (Eliminar esta línea si accidentalmente se ignora)
|
49 |
+
!datasets/combined/
|
50 |
+
!datasets/combined/images/
|
51 |
+
!datasets/combined/labels/
|
predict.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
from ultralytics import YOLO

# Load the trained YOLOv8 model.
model = YOLO("/home/izaskunmz/yolo/yolov8-object-detection/runs/detect/train_coco8/weights/best.pt")

# Open the input video.
video_path = "/home/izaskunmz/yolo/yolov8-object-detection/raw-video/ny-traffic.mp4"
cap = cv2.VideoCapture(video_path)
# Without this check, a missing/unreadable file yields width/height/fps of 0
# and a silently broken VideoWriter further down.
if not cap.isOpened():
    raise RuntimeError(f"Could not open video: {video_path}")

# Dimensions and frame rate of the source video.
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Keep fps as float (int() truncated fractional rates such as 29.97);
# some containers report 0 FPS, so fall back to a sane default.
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0

# Codec and writer for the annotated output video.
output_path = "/home/izaskunmz/yolo/yolov8-object-detection/processed-video/ny-traffic-processed.mp4"
fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # codec for MP4 output
out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

try:
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break  # end of video

        # Run detection on the frame.
        results = model(frame)

        # Frame with detection annotations drawn on it.
        annotated_frame = results[0].plot()

        # Append the annotated frame to the output video.
        out.write(annotated_frame)
finally:
    # Release capture and writer even if inference raises mid-video,
    # so the output file is finalized and handles are not leaked.
    cap.release()
    out.release()
|
processed-video/ny-traffic-processed.mp4
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ae66dfe798c8d870971336d36f5a2e2889416966ac8e1061760de1c26e22fd03
|
3 |
+
size 158863855
|
processed-video/ny-walking-processed.mp4
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:37dc741a42b183c4feadd7108fea2db6dc6b6116f9ac596f92944c7418e9cb4d
|
3 |
+
size 178527503
|
raw-video/ny-traffic.mp4
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:03bb512b109a238d9bdacfb118ce45e312925a1283f06c647624408573f4112a
|
3 |
+
size 62404880
|
raw-video/ny-walking.mp4
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:992ce64943371f674fdc2b243fc1228985e8d387e387230428e8c57a071d8e8c
|
3 |
+
size 20162842
|
requirements.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
ultralytics
|
runs/val/best.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:dfa11240a586e524e3b42e818f22b8afa899fa9b2425ca69648783f421d43829
|
3 |
+
size 22592803
|
runs/val/last.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:1314b2c5f9e3c55ff97fdbc023b0531b7d4217a00420698c731ec4e9278c6e35
|
3 |
+
size 22592803
|
runs/val/val_coco8/F1_curve.png
ADDED
![]() |
runs/val/val_coco8/PR_curve.png
ADDED
![]() |
runs/val/val_coco8/P_curve.png
ADDED
![]() |
runs/val/val_coco8/R_curve.png
ADDED
![]() |
runs/val/val_coco8/confusion_matrix.png
ADDED
![]() |
runs/val/val_coco8/confusion_matrix_normalized.png
ADDED
![]() |
runs/val/val_coco8/val_batch0_labels.jpg
ADDED
![]() |
runs/val/val_coco8/val_batch0_pred.jpg
ADDED
![]() |
train_yolov8n.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from ultralytics import YOLO

# Pretrained YOLOv8-nano checkpoint ("yolov8s.pt" is a heavier, more accurate option).
model = YOLO("yolov8n.pt")

# Hyperparameters tuned for a CPU-only training run.
train_args = dict(
    data="/home/izaskunmz/yolo/yolov8-object-detection/datasets/coco8/data.yaml",  # dataset config
    epochs=150,            # longer schedule to improve accuracy
    batch=8,               # small batch for stability on CPU
    imgsz=640,             # input image size
    device="cpu",          # train on CPU
    lr0=0.0005,            # low initial learning rate for stability
    lrf=0.0001,            # slow final learning-rate decay
    momentum=0.95,         # higher momentum to stabilize training
    weight_decay=0.0001,   # stronger regularization against overfitting
    optimizer="AdamW",     # converges better than SGD on CPU here
    cos_lr=True,           # cosine learning-rate schedule for fine tuning
    close_mosaic=5,        # turn off mosaic augmentation after 5 epochs
    patience=0,            # disables early stopping
    project="/home/izaskunmz/yolo/yolov8-object-detection/runs/detect",  # output root
    name="train_yolov8n",  # experiment name
    exist_ok=True,         # reuse the run directory if it already exists
)

# Launch training with the settings above.
model.train(**train_args)
|
train_yolov8s.py
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from ultralytics import YOLO

# Pretrained YOLOv8-small checkpoint.
model = YOLO("yolov8s.pt")

# Training settings; results are written under runs/detect/train_coco8.
settings = dict(
    data="/home/izaskunmz/yolo/yolov8-object-detection/datasets/coco8/data.yaml",  # dataset config
    epochs=150,           # longer schedule to improve learning
    batch=8,              # reduce further if memory is tight
    imgsz=640,            # input image size
    device='cpu',         # switch to 'cuda' if a GPU is available
    project="/home/izaskunmz/yolo/yolov8-object-detection/runs/detect",  # output root
    name="train_coco8",   # experiment name
    exist_ok=True,        # reuse the run directory if it already exists
    patience=200,         # large patience so early stopping does not trigger
    lr0=0.01,             # initial learning rate
    momentum=0.937,       # optimizer momentum
    weight_decay=0.0005,  # regularization against overfitting
)

# Launch training with the settings above.
model.train(**settings)
|
validate.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from ultralytics import YOLO

# Trained weights to validate (path must point at an existing checkpoint).
model = YOLO("/home/izaskunmz/yolo/yolov8-object-detection/runs/detect/train_coco8/weights/best.pt")

# Validation settings; artifacts (curves, confusion matrices, sample batches)
# are written under runs/val/val_coco8.
val_settings = {
    "data": "/home/izaskunmz/yolo/yolov8-object-detection/datasets/coco8/data.yaml",
    "project": "/home/izaskunmz/yolo/yolov8-object-detection/runs/val",  # results root
    "name": "val_coco8",   # experiment name
    "exist_ok": True,      # reuse the output directory if it already exists
}

# Run validation and collect the metrics object.
metrics = model.val(**val_settings)

# Print the evaluation metrics.
print(metrics)
|