# Ultralytics YOLO 🚀, AGPL-3.0 license

from itertools import product
from pathlib import Path

import pytest
import torch

from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE, MODEL, SOURCE
from ultralytics import YOLO
from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
from ultralytics.utils import ASSETS, WEIGHTS_DIR


def test_checks():
"""Validate CUDA settings against torch CUDA functions."""
assert torch.cuda.is_available() == CUDA_IS_AVAILABLE
    assert torch.cuda.device_count() == CUDA_DEVICE_COUNT


@pytest.mark.slow
@pytest.mark.skipif(True, reason="CUDA export tests disabled pending additional Ultralytics GPU server availability")
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
@pytest.mark.parametrize(
"task, dynamic, int8, half, batch",
[ # generate all combinations but exclude those where both int8 and half are True
(task, dynamic, int8, half, batch)
        # NOTE: full matrix below is commented out; tests reduced while GPU CI runner utilization is high
# for task, dynamic, int8, half, batch in product(TASKS, [True, False], [True, False], [True, False], [1, 2])
for task, dynamic, int8, half, batch in product(TASKS, [True], [True], [False], [2])
if not (int8 and half) # exclude cases where both int8 and half are True
],
)
def test_export_engine_matrix(task, dynamic, int8, half, batch):
"""Test YOLO model export to TensorRT format for various configurations and run inference."""
file = YOLO(TASK2MODEL[task]).export(
format="engine",
imgsz=32,
dynamic=dynamic,
int8=int8,
half=half,
batch=batch,
data=TASK2DATA[task],
        workspace=1,  # limit TensorRT workspace to 1 GB to reduce resource usage during testing
simplify=True, # use 'onnxslim'
)
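    # a dynamic engine accepts input sizes other than the export imgsz, so inference runs at 64 when dynamic=True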
YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32) # exported model inference
Path(file).unlink() # cleanup
    if int8:
        Path(file).with_suffix(".cache").unlink()  # cleanup INT8 cache


@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_train():
"""Test model training on a minimal dataset using available CUDA devices."""
device = 0 if CUDA_DEVICE_COUNT == 1 else [0, 1]
    YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device)  # requires imgsz>=64


@pytest.mark.slow
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_predict_multiple_devices():
"""Validate model prediction consistency across CPU and CUDA devices."""
model = YOLO("yolov8n.pt")
model = model.cpu()
assert str(model.device) == "cpu"
_ = model(SOURCE) # CPU inference
assert str(model.device) == "cpu"
model = model.to("cuda:0")
assert str(model.device) == "cuda:0"
_ = model(SOURCE) # CUDA inference
assert str(model.device) == "cuda:0"
model = model.cpu()
assert str(model.device) == "cpu"
_ = model(SOURCE) # CPU inference
assert str(model.device) == "cpu"
model = model.cuda()
assert str(model.device) == "cuda:0"
_ = model(SOURCE) # CUDA inference
    assert str(model.device) == "cuda:0"


@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_autobatch():
"""Check optimal batch size for YOLO model training using autobatch utility."""
from ultralytics.utils.autobatch import check_train_batch_size
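
    # estimate an optimal training batch size from available CUDA memory at the given imgsz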
    check_train_batch_size(YOLO(MODEL).model.cuda(), imgsz=128, amp=True)


@pytest.mark.slow
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_utils_benchmarks():
"""Profile YOLO models for performance benchmarks."""
    from ultralytics.utils.benchmarks import ProfileModels

# Pre-export a dynamic engine model to use dynamic inference
YOLO(MODEL).export(format="engine", imgsz=32, dynamic=True, batch=1)
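    # short warmup and timed runs keep the benchmark fast on CI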
    ProfileModels([MODEL], imgsz=32, half=False, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()


@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_predict_sam():
"""Test SAM model predictions using different prompts, including bounding boxes and point annotations."""
from ultralytics import SAM
    from ultralytics.models.sam import Predictor as SAMPredictor

    # Load a model
    model = SAM(WEIGHTS_DIR / "sam_b.pt")

    # Display model information (optional)
    model.info()

    # Run inference
    model(SOURCE, device=0)

    # Run inference with bboxes prompt
    model(SOURCE, bboxes=[439, 437, 524, 709], device=0)

    # Run inference with points prompt
    model(ASSETS / "zidane.jpg", points=[900, 370], labels=[1], device=0)

    # Create SAMPredictor
    overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024, model=WEIGHTS_DIR / "mobile_sam.pt")
    predictor = SAMPredictor(overrides=overrides)

# Set image
predictor.set_image(ASSETS / "zidane.jpg") # set with image file
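    # set_image precomputes the image embedding once, so the prompt calls below would reuse it cheaply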
# predictor(bboxes=[439, 437, 524, 709])
    # predictor(points=[900, 370], labels=[1])

# Reset image
predictor.reset_image()