Refine GPU Resource Allocation for YOLOv11 Inference
**Summary:** This pull request optimizes GPU resource allocation in the SAHI + YOLOv11 demo by removing the `duration` parameter from the `@spaces.GPU` decorators and explicitly setting the device to `cuda:0` in the `load_yolo_model` function.
app.py CHANGED

```diff
@@ -41,7 +41,7 @@ sahi.utils.file.download_from_url(
 # Global model variable
 model = None
 
-@spaces.GPU(
+@spaces.GPU()
 def load_yolo_model(model_name, confidence_threshold=0.5):
     """
     Loads a YOLOv11 detection model.
@@ -56,12 +56,12 @@ def load_yolo_model(model_name, confidence_threshold=0.5):
     global model
     model_path = model_name
     model = AutoDetectionModel.from_pretrained(
-        model_type="ultralytics", model_path=model_path, device=
+        model_type="ultralytics", model_path=model_path, device='cuda:0',
         confidence_threshold=confidence_threshold, image_size=IMAGE_SIZE
     )
     return model
 
-@spaces.GPU(
+@spaces.GPU()
 def sahi_yolo_inference(
     image,
     yolo_model_name,
```
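For context, here is a minimal sketch of how the two decorated functions read after this change. Only the `@spaces.GPU()` form and the `device='cuda:0'` argument come from the diff above; the imports, the `IMAGE_SIZE` value, the body of `sahi_yolo_inference`, and the slicing parameters are assumptions for illustration, not the Space's actual code.

```python
# Minimal sketch under the assumptions noted above; not the full app.py from this Space.
import spaces
from sahi import AutoDetectionModel
from sahi.predict import get_sliced_prediction

IMAGE_SIZE = 640  # assumed value; the real constant is defined earlier in app.py
model = None      # global model variable, as in the diff context


@spaces.GPU()  # no duration argument: ZeroGPU uses its default allocation window
def load_yolo_model(model_name, confidence_threshold=0.5):
    """Loads a YOLOv11 detection model onto the first CUDA device."""
    global model
    model_path = model_name
    model = AutoDetectionModel.from_pretrained(
        model_type="ultralytics",
        model_path=model_path,
        device="cuda:0",  # explicit GPU placement introduced by this change
        confidence_threshold=confidence_threshold,
        image_size=IMAGE_SIZE,
    )
    return model


@spaces.GPU()  # duration argument removed here as well
def sahi_yolo_inference(image, yolo_model_name, confidence_threshold=0.5):
    # Hypothetical body: run sliced inference with SAHI and return the predictions.
    detection_model = load_yolo_model(yolo_model_name, confidence_threshold)
    result = get_sliced_prediction(
        image,
        detection_model,
        slice_height=512,          # assumed slicing parameters
        slice_width=512,
        overlap_height_ratio=0.2,
        overlap_width_ratio=0.2,
    )
    return result.object_prediction_list
```

Dropping the `duration` argument leaves the GPU allocation window at the ZeroGPU default rather than a hard-coded limit, and the explicit `device='cuda:0'` ensures SAHI loads the model onto the allocated GPU.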