import numpy
import sahi.predict
import sahi.utils.cv
from PIL import Image

TEMP_DIR = "temp"


def sahi_yolov8m_inference(
    image,
    detection_model,
    slice_height,
    slice_width,
    overlap_height_ratio,
    overlap_width_ratio,
    image_size,
):
    # Forward the requested inference resolution to the detection model.
    detection_model.image_size = image_size

    # Run sliced (tiled) inference: the image is split into overlapping
    # slices, each slice is predicted separately, and the detections are
    # merged back into full-image coordinates.
    prediction_result = sahi.predict.get_sliced_prediction(
        image=image,
        detection_model=detection_model,
        slice_height=slice_height,
        slice_width=slice_width,
        overlap_height_ratio=overlap_height_ratio,
        overlap_width_ratio=overlap_width_ratio,
    )

    # Draw the merged predictions onto a copy of the input image.
    visual_result = sahi.utils.cv.visualize_object_predictions(
        image=numpy.array(image),
        object_prediction_list=prediction_result.object_prediction_list,
        rect_th=3,
        text_size=2,
    )

    # visualize_object_predictions returns a dict; the annotated image is
    # stored under the "image" key as a numpy array.
    output = Image.fromarray(visual_result["image"])

    return output
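

# Example usage (a minimal sketch): load a YOLOv8 weight through SAHI's
# AutoDetectionModel and run the helper above on a local image. The weight
# path, image path, slice sizes, and thresholds below are placeholder
# assumptions, not values taken from this module.
if __name__ == "__main__":
    from sahi import AutoDetectionModel

    detection_model = AutoDetectionModel.from_pretrained(
        model_type="yolov8",
        model_path="yolov8m.pt",      # assumed local weight file
        confidence_threshold=0.5,     # assumed threshold
        device="cpu",                 # or "cuda:0" if a GPU is available
    )

    image = Image.open("sample.jpg")  # assumed input image
    output = sahi_yolov8m_inference(
        image,
        detection_model,
        slice_height=512,
        slice_width=512,
        overlap_height_ratio=0.2,
        overlap_width_ratio=0.2,
        image_size=640,
    )
    output.save("prediction.png")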