import numpy
import sahi.predict
import sahi.utils.cv  # visualize_object_predictions lives in the cv submodule
import PyPDF4
from pdf2image import convert_from_path
from PIL import Image
TEMP_DIR = "temp"
def sahi_yolov8m_inference(
    image,
    detection_model,
    slice_height=512,
    slice_width=512,
    overlap_height_ratio=0.1,
    overlap_width_ratio=0.1,
    image_size=640,
    postprocess_match_threshold=0.5,
):
    """Run standard and sliced (SAHI) inference on one image and return both visualizations."""
    # standard inference on the full image
    detection_model.image_size = image_size
    prediction_result_1 = sahi.predict.get_prediction(
        image=image, detection_model=detection_model
    )
    visual_result_1 = sahi.utils.cv.visualize_object_predictions(
        image=numpy.array(image),
        object_prediction_list=prediction_result_1.object_prediction_list,
    )
    output_1 = Image.fromarray(visual_result_1["image"])

    # sliced inference: the image is split into overlapping tiles and the
    # per-tile predictions are merged with the given match threshold
    prediction_result_2 = sahi.predict.get_sliced_prediction(
        image=image,
        detection_model=detection_model,
        slice_height=slice_height,
        slice_width=slice_width,
        overlap_height_ratio=overlap_height_ratio,
        overlap_width_ratio=overlap_width_ratio,
        postprocess_match_threshold=postprocess_match_threshold,
    )
    visual_result_2 = sahi.utils.cv.visualize_object_predictions(
        image=numpy.array(image),
        object_prediction_list=prediction_result_2.object_prediction_list,
    )
    output_2 = Image.fromarray(visual_result_2["image"])

    return output_1, output_2
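
# Minimal usage sketch (not part of the Space UI): shows how the helper above
# could be driven directly with a SAHI AutoDetectionModel. The checkpoint
# "yolov8m.pt" and the test image "sample.jpg" are assumed local files for
# illustration only, and this assumes a sahi version with YOLOv8 support.
if __name__ == "__main__":
    from sahi import AutoDetectionModel

    # Load a YOLOv8 checkpoint through SAHI's generic model wrapper.
    model = AutoDetectionModel.from_pretrained(
        model_type="yolov8",
        model_path="yolov8m.pt",  # assumed local checkpoint
        confidence_threshold=0.5,
        device="cpu",
    )
    demo_image = Image.open("sample.jpg")  # assumed test image
    standard_vis, sliced_vis = sahi_yolov8m_inference(demo_image, model)
    standard_vis.save("standard_inference.png")
    sliced_vis.save("sliced_inference.png")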
# def convert_pdf_file(
#     path,
#     # filename=name,
#     dpi=300,
#     image_width=4961,
#     image_height=3508,
#     grayscale=True,
# ):
#     # Read the first page to determine orientation (landscape vs. portrait).
#     with open(path, 'rb') as pdf_file:
#         pdf_reader = PyPDF4.PdfFileReader(pdf_file, strict=False)
#         first_page = pdf_reader.getPage(0)
#         page_size = (first_page.mediaBox.getWidth(), first_page.mediaBox.getHeight())
#     # Rasterize with the long side matching the page orientation.
#     if page_size[0] > page_size[1]:
#         image = convert_from_path(path, dpi=dpi, size=(image_width, image_height), grayscale=grayscale)
#     else:
#         image = convert_from_path(path, dpi=dpi, size=(image_height, image_width), grayscale=grayscale)
#     return image
#     # image[0].save(f'{path}/{filename}.png', 'PNG')
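
# Hypothetical usage of convert_pdf_file if it were re-enabled (sketch only):
# "plan.pdf" is an assumed input path, not a file shipped with this Space.
# pages = convert_pdf_file("plan.pdf", dpi=300, grayscale=True)
# standard_vis, sliced_vis = sahi_yolov8m_inference(pages[0], detection_model)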