Daniel Cerda Escobar committed
Commit · 7bb89fc · Parent(s): 55018fa

Bugs Fixed

Files changed:
- requirements.txt +2 -4
- utils.py +8 -40
requirements.txt CHANGED

@@ -1,7 +1,5 @@
 sahi==0.11.14
 streamlit-image-comparison==0.0.4
-streamlit
+streamlit
 ultralyticsplus
-
-pdf2image
-PyPDF4
+
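This commit drops the PDF stack (pdf2image, PyPDF4) from the dependencies. As a quick sanity check, the sketch below simply imports the remaining packages; it is illustrative and not part of this commit, and it assumes streamlit-image-comparison is importable under the module name streamlit_image_comparison.

# Illustrative check only: confirm the trimmed dependency set still imports
# cleanly after removing pdf2image and PyPDF4.
import importlib

for module_name in ("sahi", "streamlit", "streamlit_image_comparison", "ultralyticsplus"):
    importlib.import_module(module_name)
    print(f"{module_name}: import OK")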
utils.py CHANGED

@@ -1,8 +1,6 @@
 import numpy
 import sahi.predict
 import sahi.utils
-import PyPDF4
-from pdf2image import convert_from_path
 from PIL import Image
 
 TEMP_DIR = "temp"
@@ -15,55 +13,25 @@ def sahi_yolov8m_inference(
     slice_width=512,
     overlap_height_ratio=0.1,
     overlap_width_ratio=0.1,
-    image_size=
-    postprocess_match_threshold=0.
+    image_size=1024,
+    postprocess_match_threshold=0.75,
 ):
-
-    # standard inference
-    detection_model.image_size = image_size
-    prediction_result_1 = sahi.predict.get_prediction(
-        image=image, detection_model=detection_model
-    )
-    visual_result_1 = sahi.utils.cv.visualize_object_predictions(
-        image=numpy.array(image),
-        object_prediction_list=prediction_result_1.object_prediction_list,
-    )
-    output_1 = Image.fromarray(visual_result_1["image"])
-
     # sliced inference
-
+    prediction_result = sahi.predict.get_sliced_prediction(
         image=image,
         detection_model=detection_model,
+        image_size = image_size,
         slice_height=slice_height,
         slice_width=slice_width,
         overlap_height_ratio=overlap_height_ratio,
         overlap_width_ratio=overlap_width_ratio,
         postprocess_match_threshold=postprocess_match_threshold,
     )
-
+    visual_result = sahi.utils.cv.visualize_object_predictions(
         image=numpy.array(image),
-        object_prediction_list=
+        object_prediction_list=prediction_result.object_prediction_list,
     )
 
-
-
-    return output_1, output_2
+    output = Image.fromarray(visual_result["image"])
 
-
-# path,
-# #filename=name,
-# dpi=300,
-# image_width=4961,
-# image_heigth=3508,
-# grayscale=True,
-# ):
-# with open(path, 'rb') as pdf_file:
-#     pdf_reader = PyPDF4.PdfFileReader(pdf_file, strict=False)
-#     first_page = pdf_reader.getPage(0)
-#     page_size = (first_page.mediaBox.getWidth(), first_page.mediaBox.getHeight())
-#     if page_size[0] > page_size[1]:
-#         image = convert_from_path(path, dpi=dpi, size=(image_width,image_heigth), grayscale=grayscale)
-#     else:
-#         image = convert_from_path(path, dpi=dpi, size=(image_heigth,image_width), grayscale=grayscale)
-#     return image
-#     image[0].save(f'{path}/{filename}.png', 'PNG')
+    return output
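For reference, here is a minimal usage sketch of the updated helper after this commit. It assumes the function's leading parameters are image and detection_model (they sit above the changed hunk and are not shown in this diff), that the detection model is loaded through sahi's AutoDetectionModel wrapper around a YOLOv8 checkpoint, and that a local test image exists; the checkpoint path, confidence threshold, and file names are illustrative, not taken from this repository.

# Minimal usage sketch under the assumptions stated above.
from PIL import Image
from sahi import AutoDetectionModel

from utils import sahi_yolov8m_inference

# Hypothetical checkpoint and settings; the actual app wires these up elsewhere.
detection_model = AutoDetectionModel.from_pretrained(
    model_type="yolov8",
    model_path="yolov8m.pt",
    confidence_threshold=0.5,
    device="cpu",
)

image = Image.open("sample.jpg")  # assumed local test image

# After this commit only sliced inference runs, and a single annotated
# PIL image is returned (previously the helper returned two images).
output = sahi_yolov8m_inference(
    image,
    detection_model,
    slice_height=512,
    slice_width=512,
    overlap_height_ratio=0.1,
    overlap_width_ratio=0.1,
    image_size=1024,
    postprocess_match_threshold=0.75,
)
output.save("sample_annotated.png")

Passing image_size through to the sliced-prediction call mirrors the new code path; whether that argument is honored depends on the installed sahi version.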