Daniel Cerda Escobar committed
Commit · 66ed482 · 1 Parent(s): 386b975
Update files

Files changed:
- app.py +13 -4
- requirements.txt +1 -1
- utils.py +7 -7
app.py
CHANGED
@@ -58,8 +58,8 @@ with col1:
     with st.expander('How to use it'):
         st.markdown(
             '''
-            1)
-            2) Set
+            1) Upload or select any example diagram
+            2) Set model parameters
             3) Press to perform inference
             4) Visualize model predictions
             '''

@@ -69,7 +69,11 @@ st.write('##')
 
 col1, col2, col3 = st.columns(3, gap='large')
 with col1:
-    st.markdown('##### Input
+    st.markdown('##### Set Input Image')
+    # set input image by upload
+    image_file = st.file_uploader(
+        'Upload your P&ID', type = ['jpg','jpeg','png']
+    )
     # set input images from examples
     def radio_func(option):
         option_to_id = {

@@ -84,12 +88,17 @@ with col1:
         format_func = radio_func,
     )
 with col2:
+    # visualize input image
+    if image_file is not None:
+        image = Image.open(image_file)
+    else:
+        image = sahi.utils.cv.read_image_as_pil(IMAGE_TO_URL[radio])
     st.markdown('##### Preview')
-    image = sahi.utils.cv.read_image_as_pil(IMAGE_TO_URL[radio])
     with st.container(border = True):
         st.image(image, use_column_width = True)
 
 with col3:
+    # set SAHI parameters
     st.markdown('##### Set model parameters')
     slice_size = st.slider(
         label = 'Slice Size',
requirements.txt
CHANGED
@@ -1,5 +1,5 @@
 sahi==0.11.14
-streamlit-image-comparison
+streamlit-image-comparison
 streamlit
 ultralyticsplus
 
utils.py
CHANGED
@@ -9,12 +9,12 @@ TEMP_DIR = "temp"
 def sahi_yolov8m_inference(
     image,
     detection_model,
-    slice_height
-    slice_width
-    overlap_height_ratio
-    overlap_width_ratio
-    image_size
-    postprocess_match_threshold
+    slice_height,
+    slice_width,
+    overlap_height_ratio,
+    overlap_width_ratio,
+    image_size,
+    postprocess_match_threshold,
 ):
     # sliced inference
     detection_model.image_size = image_size

@@ -31,7 +31,7 @@ def sahi_yolov8m_inference(
     image=numpy.array(image),
     object_prediction_list=prediction_result.object_prediction_list,
     rect_th=3,
-    text_size=
+    text_size=2
 )
 
 output = Image.fromarray(visual_result["image"])