Daniel Cerda Escobar committed · 108ee38
Parent(s): 7bb89fc
Update app
app.py CHANGED
@@ -1,13 +1,7 @@
-import pandas as pd
-import numpy as np
 import streamlit as st
-import random
 import sahi.utils.file
-import tempfile
-import os
 from PIL import Image
 from sahi import AutoDetectionModel
-#from utils import convert_pdf_file
 from utils import sahi_yolov8m_inference
 from streamlit_image_comparison import image_comparison
 from ultralyticsplus.hf_utils import download_from_hub
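The dropped imports (pandas, numpy, random, tempfile, os) belonged to the removed PDF-upload path; what remains is the detection stack. For orientation, the AutoDetectionModel / download_from_hub pair is typically wired into a cached loader like the get_model() referenced later in this diff. The sketch below is an assumption about that helper, not code from this commit; the hub id, confidence threshold, and device are placeholders.

```python
import streamlit as st
from sahi import AutoDetectionModel
from ultralyticsplus.hf_utils import download_from_hub


@st.cache_resource
def get_model():
    # Placeholder hub id -- the checkpoint the app actually loads is defined elsewhere in app.py.
    weights_path = download_from_hub("<hf-username>/<yolov8-checkpoint>")
    return AutoDetectionModel.from_pretrained(
        model_type="yolov8",       # SAHI wrapper around Ultralytics YOLOv8 weights
        model_path=weights_path,   # local path to the downloaded .pt file
        confidence_threshold=0.5,  # assumed default; the app tunes thresholds via sliders
        device="cpu",              # assumed Spaces CPU hardware
    )
```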
@@ -64,7 +58,7 @@ with col1:
     with st.expander('How to use it'):
         st.markdown(
             '''
-            1) Select any example diagram
+            1) Select any example diagram 👈🏻
             2) Set confidence threshold 🔎
             3) Press to perform inference 🚀
             4) Visualize model predictions 🔍
@@ -75,14 +69,7 @@ st.write('##')
 
 col1, col2, col3 = st.columns(3, gap='large')
 with col1:
-
-    # set input image by upload
-    #uploaded_file = st.file_uploader("Upload your diagram", type="pdf")
-    #if uploaded_file:
-    #    temp_dir = tempfile.mkdtemp()
-    #    path = os.path.join(temp_dir, uploaded_file.name)
-    #    with open(path, "wb") as f:
-    #        f.write(uploaded_file.getvalue())
+    st.markdown('##### Input Data')
     # set input images from examples
     def radio_func(option):
         option_to_id = {
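With the upload path gone, the app serves a fixed set of examples. A hedged sketch of how the pieces visible in this hunk (radio_func, option_to_id, and the IMAGE_TO_URL lookup used in the Preview column) typically fit together; the example names, URLs, and the format_func usage are placeholders/assumptions, not values from the repo.

```python
import streamlit as st
import sahi.utils.cv

# Placeholder examples -- the real names and URLs are defined elsewhere in app.py.
IMAGE_TO_URL = {
    'Example Diagram 1': 'https://example.com/diagram_1.png',
    'Example Diagram 2': 'https://example.com/diagram_2.png',
}

col1, col2, col3 = st.columns(3, gap='large')

with col1:
    st.markdown('##### Input Data')

    # set input images from examples
    def radio_func(option):
        # Map the display name to a short id (placeholder mapping).
        option_to_id = {'Example Diagram 1': '1', 'Example Diagram 2': '2'}
        return f"Diagram {option_to_id[option]}"

    radio = st.radio(
        'Select an example',
        options=list(IMAGE_TO_URL.keys()),
        format_func=radio_func,  # assumed use as a label formatter
    )

with col2:
    st.markdown('##### Preview')
    # Returns the selected example as a PIL image, as in the hunk below.
    image = sahi.utils.cv.read_image_as_pil(IMAGE_TO_URL[radio])
```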
@@ -98,26 +85,28 @@ with col1:
     )
 with col2:
     st.markdown('##### Preview')
-    # visualize input image
-    #if uploaded_file is not None:
-        #image_file = convert_pdf_file(path=path)
-        #image = Image.open(image_file)
-    #else:
     image = sahi.utils.cv.read_image_as_pil(IMAGE_TO_URL[radio])
     with st.container(border = True):
         st.image(image, use_column_width = True)
 
 with col3:
     st.markdown('##### Set model parameters')
-
-        label = 'Select
-        min_value
-        max_value
-        value
-        step
+    slice_size = st.slider(
+        label = 'Select Slice Size',
+        min_value=256,
+        max_value=1024,
+        value=768,
+        step=256
+    )
+    overlap_ratio = st.slider(
+        label = 'Select Overlap Ratio',
+        min_value=0.0,
+        max_value=0.5,
+        value=0.1,
+        step=0.1
     )
-
-        label = 'Select
+    postprocess_match_threshold = st.slider(
+        label = 'Select Confidence Threshold',
         min_value = 0.0,
         max_value = 1.0,
         value = 0.75,
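The three new sliders expose SAHI's tiling controls: slice size and overlap ratio decide how the diagram is cut into tiles, and the value from the 'Confidence Threshold' slider is forwarded as postprocess_match_threshold, which in SAHI governs how per-tile detections are matched and merged. As an illustration of where these values end up, this is how they map onto SAHI's documented get_sliced_prediction API (the app routes them through its own sahi_yolov8m_inference wrapper instead):

```python
from sahi.predict import get_sliced_prediction

# Illustrative mapping of the slider values onto SAHI's sliced-inference arguments.
result = get_sliced_prediction(
    image,                               # PIL image shown in the Preview column
    detection_model,                     # model returned by get_model()
    slice_height=slice_size,             # tile height in pixels (256-1024 from the slider)
    slice_width=slice_size,              # tile width in pixels
    overlap_height_ratio=overlap_ratio,  # fractional overlap between neighbouring tiles
    overlap_width_ratio=overlap_ratio,
    postprocess_match_threshold=postprocess_match_threshold,  # merge threshold for overlapping detections
)
```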
@@ -135,22 +124,22 @@ if submit:
     with st.spinner(text="Downloading model weights ... "):
         detection_model = get_model()
 
-    image_size =
+    image_size = 1024
 
     with st.spinner(text="Performing prediction ... "):
-
+        output = sahi_yolov8m_inference(
             image,
             detection_model,
             image_size=image_size,
-
-
-
-
+            slice_height=slice_size,
+            slice_width=slice_size,
+            overlap_height_ratio=overlap_ratio,
+            overlap_width_ratio=overlap_ratio,
             postprocess_match_threshold=postprocess_match_threshold
         )
 
-    st.session_state["output_1"] =
-    st.session_state["output_2"] =
+    st.session_state["output_1"] = image
+    st.session_state["output_2"] = output
 
     st.write('##')
 
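sahi_yolov8m_inference comes from the repo's utils.py, which this commit does not touch, so its body is not visible here. Below is a minimal sketch of a wrapper with the call signature used above, built only on SAHI's documented API (get_sliced_prediction and export_visuals); the actual helper may differ, and the forwarding of image_size to the model is an assumption.

```python
import os
import tempfile

from PIL import Image
from sahi.predict import get_sliced_prediction


def sahi_yolov8m_inference(
    image,
    detection_model,
    image_size=1024,
    slice_height=768,
    slice_width=768,
    overlap_height_ratio=0.1,
    overlap_width_ratio=0.1,
    postprocess_match_threshold=0.75,
):
    # Assumption: the inference resolution is forwarded to the underlying model.
    detection_model.image_size = image_size

    # Tiled (sliced) inference over the full diagram.
    result = get_sliced_prediction(
        image,
        detection_model,
        slice_height=slice_height,
        slice_width=slice_width,
        overlap_height_ratio=overlap_height_ratio,
        overlap_width_ratio=overlap_width_ratio,
        postprocess_match_threshold=postprocess_match_threshold,
    )

    # Render the predictions and hand back a PIL image for the comparison widget.
    with tempfile.TemporaryDirectory() as tmp_dir:
        result.export_visuals(export_dir=tmp_dir)
        output = Image.open(os.path.join(tmp_dir, "prediction_visual.png")).copy()
    return output
```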
@@ -163,7 +152,7 @@ with col2:
         img2=st.session_state["output_2"],
         label1='Uploaded Diagram',
         label2='Model Inference',
-        width=
+        width=768,
         starting_position=50,
         show_labels=True,
         make_responsive=True,
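For completeness, the image_comparison call being tuned above renders the stored before/after pair as a draggable comparison widget. A reconstruction of the full call under the assumption that img1 is the stored input image; only the lines shown in the hunk are certain from the diff.

```python
import streamlit as st
from streamlit_image_comparison import image_comparison

image_comparison(
    img1=st.session_state["output_1"],  # assumed: the original diagram stored after inference
    img2=st.session_state["output_2"],  # rendered model predictions
    label1='Uploaded Diagram',
    label2='Model Inference',
    width=768,                          # fixed display width set by this commit
    starting_position=50,               # comparison slider starts at the midpoint
    show_labels=True,
    make_responsive=True,
)
```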