Daniel Cerda Escobar committed
Commit 43c365e · 1 Parent(s): a16a11f

Update app files

Files changed (2)
  1. app.py +4 -4
  2. utils.py +3 -3
app.py CHANGED

@@ -95,7 +95,7 @@ with col3:
         label = 'Slice Size',
         min_value=256,
         max_value=1024,
-        value=768,
+        value=512,
         step=256
     )
     overlap_ratio = st.slider(
@@ -109,8 +109,8 @@ with col3:
         label = 'Confidence Threshold',
         min_value = 0.0,
         max_value = 1.0,
-        value = 0.75,
-        step = 0.25
+        value = 0.8,
+        step = 0.1
     )

     st.write('##')
@@ -152,7 +152,7 @@ with col2:
         img2=st.session_state["output_2"],
         label1='Uploaded Diagram',
         label2='Model Inference',
-        width=800,
+        width=1280,
         starting_position=50,
         show_labels=True,
         make_responsive=True,
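For context, here is a minimal sketch of how these controls could read after this commit. Only the values and keyword arguments visible in the diff above come from the commit; the column layout, the slider variable names, and the `st.session_state["output_1"]` key are assumptions for illustration.

```python
import streamlit as st
from streamlit_image_comparison import image_comparison

col1, col2, col3 = st.columns(3)  # assumed layout; only col2/col3 appear in the hunk headers

with col3:
    # SAHI slice size: default lowered from 768 to 512 in this commit
    slice_size = st.slider(
        label='Slice Size',
        min_value=256,
        max_value=1024,
        value=512,
        step=256,
    )
    # Confidence threshold: default raised to 0.8, step tightened to 0.1
    confidence_threshold = st.slider(
        label='Confidence Threshold',
        min_value=0.0,
        max_value=1.0,
        value=0.8,
        step=0.1,
    )

with col2:
    # Side-by-side comparison widget widened from 800 to 1280 px
    if "output_1" in st.session_state and "output_2" in st.session_state:
        image_comparison(
            img1=st.session_state["output_1"],   # assumed key for the uploaded diagram
            img2=st.session_state["output_2"],
            label1='Uploaded Diagram',
            label2='Model Inference',
            width=1280,
            starting_position=50,
            show_labels=True,
            make_responsive=True,
        )
```

Stepping the confidence slider by 0.1 instead of 0.25 gives finer control around the new 0.8 default.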
utils.py CHANGED

@@ -9,8 +9,8 @@ TEMP_DIR = "temp"
 def sahi_yolov8m_inference(
     image,
     detection_model,
-    slice_height=768,
-    slice_width=768,
+    slice_height=512,
+    slice_width=512,
     overlap_height_ratio=0.1,
     overlap_width_ratio=0.1,
     image_size=1280,
@@ -31,7 +31,7 @@ def sahi_yolov8m_inference(
         image=numpy.array(image),
         object_prediction_list=prediction_result.object_prediction_list,
         rect_th=3,
-        text_size=4
+        text_size=3
     )

     output = Image.fromarray(visual_result["image"])
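Below is a minimal sketch of the updated `sahi_yolov8m_inference` with the new defaults, assuming the elided body uses the standard SAHI calls (`get_sliced_prediction` followed by `visualize_object_predictions`); only the lines shown in the diff are confirmed by the commit.

```python
import numpy
from PIL import Image
from sahi.predict import get_sliced_prediction
from sahi.utils.cv import visualize_object_predictions

def sahi_yolov8m_inference(
    image,
    detection_model,
    slice_height=512,          # was 768
    slice_width=512,           # was 768
    overlap_height_ratio=0.1,
    overlap_width_ratio=0.1,
    image_size=1280,           # kept from the original signature; unused in this sketch
):
    # Tiled (sliced) inference over the full-resolution diagram (assumed call; not shown in the diff)
    prediction_result = get_sliced_prediction(
        image=image,
        detection_model=detection_model,
        slice_height=slice_height,
        slice_width=slice_width,
        overlap_height_ratio=overlap_height_ratio,
        overlap_width_ratio=overlap_width_ratio,
    )
    # Draw detections; text_size reduced from 4 to 3 so labels fit the smaller 512-px slices
    visual_result = visualize_object_predictions(
        image=numpy.array(image),
        object_prediction_list=prediction_result.object_prediction_list,
        rect_th=3,
        text_size=3,
    )
    output = Image.fromarray(visual_result["image"])
    return output
```

Halving the slice size quarters the area covered by each tile, so more tiles are processed per image; the 0.1 overlap ratios are unchanged.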