samuellimabraz committed
Commit b6c3e5f · Parent(s): 8d8e33d

feat: improve the interface with added charts and performance metrics

Files changed (1):
  1. app.py +278 -115

app.py CHANGED
@@ -1,6 +1,10 @@
 import cv2
 import numpy as np
+import pandas as pd
+import time
+import matplotlib.pyplot as plt
 import onnxruntime as ort
+from collections import deque
 import gradio as gr
 import os
 from huggingface_hub import hf_hub_download
@@ -11,11 +15,12 @@ FILENAME = "tune/trial_10/weights/best.onnx"
 MODEL_DIR = "model"
 MODEL_PATH = os.path.join(MODEL_DIR, "model.onnx")
 
+
 def download_model():
     """Download the model using Hugging Face Hub"""
     # Ensure model directory exists
     os.makedirs(MODEL_DIR, exist_ok=True)
-
+
     try:
         print(f"Downloading model from {REPO_ID}...")
         # Download the model file from Hugging Face Hub
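Aside from the trailing comma, this hunk is mostly whitespace reformatting of download_model(). For readers who want the download flow in isolation, here is a minimal sketch. It assumes a recent huggingface_hub release, where local_dir_use_symlinks is deprecated and can simply be omitted, and it assumes REPO_ID points at the model repository linked in the app's description (the constant itself is defined outside this hunk).

```python
# Minimal sketch of the download flow; not the committed code.
import os

from huggingface_hub import hf_hub_download

REPO_ID = "tech4humans/yolov8s-signature-detector"  # assumed from the model links in the app
FILENAME = "tune/trial_10/weights/best.onnx"        # as defined at the top of app.py
MODEL_DIR = "model"
MODEL_PATH = os.path.join(MODEL_DIR, "model.onnx")

os.makedirs(MODEL_DIR, exist_ok=True)
downloaded = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, local_dir=MODEL_DIR)
if downloaded != MODEL_PATH:
    os.replace(downloaded, MODEL_PATH)  # move to the flat path the app expects
```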
@@ -25,138 +30,179 @@ def download_model():
             local_dir=MODEL_DIR,
             local_dir_use_symlinks=False,
             force_download=True,
-            cache_dir=None
+            cache_dir=None,
         )
-
+
         # Move the file to the correct location if it's not there already
         if os.path.exists(model_path) and model_path != MODEL_PATH:
             os.rename(model_path, MODEL_PATH)
-
+
         # Remove empty directories if they exist
         empty_dir = os.path.join(MODEL_DIR, "tune")
         if os.path.exists(empty_dir):
             import shutil
+
             shutil.rmtree(empty_dir)
-
+
         print("Model downloaded successfully!")
         return MODEL_PATH
-
+
     except Exception as e:
-        print(f"Error downloading model: {str(e)}")
+        print(f"Error downloading model: {e}")
         raise e
-
+
+
 class SignatureDetector:
     def __init__(self, model_path):
         self.model_path = model_path
         self.classes = ["signature"]
         self.input_width = 640
         self.input_height = 640
-
+
         # Initialize ONNX Runtime session
-        self.session = ort.InferenceSession(MODEL_PATH, providers=["CPUExecutionProvider"])
-
+        self.session = ort.InferenceSession(
+            MODEL_PATH, providers=["CPUExecutionProvider"]
+        )
+
+        # Initialize metrics tracking
+        self.inference_times = deque(maxlen=50)  # Store last 50 inference times
+        self.total_inferences = 0
+        self.avg_inference_time = 0
+
+    def update_metrics(self, inference_time):
+        self.inference_times.append(inference_time)
+        self.total_inferences += 1
+        self.avg_inference_time = sum(self.inference_times) / len(self.inference_times)
+
+    def get_metrics(self):
+        return {
+            "times": list(self.inference_times),
+            "total_inferences": self.total_inferences,
+            "avg_time": self.avg_inference_time,
+        }
+
     def preprocess(self, img):
         # Convert PIL Image to cv2 format
         img_cv2 = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
-
+
         # Get image dimensions
         self.img_height, self.img_width = img_cv2.shape[:2]
-
+
         # Convert back to RGB for processing
         img_rgb = cv2.cvtColor(img_cv2, cv2.COLOR_BGR2RGB)
-
+
         # Resize
         img_resized = cv2.resize(img_rgb, (self.input_width, self.input_height))
-
+
         # Normalize and transpose
         image_data = np.array(img_resized) / 255.0
         image_data = np.transpose(image_data, (2, 0, 1))
         image_data = np.expand_dims(image_data, axis=0).astype(np.float32)
-
+
         return image_data, img_cv2
-
+
     def draw_detections(self, img, box, score, class_id):
         x1, y1, w, h = box
         self.color_palette = np.random.uniform(0, 255, size=(len(self.classes), 3))
         color = self.color_palette[class_id]
-
+
         cv2.rectangle(img, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2)
-
+
         label = f"{self.classes[class_id]}: {score:.2f}"
-        (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
-
+        (label_width, label_height), _ = cv2.getTextSize(
+            label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1
+        )
+
         label_x = x1
         label_y = y1 - 10 if y1 - 10 > label_height else y1 + 10
-
+
         cv2.rectangle(
             img,
             (int(label_x), int(label_y - label_height)),
             (int(label_x + label_width), int(label_y + label_height)),
             color,
-            cv2.FILLED
+            cv2.FILLED,
         )
-
-        cv2.putText(img, label, (int(label_x), int(label_y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
-
+
+        cv2.putText(
+            img,
+            label,
+            (int(label_x), int(label_y)),
+            cv2.FONT_HERSHEY_SIMPLEX,
+            0.5,
+            (0, 0, 0),
+            1,
+            cv2.LINE_AA,
+        )
+
     def postprocess(self, input_image, output, conf_thres, iou_thres):
         outputs = np.transpose(np.squeeze(output[0]))
         rows = outputs.shape[0]
-
+
         boxes = []
         scores = []
         class_ids = []
-
+
         x_factor = self.img_width / self.input_width
         y_factor = self.img_height / self.input_height
-
+
         for i in range(rows):
             classes_scores = outputs[i][4:]
             max_score = np.amax(classes_scores)
-
+
             if max_score >= conf_thres:
                 class_id = np.argmax(classes_scores)
                 x, y, w, h = outputs[i][0], outputs[i][1], outputs[i][2], outputs[i][3]
-
+
                 left = int((x - w / 2) * x_factor)
                 top = int((y - h / 2) * y_factor)
                 width = int(w * x_factor)
                 height = int(h * y_factor)
-
+
                 class_ids.append(class_id)
                 scores.append(max_score)
                 boxes.append([left, top, width, height])
-
+
         indices = cv2.dnn.NMSBoxes(boxes, scores, conf_thres, iou_thres)
-
+
         for i in indices:
             box = boxes[i]
             score = scores[i]
             class_id = class_ids[i]
             self.draw_detections(input_image, box, score, class_id)
-
+
         return cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
-
+
     def detect(self, image, conf_thres=0.25, iou_thres=0.5):
         # Preprocess the image
         img_data, original_image = self.preprocess(image)
-
+
         # Run inference
+        start_time = time.time()
         outputs = self.session.run(None, {self.session.get_inputs()[0].name: img_data})
-
+        inference_time = (time.time() - start_time) * 1000  # Convert to milliseconds
+
         # Postprocess the results
         output_image = self.postprocess(original_image, outputs, conf_thres, iou_thres)
-
+
+        self.update_metrics(inference_time)
+
+        return output_image, self.get_metrics()
+
+    def detect_example(self, image, conf_thres=0.25, iou_thres=0.5):
+        """Wrapper method for examples that returns only the image"""
+        output_image, _ = self.detect(image, conf_thres, iou_thres)
         return output_image
 
+
 def create_gradio_interface():
     # Download model if it doesn't exist
     if not os.path.exists(MODEL_PATH):
         download_model()
-
+
     # Initialize the detector
     detector = SignatureDetector(MODEL_PATH)
-
-
+
     css = """
     .custom-button {
         background-color: #b0ffb8 !important;
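The core addition in this hunk is the rolling latency tracker: a deque(maxlen=50) keeps only the 50 most recent inference times, so avg_inference_time is a moving average over that window while total_inferences counts every call. A self-contained sketch of the same pattern follows, with hypothetical names that are not part of the commit, and with time.perf_counter(), which is better suited to interval timing than the time.time() used here.

```python
# Rolling-window latency metrics, sketched in isolation (hypothetical names).
import time
from collections import deque


class LatencyTracker:
    def __init__(self, window=50):
        self.times_ms = deque(maxlen=window)  # oldest samples fall off automatically
        self.total = 0

    def record(self, ms):
        self.times_ms.append(ms)
        self.total += 1  # lifetime counter, unaffected by the window

    @property
    def avg_ms(self):
        # Moving average over the last `window` samples only
        return sum(self.times_ms) / len(self.times_ms) if self.times_ms else 0.0


tracker = LatencyTracker()
for _ in range(3):
    start = time.perf_counter()  # monotonic, unlike time.time()
    time.sleep(0.01)             # stand-in for session.run(...)
    tracker.record((time.perf_counter() - start) * 1000)

print(tracker.total, round(tracker.avg_ms, 2))
```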
@@ -165,24 +211,125 @@ def create_gradio_interface():
     .custom-button:hover {
         background-color: #b0ffb8b3 !important;
     }
+    .container {
+        max-width: 1200px !important;
+        margin: auto !important;
+    }
+    .main-container {
+        gap: 20px !important;
+    }
+    .metrics-container {
+        padding: 1.5rem !important;
+        border-radius: 0.75rem !important;
+        background-color: #1f2937 !important;
+        margin: 1rem 0 !important;
+        box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1) !important;
+    }
+    .metrics-title {
+        font-size: 1.25rem !important;
+        font-weight: 600 !important;
+        color: #1f2937 !important;
+        margin-bottom: 1rem !important;
+    }
     """
 
+    def process_image(image, conf_thres, iou_thres):
+        if image is None:
+            return None, None, None, None
+
+        output_image, metrics = detector.detect(image, conf_thres, iou_thres)
+
+        # Create plots data
+        hist_data = pd.DataFrame({"Tempo (ms)": metrics["times"]})
+        line_data = pd.DataFrame(
+            {
+                "Inferência": range(len(metrics["times"])),
+                "Tempo (ms)": metrics["times"],
+                "Média": [metrics["avg_time"]] * len(metrics["times"]),
+            }
+        )
+
+        # Limpar figuras existentes
+        plt.close("all")
+
+        # Configuração do estilo dos plots
+        plt.style.use("dark_background")
+
+        # Criar figura do histograma
+        hist_fig, hist_ax = plt.subplots(figsize=(8, 4), facecolor="#f0f0f5")
+        hist_ax.set_facecolor("#f0f0f5")
+        hist_data.hist(
+            bins=20, ax=hist_ax, color="#4F46E5", alpha=0.7, edgecolor="white"
+        )
+        hist_ax.set_title(
+            "Distribuição dos Tempos de Inferência",
+            pad=15,
+            fontsize=12,
+            color="#1f2937",
+        )
+        hist_ax.set_xlabel("Tempo (ms)", color="#374151")
+        hist_ax.set_ylabel("Frequência", color="#374151")
+        hist_ax.tick_params(colors="#4b5563")
+        hist_ax.grid(True, linestyle="--", alpha=0.3)
+
+        # Criar figura do gráfico de linha
+        line_fig, line_ax = plt.subplots(figsize=(8, 4), facecolor="#f0f0f5")
+        line_ax.set_facecolor("#f0f0f5")
+        line_data.plot(
+            x="Inferência",
+            y="Tempo (ms)",
+            ax=line_ax,
+            color="#4F46E5",
+            alpha=0.7,
+            label="Tempo",
+        )
+        line_data.plot(
+            x="Inferência",
+            y="Média",
+            ax=line_ax,
+            color="#DC2626",
+            linestyle="--",
+            label="Média",
+        )
+        line_ax.set_title(
+            "Tempo de Inferência por Execução", pad=15, fontsize=12, color="#1f2937"
+        )
+        line_ax.set_xlabel("Número da Inferência", color="#374151")
+        line_ax.set_ylabel("Tempo (ms)", color="#374151")
+        line_ax.tick_params(colors="#4b5563")
+        line_ax.grid(True, linestyle="--", alpha=0.3)
+        line_ax.legend(frameon=True, facecolor="#f0f0f5", edgecolor="none")
+
+        # Ajustar layout
+        hist_fig.tight_layout()
+        line_fig.tight_layout()
+
+        # Fechar as figuras para liberar memória
+        plt.close(hist_fig)
+        plt.close(line_fig)
+
+        return (
+            output_image,
+            gr.update(
+                value=f"Total de Inferências: {metrics['total_inferences']}",
+                container=True,
+            ),
+            hist_fig,
+            line_fig,
+        )
+
     with gr.Blocks(
-        theme = gr.themes.Soft(
-            primary_hue="indigo",
-            secondary_hue="gray",
-            neutral_hue="gray"
+        theme=gr.themes.Soft(
+            primary_hue="indigo", secondary_hue="gray", neutral_hue="gray"
         ),
-        css=css
+        css=css,
     ) as iface:
         gr.Markdown(
             """
             # Tech4Humans - Detector de Assinaturas
 
             Este sistema utiliza o modelo [**YOLOv8s**](https://huggingface.co/tech4humans/yolov8s-signature-detector), especialmente ajustado para a detecção de assinaturas manuscritas em imagens de documentos.
-            O modelo foi treinado com dados provenientes de dois conjuntos públicos — [**Tobacco800**](https://paperswithcode.com/dataset/tobacco-800) e [**signatures-xc8up**](https://universe.roboflow.com/roboflow-100/signatures-xc8up) — e inclui robustos
-            mecanismos de pré-processamento e aumento de dados para garantir alta precisão e generalização.
-
+
             Com este detector, é possível identificar assinaturas em documentos digitais com elevada precisão em tempo real, sendo ideal para
             aplicações que envolvem validação, organização e processamento de documentos.
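The new process_image() above returns matplotlib Figure objects, which is what gr.Plot renders; calling plt.close(fig) after tight_layout() only releases pyplot's global reference, while the Figure object itself stays usable. A minimal sketch of the two charts with dummy timings, using plain matplotlib calls rather than the commit's pandas wrappers:

```python
# Minimal sketch of the two metric figures, with made-up data; not the committed code.
import matplotlib

matplotlib.use("Agg")  # headless backend, as on a hosted Space
import matplotlib.pyplot as plt

times = [171.6, 165.2, 180.4, 158.9, 172.3]  # pretend inference times in ms
avg = sum(times) / len(times)

# Histogram of inference times
hist_fig, hist_ax = plt.subplots(figsize=(8, 4))
hist_ax.hist(times, bins=20)
hist_ax.set_xlabel("Tempo (ms)")
hist_ax.set_ylabel("Frequência")
hist_fig.tight_layout()

# Per-run times with the running average as a reference line
line_fig, line_ax = plt.subplots(figsize=(8, 4))
line_ax.plot(range(len(times)), times, label="Tempo")
line_ax.axhline(avg, linestyle="--", label="Média")
line_ax.set_xlabel("Inferência")
line_ax.set_ylabel("Tempo (ms)")
line_ax.legend()
line_fig.tight_layout()

plt.close(hist_fig)  # drop pyplot's reference; the Figure object remains renderable
plt.close(line_fig)  # hand hist_fig / line_fig to gr.Plot (or call fig.savefig)
```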
 
@@ -190,84 +337,100 @@ def create_gradio_interface():
         """
     )
 
-        with gr.Row():
-            with gr.Column(): # Coluna para a imagem de entrada e controles
-                input_image = gr.Image(label="Faça o upload do seu documento", type="pil")
-
-                with gr.Row(): # Linha para os botões
+        with gr.Row(equal_height=True, elem_classes="main-container"):
+            # Coluna da esquerda para controles e informações
+            with gr.Column(scale=1):
+                input_image = gr.Image(
+                    label="Faça o upload do seu documento", type="pil"
+                )
+
+                with gr.Row():
                     clear_btn = gr.ClearButton([input_image], value="Limpar")
                     submit_btn = gr.Button("Detectar", elem_classes="custom-button")
-
-                confidence_threshold = gr.Slider(
-                    minimum=0.0,
-                    maximum=1.0,
-                    value=0.25,
-                    step=0.05,
-                    label="Limiar de Confiança",
-                    info="Ajuste a pontuação mínima de confiança necessária para detecção."
-                )
-                iou_threshold = gr.Slider(
-                    minimum=0.0,
-                    maximum=1.0,
-                    value=0.5,
-                    step=0.05,
-                    label="Limiar de IoU",
-                    info="Ajuste o limiar de Interseção sobre União para Non Maximum Suppression (NMS)."
+
+                with gr.Group():
+                    confidence_threshold = gr.Slider(
+                        minimum=0.0,
+                        maximum=1.0,
+                        value=0.25,
+                        step=0.05,
+                        label="Limiar de Confiança",
+                        info="Ajuste a pontuação mínima de confiança necessária para detecção.",
+                    )
+                    iou_threshold = gr.Slider(
+                        minimum=0.0,
+                        maximum=1.0,
+                        value=0.5,
+                        step=0.05,
+                        label="Limiar de IoU",
+                        info="Ajuste o limiar de Interseção sobre União para Non Maximum Suppression (NMS).",
+                    )
+
+            with gr.Column(scale=1):
+                output_image = gr.Image(label="Resultados da Detecção")
+
+                with gr.Accordion("Exemplos", open=True):
+                    gr.Examples(
+                        examples=[
+                            ["assets/images/example_{i}.jpg".format(i=i)]
+                            for i in range(
+                                0, len(os.listdir(os.path.join("assets", "images")))
+                            )
+                        ],
+                        inputs=input_image,
+                        outputs=output_image,
+                        fn=detector.detect_example,
+                        cache_examples=True,
+                        cache_mode="lazy",
+                    )
+
+        with gr.Row(elem_classes="metrics-container"):
+            with gr.Column(scale=1):
+                total_inferences = gr.Textbox(
+                    label="Total de Inferências", show_copy_button=True, container=True
                 )
-
-            output_image = gr.Image(label="Resultados da Detecção") # Em outra coluna
-
-        clear_btn.add(output_image)
-
-        gr.Examples(
-            examples=[
-                ["assets/images/example_{i}.jpg".format(i=i)] for i in range(0, len(os.listdir(os.path.join("assets", "images"))))
-            ],
-            inputs=input_image,
-            outputs=output_image,
-            fn=detector.detect,
-            label="Exemplos",
-            cache_examples=True,
-            cache_mode='lazy'
-        )
+                hist_plot = gr.Plot(label="Distribuição dos Tempos", container=True)
 
+            with gr.Column(scale=1):
+                line_plot = gr.Plot(label="Histórico de Tempos", container=True)
 
-        submit_btn.click(
-            fn=detector.detect,
-            inputs=[input_image, confidence_threshold, iou_threshold],
-            outputs=output_image,
-        )
-
-        gr.Markdown(
-            """
-            ---
-            ## Sobre o Modelo e Resultados
+        with gr.Row(elem_classes="container"):
 
-            Este projeto utiliza o modelo YOLOv8s ajustado para detecção de assinaturas manuscritas em imagens de documentos. Ele foi treinado com dados provenientes dos conjuntos [Tobacco800](https://paperswithcode.com/dataset/tobacco-800) e [signatures-xc8up](https://universe.roboflow.com/roboflow-100/signatures-xc8up), passando por processos de pré-processamento e aumentação de dados.
+            gr.Markdown(
+                """
+                ---
+                ## Sobre o Projeto
 
-            ### Principais Métricas:
-            - **Precisão (Precision):** 94,74%
-            - **Revocação (Recall):** 89,72%
-            - **mAP@50:** 94,50%
-            - **mAP@50-95:** 67,35%
-            - **Tempo de Inferência (CPU):** 171,56 ms
+                Este projeto utiliza o modelo YOLOv8s ajustado para detecção de assinaturas manuscritas em imagens de documentos. Ele foi treinado com dados provenientes dos conjuntos [Tobacco800](https://paperswithcode.com/dataset/tobacco-800) e [signatures-xc8up](https://universe.roboflow.com/roboflow-100/signatures-xc8up), passando por processos de pré-processamento e aumentação de dados.
 
-            O processo completo de treinamento, ajuste de hiperparâmetros, e avaliação do modelo pode ser consultado em detalhes no repositório abaixo.
+                ### Principais Métricas:
+                - **Precisão (Precision):** 94,74%
+                - **Revocação (Recall):** 89,72%
+                - **mAP@50:** 94,50%
+                - **mAP@50-95:** 67,35%
+                - **Tempo de Inferência (CPU):** 171,56 ms
 
-            [Leia o README completo no Hugging Face Models](https://huggingface.co/tech4humans/yolov8s-signature-detector)
+                O processo completo de treinamento, ajuste de hiperparâmetros, e avaliação do modelo pode ser consultado em detalhes no repositório abaixo.
 
-            ---
-            """
-        )
+                [Leia o README completo no Hugging Face Models](https://huggingface.co/tech4humans/yolov8s-signature-detector)
 
-        gr.Markdown(
-            """
-            **Desenvolvido por [Tech4Humans](https://www.tech4h.com.br/)** | **Modelo:** [YOLOv8s](https://huggingface.co/tech4humans/yolov8s-signature-detector) | **Datasets:** [Tobacco800](https://paperswithcode.com/dataset/tobacco-800), [signatures-xc8up](https://universe.roboflow.com/roboflow-100/signatures-xc8up)
-            """
+                ---
+
+                **Desenvolvido por [Tech4Humans](https://www.tech4h.com.br/)** | **Modelo:** [YOLOv8s](https://huggingface.co/tech4humans/yolov8s-signature-detector) | **Datasets:** [Tobacco800](https://paperswithcode.com/dataset/tobacco-800), [signatures-xc8up](https://universe.roboflow.com/roboflow-100/signatures-xc8up)
+                """
+            )
+
+        clear_btn.add([output_image, total_inferences, hist_plot, line_plot])
+
+        submit_btn.click(
+            fn=process_image,
+            inputs=[input_image, confidence_threshold, iou_threshold],
+            outputs=[output_image, total_inferences, hist_plot, line_plot],
         )
-
+
     return iface
 
+
 if __name__ == "__main__":
     iface = create_gradio_interface()
-    iface.launch()
+    iface.launch()
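The wiring change in this last hunk is what makes the metrics visible: submit_btn.click now fans a single callback out to four components (image, counter, and two plots), and the ClearButton resets those same four. The skeleton of that pattern, as a hypothetical toy app assuming a recent Gradio release, not the commit's code:

```python
# Toy sketch of one-callback-to-many-outputs wiring in Gradio (hypothetical app).
import gradio as gr


def run(text):
    # A single function returns a tuple; Gradio maps it onto the outputs list in order.
    return text, f"length = {len(text or '')}"


with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out_echo = gr.Textbox(label="Echo")
    out_info = gr.Textbox(label="Info")
    btn = gr.Button("Run")
    # ClearButton resets every component it is given, like clear_btn.add([...]) above.
    gr.ClearButton([inp, out_echo, out_info], value="Clear")
    btn.click(fn=run, inputs=inp, outputs=[out_echo, out_info])

if __name__ == "__main__":
    demo.launch()
```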
 