mlbench123 committed · verified
Commit 4aff382 · 1 Parent(s): f399eae

Update app.py

Files changed (1)
  1. app.py +778 -223
app.py CHANGED
@@ -18,91 +18,176 @@ from scalingtestupdated import calculate_scaling_factor
18
  from scipy.interpolate import splprep, splev
19
  from scipy.ndimage import gaussian_filter1d
20
  import json
 
 
 
 
21
 
22
- # Language translations
 
 
 
 
23
  TRANSLATIONS = {
24
  "english": {
25
  "input_image": "Input Image",
26
- "offset_value": "Offset value for Mask(mm)",
27
- "coin_diameter": "Diameter of reference coin(mm). Adjust according to coin.",
 
 
28
  "output_image": "Output Image",
29
  "outlines": "Outlines of Objects",
30
  "dxf_file": "DXF file",
31
  "mask": "Mask",
 
 
32
  "scaling_factor": "Scaling Factor(mm)",
33
  "scaling_placeholder": "Every pixel is equal to mentioned number in millimeters",
34
  "language_selector": "Select Language",
35
  },
36
  "dutch": {
37
  "input_image": "Invoer Afbeelding",
38
- "offset_value": "Offset waarde voor Masker(mm)",
39
- "coin_diameter": "Diameter van referentiemunt(mm). Pas aan volgens munt.",
 
 
40
  "output_image": "Uitvoer Afbeelding",
41
  "outlines": "Contouren van Objecten",
42
  "dxf_file": "DXF bestand",
43
  "mask": "Masker",
 
 
44
  "scaling_factor": "Schalingsfactor(mm)",
45
  "scaling_placeholder": "Elke pixel is gelijk aan genoemd aantal in millimeters",
46
  "language_selector": "Selecteer Taal",
47
  }
48
  }
49
 
50
-
51
- birefnet = AutoModelForImageSegmentation.from_pretrained(
52
- "zhengpeng7/BiRefNet", trust_remote_code=True
53
- )
54
-
55
- device = "cpu"
56
- torch.set_float32_matmul_precision(["high", "highest"][0])
57
-
58
- birefnet.to(device)
59
- birefnet.eval()
60
- transform_image = transforms.Compose(
61
- [
62
- transforms.Resize((1024, 1024)),
63
- transforms.ToTensor(),
64
- transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
65
- ]
66
- )
 
 
67
 
68
  def remove_bg(image: np.ndarray) -> np.ndarray:
 
 
69
 
70
- image = Image.fromarray(image)
71
- input_images = transform_image(image).unsqueeze(0).to("cpu")
72
-
73
- # Prediction
74
- with torch.no_grad():
75
- preds = birefnet(input_images)[-1].sigmoid().cpu()
76
- pred = preds[0].squeeze()
77
 
78
- # Show Results
79
- pred_pil: Image = transforms.ToPILImage()(pred)
80
- print(pred_pil)
81
- # Scale proportionally with max length to 1024 for faster showing
82
- scale_ratio = 1024 / max(image.size)
83
- scaled_size = (int(image.size[0] * scale_ratio), int(image.size[1] * scale_ratio))
84
- print(f"scaled size {scaled_size}")
 
 
85
 
86
- return np.array(pred_pil.resize(scaled_size))
 
87
 
88
  def make_square(img: np.ndarray):
89
- # Get dimensions
90
  height, width = img.shape[:2]
91
-
92
- # Find the larger dimension
93
  max_dim = max(height, width)
94
-
95
- # Calculate padding
96
  pad_height = (max_dim - height) // 2
97
  pad_width = (max_dim - width) // 2
98
-
99
- # Handle odd dimensions
100
  pad_height_extra = max_dim - height - 2 * pad_height
101
  pad_width_extra = max_dim - width - 2 * pad_width
102
-
103
- # Create padding with edge colors
104
  if len(img.shape) == 3: # Color image
105
- # Pad the image
106
  padded = np.pad(
107
  img,
108
  (
@@ -121,9 +206,41 @@ def make_square(img: np.ndarray):
121
  ),
122
  mode="edge",
123
  )
124
-
125
  return padded
126
 
 
 
 
 
127
  def exclude_scaling_box(
128
  image: np.ndarray,
129
  bbox: np.ndarray,
@@ -131,22 +248,18 @@ def exclude_scaling_box(
131
  processed_size: tuple,
132
  expansion_factor: float = 1.2,
133
  ) -> np.ndarray:
134
- # Unpack the bounding box
135
  x_min, y_min, x_max, y_max = map(int, bbox)
136
-
137
- # Calculate scaling factors
138
- scale_x = processed_size[1] / orig_size[1] # Width scale
139
- scale_y = processed_size[0] / orig_size[0] # Height scale
140
-
141
- # Adjust bounding box coordinates
142
  x_min = int(x_min * scale_x)
143
  x_max = int(x_max * scale_x)
144
  y_min = int(y_min * scale_y)
145
  y_max = int(y_max * scale_y)
146
-
147
- # Calculate expanded box coordinates
148
  box_width = x_max - x_min
149
  box_height = y_max - y_min
 
150
  expanded_x_min = max(0, int(x_min - (expansion_factor - 1) * box_width / 2))
151
  expanded_x_max = min(
152
  image.shape[1], int(x_max + (expansion_factor - 1) * box_width / 2)
@@ -155,245 +268,639 @@ def exclude_scaling_box(
155
  expanded_y_max = min(
156
  image.shape[0], int(y_max + (expansion_factor - 1) * box_height / 2)
157
  )
158
-
159
- # Black out the expanded region
160
  image[expanded_y_min:expanded_y_max, expanded_x_min:expanded_x_max] = 0
161
-
162
  return image
163
 
164
- def resample_contour(contour):
165
- # Get all the parameters at the start:
166
- num_points = 1000
167
- smoothing_factor = 5
168
- spline_degree = 3 # Typically k=3 for cubic spline
169
 
170
- smoothed_x_sigma = 1
171
- smoothed_y_sigma = 1
172
 
173
- # Ensure contour has enough points
174
- if len(contour) < spline_degree + 1:
175
- raise ValueError(f"Contour must have at least {spline_degree + 1} points, but has {len(contour)} points.")
176
 
177
- contour = contour[:, 0, :]
178
 
179
- tck, _ = splprep([contour[:, 0], contour[:, 1]], s=smoothing_factor)
180
- u = np.linspace(0, 1, num_points)
181
- resampled_points = splev(u, tck)
182
 
183
- smoothed_x = gaussian_filter1d(resampled_points[0], sigma=smoothed_x_sigma)
184
- smoothed_y = gaussian_filter1d(resampled_points[1], sigma=smoothed_y_sigma)
185
 
186
- return np.array([smoothed_x, smoothed_y]).T
 
 
 
187
 
 
 
 
188
 
 
 
 
189
 
190
- def save_dxf_spline(inflated_contours, scaling_factor, height):
191
- degree = 3
192
- closed = True
 
 
 
 
193
 
194
- # Create a new DXF document with millimeters as the unit
 
 
 
 
195
  doc = ezdxf.new(units=ezdxf.units.MM)
196
- doc.units = ezdxf.units.MM # Ensure units are millimeters
197
- doc.header["$INSUNITS"] = ezdxf.units.MM # Set insertion units to millimeters
198
-
199
  msp = doc.modelspace()
 
 
200
 
201
  for contour in inflated_contours:
202
  try:
203
- resampled_contour = resample_contour(contour)
204
- points = [
205
- (x * scaling_factor, (height - y) * scaling_factor)
206
- for x, y in resampled_contour
207
- ]
208
- if len(points) >= 3:
209
- if np.linalg.norm(np.array(points[0]) - np.array(points[-1])) > 1e-2:
210
- points.append(points[0])
 
 
 
 
211
 
212
- spline = msp.add_spline(points, degree=degree)
213
- spline.closed = closed
 
 
 
214
 
215
  except ValueError as e:
216
- print(f"Skipping contour: {e}")
217
 
218
  dxf_filepath = os.path.join("./outputs", "out.dxf")
219
  doc.saveas(dxf_filepath)
 
220
 
221
- return dxf_filepath
222
 
223
 
224
- def extract_outlines(binary_image: np.ndarray) -> np.ndarray:
225
- """
226
- Extracts and draws the outlines of masks from a binary image.
227
- Args:
228
- binary_image: Grayscale binary image where white represents masks and black is the background.
229
- Returns:
230
- Image with outlines drawn.
231
- """
232
- # Detect contours from the binary image
233
- contours, _ = cv2.findContours(
234
- binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
235
- )
236
 
237
- outline_image = np.zeros_like(binary_image)
238
 
239
- # Draw the contours on the blank image
240
- cv2.drawContours(
241
- outline_image, contours, -1, (255), thickness=1
242
- ) # White color for outlines
243
 
244
- return cv2.bitwise_not(outline_image), contours
245
 
246
- def to_dxf(contours):
247
- # Create a new DXF document with millimeters as the unit
248
- doc = ezdxf.new(units=ezdxf.units.MM)
249
- doc.units = ezdxf.units.MM # Ensure units are millimeters
250
- doc.header["$INSUNITS"] = ezdxf.units.MM # Set insertion units to millimeters)
251
- msp = doc.modelspace()
252
 
 
 
 
 
253
  try:
254
- for contour in contours:
255
- points = [(point[0][0], point[0][1]) for point in contour]
256
- msp.add_lwpolyline(points, close=True) # Add a polyline for each contour
 
 
 
 
257
  except Exception as e:
258
- raise gr.Error(f"Unable to generate DXF: {e}")
 
 
259
 
260
- output_path = "./outputs/out.dxf"
261
- doc.saveas(output_path)
262
- return output_path
263
 
264
- def smooth_contours(contour):
265
- epsilon = 0.01 * cv2.arcLength(contour, True) # Adjust factor (e.g., 0.01)
266
- return cv2.approxPolyDP(contour, epsilon, True)
267
 
268
 
269
- def scale_image(image: np.ndarray, scale_factor: float) -> np.ndarray:
270
- """
271
- Resize image by scaling both width and height by the same factor.
272
- Args:
273
- image: Input numpy image
274
- scale_factor: Factor to scale the image (e.g., 0.5 for half size, 2 for double size)
275
- Returns:
276
- np.ndarray: Resized image
277
- """
278
- if scale_factor <= 0:
279
- raise ValueError("Scale factor must be positive")
280
 
281
- current_height, current_width = image.shape[:2]
282
 
283
- # Calculate new dimensions
284
- new_width = int(current_width * scale_factor)
285
- new_height = int(current_height * scale_factor)
286
 
287
- # Choose interpolation method based on whether we're scaling up or down
288
- interpolation = cv2.INTER_AREA if scale_factor < 1 else cv2.INTER_CUBIC
289
 
290
- # Resize image
291
- resized_image = cv2.resize(
292
- image, (new_width, new_height), interpolation=interpolation
 
293
  )
294
 
295
- return resized_image
296
 
297
- def detect_reference_square(img) -> np.ndarray:
298
- box_detector = YOLO("./best1.pt")
299
- res = box_detector.predict(img, conf=0.05)
300
- del box_detector
301
- return save_one_box(res[0].cpu().boxes.xyxy, res[0].orig_img, save=False), res[
302
- 0
303
- ].cpu().boxes.xyxy[0]
304
 
305
 
306
- def resize_img(img: np.ndarray, resize_dim):
307
- return np.array(Image.fromarray(img).resize(resize_dim))
308
 
309
 
310
- def predict(image, offset, coin_size_mm):
 
 
 
 
311
 
312
  if offset < 0:
313
  raise gr.Error("Offset Value Can't be negative")
314
 
315
  try:
316
  reference_obj_img, scaling_box_coords = detect_reference_square(image)
317
- except:
318
- raise gr.Error("Unable to detect the COIN. Please try again with different magnification.")
 
 
 
 
319
 
320
  reference_obj_img = make_square(reference_obj_img)
321
-
322
- reference_square_mask = remove_bg(reference_obj_img)
323
-
324
- reference_square_mask = resize_img(reference_square_mask, (reference_obj_img.shape[1], reference_obj_img.shape[0]))
325
 
326
  try:
327
- scaling_factor= calculate_scaling_factor(
328
  target_image=reference_square_mask,
329
- reference_obj_size_mm = coin_size_mm,
330
  feature_detector="ORB",
331
  )
332
  except Exception as e:
333
  scaling_factor = None
334
- print(f"Error calculating scaling factor: {e}")
335
 
336
- # Default to a scaling factor if calculation fails
337
- if scaling_factor is None or scaling_factor == 0:
338
- scaling_factor = 0.07
339
- print("Using default scaling factor due to calculation error")
340
 
 
341
  orig_size = image.shape[:2]
342
  objects_mask = remove_bg(image)
343
  processed_size = objects_mask.shape[:2]
344
 
345
- objects_mask = exclude_scaling_box(
346
- objects_mask,
347
- scaling_box_coords,
348
- orig_size,
349
- processed_size,
350
- expansion_factor=1.2,
351
- )
 
 
 
 
352
  objects_mask = resize_img(objects_mask, (image.shape[1], image.shape[0]))
 
 
 
 
353
 
354
- # Ensure offset_inches is valid
355
- if scaling_factor != 0:
356
- offset_pixels = (float(offset) / float(scaling_factor)) * 2 + 1
357
- else:
358
- offset_pixels = 1 # Default value in case of invalid scaling factor
 
 
 
359
 
360
- dilated_mask = cv2.dilate(objects_mask, np.ones((int(offset_pixels), int(offset_pixels)), np.uint8))
361
 
362
- Image.fromarray(dilated_mask).save("./outputs/scaled_mask_new.jpg")
363
- outlines, contours = extract_outlines(dilated_mask)
364
- shrunked_img_contours = cv2.drawContours(image, contours, -1, (0, 0, 255), thickness=2)
365
- dxf = save_dxf_spline(contours, scaling_factor, processed_size[0])
366
- # dxf = to_dxf(contours)
 
 
 
 
367
 
368
  return (
369
- shrunked_img_contours,
370
- outlines,
371
- dxf,
372
- dilated_mask,
373
- scaling_factor,
 
 
 
 
374
  )
 
 
 
 
375
 
376
  def update_interface(language):
377
- """Updates the interface labels based on selected language"""
378
  return [
379
  gr.Image(label=TRANSLATIONS[language]["input_image"], type="numpy"),
380
- gr.Number(label=TRANSLATIONS[language]["offset_value"], value=0.15),
381
- gr.Number(label=TRANSLATIONS[language]["coin_diameter"], value=20),
 
 
382
  gr.Image(label=TRANSLATIONS[language]["output_image"]),
383
  gr.Image(label=TRANSLATIONS[language]["outlines"]),
384
  gr.File(label=TRANSLATIONS[language]["dxf_file"]),
385
  gr.Image(label=TRANSLATIONS[language]["mask"]),
386
- gr.Textbox(
387
- label=TRANSLATIONS[language]["scaling_factor"],
388
- placeholder=TRANSLATIONS[language]["scaling_placeholder"],
389
- ),
390
  ]
391
 
392
  if __name__ == "__main__":
393
  os.makedirs("./outputs", exist_ok=True)
394
 
395
  with gr.Blocks() as demo:
396
- # Language selector
397
  language = gr.Dropdown(
398
  choices=["english", "dutch"],
399
  value="english",
@@ -401,33 +908,72 @@ if __name__ == "__main__":
401
  interactive=True
402
  )
403
 
404
- # Initialize interface components
405
  input_image = gr.Image(label=TRANSLATIONS["english"]["input_image"], type="numpy")
406
- offset = gr.Number(label=TRANSLATIONS["english"]["offset_value"], value=0.15)
407
- coin_size = gr.Number(label=TRANSLATIONS["english"]["coin_diameter"], value=20)
 
 
 
 
408
 
409
  output_image = gr.Image(label=TRANSLATIONS["english"]["output_image"])
410
  outlines = gr.Image(label=TRANSLATIONS["english"]["outlines"])
411
  dxf_file = gr.File(label=TRANSLATIONS["english"]["dxf_file"])
412
  mask = gr.Image(label=TRANSLATIONS["english"]["mask"])
 
413
  scaling = gr.Textbox(
414
  label=TRANSLATIONS["english"]["scaling_factor"],
415
  placeholder=TRANSLATIONS["english"]["scaling_placeholder"]
416
  )
417
 
418
- # Create submit button
419
  submit_btn = gr.Button("Submit")
420
 
421
- # Handle language change
422
  language.change(
423
  fn=lambda x: [
424
  gr.update(label=TRANSLATIONS[x]["input_image"]),
425
  gr.update(label=TRANSLATIONS[x]["offset_value"]),
426
- gr.update(label=TRANSLATIONS[x]["coin_diameter"]),
427
  gr.update(label=TRANSLATIONS[x]["output_image"]),
428
  gr.update(label=TRANSLATIONS[x]["outlines"]),
 
429
  gr.update(label=TRANSLATIONS[x]["dxf_file"]),
430
  gr.update(label=TRANSLATIONS[x]["mask"]),
 
 
431
  gr.update(
432
  label=TRANSLATIONS[x]["scaling_factor"],
433
  placeholder=TRANSLATIONS[x]["scaling_placeholder"]
@@ -435,28 +981,37 @@ if __name__ == "__main__":
435
  ],
436
  inputs=[language],
437
  outputs=[
438
- input_image, offset, coin_size,
439
- output_image, outlines, dxf_file,
440
- mask, scaling
441
  ]
442
  )
 
 
 
 
 
443
 
444
- # Handle prediction
445
  submit_btn.click(
446
- fn=predict,
447
- inputs=[input_image, offset, coin_size],
448
  outputs=[output_image, outlines, dxf_file, mask, scaling]
449
  )
450
 
451
- # Add examples
452
  gr.Examples(
453
  examples=[
454
- ["./examples/Test20.jpg", 0.15],
455
- ["./examples/Test21.jpg", 0.15],
456
- ["./examples/Test22.jpg", 0.15],
457
- ["./examples/Test23.jpg", 0.15],
458
  ],
459
- inputs=[input_image, offset]
460
  )
461
 
462
  demo.launch(share=True)
 
18
  from scipy.interpolate import splprep, splev
19
  from scipy.ndimage import gaussian_filter1d
20
  import json
21
+ import time
22
+ import signal
23
+ from shapely.ops import unary_union
24
+ from shapely.geometry import MultiPolygon, GeometryCollection, Polygon, Point
25
+ from u2netp import U2NETP # Add U2NETP import
26
+ import logging
27
+ import shutil
28
+
29
+ # Initialize logging
30
+ logging.basicConfig(level=logging.INFO)
31
+ logger = logging.getLogger(__name__)
32
+
33
+ # Create cache directory for models
34
+ CACHE_DIR = os.path.join(os.path.dirname(__file__), ".cache")
35
+ os.makedirs(CACHE_DIR, exist_ok=True)
36
+
37
+ # Custom Exception Classes
38
+ class TimeoutReachedError(Exception):
39
+ pass
40
+
41
+ class BoundaryOverlapError(Exception):
42
+ pass
43
+
44
+ class TextOverlapError(Exception):
45
+ pass
46
+
47
+ class ReferenceBoxNotDetectedError(Exception):
48
+ """Raised when the Reference coin cannot be detected in the image"""
49
+ pass
50
+
51
+ class FingerCutOverlapError(Exception):
52
+ """Raised when finger cuts overlap with existing geometry"""
53
+ def __init__(self, message="There was an overlap with the finger cuts. Please try again to generate the DXF."):
54
+ super().__init__(message)
55
+
56
+ # Global model initialization
57
+ print("Loading models...")
58
+ start_time = time.time()
59
+
60
+ # Load YOLO reference model
61
+ reference_model_path = os.path.join(CACHE_DIR, "best1.pt")
62
+ if not os.path.exists(reference_model_path):
63
+ shutil.copy("best1.pt", reference_model_path)
64
+ reference_detector_global = YOLO(reference_model_path)
65
+
66
+ # Load U2NETP model
67
+ u2net_model_path = os.path.join(CACHE_DIR, "u2netp.pth")
68
+ if not os.path.exists(u2net_model_path):
69
+ shutil.copy("u2netp.pth", u2net_model_path)
70
+ u2net_global = U2NETP(3, 1)
71
+ u2net_global.load_state_dict(torch.load(u2net_model_path, map_location="cpu"))
72
+
73
+ # Load BiRefNet model
74
+ birefnet = AutoModelForImageSegmentation.from_pretrained(
75
+ "zhengpeng7/BiRefNet", trust_remote_code=True, cache_dir=CACHE_DIR
76
+ )
77
+
78
+ device = "cpu"
79
+ torch.set_float32_matmul_precision(["high", "highest"][0])
80
 
81
+ # Move models to device
82
+ u2net_global.to(device)
83
+ u2net_global.eval()
84
+ birefnet.to(device)
85
+ birefnet.eval()
86
+
87
+ # Define transforms
88
+ transform_image = transforms.Compose([
89
+ transforms.Resize((1024, 1024)),
90
+ transforms.ToTensor(),
91
+ transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
92
+ ])
93
+
94
+ # Language translations
95
  TRANSLATIONS = {
96
  "english": {
97
  "input_image": "Input Image",
98
+ "offset_value": "Offset value",
99
+ "offset_unit": "Offset unit (mm/in)",
100
+ "enable_finger": "Enable Finger Clearance",
101
+ "edge_radius": "Edge rounding radius (mm)",
102
  "output_image": "Output Image",
103
  "outlines": "Outlines of Objects",
104
  "dxf_file": "DXF file",
105
  "mask": "Mask",
106
+ "enable_radius": "Enable Edge Rounding",
107
+ "radius_disabled": "Rounding Disabled",
108
  "scaling_factor": "Scaling Factor(mm)",
109
  "scaling_placeholder": "Every pixel is equal to mentioned number in millimeters",
110
  "language_selector": "Select Language",
111
  },
112
  "dutch": {
113
  "input_image": "Invoer Afbeelding",
114
+ "offset_value": "Offset waarde",
115
+ "offset_unit": "Offset unit (mm/inch)",
116
+ "enable_finger": "Finger Clearance inschakelen",
117
+ "edge_radius": "Ronding radius rand (mm)",
118
  "output_image": "Uitvoer Afbeelding",
119
  "outlines": "Contouren van Objecten",
120
  "dxf_file": "DXF bestand",
121
  "mask": "Masker",
122
+ "enable_radius": "Ronding inschakelen",
123
+ "radius_disabled": "Ronding uitgeschakeld",
124
  "scaling_factor": "Schalingsfactor(mm)",
125
  "scaling_placeholder": "Elke pixel is gelijk aan genoemd aantal in millimeters",
126
  "language_selector": "Selecteer Taal",
127
  }
128
  }
129
 
130
+ def remove_bg_u2netp(image: np.ndarray) -> np.ndarray:
131
+ """Remove background using U2NETP model specifically for reference objects"""
132
+ try:
133
+ image_pil = Image.fromarray(image)
134
+ transform_u2netp = transforms.Compose([
135
+ transforms.Resize((320, 320)),
136
+ transforms.ToTensor(),
137
+ transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
138
+ ])
139
+
140
+ input_tensor = transform_u2netp(image_pil).unsqueeze(0).to(device)
141
+
142
+ with torch.no_grad():
143
+ outputs = u2net_global(input_tensor)
144
+
145
+ pred = outputs[0]
146
+ pred = (pred - pred.min()) / (pred.max() - pred.min() + 1e-8)
147
+ pred_np = pred.squeeze().cpu().numpy()
148
+ pred_np = cv2.resize(pred_np, (image_pil.width, image_pil.height))
149
+ pred_np = (pred_np * 255).astype(np.uint8)
150
+
151
+ return pred_np
152
+ except Exception as e:
153
+ logger.error(f"Error in U2NETP background removal: {e}")
154
+ raise
155
 
156
  def remove_bg(image: np.ndarray) -> np.ndarray:
157
+ """Remove background using BiRefNet model for main objects"""
158
+ try:
159
+ image = Image.fromarray(image)
160
+ input_images = transform_image(image).unsqueeze(0).to(device)
161
 
162
+ with torch.no_grad():
163
+ preds = birefnet(input_images)[-1].sigmoid().cpu()
164
+ pred = preds[0].squeeze()
 
 
165
 
166
+ pred_pil: Image = transforms.ToPILImage()(pred)
167
+
168
+ scale_ratio = 1024 / max(image.size)
169
+ scaled_size = (int(image.size[0] * scale_ratio), int(image.size[1] * scale_ratio))
170
+
171
+ return np.array(pred_pil.resize(scaled_size))
172
+ except Exception as e:
173
+ logger.error(f"Error in BiRefNet background removal: {e}")
174
+ raise
175
 
176
+ def resize_img(img: np.ndarray, resize_dim):
177
+ return np.array(Image.fromarray(img).resize(resize_dim))
178
 
179
  def make_square(img: np.ndarray):
180
+ """Make the image square by padding"""
181
  height, width = img.shape[:2]
 
 
182
  max_dim = max(height, width)
183
+
 
184
  pad_height = (max_dim - height) // 2
185
  pad_width = (max_dim - width) // 2
186
+
 
187
  pad_height_extra = max_dim - height - 2 * pad_height
188
  pad_width_extra = max_dim - width - 2 * pad_width
189
+
 
190
  if len(img.shape) == 3: # Color image
 
191
  padded = np.pad(
192
  img,
193
  (
 
206
  ),
207
  mode="edge",
208
  )
209
+
210
  return padded
211
 
212
+
213
+ def detect_reference_square(img) -> tuple:
214
+ """Detect reference square in the image and ignore other coins"""
215
+ try:
216
+ res = reference_detector_global.predict(img, conf=0.75)
217
+ if not res or len(res) == 0 or len(res[0].boxes) == 0:
218
+ raise ReferenceBoxNotDetectedError("Unable to detect the reference coin in the image.")
219
+
220
+ # Get all detected boxes
221
+ boxes = res[0].cpu().boxes.xyxy
222
+
223
+ # Find the largest box (most likely the reference coin)
224
+ largest_box = None
225
+ max_area = 0
226
+ for box in boxes:
227
+ x_min, y_min, x_max, y_max = box
228
+ area = (x_max - x_min) * (y_max - y_min)
229
+ if area > max_area:
230
+ max_area = area
231
+ largest_box = box
232
+
233
+ return (
234
+ save_one_box(largest_box.unsqueeze(0), img, save=False),
235
+ largest_box
236
+ )
237
+ except Exception as e:
238
+ if not isinstance(e, ReferenceBoxNotDetectedError):
239
+ logger.error(f"Error in reference square detection: {e}")
240
+ raise ReferenceBoxNotDetectedError("Error detecting reference coin. Please try again with a clearer image.")
241
+ raise
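A quick way to sanity-check the largest-box selection above in isolation; a minimal NumPy sketch with toy boxes (not real model output, and the names here are illustrative only):

```python
import numpy as np

# Toy stand-in for res[0].cpu().boxes.xyxy: a small false positive plus the real coin.
boxes = np.array([
    [10.0, 10.0, 30.0, 30.0],    # area 400
    [50.0, 40.0, 150.0, 140.0],  # area 10000 -> should win
])

# Same rule as detect_reference_square: keep the box with the largest area.
areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
largest_box = boxes[int(np.argmax(areas))]
print(largest_box)  # [ 50.  40. 150. 140.]
```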
242
+
243
+
244
  def exclude_scaling_box(
245
  image: np.ndarray,
246
  bbox: np.ndarray,
 
248
  processed_size: tuple,
249
  expansion_factor: float = 1.2,
250
  ) -> np.ndarray:
 
251
  x_min, y_min, x_max, y_max = map(int, bbox)
252
+ scale_x = processed_size[1] / orig_size[1]
253
+ scale_y = processed_size[0] / orig_size[0]
254
+
 
 
 
255
  x_min = int(x_min * scale_x)
256
  x_max = int(x_max * scale_x)
257
  y_min = int(y_min * scale_y)
258
  y_max = int(y_max * scale_y)
259
+
 
260
  box_width = x_max - x_min
261
  box_height = y_max - y_min
262
+
263
  expanded_x_min = max(0, int(x_min - (expansion_factor - 1) * box_width / 2))
264
  expanded_x_max = min(
265
  image.shape[1], int(x_max + (expansion_factor - 1) * box_width / 2)
 
268
  expanded_y_max = min(
269
  image.shape[0], int(y_max + (expansion_factor - 1) * box_height / 2)
270
  )
271
+
 
272
  image[expanded_y_min:expanded_y_max, expanded_x_min:expanded_x_max] = 0
 
273
  return image
274
 
 
 
275
 
 
 
276
 
 
 
 
277
 
 
278
 
279
+ def resample_contour(contour, edge_radius_px: int = 0):
280
+ """Resample contour with radius-aware smoothing and periodic handling."""
281
+ logger.info(f"Starting resample_contour with contour of shape {contour.shape}")
282
 
283
+ num_points = 1500
284
+ sigma = max(2, int(edge_radius_px) // 4) # Adjust sigma based on radius
285
 
286
+ if len(contour) < 4: # Need at least 4 points for spline with periodic condition
287
+ error_msg = f"Contour must have at least 4 points, but has {len(contour)} points."
288
+ logger.error(error_msg)
289
+ raise ValueError(error_msg)
290
 
291
+ try:
292
+ contour = contour[:, 0, :]
293
+ logger.debug(f"Reshaped contour to shape {contour.shape}")
294
 
295
+ # Ensure contour is closed by making start and end points the same
296
+ if not np.array_equal(contour[0], contour[-1]):
297
+ contour = np.vstack([contour, contour[0]])
298
 
299
+ # Create periodic spline representation
300
+ tck, u = splprep(contour.T, u=None, s=0, per=True)
301
+
302
+ # Evaluate spline at evenly spaced points
303
+ u_new = np.linspace(u.min(), u.max(), num_points)
304
+ x_new, y_new = splev(u_new, tck, der=0)
305
+
306
+ # Apply Gaussian smoothing with wrap-around
307
+ if sigma > 0:
308
+ x_new = gaussian_filter1d(x_new, sigma=sigma, mode='wrap')
309
+ y_new = gaussian_filter1d(y_new, sigma=sigma, mode='wrap')
310
+
311
+ # Re-close the contour after smoothing
312
+ x_new[-1] = x_new[0]
313
+ y_new[-1] = y_new[0]
314
+
315
+ result = np.array([x_new, y_new]).T
316
+ logger.info(f"Completed resample_contour with result shape {result.shape}")
317
+ return result
318
 
319
+ except Exception as e:
320
+ logger.error(f"Error in resample_contour: {e}")
321
+ raise
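For reference, a self-contained sketch of the periodic-spline resampling idea used in `resample_contour` above, run on a synthetic noisy circle instead of a real OpenCV contour (the `num_points` and `sigma` values mirror the defaults in the diff):

```python
import numpy as np
from scipy.interpolate import splprep, splev
from scipy.ndimage import gaussian_filter1d

# Synthetic closed contour in OpenCV shape (N, 1, 2): a noisy circle.
theta = np.linspace(0, 2 * np.pi, 60, endpoint=False)
circle = np.stack([100 + 40 * np.cos(theta), 100 + 40 * np.sin(theta)], axis=1)
contour = (circle + np.random.normal(0, 1.0, circle.shape))[:, None, :]

xy = contour[:, 0, :]
if not np.array_equal(xy[0], xy[-1]):
    xy = np.vstack([xy, xy[0]])                 # close the loop explicitly

tck, u = splprep(xy.T, s=0, per=True)           # periodic interpolating spline
u_new = np.linspace(u.min(), u.max(), 1500)
x_new, y_new = splev(u_new, tck, der=0)

sigma = 2
x_new = gaussian_filter1d(x_new, sigma=sigma, mode="wrap")  # wrap-around smoothing
y_new = gaussian_filter1d(y_new, sigma=sigma, mode="wrap")
x_new[-1], y_new[-1] = x_new[0], y_new[0]       # re-close after smoothing

resampled = np.array([x_new, y_new]).T
print(resampled.shape)  # (1500, 2)
```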
322
+
323
+
324
+
325
+
326
+
327
+
328
+ # def save_dxf_spline(inflated_contours, scaling_factor, height, finger_clearance=False):
329
+ # doc = ezdxf.new(units=ezdxf.units.MM)
330
+ # doc.header["$INSUNITS"] = ezdxf.units.MM
331
+ # msp = doc.modelspace()
332
+ # final_polygons_inch = []
333
+ # finger_centers = []
334
+ # original_polygons = []
335
+
336
+ # for contour in inflated_contours:
337
+ # try:
338
+ # # Removed the second parameter since it was causing the error
339
+ # resampled_contour = resample_contour(contour)
340
+
341
+ # points_inch = [(x * scaling_factor, (height - y) * scaling_factor)
342
+ # for x, y in resampled_contour]
343
+
344
+ # if len(points_inch) < 3:
345
+ # continue
346
+
347
+ # tool_polygon = build_tool_polygon(points_inch)
348
+ # original_polygons.append(tool_polygon)
349
+
350
+ # if finger_clearance:
351
+ # try:
352
+ # tool_polygon, center = place_finger_cut_adjusted(
353
+ # tool_polygon, points_inch, finger_centers, final_polygons_inch
354
+ # )
355
+ # except FingerCutOverlapError:
356
+ # tool_polygon = original_polygons[-1]
357
+
358
+ # exterior_coords = polygon_to_exterior_coords(tool_polygon)
359
+ # if len(exterior_coords) < 3:
360
+ # continue
361
+
362
+ # msp.add_spline(exterior_coords, degree=3, dxfattribs={"layer": "TOOLS"})
363
+ # final_polygons_inch.append(tool_polygon)
364
+
365
+ # except ValueError as e:
366
+ # logger.warning(f"Skipping contour: {e}")
367
+
368
+ # dxf_filepath = os.path.join("./outputs", "out.dxf")
369
+ # doc.saveas(dxf_filepath)
370
+ # return dxf_filepath, final_polygons_inch, original_polygons
371
+
372
+
373
+
374
+
375
+ def save_dxf_spline(inflated_contours, scaling_factor, height, finger_clearance=False):
376
  doc = ezdxf.new(units=ezdxf.units.MM)
377
+ doc.header["$INSUNITS"] = ezdxf.units.MM
 
 
378
  msp = doc.modelspace()
379
+ final_polygons_inch = []
380
+ finger_centers = []
381
+ original_polygons = []
382
+
383
+ # Scale correction factor based on your analysis
384
+ scale_correction = 1.079
385
 
386
  for contour in inflated_contours:
387
  try:
388
+ resampled_contour = resample_contour(contour)
389
+
390
+ points_inch = [(x * scaling_factor, (height - y) * scaling_factor)
391
+ for x, y in resampled_contour]
392
+
393
+ if len(points_inch) < 3:
394
+ continue
395
+
396
+ tool_polygon = build_tool_polygon(points_inch)
397
+ original_polygons.append(tool_polygon)
398
+
399
+ if finger_clearance:
400
+ try:
401
+ tool_polygon, center = place_finger_cut_adjusted(
402
+ tool_polygon, points_inch, finger_centers, final_polygons_inch
403
+ )
404
+ except FingerCutOverlapError:
405
+ tool_polygon = original_polygons[-1]
406
+
407
+ exterior_coords = polygon_to_exterior_coords(tool_polygon)
408
+ if len(exterior_coords) < 3:
409
+ continue
410
 
411
+ # Apply scale correction AFTER finger cuts and polygon adjustments
412
+ corrected_coords = [(x * scale_correction, y * scale_correction) for x, y in exterior_coords]
413
+
414
+ msp.add_spline(corrected_coords, degree=3, dxfattribs={"layer": "TOOLS"})
415
+ final_polygons_inch.append(tool_polygon)
416
 
417
  except ValueError as e:
418
+ logger.warning(f"Skipping contour: {e}")
419
 
420
  dxf_filepath = os.path.join("./outputs", "out.dxf")
421
  doc.saveas(dxf_filepath)
422
+ return dxf_filepath, final_polygons_inch, original_polygons
423
 
 
424
 
425
 
 
 
 
 
 
426
 
 
427
 
428
+ def build_tool_polygon(points_inch):
429
+ return Polygon(points_inch)
 
 
430
 
 
431
 
 
 
 
432
 
433
+ def polygon_to_exterior_coords(poly):
434
+ logger.info(f"Starting polygon_to_exterior_coords with input geometry type: {poly.geom_type}")
435
+
436
+ try:
437
+ # 1) If it's a GeometryCollection or MultiPolygon, fuse everything into one shape
438
+ if poly.geom_type == "GeometryCollection" or poly.geom_type == "MultiPolygon":
439
+ logger.debug(f"Performing unary_union on {poly.geom_type}")
440
+ unified = unary_union(poly)
441
+ if unified.is_empty:
442
+ logger.warning("unary_union produced an empty geometry; returning empty list")
443
+ return []
444
+ # If union still yields multiple disjoint pieces, pick the largest Polygon
445
+ if unified.geom_type == "GeometryCollection" or unified.geom_type == "MultiPolygon":
446
+ largest = None
447
+ max_area = 0.0
448
+ for g in getattr(unified, "geoms", []):
449
+ if hasattr(g, "area") and g.area > max_area and hasattr(g, "exterior"):
450
+ max_area = g.area
451
+ largest = g
452
+ if largest is None:
453
+ logger.warning("No valid Polygon found in unified geometry; returning empty list")
454
+ return []
455
+ poly = largest
456
+ else:
457
+ # Now unified should be a single Polygon or LinearRing
458
+ poly = unified
459
+
460
+ # 2) At this point, we must have a single Polygon (or something with an exterior)
461
+ if not hasattr(poly, "exterior") or poly.exterior is None:
462
+ logger.warning("Input geometry has no exterior ring; returning empty list")
463
+ return []
464
+
465
+ raw_coords = list(poly.exterior.coords)
466
+ total = len(raw_coords)
467
+ logger.info(f"Extracted {total} raw exterior coordinates")
468
+
469
+ if total == 0:
470
+ return []
471
+
472
+ # 3) Subsample coordinates to at most 100 points (evenly spaced)
473
+ max_pts = 100
474
+ if total > max_pts:
475
+ step = total // max_pts
476
+ sampled = [raw_coords[i] for i in range(0, total, step)]
477
+ # Ensure we include the last point to close the loop
478
+ if sampled[-1] != raw_coords[-1]:
479
+ sampled.append(raw_coords[-1])
480
+ logger.info(f"Downsampled perimeter from {total} to {len(sampled)} points")
481
+ return sampled
482
+ else:
483
+ return raw_coords
484
+
485
+ except Exception as e:
486
+ logger.error(f"Error in polygon_to_exterior_coords: {e}")
487
+ return []
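The geometry-fusing behaviour above can be exercised on its own; a small shapely sketch with two overlapping squares (toy coordinates, illustrative only):

```python
from shapely.geometry import MultiPolygon, Polygon
from shapely.ops import unary_union

# Two overlapping squares: unary_union fuses them into a single Polygon,
# whose exterior ring is what ends up as the DXF spline's fit points.
a = Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])
b = Polygon([(5, 5), (15, 5), (15, 15), (5, 15)])

merged = unary_union(MultiPolygon([a, b]))
coords = list(merged.exterior.coords)
print(merged.geom_type, len(coords))  # single Polygon with one exterior ring

# Evenly-spaced downsampling to at most 100 points, as done above.
max_pts = 100
if len(coords) > max_pts:
    step = len(coords) // max_pts
    sampled = coords[::step]
    if sampled[-1] != coords[-1]:
        sampled.append(coords[-1])  # keep the closing point
    coords = sampled
print(len(coords))
```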
488
+
489
+
490
+
491
+
492
+
493
+
494
+
495
+
496
+ def place_finger_cut_adjusted(
497
+ tool_polygon: Polygon,
498
+ points_inch: list,
499
+ existing_centers: list,
500
+ all_polygons: list,
501
+ circle_diameter: float = 25.4,
502
+ min_gap: float = 0.5,
503
+ max_attempts: int = 100
504
+ ) -> (Polygon, tuple):
505
+ logger.info(f"Starting place_finger_cut_adjusted with {len(points_inch)} input points")
506
+
507
+ from shapely.geometry import Point
508
+ import numpy as np
509
+ import time
510
+ import random
511
+
512
+ # Fallback: if we run out of time or attempts, place in the "middle" of the outline
513
+ def fallback_solution():
514
+ logger.warning("Using fallback approach for finger cut placement")
515
+ # Pick the midpoint of the original outline as a last-resort center
516
+ fallback_center = points_inch[len(points_inch) // 2]
517
+ r = circle_diameter / 2.0
518
+ fallback_circle = Point(fallback_center).buffer(r, resolution=32)
519
+ try:
520
+ union_poly = tool_polygon.union(fallback_circle)
521
+ except Exception as e:
522
+ logger.warning(f"Fallback union failed ({e}); trying buffer-union fallback")
523
+ union_poly = tool_polygon.buffer(0).union(fallback_circle.buffer(0))
524
+
525
+ existing_centers.append(fallback_center)
526
+ logger.info(f"Fallback finger cut placed at {fallback_center}")
527
+ return union_poly, fallback_center
528
+
529
+ # Precompute values
530
+ r = circle_diameter / 2.0
531
+ needed_center_dist = circle_diameter + min_gap
532
+
533
+ # 1) Get perimeter coordinates of this polygon
534
+ raw_perimeter = polygon_to_exterior_coords(tool_polygon)
535
+ if not raw_perimeter:
536
+ logger.warning("No valid exterior coords found; using fallback immediately")
537
+ return fallback_solution()
538
+
539
+ # 2) Possibly subsample to at most 100 perimeter points
540
+ if len(raw_perimeter) > 100:
541
+ step = len(raw_perimeter) // 100
542
+ perimeter_coords = raw_perimeter[::step]
543
+ logger.info(f"Subsampled perimeter from {len(raw_perimeter)} to {len(perimeter_coords)} points")
544
+ else:
545
+ perimeter_coords = raw_perimeter[:]
546
+
547
+ # 3) Randomize the order to avoid bias
548
+ indices = list(range(len(perimeter_coords)))
549
+ random.shuffle(indices)
550
+ logger.debug(f"Shuffled perimeter indices for candidate order")
551
+
552
+ # 4) Non-blocking timeout setup
553
+ start_time = time.time()
554
+ timeout_secs = 5.0 # leave ~0.1s margin
555
+
556
+ attempts = 0
557
  try:
558
+ while attempts < max_attempts:
559
+ # 5) Abort if we're running out of time
560
+ if time.time() - start_time > timeout_secs - 0.1:
561
+ logger.warning(f"Approaching timeout after {attempts} attempts")
562
+ return fallback_solution()
563
+
564
+ # 6) For each shuffled perimeter point, try small offsets
565
+ for idx in indices:
566
+ # Check timeout inside the loop as well
567
+ if time.time() - start_time > timeout_secs - 0.05:
568
+ logger.warning("Timeout during candidate-point loop")
569
+ return fallback_solution()
570
+
571
+ cx, cy = perimeter_coords[idx]
572
+ # Try five small offsets: (0,0), (±min_gap/2, 0), (0, ±min_gap/2)
573
+ for dx, dy in [(0, 0), (-min_gap/2, 0), (min_gap/2, 0), (0, -min_gap/2), (0, min_gap/2)]:
574
+ candidate_center = (cx + dx, cy + dy)
575
+
576
+ # 6a) Check distance to existing finger centers
577
+ too_close_finger = any(
578
+ np.hypot(candidate_center[0] - ex, candidate_center[1] - ey)
579
+ < needed_center_dist
580
+ for (ex, ey) in existing_centers
581
+ )
582
+ if too_close_finger:
583
+ continue
584
+
585
+ # 6b) Build candidate circle with reduced resolution for speed
586
+ candidate_circle = Point(candidate_center).buffer(r, resolution=32)
587
+
588
+ # 6c) Must overlap ≥30% with this polygon
589
+ try:
590
+ inter_area = tool_polygon.intersection(candidate_circle).area
591
+ except Exception:
592
+ continue
593
+
594
+ if inter_area < 0.3 * candidate_circle.area:
595
+ continue
596
+
597
+ # 6d) Must not intersect or even "touch" any other polygon (buffered by min_gap)
598
+ invalid = False
599
+ for other_poly in all_polygons:
600
+ if other_poly.equals(tool_polygon):
601
+ # Don't compare against itself
602
+ continue
603
+ # Buffer the other polygon by min_gap to enforce a strict clearance
604
+ if other_poly.buffer(min_gap).intersects(candidate_circle) or \
605
+ other_poly.buffer(min_gap).touches(candidate_circle):
606
+ invalid = True
607
+ break
608
+ if invalid:
609
+ continue
610
+
611
+ # 6e) Candidate passes all tests → union and return
612
+ try:
613
+ union_poly = tool_polygon.union(candidate_circle)
614
+ # If union is a MultiPolygon (more than one piece), reject
615
+ if union_poly.geom_type == "MultiPolygon" and len(union_poly.geoms) > 1:
616
+ continue
617
+ # If union didn't change anything (no real cut), reject
618
+ if union_poly.equals(tool_polygon):
619
+ continue
620
+ except Exception:
621
+ continue
622
+
623
+ existing_centers.append(candidate_center)
624
+ logger.info(f"Finger cut placed successfully at {candidate_center} after {attempts} attempts")
625
+ return union_poly, candidate_center
626
+
627
+ attempts += 1
628
+ # If we've done half the attempts and we're near timeout, bail out
629
+ if attempts >= (max_attempts // 2) and (time.time() - start_time) > timeout_secs * 0.8:
630
+ logger.warning(f"Approaching timeout (attempt {attempts})")
631
+ return fallback_solution()
632
+
633
+ logger.debug(f"Completed iteration {attempts}/{max_attempts}")
634
+
635
+ # If we exit loop without finding a valid spot
636
+ logger.warning(f"No valid spot after {max_attempts} attempts, using fallback")
637
+ return fallback_solution()
638
+
639
  except Exception as e:
640
+ logger.error(f"Error in place_finger_cut_adjusted: {e}")
641
+ return fallback_solution()
642
+
643
 
 
 
 
644
 
 
 
 
645
 
646
 
 
 
 
 
 
647
 
 
648
 
 
 
 
649
 
 
 
650
 
651
+
652
+ def extract_outlines(binary_image: np.ndarray) -> tuple:
653
+ contours, _ = cv2.findContours(
654
+ binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
655
  )
656
 
657
+ outline_image = np.full_like(binary_image, 255) # White background
658
 
659
+ return outline_image, contours
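For the finger-cut rules in `place_finger_cut_adjusted` above, a self-contained shapely sketch of the two key checks (at least 30% overlap with the tool outline, and a single-piece result after the union); the rectangle and circle sizes are made-up values in mm:

```python
from shapely.geometry import Point, Polygon

tool = Polygon([(0, 0), (120, 0), (120, 60), (0, 60)])   # toy tool outline, mm
circle = Point(60, 0).buffer(25.4 / 2.0, resolution=32)  # 25.4 mm finger cut on the edge

# Check 1: the circle must overlap the tool by at least 30% of its own area.
overlap_ok = tool.intersection(circle).area >= 0.3 * circle.area

# Check 2: the union must stay one piece and actually change the outline.
union_poly = tool.union(circle)
single_piece = union_poly.geom_type != "MultiPolygon"
changed = not union_poly.equals(tool)

print(overlap_ok, single_piece, changed)  # True True True
```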
 
 
 
660
 
661
 
 
 
662
 
663
 
664
+ def round_edges(mask: np.ndarray, radius_mm: float, scaling_factor: float) -> np.ndarray:
665
+ """Rounds mask edges using contour smoothing."""
666
+ if radius_mm <= 0 or scaling_factor <= 0:
667
+ return mask
668
+
669
+ radius_px = max(1, int(radius_mm / scaling_factor)) # Ensure min 1px
670
+
671
+ # Handle small objects
672
+ if np.count_nonzero(mask) < 500: # Small object threshold
673
+ return cv2.dilate(cv2.erode(mask, np.ones((3,3))), np.ones((3,3)))
674
+
675
+ # Existing contour processing with improvements:
676
+ contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
677
+
678
+ # NEW: Filter small contours
679
+ contours = [c for c in contours if cv2.contourArea(c) > 100]
680
+ smoothed_contours = []
681
+
682
+ for contour in contours:
683
+ try:
684
+ # Resample with radius-based smoothing
685
+ resampled = resample_contour(contour, radius_px)
686
+ resampled = resampled.astype(np.int32).reshape((-1, 1, 2))
687
+ smoothed_contours.append(resampled)
688
+ except Exception as e:
689
+ logger.warning(f"Error smoothing contour: {e}")
690
+ smoothed_contours.append(contour) # Fallback to original contour
691
+
692
+ # Draw smoothed contours
693
+ rounded = np.zeros_like(mask)
694
+ cv2.drawContours(rounded, smoothed_contours, -1, 255, thickness=cv2.FILLED)
695
+
696
+ return rounded
697
+
698
+
699
+ def predict_og(image, offset, offset_unit, edge_radius, finger_clearance=False):
700
+ print(f"DEBUG: Image shape: {image.shape}, dtype: {image.dtype}, range: {image.min()}-{image.max()}")
701
+
702
+ coin_size_mm = 20.0
703
+
704
+ if offset_unit == "inches":
705
+ offset *= 25.4
706
+
707
+ if edge_radius is None or edge_radius == 0:
708
+ edge_radius = 0.0001
709
 
710
  if offset < 0:
711
  raise gr.Error("Offset Value Can't be negative")
712
 
713
  try:
714
  reference_obj_img, scaling_box_coords = detect_reference_square(image)
715
+ except ReferenceBoxNotDetectedError as e:
716
+ return (
717
+ None,
718
+ None,
719
+ None,
720
+ None,
721
+ f"Error: {str(e)}"
722
+ )
723
+ except Exception as e:
724
+ raise gr.Error(f"Error processing image: {str(e)}")
725
 
726
  reference_obj_img = make_square(reference_obj_img)
727
+
728
+ # Use U2NETP for reference object background removal
729
+ reference_square_mask = remove_bg_u2netp(reference_obj_img)
730
+ reference_square_mask = resize_img(reference_square_mask, reference_obj_img.shape[:2][::-1])
731
 
732
  try:
733
+ scaling_factor = calculate_scaling_factor(
734
  target_image=reference_square_mask,
735
+ reference_obj_size_mm=coin_size_mm,
736
  feature_detector="ORB",
737
  )
738
  except Exception as e:
739
  scaling_factor = None
740
+ logger.warning(f"Error calculating scaling factor: {e}")
741
 
742
+ if not scaling_factor:
743
+ ref_size_px = (reference_square_mask.shape[0] + reference_square_mask.shape[1]) / 2
744
+ scaling_factor = 20.0 / ref_size_px
745
+ logger.info(f"Fallback scaling: {scaling_factor:.4f} mm/px using 20mm reference")
746
 
747
+ # Use BiRefNet for main object background removal
748
  orig_size = image.shape[:2]
749
  objects_mask = remove_bg(image)
750
  processed_size = objects_mask.shape[:2]
751
 
752
+ # REMOVE ALL COINS from mask:
753
+ res = reference_detector_global.predict(image, conf=0.05)
754
+ boxes = res[0].cpu().boxes.xyxy if res and len(res) > 0 else []
755
+
756
+ for box in boxes:
757
+ objects_mask = exclude_scaling_box(
758
+ objects_mask,
759
+ box,
760
+ orig_size,
761
+ processed_size,
762
+ expansion_factor=1.2,
763
+ )
764
+
765
  objects_mask = resize_img(objects_mask, (image.shape[1], image.shape[0]))
766
+
767
+ # offset_pixels = (float(offset) / scaling_factor) * 2 + 1 if scaling_factor else 1
768
+ # dilated_mask = cv2.dilate(objects_mask, np.ones((int(offset_pixels), int(offset_pixels)), np.uint8))
769
+ # Image.fromarray(dilated_mask).save("./outputs/scaled_mask_original.jpg")
770
+ # dilated_mask_orig = dilated_mask.copy()
771
+
772
+ # #if edge_radius > 0:
773
+ # # Use morphological rounding instead of contour-based
774
+ # rounded_mask = round_edges(objects_mask, edge_radius, scaling_factor)
775
+ # #else:
776
+ # #rounded_mask = objects_mask.copy()
777
 
778
+ # # Apply dilation AFTER rounding
779
+ # offset_pixels = (float(offset) / scaling_factor) * 2 + 1 if scaling_factor else 1
780
+ # kernel = np.ones((int(offset_pixels), int(offset_pixels)), np.uint8)
781
+ # dilated_mask = cv2.dilate(rounded_mask, kernel)
782
+ # Apply edge rounding first
783
+ rounded_mask = round_edges(objects_mask, edge_radius, scaling_factor)
784
+
785
+ # Apply dilation AFTER rounding
786
+ offset_pixels = (float(offset) / scaling_factor) * 2 + 1 if scaling_factor else 1
787
+ kernel = np.ones((int(offset_pixels), int(offset_pixels)), np.uint8)
788
+ final_dilated_mask = cv2.dilate(rounded_mask, kernel)
789
+
790
+ # Save for debugging
791
+ Image.fromarray(final_dilated_mask).save("./outputs/scaled_mask_original.jpg")
792
+
793
 
794
+ outlines, contours = extract_outlines(final_dilated_mask)
795
 
796
+ try:
797
+ dxf, finger_polygons, original_polygons = save_dxf_spline(
798
+ contours,
799
+ scaling_factor,
800
+ processed_size[0],
801
+ finger_clearance=(finger_clearance == "On")
802
+ )
803
+ except FingerCutOverlapError as e:
804
+ raise gr.Error(str(e))
805
+
806
+ shrunked_img_contours = image.copy()
807
+
808
+ if finger_clearance == "On":
809
+ outlines = np.full_like(final_dilated_mask, 255)
810
+ for poly in finger_polygons:
811
+ try:
812
+ coords = np.array([
813
+ (int(x / scaling_factor), int(processed_size[0] - y / scaling_factor))
814
+ for x, y in poly.exterior.coords
815
+ ], np.int32).reshape((-1, 1, 2))
816
+
817
+ cv2.drawContours(shrunked_img_contours, [coords], -1, 0, thickness=2)
818
+ cv2.drawContours(outlines, [coords], -1, 0, thickness=2)
819
+ except Exception as e:
820
+ logger.warning(f"Failed to draw finger cut: {e}")
821
+ continue
822
+ else:
823
+ outlines = np.full_like(final_dilated_mask, 255)
824
+ cv2.drawContours(shrunked_img_contours, contours, -1, 0, thickness=2)
825
+ cv2.drawContours(outlines, contours, -1, 0, thickness=2)
826
 
827
  return (
828
+ shrunked_img_contours,
829
+ outlines,
830
+ dxf,
831
+ final_dilated_mask,
832
+ f"{scaling_factor:.4f}")
833
+
834
+
835
+ def predict_simple(image):
836
+ """
837
+ Only image in → returns (annotated, outlines, dxf, mask).
838
+ Uses offset=0 mm, no fillet, no finger-cut.
839
+ """
840
+ ann, outlines, dxf_path, mask, _ = predict_og(
841
+ image,
842
+ offset=0,
843
+ offset_unit="mm",
844
+ edge_radius=0,
845
+ finger_clearance="Off",
846
+ )
847
+ return ann, outlines, dxf_path, mask
848
+
849
+ def predict_middle(image, enable_fillet, fillet_value_mm):
850
+ """
851
+ image + (On/Off) fillet toggle + fillet radius → returns (annotated, outlines, dxf, mask).
852
+ Uses offset=0 mm, finger-cut off.
853
+ """
854
+ radius = fillet_value_mm if enable_fillet == "On" else 0
855
+ ann, outlines, dxf_path, mask, _ = predict_og(
856
+ image,
857
+ offset=0,
858
+ offset_unit="mm",
859
+ edge_radius=radius,
860
+ finger_clearance="Off",
861
  )
862
+ return ann, outlines, dxf_path, mask
863
+
864
+ def predict_full(image, enable_fillet, fillet_value_mm, enable_finger_cut):
865
+ """
866
+ image + fillet toggle/value + finger-cut toggle → returns (annotated, outlines, dxf, mask).
867
+ Uses offset=0 mm.
868
+ """
869
+ radius = fillet_value_mm if enable_fillet == "On" else 0
870
+ finger_flag = "On" if enable_finger_cut == "On" else "Off"
871
+ ann, outlines, dxf_path, mask, _ = predict_og(
872
+ image,
873
+ offset=0,
874
+ offset_unit="mm",
875
+ edge_radius=radius,
876
+ finger_clearance=finger_flag,
877
+ )
878
+ return ann, outlines, dxf_path, mask
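Hypothetical local usage of the new wrappers (assumes this module is importable as `app`, that the model weights are present, and that `./examples/Test20.jpg` and `./outputs` exist); not part of the Gradio flow in the commit:

```python
import numpy as np
from PIL import Image

from app import predict_middle  # predict_simple / predict_full work the same way

img = np.array(Image.open("./examples/Test20.jpg").convert("RGB"))

# Offset is fixed at 0 mm inside the wrapper; only the fillet is exposed here.
annotated, outline_img, dxf_path, mask = predict_middle(
    img, enable_fillet="On", fillet_value_mm=5
)

Image.fromarray(annotated).save("./outputs/annotated_preview.png")
print(dxf_path)  # ./outputs/out.dxf
```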
879
+
880
+
881
+
882
 
883
  def update_interface(language):
 
884
  return [
885
  gr.Image(label=TRANSLATIONS[language]["input_image"], type="numpy"),
886
+ gr.Row([
887
+ gr.Number(label=TRANSLATIONS[language]["offset_value"], value=0),
888
+ gr.Dropdown(["mm", "inches"], value="mm",
889
+ label=TRANSLATIONS[language]["offset_unit"])
890
+ ]),
891
+ gr.Slider(minimum=0,maximum=20,step=1,value=5,label=TRANSLATIONS[language]["edge_radius"],visible=False,interactive=True),
892
+ gr.Radio(choices=["On", "Off"],value="Off",label=TRANSLATIONS[language]["enable_radius"],),
893
  gr.Image(label=TRANSLATIONS[language]["output_image"]),
894
  gr.Image(label=TRANSLATIONS[language]["outlines"]),
895
  gr.File(label=TRANSLATIONS[language]["dxf_file"]),
896
  gr.Image(label=TRANSLATIONS[language]["mask"]),
897
+ gr.Textbox(label=TRANSLATIONS[language]["scaling_factor"],placeholder=TRANSLATIONS[language]["scaling_placeholder"],),
 
 
 
898
  ]
899
 
900
  if __name__ == "__main__":
901
  os.makedirs("./outputs", exist_ok=True)
902
 
903
  with gr.Blocks() as demo:
 
904
  language = gr.Dropdown(
905
  choices=["english", "dutch"],
906
  value="english",
 
908
  interactive=True
909
  )
910
 
 
911
  input_image = gr.Image(label=TRANSLATIONS["english"]["input_image"], type="numpy")
912
+
913
+ with gr.Row():
914
+ offset = gr.Number(label=TRANSLATIONS["english"]["offset_value"], value=0)
915
+ offset_unit = gr.Dropdown([
916
+ "mm", "inches"
917
+ ], value="mm", label=TRANSLATIONS["english"]["offset_unit"])
918
+
919
+ finger_toggle = gr.Radio(
920
+ choices=["On", "Off"],
921
+ value="Off",
922
+ label=TRANSLATIONS["english"]["enable_finger"]
923
+ )
924
+
925
+ edge_radius = gr.Slider(
926
+ minimum=0,
927
+ maximum=20,
928
+ step=1,
929
+ value=5,
930
+ label=TRANSLATIONS["english"]["edge_radius"],
931
+ visible=False,
932
+ interactive=True
933
+ )
934
+
935
+ radius_toggle = gr.Radio(
936
+ choices=["On", "Off"],
937
+ value="Off",
938
+ label=TRANSLATIONS["english"]["enable_radius"],
939
+ interactive=True
940
+ )
941
+
942
+ def toggle_radius(choice):
943
+ if choice == "On":
944
+ return gr.Slider(visible=True)
945
+ return gr.Slider(visible=False, value=0)
946
+
947
+ radius_toggle.change(
948
+ fn=toggle_radius,
949
+ inputs=radius_toggle,
950
+ outputs=edge_radius
951
+ )
952
 
953
  output_image = gr.Image(label=TRANSLATIONS["english"]["output_image"])
954
  outlines = gr.Image(label=TRANSLATIONS["english"]["outlines"])
955
  dxf_file = gr.File(label=TRANSLATIONS["english"]["dxf_file"])
956
  mask = gr.Image(label=TRANSLATIONS["english"]["mask"])
957
+
958
  scaling = gr.Textbox(
959
  label=TRANSLATIONS["english"]["scaling_factor"],
960
  placeholder=TRANSLATIONS["english"]["scaling_placeholder"]
961
  )
962
 
 
963
  submit_btn = gr.Button("Submit")
964
 
 
965
  language.change(
966
  fn=lambda x: [
967
  gr.update(label=TRANSLATIONS[x]["input_image"]),
968
  gr.update(label=TRANSLATIONS[x]["offset_value"]),
969
+ gr.update(label=TRANSLATIONS[x]["offset_unit"]),
970
  gr.update(label=TRANSLATIONS[x]["output_image"]),
971
  gr.update(label=TRANSLATIONS[x]["outlines"]),
972
+ gr.update(label=TRANSLATIONS[x]["enable_finger"]),
973
  gr.update(label=TRANSLATIONS[x]["dxf_file"]),
974
  gr.update(label=TRANSLATIONS[x]["mask"]),
975
+ gr.update(label=TRANSLATIONS[x]["enable_radius"]),
976
+ gr.update(label=TRANSLATIONS[x]["edge_radius"]),
977
  gr.update(
978
  label=TRANSLATIONS[x]["scaling_factor"],
979
  placeholder=TRANSLATIONS[x]["scaling_placeholder"]
 
981
  ],
982
  inputs=[language],
983
  outputs=[
984
+ input_image, offset, offset_unit,
985
+ output_image, outlines, finger_toggle, dxf_file,
986
+ mask, radius_toggle, edge_radius, scaling
987
  ]
988
  )
989
+
990
+ def custom_predict_and_format(*args):
991
+ output_image, outlines, dxf_path, mask, scaling = predict_og(*args)
992
+ if output_image is None:
993
+ return (
994
+ None, None, None, None, "Reference coin not detected!"
995
+ )
996
+ return (
997
+ output_image, outlines, dxf_path, mask, scaling
998
+ )
999
 
 
1000
  submit_btn.click(
1001
+ fn=custom_predict_and_format,
1002
+ inputs=[input_image, offset, offset_unit, edge_radius, finger_toggle],
1003
  outputs=[output_image, outlines, dxf_file, mask, scaling]
1004
  )
1005
 
1006
+
1007
  gr.Examples(
1008
  examples=[
1009
+ ["./examples/Test20.jpg", 0, "mm"],
1010
+ ["./examples/Test21.jpg", 0, "mm"],
1011
+ ["./examples/Test22.jpg", 0, "mm"],
1012
+ ["./examples/Test23.jpg", 0, "mm"],
1013
  ],
1014
+ inputs=[input_image, offset, offset_unit]
1015
  )
1016
 
1017
  demo.launch(share=True)