muhammadsalmanalfaridzi commited on
Commit
93307f9
·
verified ·
1 Parent(s): 36c6295

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -43
app.py CHANGED
@@ -27,82 +27,85 @@ def detect_objects(image):
27
  temp_file_path = temp_file.name
28
 
29
  try:
30
- # Perform sliced inference with SAHI
31
- result = get_sliced_prediction(
32
- temp_file_path,
33
- model,
34
- slice_height=256, # Adjust slice height as needed
35
- slice_width=256, # Adjust slice width as needed
36
- overlap_height_ratio=0.2, # Adjust overlap height ratio as needed
37
- overlap_width_ratio=0.2 # Adjust overlap width ratio as needed
38
- )
39
-
40
- # Menghitung jumlah objek per kelas
41
- class_count = {}
42
- total_count = 0 # Menyimpan total jumlah objek
43
-
44
- for prediction in result.object_prediction_list:
45
- class_name = prediction.class_id # or prediction.class_name if available
46
- class_count[class_name] = class_count.get(class_name, 0) + 1
47
- total_count += 1 # Tambah jumlah objek untuk setiap prediksi
48
 
49
- # Menyusun output berupa string hasil perhitungan
50
- result_text = "Product Nestle\n\n"
51
- for class_name, count in class_count.items():
52
- result_text += f"{class_name}: {count}\n"
53
- result_text += f"\nTotal Product Nestle: {total_count}"
 
 
 
54
 
55
- # Menyimpan gambar dengan prediksi
56
- output_image_path = "/tmp/prediction.jpg"
57
- result.export_visuals(export_dir="/tmp/") # Export visuals for display
58
- output_image_path = "/tmp/prediction_visual.png" # Assuming the visual output is saved here
59
 
60
- # Annotating the image with the detections (optional)
61
- label_annotator = sv.LabelAnnotator()
62
  box_annotator = sv.BoxAnnotator()
 
63
 
64
  annotated_image = box_annotator.annotate(
65
- scene=image.copy(), detections=result.object_prediction_list)
66
 
67
  annotated_image = label_annotator.annotate(
68
- scene=annotated_image, detections=result.object_prediction_list)
69
 
70
  # Save the annotated image
 
71
  annotated_image.save(output_image_path)
72
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
  except requests.exceptions.HTTPError as http_err:
74
- # Menangani kesalahan HTTP
75
  result_text = f"HTTP error occurred: {http_err}"
76
- output_image_path = temp_file_path # Kembalikan gambar asli jika terjadi error
77
  except Exception as err:
78
- # Menangani kesalahan lain
79
  result_text = f"An error occurred: {err}"
80
- output_image_path = temp_file_path # Kembalikan gambar asli jika terjadi error
81
 
82
- # Hapus file sementara setelah prediksi
83
  os.remove(temp_file_path)
84
 
85
  return output_image_path, result_text
86
 
87
- # Membuat antarmuka Gradio dengan tata letak fleksibel
88
  with gr.Blocks() as iface:
89
  with gr.Row():
90
  with gr.Column():
91
  input_image = gr.Image(type="pil", label="Input Image")
92
  with gr.Column():
93
- output_image = gr.Image(label="Detect Object")
94
  with gr.Column():
95
- output_text = gr.Textbox(label="Counting Object")
96
 
97
- # Tombol untuk memproses input
98
- detect_button = gr.Button("Detect")
99
 
100
- # Hubungkan tombol dengan fungsi deteksi
101
  detect_button.click(
102
  fn=detect_objects,
103
  inputs=input_image,
104
  outputs=[output_image, output_text]
105
  )
106
 
107
- # Menjalankan antarmuka
108
  iface.launch()
 
27
  temp_file_path = temp_file.name
28
 
29
  try:
30
# Per-slice inference hook for SAHI-style sliced detection: InferenceSlicer
# calls this once per tile and merges the returned detections.
def callback(image_slice: np.ndarray) -> sv.Detections:
    """Run the model on one image slice and wrap the result as sv.Detections."""
    return sv.Detections.from_inference(model.infer(image_slice)[0])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
+ # Configure the SAHI Slicer with specific slice dimensions and overlap
36
+ slicer = sv.InferenceSlicer(
37
+ callback=callback,
38
+ slice_wh=(320, 320), # Adjust slice dimensions as needed
39
+ overlap_wh=(0.2, 0.2), # Adjust overlap ratio for better results
40
+ overlap_filter=sv.OverlapFilter.NON_MAX_SUPPRESSION, # Filter overlapping detections
41
+ iou_threshold=0.5, # Intersection over Union threshold for NMS
42
+ )
43
 
44
+ # Run slicing-based inference
45
+ detections = slicer(image)
 
 
46
 
47
+ # Annotate the results on the image
 
48
  box_annotator = sv.BoxAnnotator()
49
+ label_annotator = sv.LabelAnnotator()
50
 
51
  annotated_image = box_annotator.annotate(
52
+ scene=image.copy(), detections=detections)
53
 
54
  annotated_image = label_annotator.annotate(
55
+ scene=annotated_image, detections=detections)
56
 
57
  # Save the annotated image
58
+ output_image_path = "/tmp/prediction_visual.png"
59
  annotated_image.save(output_image_path)
60
 
61
+ # Count the number of detected objects per class
62
+ class_count = {}
63
+ total_count = 0
64
+
65
+ for prediction in detections:
66
+ class_name = prediction.class_id # or prediction.class_name if available
67
+ class_count[class_name] = class_count.get(class_name, 0) + 1
68
+ total_count += 1 # Increment the total object count
69
+
70
+ # Create a result text with object counts
71
+ result_text = "Detected Objects:\n\n"
72
+ for class_name, count in class_count.items():
73
+ result_text += f"{class_name}: {count}\n"
74
+ result_text += f"\nTotal objects detected: {total_count}"
75
+
76
  except requests.exceptions.HTTPError as http_err:
77
+ # Handle HTTP errors
78
  result_text = f"HTTP error occurred: {http_err}"
79
+ output_image_path = temp_file_path # Return the original image in case of error
80
  except Exception as err:
81
+ # Handle other errors
82
  result_text = f"An error occurred: {err}"
83
+ output_image_path = temp_file_path # Return the original image in case of error
84
 
85
+ # Clean up temporary files
86
  os.remove(temp_file_path)
87
 
88
  return output_image_path, result_text
89
 
90
# Gradio front-end: one row with three columns (uploaded image, annotated
# result, per-class counts) and a button that runs detect_objects.
with gr.Blocks() as iface:
    with gr.Row():
        with gr.Column():
            image_in = gr.Image(type="pil", label="Input Image")
        with gr.Column():
            image_out = gr.Image(label="Detected Objects")
        with gr.Column():
            counts_out = gr.Textbox(label="Object Count")

    # Clicking the button feeds the uploaded image through detect_objects and
    # routes its (annotated-image-path, summary-text) result to the outputs.
    run_button = gr.Button("Detect Objects")
    run_button.click(
        fn=detect_objects,
        inputs=image_in,
        outputs=[image_out, counts_out],
    )

# Start serving the UI.
iface.launch()