muhammadsalmanalfaridzi commited on
Commit
7f8fbdb
·
verified ·
1 Parent(s): 368870b

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -169
app.py DELETED
@@ -1,169 +0,0 @@
1
import gradio as gr
from roboflow import Roboflow
import tempfile
import os
from sahi.slicing import slice_image
import numpy as np
import cv2
from PIL import Image, ImageDraw

# Roboflow client setup.
# SECURITY NOTE(review): the API key was hard-coded in source control.
# Read it from the ROBOFLOW_API_KEY environment variable instead; the old
# literal key is kept only as a backward-compatible fallback and should be
# rotated, since it is already exposed in the repository history.
rf = Roboflow(api_key=os.getenv("ROBOFLOW_API_KEY", "Otg64Ra6wNOgDyjuhMYU"))
project = rf.workspace("alat-pelindung-diri").project("nescafe-4base")
model = project.version(20).model
14
-
15
def apply_nms(predictions, iou_threshold=0.5, score_threshold=0.25):
    """Apply non-maximum suppression to Roboflow detection predictions.

    Roboflow's JSON format reports ``x``/``y`` as the box CENTER, while
    ``cv2.dnn.NMSBoxes`` expects top-left-corner boxes — the original code
    passed centers straight through, which skewed the IoU computation and
    produced misplaced boxes downstream. Boxes are converted to top-left
    format here, so the returned ``bbox`` is ``[x_min, y_min, width, height]``
    and can be drawn directly as ``(x, y)`` to ``(x + w, y + h)``.

    Args:
        predictions: list of Roboflow prediction dicts with keys
            ``x``, ``y``, ``width``, ``height``, ``confidence``, ``class``
            (``x``/``y`` are box centers).
        iou_threshold: IoU above which overlapping boxes are suppressed.
        score_threshold: minimum confidence for a box to enter NMS
            (previously a hard-coded 0.25; now parameterized with the same
            default, so existing callers are unaffected).

    Returns:
        List of dicts with keys ``class``, ``bbox`` ([x_min, y_min, w, h])
        and ``confidence`` for the surviving detections; empty list when
        the input is empty or nothing survives suppression.
    """
    # NMSBoxes raises on empty inputs, so bail out early.
    if not predictions:
        return []

    boxes = []
    scores = []
    classes = []
    for prediction in predictions:
        width = prediction['width']
        height = prediction['height']
        # Convert center-based coordinates to the top-left corner format
        # that cv2.dnn.NMSBoxes expects.
        x_min = prediction['x'] - width / 2
        y_min = prediction['y'] - height / 2
        boxes.append([x_min, y_min, width, height])
        scores.append(prediction['confidence'])
        classes.append(prediction['class'])

    indices = cv2.dnn.NMSBoxes(
        boxes,
        scores,
        score_threshold=score_threshold,
        nms_threshold=iou_threshold,
    )

    if indices is None or len(indices) == 0:
        return []

    # OpenCV may return the kept indices as a column vector (or, in older
    # versions, a tuple); normalize to a flat 1-D index sequence.
    indices = np.array(indices).flatten()

    return [
        {
            'class': classes[i],
            'bbox': boxes[i],  # [x_min, y_min, width, height]
            'confidence': scores[i],
        }
        for i in indices
    ]
61
-
62
- # Detect objects and annotate the image
63
def detect_objects(image):
    """Run Roboflow detection on a PIL image and return an annotated copy.

    Strategy: predict on the whole image first; if that yields nothing,
    fall back to slicing the image with SAHI and predicting per slice.
    Overlapping detections are merged with NMS before drawing.

    Args:
        image: PIL.Image uploaded by the Gradio frontend.

    Returns:
        Tuple of (path to the annotated JPEG on disk,
                  per-class object-count summary string).
    """
    # Persist the PIL image to disk so the Roboflow client can read it.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
        image.save(temp_file, format="JPEG")
        temp_file_path = temp_file.name

    # The original version only removed the temp file on the success path,
    # leaking it whenever slicing/prediction/annotation raised; the
    # try/finally guarantees cleanup.
    try:
        # Slice the image up front; the slices are only consulted when the
        # whole-image pass returns no detections.
        slice_image_result = slice_image(
            image=temp_file_path,
            output_file_name="sliced_image",
            output_dir="/tmp/sliced/",
            slice_height=256,
            slice_width=256,
            overlap_height_ratio=0.1,
            overlap_width_ratio=0.1,
        )

        # Guard against sahi API differences across versions — presumably
        # `sliced_image_paths` is the right attribute; verify against the
        # installed sahi release.
        try:
            sliced_image_paths = slice_image_result.sliced_image_paths
        except AttributeError:
            print("Failed to access sliced_image_paths attribute.")
            sliced_image_paths = []

        # First pass: whole-image prediction.
        print("Predicting on the whole image (without slicing)...")
        whole_image_predictions = model.predict(image_path=temp_file_path).json()

        if whole_image_predictions['predictions']:
            print("Using predictions from the whole image.")
            all_predictions = whole_image_predictions['predictions']
        else:
            # Fallback: predict on each slice and pool the results.
            print("No predictions found for the whole image. Predicting on slices...")
            all_predictions = []
            for sliced_image_path in sliced_image_paths:
                if isinstance(sliced_image_path, str):
                    predictions = model.predict(image_path=sliced_image_path).json()
                    all_predictions.extend(predictions['predictions'])
                else:
                    print(f"Skipping invalid image path: {sliced_image_path}")

        # Merge duplicate/overlapping detections across slices.
        postprocessed_predictions = apply_nms(all_predictions, iou_threshold=0.5)

        # Annotate with OpenCV (cv2.imread yields BGR).
        img = cv2.imread(temp_file_path)
        for prediction in postprocessed_predictions:
            class_name = prediction['class']
            x, y, w, h = map(int, prediction['bbox'])
            confidence = prediction['confidence']

            color = (0, 255, 0)  # green boxes
            thickness = 2
            cv2.rectangle(img, (x, y), (x + w, y + h), color, thickness)
            label = f"{class_name}: {confidence:.2f}"
            cv2.putText(img, label, (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, thickness)

        # Convert BGR -> RGB so PIL saves the colors correctly.
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        annotated_image = Image.fromarray(img_rgb)
        output_image_path = "/tmp/prediction.jpg"
        annotated_image.save(output_image_path)

        # Tally detections per class for the summary text.
        class_count = {}
        for detection in postprocessed_predictions:
            class_name = detection['class']
            class_count[class_name] = class_count.get(class_name, 0) + 1

        result_text = "Jumlah objek per kelas:\n"
        for class_name, count in class_count.items():
            result_text += f"{class_name}: {count} objek\n"
    finally:
        # Always remove the temporary upload, even on failure.
        os.remove(temp_file_path)

    return output_image_path, result_text
159
-
160
- # Gradio interface
161
# Gradio UI: the user uploads an image; the app returns the annotated image
# plus a per-class object-count summary. live=True re-runs detection on
# every input change.
demo_input = gr.Image(type="pil")
demo_outputs = [gr.Image(), gr.Textbox()]

iface = gr.Interface(
    fn=detect_objects,
    inputs=demo_input,
    outputs=demo_outputs,
    live=True,
)

# Start the web server.
iface.launch()