Delete app.py
app.py
DELETED
@@ -1,282 +0,0 @@
import gradio as gr
from dotenv import load_dotenv
from roboflow import Roboflow
import tempfile
import os
import requests
import cv2
import numpy as np
import subprocess

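# Runtime dependencies implied by these imports: gradio, python-dotenv, roboflow,
# requests, opencv-python, numpy, and Pillow (imported further below).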
# ========== Configuration ==========
load_dotenv()

# Roboflow Config
rf_api_key = os.getenv("ROBOFLOW_API_KEY")
workspace = os.getenv("ROBOFLOW_WORKSPACE")
project_name = os.getenv("ROBOFLOW_PROJECT")
model_version = int(os.getenv("ROBOFLOW_MODEL_VERSION"))

# OWLv2 Config
OWLV2_API_KEY = os.getenv("COUNTGD_API_KEY")
OWLV2_PROMPTS = ["bottle", "tetra pak", "cans", "carton drink"]

# Initialize the Roboflow YOLO model
rf = Roboflow(api_key=rf_api_key)
project = rf.workspace(workspace).project(project_name)
yolo_model = project.version(model_version).model

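# The os.getenv calls above assume a .env file (or Space secrets) along these lines;
# the values shown are placeholders, not real keys:
#   ROBOFLOW_API_KEY=<your-roboflow-api-key>
#   ROBOFLOW_WORKSPACE=<workspace-slug>
#   ROBOFLOW_PROJECT=<project-slug>
#   ROBOFLOW_MODEL_VERSION=1
#   COUNTGD_API_KEY=<landing-ai-key>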
# ========== Combined Detection Function ==========
from PIL import Image

# Detection with the input resized to 640x640
def detect_combined(image):
    # Save the input image to a temporary file
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
        image.save(temp_file, format="JPEG")
        temp_path = temp_file.name

    try:
        # Keep the original dimensions for scaling the boxes back later
        original_width, original_height = image.size

        # Resize the input image to 640x640
        img = Image.open(temp_path)
        img = img.resize((640, 640), Image.Resampling.LANCZOS)  # ANTIALIAS is deprecated; use LANCZOS
        img.save(temp_path, format="JPEG")

        # ========== [1] YOLO: Nestlé product detection (per class) ==========
        yolo_pred = yolo_model.predict(temp_path, confidence=50, overlap=80).json()

        # Count each Nestlé class and keep its bounding boxes (format: (x_center, y_center, width, height))
        nestle_class_count = {}
        nestle_boxes = []
        for pred in yolo_pred['predictions']:
            class_name = pred['class']
            nestle_class_count[class_name] = nestle_class_count.get(class_name, 0) + 1
            nestle_boxes.append((pred['x'], pred['y'], pred['width'], pred['height']))

        total_nestle = sum(nestle_class_count.values())

        # ========== [2] OWLv2: competitor detection ==========
        headers = {
            "Authorization": "Basic " + OWLV2_API_KEY,
        }
        data = {
            "prompts": OWLV2_PROMPTS,
            "model": "owlv2",
            "confidence": 0.25
        }
        with open(temp_path, "rb") as f:
            files = {"image": f}
            response = requests.post("https://api.landing.ai/v1/tools/text-to-object-detection", files=files, data=data, headers=headers)
        result = response.json()
        owlv2_objects = result['data'][0] if 'data' in result else []

        competitor_class_count = {}
        competitor_boxes = []
        for obj in owlv2_objects:
            if 'bounding_box' in obj:
                bbox = obj['bounding_box']  # Format: [x1, y1, x2, y2]
                # Keep only detections that do not overlap a Nestlé box
                if not is_overlap(bbox, nestle_boxes):
                    class_name = obj.get('label', 'unknown').strip().lower()
                    competitor_class_count[class_name] = competitor_class_count.get(class_name, 0) + 1
                    competitor_boxes.append({
                        "class": class_name,
                        "box": bbox,
                        "confidence": obj.get("score", 0)
                    })

        total_competitor = sum(competitor_class_count.values())

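        # The OWLv2 parsing above assumes a response shaped roughly like
        # {"data": [[{"label": "...", "score": 0.0, "bounding_box": [x1, y1, x2, y2]}, ...]]}
        # (inferred from how the fields are read; the authoritative schema is the Landing AI API's).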
        # ========== [3] Format Output ==========
        result_text = "Product Nestle\n\n"
        for class_name, count in nestle_class_count.items():
            result_text += f"{class_name}: {count}\n"
        result_text += f"\nTotal Products Nestle: {total_nestle}\n\n"
        if competitor_class_count:
            result_text += f"Total Unclassified Products: {total_competitor}\n"
        else:
            result_text += "No Unclassified Products detected\n"

        # ========== [4] Visualization ==========
        img = cv2.imread(temp_path)

        # Scale factors to map 640x640 coordinates back to the original image size
        # (computed once, outside the loops, so they exist even when YOLO returns no boxes)
        scale_x = original_width / 640
        scale_y = original_height / 640

        # Draw bounding boxes for Nestlé products (green)
        for pred in yolo_pred['predictions']:
            x, y, w, h = pred['x'], pred['y'], pred['width'], pred['height']
            x1 = int(x - w/2)
            y1 = int(y - h/2)
            x2 = int(x + w/2)
            y2 = int(y + h/2)

            # Scale bounding box to original size
            x1_original = int(x1 * scale_x)
            y1_original = int(y1 * scale_y)
            x2_original = int(x2 * scale_x)
            y2_original = int(y2 * scale_y)

            cv2.rectangle(img, (x1_original, y1_original), (x2_original, y2_original), (0, 255, 0), 2)
            cv2.putText(img, pred['class'], (x1_original, y1_original - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        # Draw bounding boxes for competitors (red)
        for comp in competitor_boxes:
            x1, y1, x2, y2 = comp['box']
            # Scale bounding box to original size
            x1_original = int(x1 * scale_x)
            y1_original = int(y1 * scale_y)
            x2_original = int(x2 * scale_x)
            y2_original = int(y2 * scale_y)

            cv2.rectangle(img, (x1_original, y1_original), (x2_original, y2_original), (0, 0, 255), 2)
            cv2.putText(img, f"{comp['class']} {comp['confidence']:.2f}", (x1_original, y1_original - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

        output_path = "/tmp/combined_output.jpg"
        cv2.imwrite(output_path, img)

        return output_path, result_text

    except Exception as e:
        return temp_path, f"Error: {str(e)}"
    finally:
        os.remove(temp_path)

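# Caveat: detect_combined's except branch returns temp_path, but the finally block removes
# that file before the caller receives it, so the error-path image may fail to load in Gradio.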
def is_overlap(box1, boxes2, threshold=0.3):
    """
    Check whether a bounding box overlaps any box in a list.
    Parameters:
    - box1: first bounding box in (x1, y1, x2, y2) format
    - boxes2: list of other bounding boxes in (x_center, y_center, width, height) format
    """
    x1_min, y1_min, x1_max, y1_max = box1
    for b2 in boxes2:
        x2, y2, w2, h2 = b2
        x2_min = x2 - w2/2
        x2_max = x2 + w2/2
        y2_min = y2 - h2/2
        y2_max = y2 + h2/2

        dx = min(x1_max, x2_max) - max(x1_min, x2_min)
        dy = min(y1_max, y2_max) - max(y1_min, y2_min)
        if (dx >= 0) and (dy >= 0):
            area_overlap = dx * dy
            area_box1 = (x1_max - x1_min) * (y1_max - y1_min)
            if area_overlap / area_box1 > threshold:
                return True
    return False

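# Note: is_overlap measures the intersection relative to box1's own area (the OWLv2 box),
# not IoU, so a competitor box lying mostly inside a larger Nestlé box is always suppressed.
# A symmetric IoU check, if that behaviour were preferred, could look like this sketch
# (illustrative only; not called anywhere in this app):
def iou_xyxy(box_a, box_b):
    # Both boxes in (x1, y1, x2, y2) format.
    ax1, ay1, ax2, ay2 = box_a
    bx1, by1, bx2, by2 = box_b
    inter_w = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    inter_h = max(0.0, min(ay2, by2) - max(ay1, by1))
    inter = inter_w * inter_h
    union = (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - inter
    return inter / union if union > 0 else 0.0
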
# ========== Video Detection Functions ==========
def convert_video_to_mp4(input_path, output_path):
    try:
        subprocess.run(['ffmpeg', '-i', input_path, '-vcodec', 'libx264', '-acodec', 'aac', output_path], check=True)
        return output_path, None  # return a (path, error) pair so the caller can unpack consistently
    except subprocess.CalledProcessError as e:
        return None, f"Error converting video: {e}"

def detect_objects_in_video(video_path):
    temp_output_path = "/tmp/output_video.mp4"
    temp_frames_dir = tempfile.mkdtemp()
    all_class_count = {}  # Cumulative object counts across all frames
    nestle_total = 0
    frame_count = 0

    try:
        # Convert the video to MP4 if needed
        if not video_path.endswith(".mp4"):
            video_path, err = convert_video_to_mp4(video_path, temp_output_path)
            if not video_path:
                return None, f"Video conversion error: {err}"

        # Read and process the video frames
        video = cv2.VideoCapture(video_path)
        frame_rate = int(video.get(cv2.CAP_PROP_FPS))
        frame_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_size = (frame_width, frame_height)

        # VideoWriter for the annotated output video
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        output_video = cv2.VideoWriter(temp_output_path, fourcc, frame_rate, frame_size)

        while True:
            ret, frame = video.read()
            if not ret:
                break

            # Save the frame for prediction
            frame_path = os.path.join(temp_frames_dir, f"frame_{frame_count}.jpg")
            cv2.imwrite(frame_path, frame)

            # Run prediction on this frame
            predictions = yolo_model.predict(frame_path, confidence=60, overlap=80).json()

            # Update the object counts for this frame
            frame_class_count = {}
            for prediction in predictions['predictions']:
                class_name = prediction['class']
                frame_class_count[class_name] = frame_class_count.get(class_name, 0) + 1
                cv2.rectangle(frame, (int(prediction['x'] - prediction['width']/2),
                                      int(prediction['y'] - prediction['height']/2)),
                              (int(prediction['x'] + prediction['width']/2),
                               int(prediction['y'] + prediction['height']/2)),
                              (0, 255, 0), 2)
                cv2.putText(frame, class_name, (int(prediction['x'] - prediction['width']/2),
                                                int(prediction['y'] - prediction['height']/2 - 10)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            # Update the cumulative counts
            for class_name, count in frame_class_count.items():
                all_class_count[class_name] = all_class_count.get(class_name, 0) + count

            nestle_total = sum(all_class_count.values())

            # Overlay the count text on the frame
            count_text = "Cumulative Object Counts\n"
            for class_name, count in all_class_count.items():
                count_text += f"{class_name}: {count}\n"
            count_text += f"\nTotal Product Nestlé: {nestle_total}"

            y_offset = 20
            for line in count_text.split("\n"):
                cv2.putText(frame, line, (10, y_offset), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
                y_offset += 30

            output_video.write(frame)
            frame_count += 1

        video.release()
        output_video.release()

        return temp_output_path

    except Exception as e:
        return None, f"An error occurred: {e}"

# ========== Gradio Interface ==========
with gr.Blocks(theme=gr.themes.Base(primary_hue="teal", secondary_hue="teal", neutral_hue="slate")) as iface:
    gr.Markdown("""<div style="text-align: center;"><h1>NESTLE - STOCK COUNTING</h1></div>""")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil", label="Input Image")
        with gr.Column():
            output_image = gr.Image(label="Detected Objects")
        with gr.Column():
            output_text = gr.Textbox(label="Object Counts")

    # Button that triggers processing of the input
    detect_button = gr.Button("Detect")

    # Wire the button to the detection function
    detect_button.click(
        fn=detect_combined,
        inputs=input_image,
        outputs=[output_image, output_text]
    )

iface.launch()
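# Note: detect_objects_in_video is defined above but not wired into the interface.
# Hooking it up would need a video input and another button inside the gr.Blocks context,
# roughly like this (sketch only; names are illustrative):
#   input_video = gr.Video(label="Input Video")
#   output_video = gr.Video(label="Detected Video")
#   video_button = gr.Button("Detect Video")
#   video_button.click(fn=detect_objects_in_video, inputs=input_video, outputs=output_video)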