muhammadsalmanalfaridzi committed on
Commit
a1e8b35
·
verified ·
1 Parent(s): 5f4e276

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +54 -90
app.py CHANGED
@@ -1,108 +1,72 @@
1
  import gradio as gr
 
 
 
 
2
  from dotenv import load_dotenv
3
- from roboflow import Roboflow
4
- import tempfile
5
  import os
6
- import requests
7
- from PIL import Image
8
 
9
# Load environment variables from the local .env file (Roboflow credentials/config).
load_dotenv()
api_key = os.getenv("ROBOFLOW_API_KEY")
workspace = os.getenv("ROBOFLOW_WORKSPACE")
project_name = os.getenv("ROBOFLOW_PROJECT")
# NOTE(review): int(None) raises TypeError if ROBOFLOW_MODEL_VERSION is unset —
# confirm the env var is always provided in deployment.
model_version = int(os.getenv("ROBOFLOW_MODEL_VERSION"))

# Initialize the Roboflow client and resolve the versioned detection model
# used by detect_objects() below.
rf = Roboflow(api_key=api_key)
project = rf.workspace(workspace).project(project_name)
model = project.version(model_version).model
20
-
21
def slice_image(image, slice_size=512, overlap=0):
    """Return (left, top, right, bottom) crop boxes tiling *image*.

    Boxes are laid out on a grid with stride ``slice_size - overlap``;
    tiles touching the right/bottom edges are clipped to the image bounds.
    Only ``image.size`` is read, so any object exposing a PIL-style
    ``size`` attribute works.
    """
    width, height = image.size
    step = slice_size - overlap
    # Row-major order: scan top-to-bottom, then left-to-right within a row.
    return [
        (left, top, min(left + slice_size, width), min(top + slice_size, height))
        for top in range(0, height, step)
        for left in range(0, width, step)
    ]
35
-
36
# Handle image input/output: run the Roboflow model over image tiles and
# aggregate per-class detection counts.
def detect_objects(image):
    """Run sliced detection over *image* and return (image, summary_text).

    On any prediction error a (message, None) tuple is returned instead.
    NOTE(review): the returned image is the original, un-annotated input;
    the accumulated `results` list is never used beyond counting.
    """
    slice_size = 512
    overlap = 50

    # Cut the image into small overlapping tiles (see slice_image).
    slices = slice_image(image, slice_size, overlap)
    results = []
    class_count = {}   # per-class detection counts
    total_count = 0

    # `i` is unused; kept to preserve the original loop shape.
    for i, (left, top, right, bottom) in enumerate(slices):
        sliced_image = image.crop((left, top, right, bottom))

        # Persist the tile to a temp file because model.predict takes a path.
        # delete=False so the file survives the `with`; removed in `finally`.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
            sliced_image.save(temp_file, format="JPEG")
            temp_file_path = temp_file.name

        try:
            # Predict on this tile (confidence/overlap thresholds in percent).
            predictions = model.predict(temp_file_path, confidence=60, overlap=80).json()

            for prediction in predictions['predictions']:
                # Shift tile-local coordinates back into full-image space.
                # NOTE(review): Roboflow's hosted API JSON normally uses
                # x/y/width/height keys, not left/top/right/bottom — if so,
                # `+=` on a missing key raises KeyError here; confirm the
                # actual prediction schema. Also "right"/"bottom" are offset
                # by left/top respectively, which looks inconsistent.
                prediction["left"] += left
                prediction["top"] += top
                prediction["right"] += left
                prediction["bottom"] += top

                results.append(prediction)

                # Update the per-class object count.
                class_name = prediction['class']
                class_count[class_name] = class_count.get(class_name, 0) + 1
                total_count += 1
        except requests.exceptions.HTTPError as http_err:
            return f"HTTP error occurred: {http_err}", None
        except Exception as err:
            return f"An error occurred: {err}", None
        finally:
            # Always clean up the temp tile, even on early return.
            os.remove(temp_file_path)

    # Combine the detection results into a human-readable summary.
    result_text = "Product Nestle\n\n"
    for class_name, count in class_count.items():
        result_text += f"{class_name}: {count}\n"
    result_text += f"\nTotal Product Nestle: {total_count}"

    # Return the (un-annotated) image and the summary text.
    return image, result_text
86
 
87
# Build the Gradio interface with a flexible layout.
# NOTE: statement order inside gr.Blocks determines the rendered layout —
# do not reorder the component constructors.
with gr.Blocks() as iface:
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil", label="Input Image")
        with gr.Column():
            output_image = gr.Image(label="Detect Object")
        with gr.Column():
            output_text = gr.Textbox(label="Counting Object")

    # Button that triggers processing of the input.
    detect_button = gr.Button("Detect")

    # Wire the button to the detection function.
    detect_button.click(
        fn=detect_objects,
        inputs=input_image,
        outputs=[output_image, output_text]
    )

# Run the interface.
iface.launch()
 
1
  import gradio as gr
2
+ import supervision as sv
3
+ import numpy as np
4
+ import cv2
5
+ from inference import get_roboflow_model
6
  from dotenv import load_dotenv
 
 
7
  import os
 
 
8
 
9
# Load environment variables from .env file (Roboflow credentials/config).
load_dotenv()
api_key = os.getenv("ROBOFLOW_API_KEY")
model_id = os.getenv("ROBOFLOW_MODEL_ID")  # Example: "people-detection-general"
model_version = os.getenv("ROBOFLOW_MODEL_VERSION")  # Example: "5"

# Initialize the Roboflow model; the inference SDK expects "model_id/version".
# NOTE(review): if either env var is unset the id becomes "None/None" and the
# failure surfaces only at model-load time — confirm both are always provided.
model = get_roboflow_model(model_id=f"{model_id}/{model_version}", api_key=api_key)
17
+
18
# Callback invoked by the SAHI slicer for every image tile.
def callback(image_slice: np.ndarray) -> sv.Detections:
    """Run the module-level Roboflow model on one tile and wrap the result."""
    inference_result = model.infer(image_slice)[0]
    detections = sv.Detections.from_inference(inference_result)
    return detections
22
+
23
# Object detection function
def detect_objects_with_sahi(image):
    """Detect objects in *image* using sliced (SAHI-style) inference.

    Args:
        image: PIL image delivered by the Gradio input component.

    Returns:
        Tuple of (annotated numpy image, per-class count summary string).
    """
    # Gradio hands us a PIL image; supervision/inference work on numpy arrays.
    image_np = np.array(image)

    # Run inference tile-by-tile and merge the detections.
    slicer = sv.InferenceSlicer(callback=callback)
    sliced_detections = slicer(image=image_np)

    # Annotate a copy so the caller's array is left untouched.
    label_annotator = sv.LabelAnnotator()
    box_annotator = sv.BoxAnnotator()
    annotated_image = box_annotator.annotate(scene=image_np.copy(), detections=sliced_detections)
    annotated_image = label_annotator.annotate(scene=annotated_image, detections=sliced_detections)

    # Count objects by class.
    # BUG FIX: iterating sv.Detections yields plain tuples
    # (xyxy, mask, confidence, class_id, tracker_id, data), so the previous
    # `detection.class_id` attribute access raised AttributeError. Read the
    # per-detection arrays directly instead. Detections.from_inference stores
    # human-readable names under data["class_name"]; fall back to the numeric
    # class ids when that key is absent.
    labels = sliced_detections.data.get("class_name", sliced_detections.class_id)
    class_counts = {}
    for label in labels:
        key = str(label)
        class_counts[key] = class_counts.get(key, 0) + 1

    # Create summary text.
    total_objects = sum(class_counts.values())
    result_text = "Detected Objects:\n"
    for class_name, count in class_counts.items():
        result_text += f"{class_name}: {count}\n"
    result_text += f"\nTotal Objects: {total_objects}"

    # Return the annotated image and summary text.
    return annotated_image, result_text
53
 
54
+ # Create Gradio interface
55
+ with gr.Blocks() as app:
56
  with gr.Row():
57
  with gr.Column():
58
+ input_image = gr.Image(type="pil", label="Upload Image")
59
+ detect_button = gr.Button("Detect Objects")
60
  with gr.Column():
61
+ output_image = gr.Image(label="Annotated Image")
62
+ output_text = gr.Textbox(label="Object Count Summary", lines=10)
 
 
 
 
63
 
64
+ # Link button to detection function
65
  detect_button.click(
66
+ fn=detect_objects_with_sahi,
67
  inputs=input_image,
68
  outputs=[output_image, output_text]
69
  )
70
 
71
+ # Launch Gradio app
72
+ app.launch()