muhammadsalmanalfaridzi commited on
Commit
0fc2ac3
·
verified ·
1 Parent(s): 64c6f6f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -38
app.py CHANGED
@@ -1,55 +1,115 @@
1
  import gradio as gr
2
- import supervision as sv
3
  import numpy as np
4
  import cv2
5
- from inference import get_roboflow_model
6
-
7
- # Define the Roboflow model
8
- model = get_roboflow_model(model_id="nescafe-4base/46", api_key="<REDACTED-ROBOFLOW-API-KEY>")
 
 
9
 
10
- def callback(image_slice: np.ndarray) -> sv.Detections:
11
- # Perform inference on the image slice
12
- results = model.infer(image_slice)[0]
13
- return sv.Detections.from_inference(results) # Wrap inference results in the proper supervision format
 
 
14
 
15
- # Define the slicer
16
- slicer = sv.InferenceSlicer(callback=callback)
 
 
17
 
18
  def detect_objects(image):
19
- image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR) # Convert from RGB (Gradio) to BGR (OpenCV)
 
 
 
20
 
21
- # Run inference
22
- sliced_detections = slicer(image=image)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
- # Annotating the image with boxes and labels
25
- label_annotator = sv.LabelAnnotator()
26
- box_annotator = sv.BoxAnnotator()
 
 
 
 
 
27
 
28
- annotated_image = box_annotator.annotate(scene=image.copy(), detections=sliced_detections)
29
- annotated_image = label_annotator.annotate(scene=annotated_image, detections=sliced_detections)
 
30
 
31
- # Count detected objects per class
32
- class_counts = {}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
- # Loop through the detections, which should now be in the correct format
35
- for detection in sliced_detections:
36
- class_name = detection.class_name # Now `detection` should be a detection object with class_name
37
- class_counts[class_name] = class_counts.get(class_name, 0) + 1
38
 
39
- # Total objects detected
40
- total_count = sum(class_counts.values())
 
 
 
 
 
 
 
 
 
41
 
42
- # Display results: annotated image and object counts
43
- result_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB) # Convert back to RGB for Gradio
44
- return result_image, class_counts, total_count
45
-
46
- # Create a Gradio interface
47
- iface = gr.Interface(
48
- fn=detect_objects,
49
- inputs=gr.Image(type="pil"),
50
- outputs=[gr.Image(type="pil"), gr.JSON(), gr.Number(label="Total Objects Detected")],
51
- live=True
52
- )
53
 
54
  # Launch the Gradio interface
55
  iface.launch()
 
1
  import gradio as gr
 
2
  import numpy as np
3
  import cv2
4
+ import supervision as sv
5
+ from roboflow import Roboflow
6
+ import tempfile
7
+ import os
8
+ import requests
9
+ from dotenv import load_dotenv
10
 
11
# Load Roboflow configuration from the environment (a local .env file is
# supported via python-dotenv). Keeping the key out of source fixes the
# credential leak present in the previous revision.
load_dotenv()
api_key = os.getenv("ROBOFLOW_API_KEY")
workspace = os.getenv("ROBOFLOW_WORKSPACE")
project_name = os.getenv("ROBOFLOW_PROJECT")

# Fail fast with an actionable message: int(None) would otherwise raise an
# opaque TypeError when ROBOFLOW_MODEL_VERSION is unset.
_model_version_raw = os.getenv("ROBOFLOW_MODEL_VERSION")
if _model_version_raw is None:
    raise RuntimeError("ROBOFLOW_MODEL_VERSION environment variable is not set")
model_version = int(_model_version_raw)

# Initialize the Roboflow client and resolve the hosted model endpoint once
# at import time; `model` is reused by every request.
rf = Roboflow(api_key=api_key)
project = rf.workspace(workspace).project(project_name)
model = project.version(model_version).model
22
 
23
def detect_objects(image):
    """Run Roboflow inference on an uploaded image and annotate detections.

    Args:
        image: PIL.Image supplied by the Gradio input component.

    Returns:
        tuple[str, str]: path to the annotated (or fallback original) JPEG,
        and a human-readable per-class count summary (or an error message).
    """
    # Persist the upload to disk so the Roboflow SDK can read it by path.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
        image.save(temp_file, format="JPEG")
        temp_file_path = temp_file.name

    # Unique output path per request — a fixed "/tmp/prediction.jpg" would
    # race between concurrent Gradio sessions.
    output_fd, output_image_path = tempfile.mkstemp(suffix=".jpg")
    os.close(output_fd)

    try:
        # Hosted inference; confidence/overlap are percentages for the API.
        predictions = model.predict(temp_file_path, confidence=60, overlap=80).json()
        preds = predictions.get("predictions", [])

        # Roboflow returns center-x/center-y/width/height boxes (there is no
        # "bbox" key); convert to the x1/y1/x2/y2 corners supervision expects.
        xyxy = []
        confidences = []
        class_names = []
        for prediction in preds:
            cx, cy = prediction["x"], prediction["y"]
            w, h = prediction["width"], prediction["height"]
            xyxy.append([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2])
            confidences.append(prediction["confidence"])
            class_names.append(prediction["class"])

        image_cv = cv2.imread(temp_file_path)

        if preds:
            # sv.Detections is built from numpy arrays — there is no
            # per-detection `sv.Detection` class in the supervision library.
            unique_names = sorted(set(class_names))
            id_of = {name: idx for idx, name in enumerate(unique_names)}
            detections = sv.Detections(
                xyxy=np.array(xyxy, dtype=np.float32),
                confidence=np.array(confidences, dtype=np.float32),
                class_id=np.array([id_of[name] for name in class_names]),
            )
            box_annotator = sv.BoxAnnotator()
            label_annotator = sv.LabelAnnotator()
            annotated_image = box_annotator.annotate(
                scene=image_cv.copy(), detections=detections
            )
            annotated_image = label_annotator.annotate(
                scene=annotated_image, detections=detections, labels=class_names
            )
        else:
            # Nothing detected: return the original image unannotated.
            annotated_image = image_cv

        # Per-class tally (iterating Detections yields tuples, not objects
        # with a .class_name attribute, so count from the raw names instead).
        class_count = {}
        for class_name in class_names:
            class_count[class_name] = class_count.get(class_name, 0) + 1
        total_count = len(class_names)

        result_text = "Detected Objects:\n\n"
        for class_name, count in class_count.items():
            result_text += f"{class_name}: {count}\n"
        result_text += f"\nTotal objects detected: {total_count}"

        cv2.imwrite(output_image_path, annotated_image)

    except requests.exceptions.HTTPError as http_err:
        result_text = f"HTTP error occurred: {http_err}"
        # Fall back to the original image; write it to the surviving output
        # path instead of returning the temp path that gets deleted below.
        image.save(output_image_path, format="JPEG")
    except Exception as err:
        result_text = f"An error occurred: {err}"
        image.save(output_image_path, format="JPEG")
    finally:
        # Always clean up the upload copy; output_image_path remains for Gradio.
        os.remove(temp_file_path)

    return output_image_path, result_text
 
 
 
95
 
96
# Build the Gradio UI: one row holding the input image, the annotated
# result image, and the textual per-class counts, plus a trigger button.
with gr.Blocks() as iface:
    with gr.Row():
        with gr.Column():
            image_in = gr.Image(type="pil", label="Input Image")
        with gr.Column():
            image_out = gr.Image(label="Detected Image")
        with gr.Column():
            text_out = gr.Textbox(label="Object Count Results")

    run_button = gr.Button("Detect")

    # Wire the button to the detector: PIL image in, (image path, text) out.
    run_button.click(fn=detect_objects, inputs=image_in, outputs=[image_out, text_out])

# Launch the Gradio interface
iface.launch()