Update app.py
app.py CHANGED
@@ -35,7 +35,7 @@ def create_monitor_interface():
         def analyze_frame(self, frame: np.ndarray) -> str:
             if frame is None:
                 return "No frame received"
-
+
             # Convert and resize image
             if len(frame.shape) == 2:
                 frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
@@ -45,12 +45,12 @@ def create_monitor_interface():
             frame = self.resize_image(frame)
             frame_pil = PILImage.fromarray(frame)
 
-            #
+            # High quality image for better analysis
             buffered = io.BytesIO()
             frame_pil.save(buffered,
-
-
-
+                           format="JPEG",
+                           quality=95,
+                           optimize=True)
             img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
             image_url = f"data:image/jpeg;base64,{img_base64}"
 
@@ -58,14 +58,43 @@ def create_monitor_interface():
                 completion = self.client.chat.completions.create(
                     model=self.model_name,
                     messages=[
+                        {
+                            "role": "system",
+                            "content": """You are a comprehensive safety analysis system. Analyze images for ALL types of safety concerns including but not limited to:
+- Personal Protective Equipment (PPE)
+- Ergonomic issues
+- Fire and electrical hazards
+- Chemical and environmental hazards
+- Machine and equipment safety
+- Fall protection and working at heights
+- Material handling and storage
+- Emergency access and exits
+- Housekeeping and organization
+- Lighting and visibility
+- Ventilation and air quality
+- Tool safety and maintenance"""
+                        },
                         {
                             "role": "user",
                             "content": [
                                 {
                                     "type": "text",
-                                    "text": """Analyze this
-
-
+                                    "text": """Analyze this image for ANY safety concerns or hazards. For each issue identified, specify:
+
+1. The exact location in the image (be specific: top-left, center-right, bottom, etc.)
+2. The type of safety concern
+3. The potential risk or hazard
+4. Any relevant safety standards being violated
+
+Format each observation as:
+- <location>position:safety issue description</location>
+
+Example formats:
+- <location>top-right:Exposed electrical wiring creating shock hazard</location>
+- <location>bottom-left:Improperly stored chemicals without proper labeling</location>
+- <location>center:Missing machine guarding on rotating equipment</location>
+
+Be thorough and identify ALL safety issues, not just the obvious ones."""
                                 },
                                 {
                                     "type": "image_url",
@@ -74,17 +103,11 @@ def create_monitor_interface():
                                     }
                                 }
                             ]
-                        },
-                        {
-                            "role": "assistant",
-                            "content": ""
                         }
                     ],
-                    temperature=0.
-                    max_tokens=
-
-                    stream=False,
-                    stop=None
+                    temperature=0.7,  # Higher temperature for more comprehensive analysis
+                    max_tokens=500,
+                    stream=False
                 )
                 return completion.choices[0].message.content
             except Exception as e:
@@ -92,29 +115,77 @@ def create_monitor_interface():
                 return f"Analysis Error: {str(e)}"
 
         def draw_observations(self, image, observations):
+            """Draw accurate bounding boxes based on safety issue locations."""
             height, width = image.shape[:2]
             font = cv2.FONT_HERSHEY_SIMPLEX
             font_scale = 0.5
             thickness = 2
+            padding = 10
 
-
-
-
+            def get_region_coordinates(position: str) -> tuple:
+                """Get coordinates based on position description."""
+                # Basic regions
+                regions = {
+                    'top-left': (0, 0, width//3, height//3),
+                    'top': (width//3, 0, 2*width//3, height//3),
+                    'top-right': (2*width//3, 0, width, height//3),
+                    'center-left': (0, height//3, width//3, 2*height//3),
+                    'center': (width//3, height//3, 2*width//3, 2*height//3),
+                    'center-right': (2*width//3, height//3, width, 2*height//3),
+                    'bottom-left': (0, 2*height//3, width//3, height),
+                    'bottom': (width//3, 2*height//3, 2*width//3, height),
+                    'bottom-right': (2*width//3, 2*height//3, width, height),
+                    'left': (0, height//4, width//3, 3*height//4),
+                    'right': (2*width//3, height//4, width, 3*height//4)
+                }
+
+                # Find best matching region
+                best_match = 'center'
+                max_words = 0
+                pos_lower = position.lower()
 
-
-
-
-
-
+                for region in regions.keys():
+                    words = region.split('-')
+                    matches = sum(1 for word in words if word in pos_lower)
+                    if matches > max_words:
+                        max_words = matches
+                        best_match = region
 
-
-
+                return regions[best_match]
+
+            for idx, obs in enumerate(observations):
+                color = self.colors[idx % len(self.colors)]
 
-            #
-
-
-
-
+                # Parse location and description
+                parts = obs.split(':')
+                if len(parts) >= 2:
+                    position = parts[0]
+                    description = ':'.join(parts[1:])
+
+                    # Get region coordinates
+                    x1, y1, x2, y2 = get_region_coordinates(position)
+
+                    # Draw rectangle
+                    cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
+
+                    # Add label with background
+                    label = description[:50] + "..." if len(description) > 50 else description
+                    label_size, _ = cv2.getTextSize(label, font, font_scale, thickness)
+
+                    # Position text above the box
+                    text_x = max(0, x1)
+                    text_y = max(label_size[1] + padding, y1 - padding)
+
+                    # Draw text background
+                    cv2.rectangle(image,
+                                  (text_x, text_y - label_size[1] - padding),
+                                  (text_x + label_size[0] + padding, text_y),
+                                  color, -1)
+
+                    # Draw text
+                    cv2.putText(image, label,
+                                (text_x + padding//2, text_y - padding//2),
+                                font, font_scale, (255, 255, 255), thickness)
 
             return image
 
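The updated prompt asks the model to wrap each finding in <location> tags, while draw_observations consumes bare "position:description" strings. A minimal sketch of the glue between the two, assuming a regex-based extractor (the name parse_observations is hypothetical; the real extraction step lives elsewhere in app.py and is not part of this diff):

import re

def parse_observations(response_text: str) -> list:
    # Hypothetical helper: pull "position:description" payloads out of
    # the <location> tags the updated prompt asks the model to emit.
    return re.findall(r"<location>(.*?)</location>", response_text, re.DOTALL)

# Example, using the exact format the prompt specifies:
text = "- <location>top-right:Exposed electrical wiring creating shock hazard</location>"
print(parse_observations(text))
# -> ['top-right:Exposed electrical wiring creating shock hazard']

Each extracted string then splits on ':' exactly the way draw_observations expects.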
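One caveat on the new region matcher: the comparison "if matches > max_words" is strict, and compound keys such as 'top-left' are iterated first, so a bare position like "top" or "left" ties at one matched word and resolves to 'top-left' rather than the exact region. A sketch of a stricter variant, reusing the same regions dict (match_region is a hypothetical name, not code from this commit):

def match_region(position: str, regions: dict) -> str:
    # Hypothetical alternative to the matching loop in get_region_coordinates.
    pos = position.lower().strip()
    if pos in regions:  # exact names like "top" or "center-right" win outright
        return pos
    # Otherwise score regions by how many of their words appear, preferring
    # regions whose words all appear (e.g. "upper top left area" -> "top-left").
    def score(region):
        words = region.split('-')
        hits = sum(word in pos for word in words)
        return (hits == len(words), hits)
    best = max(regions, key=score)
    return best if score(best)[1] > 0 else 'center'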