Update visualization.py
Browse files- visualization.py +43 -57
visualization.py
CHANGED
@@ -216,60 +216,38 @@ def plot_posture(df, posture_scores, color='blue', anomaly_threshold=3):
|
|
216 |
plt.tight_layout()
|
217 |
plt.close()
|
218 |
return fig
|
219 |
-
|
220 |
-
|
221 |
-
|
222 |
-
|
223 |
-
|
224 |
-
|
225 |
-
|
226 |
-
|
227 |
-
|
228 |
-
|
229 |
-
|
230 |
-
|
231 |
-
|
232 |
-
|
233 |
-
|
234 |
-
|
235 |
-
|
236 |
-
|
237 |
-
|
238 |
-
|
239 |
-
|
240 |
-
|
241 |
-
|
242 |
-
|
243 |
-
|
244 |
-
|
245 |
-
|
246 |
-
|
247 |
-
|
248 |
-
|
249 |
-
|
250 |
-
|
251 |
-
combined_heatmap = np.vstack((face_heatmap, posture_heatmap, voice_heatmap))
|
252 |
-
|
253 |
-
# Add labels
|
254 |
-
label_height = 20
|
255 |
-
label_image = np.ones((label_height, combined_heatmap.shape[1], 3), dtype=np.uint8) * 255
|
256 |
-
cv2.putText(label_image, 'Face', (5, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
|
257 |
-
cv2.putText(label_image, 'Posture', (5, 35), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
|
258 |
-
cv2.putText(label_image, 'Voice', (5, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
|
259 |
-
|
260 |
-
combined_heatmap = np.vstack((label_image, combined_heatmap))
|
261 |
-
|
262 |
-
# Calculate position of vertical line
|
263 |
-
video_frame = int(t * video_fps)
|
264 |
-
total_analysis_frames = len(mse_embeddings)
|
265 |
-
line_pos = int((video_frame / (video_fps / analysis_fps)) * combined_heatmap.shape[1] / total_analysis_frames)
|
266 |
-
|
267 |
-
# Draw vertical line
|
268 |
-
cv2.line(combined_heatmap, (line_pos, 0), (line_pos, combined_heatmap.shape[0]), (0, 0, 0), 2)
|
269 |
-
|
270 |
-
return combined_heatmap
|
271 |
-
|
272 |
-
def create_video_with_heatmap(video_path, df, mse_embeddings, mse_posture, mse_voice, output_folder, analysis_fps):
|
273 |
print(f"Creating heatmap video. Output folder: {output_folder}")
|
274 |
|
275 |
os.makedirs(output_folder, exist_ok=True)
|
@@ -284,11 +262,20 @@ def create_video_with_heatmap(video_path, df, mse_embeddings, mse_posture, mse_v
|
|
284 |
|
285 |
# Get video properties
|
286 |
width, height = video.w, video.h
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
287 |
|
288 |
def combine_video_and_heatmap(t):
|
289 |
video_frame = video.get_frame(t)
|
290 |
-
heatmap_frame =
|
291 |
-
heatmap_frame_resized = cv2.resize(heatmap_frame, (width,
|
292 |
combined_frame = np.vstack((video_frame, heatmap_frame_resized))
|
293 |
return combined_frame
|
294 |
|
@@ -311,7 +298,6 @@ def create_video_with_heatmap(video_path, df, mse_embeddings, mse_posture, mse_v
|
|
311 |
return None
|
312 |
|
313 |
|
314 |
-
|
315 |
# Function to create the correlation heatmap
|
316 |
def plot_correlation_heatmap(mse_embeddings, mse_posture, mse_voice):
|
317 |
data = np.vstack((mse_embeddings, mse_posture, mse_voice)).T
|
|
|
216 |
plt.tight_layout()
|
217 |
plt.close()
|
218 |
return fig
|
219 |
+
|
220 |
+
def create_heatmap(t, mse_embeddings, mse_posture, mse_voice, video_fps, total_frames, video_width):
    """Render a 3-row (Face / Posture / Voice) MSE heatmap strip for time t.

    Each MSE series is min-max normalized to [0, 1] and drawn as one row of
    a 'Reds' image, with a vertical black cursor at the frame corresponding
    to t. Returns the rendered figure as an (H, W, 3) uint8 RGB array.

    Args:
        t: current video time in seconds.
        mse_embeddings, mse_posture, mse_voice: per-frame MSE series;
            assumed to already have length total_frames — the caller
            interpolates them to that length (TODO confirm for other callers).
        video_fps: frames per second of the source video.
        total_frames: number of frames spanned by the heatmap's x-axis.
        video_width: source video width in pixels; sets the figure width.

    Returns:
        numpy.ndarray: uint8 RGB image of the heatmap strip.
    """
    frame_count = int(t * video_fps)

    def _normalize(values):
        # Min-max scale to [0, 1]. A constant series has zero span; map it
        # to all-zeros instead of dividing by zero and producing NaNs.
        values = np.asarray(values, dtype=float)
        lo = np.min(values)
        span = np.max(values) - lo
        if span == 0:
            return np.zeros_like(values)
        return (values - lo) / span

    combined_mse = np.zeros((3, total_frames))
    combined_mse[0] = _normalize(mse_embeddings)
    combined_mse[1] = _normalize(mse_posture)
    combined_mse[2] = _normalize(mse_voice)

    # Width tracks the video so the strip can be resized under the frame;
    # 0.6 in tall keeps the strip thin relative to the video.
    fig, ax = plt.subplots(figsize=(video_width / 240, 0.6))
    ax.imshow(combined_mse, aspect='auto', cmap='Reds', vmin=0, vmax=1, extent=[0, total_frames, 0, 3])
    # Rows are drawn bottom-up, so labels run Voice -> Posture -> Face.
    ax.set_yticks([0.5, 1.5, 2.5])
    ax.set_yticklabels(['Voice', 'Posture', 'Face'], fontsize=7)
    ax.set_xticks([])

    # Playback cursor at the current frame.
    ax.axvline(x=frame_count, color='black', linewidth=3)

    plt.tight_layout(pad=0.5)

    # Rasterize via the Agg canvas and copy the RGB buffer into an array.
    # NOTE(review): tostring_rgb is deprecated in recent matplotlib
    # (buffer_rgba is the replacement) — confirm the pinned version.
    canvas = FigureCanvas(fig)
    canvas.draw()
    heatmap_img = np.frombuffer(canvas.tostring_rgb(), dtype='uint8')
    heatmap_img = heatmap_img.reshape(canvas.get_width_height()[::-1] + (3,))
    plt.close(fig)  # release the figure to avoid leaking memory per frame
    return heatmap_img
|
249 |
+
|
250 |
+
def create_video_with_heatmap(video_path, df, mse_embeddings, mse_posture, mse_voice, output_folder, desired_fps, largest_cluster):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
251 |
print(f"Creating heatmap video. Output folder: {output_folder}")
|
252 |
|
253 |
os.makedirs(output_folder, exist_ok=True)
|
|
|
262 |
|
263 |
# Get video properties
|
264 |
width, height = video.w, video.h
|
265 |
+
total_frames = int(video.duration * video.fps)
|
266 |
+
|
267 |
+
# Ensure all MSE arrays have the same length as total_frames
|
268 |
+
mse_embeddings = np.interp(np.linspace(0, len(mse_embeddings) - 1, total_frames),
|
269 |
+
np.arange(len(mse_embeddings)), mse_embeddings)
|
270 |
+
mse_posture = np.interp(np.linspace(0, len(mse_posture) - 1, total_frames),
|
271 |
+
np.arange(len(mse_posture)), mse_posture)
|
272 |
+
mse_voice = np.interp(np.linspace(0, len(mse_voice) - 1, total_frames),
|
273 |
+
np.arange(len(mse_voice)), mse_voice)
|
274 |
|
275 |
def combine_video_and_heatmap(t):
|
276 |
video_frame = video.get_frame(t)
|
277 |
+
heatmap_frame = create_heatmap(t, mse_embeddings, mse_posture, mse_voice, video.fps, total_frames, width)
|
278 |
+
heatmap_frame_resized = cv2.resize(heatmap_frame, (width, heatmap_frame.shape[0]))
|
279 |
combined_frame = np.vstack((video_frame, heatmap_frame_resized))
|
280 |
return combined_frame
|
281 |
|
|
|
298 |
return None
|
299 |
|
300 |
|
|
|
301 |
# Function to create the correlation heatmap
|
302 |
def plot_correlation_heatmap(mse_embeddings, mse_posture, mse_voice):
|
303 |
data = np.vstack((mse_embeddings, mse_posture, mse_voice)).T
|