Update visualization.py
visualization.py  (+41 -42)
@@ -219,26 +219,35 @@ def plot_posture(df, posture_scores, color='blue', anomaly_threshold=3):
 
 
 
+def fill_with_zeros(mse_array, total_frames):
+    result = np.zeros(total_frames)
+    indices = np.linspace(0, total_frames - 1, len(mse_array)).astype(int)
+    result[indices] = mse_array
+    return result
+
+def create_heatmap(t, mse_embeddings, mse_posture, mse_voice, desired_fps, total_frames, video_width):
+    frame_count = int(t * desired_fps)
+    window_size = min(300, total_frames)
+    start_frame = max(0, frame_count - window_size // 2)
+    end_frame = min(total_frames, start_frame + window_size)
+
+    combined_mse = np.array([
+        mse_embeddings[start_frame:end_frame],
+        mse_posture[start_frame:end_frame],
+        mse_voice[start_frame:end_frame]
+    ])
 
     fig, ax = plt.subplots(figsize=(video_width / 300, 0.4))
-    ax.imshow(combined_mse, aspect='auto', cmap='Reds',
+    im = ax.imshow(combined_mse, aspect='auto', cmap='Reds',
+                   extent=[start_frame/desired_fps, end_frame/desired_fps, 0, 3],
+                   vmin=0, vmax=max(np.max(mse_embeddings), np.max(mse_posture), np.max(mse_voice)))
     ax.set_yticks([0.5, 1.5, 2.5])
-    ax.set_yticklabels(['Voice', 'Posture', 'Face'], fontsize=7)
+    ax.set_yticklabels(['Voice', 'Posture', 'Face'], fontsize=7)
+
+    ax.axvline(x=t, color='black', linewidth=2)
 
+    ax.set_xticks([start_frame/desired_fps, t, end_frame/desired_fps])
+    ax.set_xticklabels([f'{start_frame/desired_fps:.2f}', f'{t:.2f}', f'{end_frame/desired_fps:.2f}'], fontsize=6)
 
     plt.tight_layout(pad=0.5)
 
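For context on the hunk above: the new fill_with_zeros places a shorter MSE series onto the full frame timeline and leaves zeros between the sampled frames, whereas the removed fill_with_previous_values forward-filled those gaps; create_heatmap then plots a window of at most 300 frames centred on the current time t. A minimal sketch of the resampling behaviour (the array values and sizes here are made up for illustration):

import numpy as np

def fill_with_zeros(mse_array, total_frames):
    # Same logic as the helper added above: drop each MSE value onto an evenly
    # spaced frame index and leave every other frame at zero.
    result = np.zeros(total_frames)
    indices = np.linspace(0, total_frames - 1, len(mse_array)).astype(int)
    result[indices] = mse_array
    return result

# Four MSE values stretched across ten frames land at indices 0, 3, 6 and 9.
print(fill_with_zeros(np.array([0.2, 0.5, 0.1, 0.9]), 10))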
@@ -261,35 +270,23 @@ def create_video_with_heatmap(video_path, df, mse_embeddings, mse_posture, mse_v
 
     # Get video properties
     width, height = video.w, video.h
-    total_frames = int(video.duration *
-        for i in range(1, total_frames):
-            if result[i] == 0:
-                result[i] = result[i-1]
-        return result
-
-    # Fill gaps with previous values
-    mse_embeddings = fill_with_previous_values(mse_embeddings, total_frames)
-    mse_posture = fill_with_previous_values(mse_posture, total_frames)
-    mse_voice = fill_with_previous_values(mse_voice, total_frames)
+    total_frames = int(video.duration * desired_fps)
+
+    # Fill gaps with zeros
+    mse_embeddings = fill_with_zeros(mse_embeddings, total_frames)
+    mse_posture = fill_with_zeros(mse_posture, total_frames)
+    mse_voice = fill_with_zeros(mse_voice, total_frames)
 
     def combine_video_and_heatmap(t):
-        heatmap_frame_resized = cv2.resize(heatmap_frame, (width, heatmap_frame.shape[0]))
+        original_frame = int(t * video.fps)
+        video_frame = video.get_frame(original_frame / video.fps)
 
-        heatmap_frame_resized = cv2.cvtColor(heatmap_frame_resized, cv2.COLOR_RGBA2RGB)
+        heatmap_frame = create_heatmap(t, mse_embeddings, mse_posture, mse_voice, desired_fps, total_frames, width)
+        heatmap_frame_resized = cv2.resize(heatmap_frame, (width, heatmap_frame.shape[0]))
 
-        # Ensure both frames have the same number of channels
         if video_frame.shape[2] != heatmap_frame_resized.shape[2]:
             if video_frame.shape[2] == 3:
-                heatmap_frame_resized = heatmap_frame_resized[:, :, :3]
+                heatmap_frame_resized = heatmap_frame_resized[:, :, :3]
             else:
                 video_frame = cv2.cvtColor(video_frame, cv2.COLOR_RGB2RGBA)
 
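Neither hunk shows how create_heatmap turns the Matplotlib figure into the array that combine_video_and_heatmap hands to cv2.resize; that code sits outside the diff context. One common way to do it, shown here only as a sketch and not necessarily what visualization.py does, is to render the Agg canvas into an RGBA buffer:

import numpy as np
import matplotlib
matplotlib.use('Agg')  # off-screen rendering, no display required
import matplotlib.pyplot as plt

def figure_to_array(fig):
    # Hypothetical helper: draw the figure and copy its pixels into an
    # RGBA array of shape (height, width, 4).
    fig.canvas.draw()
    frame = np.asarray(fig.canvas.buffer_rgba())
    plt.close(fig)
    return frame

fig, ax = plt.subplots(figsize=(4, 0.4))
ax.imshow(np.random.rand(3, 300), aspect='auto', cmap='Reds')
print(figure_to_array(fig).shape)  # (H, W, 4), dtype uint8

An RGBA frame like that is consistent with the channel check kept in the hunk above, which either drops the alpha channel or converts the video frame to RGBA so both frames end up with the same number of channels.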
@@ -297,10 +294,12 @@ def create_video_with_heatmap(video_path, df, mse_embeddings, mse_posture, mse_v
         return combined_frame
 
     final_clip = VideoClip(combine_video_and_heatmap, duration=video.duration)
-    final_clip = final_clip.
+    final_clip = final_clip.set_fps(desired_fps)
+
+    if video.audio is not None:
+        final_clip = final_clip.set_audio(video.audio.set_fps(desired_fps))
 
-    final_clip.write_videofile(heatmap_video_path, codec='libx264', audio_codec='aac', fps=video.fps)
+    final_clip.write_videofile(heatmap_video_path, codec='libx264', audio_codec='aac', fps=desired_fps)
 
     # Close the video clips
     video.close()
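The final hunk follows the usual MoviePy 1.x pattern, matching the set_fps and set_audio calls in the diff: build a VideoClip from a frame function, set its fps, optionally attach audio, and write the result with write_videofile. A self-contained sketch of that pattern (the frame function, frame size and file name are placeholders, not taken from the commit):

import numpy as np
from moviepy.editor import VideoClip

def make_frame(t):
    # Placeholder stand-in for combine_video_and_heatmap: returns an RGB frame
    # whose brightness varies with the time t in seconds.
    shade = int(255 * t / 2.0)
    return np.full((120, 160, 3), shade, dtype=np.uint8)

clip = VideoClip(make_frame, duration=2)
clip = clip.set_fps(15)  # analogous to final_clip.set_fps(desired_fps) in the diff
clip.write_videofile('heatmap_demo.mp4', codec='libx264', fps=15)

The guard around video.audio in the diff avoids calling set_audio with None when the source video has no audio track.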