Update visualization.py
visualization.py  +82 -33  CHANGED
@@ -1,6 +1,6 @@
 import matplotlib.pyplot as plt
 from mpl_toolkits.mplot3d import Axes3D
-from matplotlib.backends.backend_agg import FigureCanvasAgg
+from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
 import matplotlib.colors as mcolors
 from matplotlib.colors import LinearSegmentedColormap
 import seaborn as sns

@@ -217,43 +217,92 @@ def plot_posture(df, posture_scores, color='blue', anomaly_threshold=3):
     return fig


-def create_heatmap(
-    time_index = int(frame_time)
+def create_heatmap(t, mse_embeddings, mse_posture, mse_voice):
+    frame_count = int(t * video.fps)

+    # Normalize MSE values
+    mse_embeddings_norm = (mse_embeddings - np.min(mse_embeddings)) / (np.max(mse_embeddings) - np.min(mse_embeddings))
+    mse_posture_norm = (mse_posture - np.min(mse_posture)) / (np.max(mse_posture) - np.min(mse_posture))
+    mse_voice_norm = (mse_voice - np.min(mse_voice)) / (np.max(mse_voice) - np.min(mse_voice))
+
+    combined_mse = np.zeros((3, total_frames))
+    combined_mse[0] = mse_embeddings_norm
+    combined_mse[1] = mse_posture_norm
+    combined_mse[2] = mse_voice_norm

-    ax.
+    fig, ax = plt.subplots(figsize=(10, 2))
+    ax.imshow(combined_mse, aspect='auto', cmap='coolwarm', vmin=0, vmax=1, extent=[0, total_frames, 0, 3])
+    ax.set_yticks([0.5, 1.5, 2.5])
+    ax.set_yticklabels(['Face', 'Posture', 'Voice'])
+    ax.set_xticks([])

+    ax.axvline(x=frame_count, color='blue', linewidth=2)
+
+    canvas = FigureCanvas(fig)
     canvas.draw()
+    heatmap_img = np.frombuffer(canvas.tostring_rgb(), dtype='uint8')
+    heatmap_img = heatmap_img.reshape(canvas.get_width_height()[::-1] + (3,))
     plt.close(fig)
-    return
+    return heatmap_img
+
+# Function to create video with heatmap
+def create_video_with_heatmap(video_path, df, mse_embeddings, mse_posture, mse_voice, output_folder, desired_fps, largest_cluster):
+    print(f"Creating heatmap video. Output folder: {output_folder}")
+
+    os.makedirs(output_folder, exist_ok=True)
+
+    output_filename = os.path.basename(video_path).rsplit('.', 1)[0] + '_heatmap.mp4'
+    heatmap_video_path = os.path.join(output_folder, output_filename)
+
+    print(f"Heatmap video will be saved at: {heatmap_video_path}")
+
+    # Load the original video
+    video = VideoFileClip(video_path)
+
+    # Get video properties
+    width, height = video.w, video.h
+    total_frames = int(video.duration * video.fps)
+
+    # Ensure all MSE arrays have the same length as total_frames
+    mse_embeddings = np.interp(np.linspace(0, len(mse_embeddings) - 1, total_frames),
+                               np.arange(len(mse_embeddings)), mse_embeddings)
+    mse_posture = np.interp(np.linspace(0, len(mse_posture) - 1, total_frames),
+                            np.arange(len(mse_posture)), mse_posture)
+    mse_voice = np.interp(np.linspace(0, len(mse_voice) - 1, total_frames),
+                          np.arange(len(mse_voice)), mse_voice)
+
+    mse_embeddings_norm = (mse_embeddings - np.min(mse_embeddings)) / (np.max(mse_embeddings) - np.min(mse_embeddings))
+    mse_posture_norm = (mse_posture - np.min(mse_posture)) / (np.max(mse_posture) - np.min(mse_posture))
+    mse_voice_norm = (mse_voice - np.min(mse_voice)) / (np.max(mse_voice) - np.min(mse_voice))
+
+    combined_mse = np.zeros((3, total_frames))
+    combined_mse[0] = mse_embeddings_norm
+    combined_mse[1] = mse_posture_norm
+    combined_mse[2] = mse_voice_norm
+
+    def combine_video_and_heatmap(t):
+        video_frame = video.get_frame(t)
+        heatmap_frame = create_heatmap(t, mse_embeddings, mse_posture, mse_voice)
+        combined_frame = np.vstack((video_frame, heatmap_frame))
+        return add_timecode(combined_frame, t)
+
+    final_clip = VideoClip(combine_video_and_heatmap, duration=video.duration)
+    final_clip = final_clip.set_audio(video.audio)
+
+    # Write the final video
+    final_clip.write_videofile(heatmap_video_path, codec='libx264', audio_codec='aac', fps=video.fps)
+
+    # Close the video clips
+    video.close()
+    final_clip.close()
+
+    if os.path.exists(heatmap_video_path):
+        print(f"Heatmap video created at: {heatmap_video_path}")
+        print(f"Heatmap video size: {os.path.getsize(heatmap_video_path)} bytes")
+        return heatmap_video_path
+    else:
+        print(f"Failed to create heatmap video at: {heatmap_video_path}")
+        return None


 # Function to create the correlation heatmap
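
For context, the new create_heatmap follows the Agg render-to-array pattern: draw the figure on an off-screen canvas, then read the canvas buffer back as a NumPy image so it can be stacked under the video frame. Below is a minimal, self-contained sketch of that pattern; the figure_to_rgb_array helper name and the random demo data are illustrative and not part of this commit.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas

def figure_to_rgb_array(fig):
    """Rasterize a Matplotlib figure into an (H, W, 3) uint8 RGB array."""
    canvas = FigureCanvas(fig)
    canvas.draw()
    # tostring_rgb() mirrors the call used in this commit; newer Matplotlib
    # releases deprecate it in favour of buffer_rgba().
    buf = np.frombuffer(canvas.tostring_rgb(), dtype=np.uint8)
    # get_width_height() returns (width, height); reverse it to (rows, cols).
    return buf.reshape(canvas.get_width_height()[::-1] + (3,))

fig, ax = plt.subplots(figsize=(10, 2))
ax.imshow(np.random.rand(3, 100), aspect='auto', cmap='coolwarm', vmin=0, vmax=1)
rgb = figure_to_rgb_array(fig)
plt.close(fig)
print(rgb.shape)  # roughly (200, 1000, 3) at the default 100 dpi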
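
A possible call site for the new create_video_with_heatmap entry point, assuming the MSE arrays and the DataFrame come from the earlier analysis stages; every literal below (paths, fps, cluster id) is a placeholder rather than part of the commit. Note that create_heatmap itself references video and total_frames that are not among its parameters, so this sketch assumes those names are defined at module level elsewhere in visualization.py.

heatmap_video = create_video_with_heatmap(
    video_path="input/session_01.mp4",   # hypothetical input clip
    df=df,                               # frame-level results from the analysis step
    mse_embeddings=mse_embeddings,       # per-frame reconstruction error (face)
    mse_posture=mse_posture,             # per-frame reconstruction error (posture)
    mse_voice=mse_voice,                 # per-frame reconstruction error (voice)
    output_folder="output/heatmaps",     # hypothetical output directory
    desired_fps=20,
    largest_cluster=0,
)
if heatmap_video is not None:
    print(f"Heatmap video ready: {heatmap_video}")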