reab5555 committed
Commit 2128c11 · verified · 1 Parent(s): 3723697

Update visualization.py

Files changed (1)
  1. visualization.py +37 -99
visualization.py CHANGED
@@ -7,7 +7,7 @@ import seaborn as sns
  import numpy as np
  import pandas as pd
  import cv2
- from moviepy.editor import VideoFileClip, AudioFileClip, CompositeVideoClip, ImageClip, VideoClip
+ from moviepy.editor import VideoFileClip, AudioFileClip, CompositeVideoClip, ImageClip, VideoClip, concatenate_videoclips
  from moviepy.video.fx.all import resize
  from PIL import Image, ImageDraw, ImageFont
  from matplotlib.patches import Rectangle
@@ -216,107 +216,45 @@ def plot_posture(df, posture_scores, color='blue', anomaly_threshold=3):
      plt.close()
      return fig
 
- def create_video_with_heatmap(video_path, df, mse_embeddings, mse_posture, mse_voice, output_folder, desired_fps, largest_cluster):
-     print(f"Creating heatmap video. Output folder: {output_folder}")
-
-     os.makedirs(output_folder, exist_ok=True)
-
-     output_filename = os.path.basename(video_path).rsplit('.', 1)[0] + '_heatmap.mp4'
-     heatmap_video_path = os.path.join(output_folder, output_filename)
-
-     print(f"Heatmap video will be saved at: {heatmap_video_path}")
-
-     # Load the original video
-     video = VideoFileClip(video_path)
-
-     # Get video properties
-     width, height = video.w, video.h
-     total_frames = int(video.duration * video.fps)
-
-     # Ensure all MSE arrays have the same length as total_frames
-     mse_embeddings = np.interp(np.linspace(0, len(mse_embeddings) - 1, total_frames),
-                                np.arange(len(mse_embeddings)), mse_embeddings)
-     mse_posture = np.interp(np.linspace(0, len(mse_posture) - 1, total_frames),
-                             np.arange(len(mse_posture)), mse_posture)
-     mse_voice = np.interp(np.linspace(0, len(mse_voice) - 1, total_frames),
-                           np.arange(len(mse_voice)), mse_voice)
-
-     # Normalize the MSE values
-     mse_embeddings_norm = (mse_embeddings - np.min(mse_embeddings)) / (np.max(mse_embeddings) - np.min(mse_embeddings))
-     mse_posture_norm = (mse_posture - np.min(mse_posture)) / (np.max(mse_posture) - np.min(mse_posture))
-     mse_voice_norm = (mse_voice - np.min(mse_voice)) / (np.max(mse_voice) - np.min(mse_voice))
-
-     combined_mse = np.full((3, total_frames), np.nan)
-     combined_mse[0] = mse_embeddings_norm
-     combined_mse[1] = mse_posture_norm
-     combined_mse[2] = mse_voice_norm
-
-     # Create custom colormap
-     cdict = {
-         'red': [(0.0, 0.5, 0.5), (1.0, 1.0, 1.0)],
-         'green': [(0.0, 0.5, 0.5), (1.0, 0.0, 0.0)],
-         'blue': [(0.0, 0.5, 0.5), (1.0, 0.0, 0.0)],
-     }
-     custom_cmap = LinearSegmentedColormap('custom_cmap', segmentdata=cdict, N=256)
-
-     fig, ax = plt.subplots(figsize=(width/100, 2))
-     im = ax.imshow(combined_mse, aspect='auto', cmap=custom_cmap, extent=[0, total_frames, 0, 3], vmin=0, vmax=1)
-     ax.set_yticks([0.5, 1.5, 2.5])
-     ax.set_yticklabels(['Face', 'Posture', 'Voice'])
-     ax.set_xticks([])
-     plt.tight_layout()
-
-     def create_heatmap(t):
-         frame_count = int(t * video.fps)
-
-         # Clear previous lines
-         if hasattr(ax, 'lines') and len(ax.lines) > 0:
-             ax.lines.pop(0)
-         ax.axvline(x=frame_count, color='blue', linewidth=2)
-
-         canvas = FigureCanvasAgg(fig)
-         canvas.draw()
-         heatmap_img = np.frombuffer(canvas.tostring_rgb(), dtype='uint8')
-         heatmap_img = heatmap_img.reshape(canvas.get_width_height()[::-1] + (3,))
-         return heatmap_img
-
-     def add_timecode(frame, t):
-         seconds = t
-         timecode = f"{int(seconds//3600):02d}:{int((seconds%3600)//60):02d}:{int(seconds%60):02d}"
-
-         pil_img = Image.fromarray(frame.astype('uint8'))
-         draw = ImageDraw.Draw(pil_img)
-         font = ImageFont.load_default()
-         draw.text((10, 30), f"Time: {timecode}", font=font, fill=(255, 255, 255))
-
-         return np.array(pil_img)
-
-     heatmap_clip = VideoClip(create_heatmap, duration=video.duration)
-     heatmap_clip = heatmap_clip.resize(height=200)
-
-     def combine_video_and_heatmap(t):
-         video_frame = video.get_frame(t)
-         heatmap_frame = heatmap_clip.get_frame(t)
-         combined_frame = np.vstack((video_frame, heatmap_frame))
-         return add_timecode(combined_frame, t)
-
-     final_clip = VideoClip(combine_video_and_heatmap, duration=video.duration)
-     final_clip = final_clip.set_audio(video.audio)
-
-     # Write the final video
-     final_clip.write_videofile(heatmap_video_path, codec='libx264', audio_codec='aac', fps=video.fps)
-
-     # Close the video clips
-     video.close()
-     final_clip.close()
-
-     if os.path.exists(heatmap_video_path):
-         print(f"Heatmap video created at: {heatmap_video_path}")
-         print(f"Heatmap video size: {os.path.getsize(heatmap_video_path)} bytes")
-         return heatmap_video_path
-     else:
-         print(f"Failed to create heatmap video at: {heatmap_video_path}")
-         return None
+ def create_heatmap(frame_time, mse_embeddings, mse_posture, mse_voice):
+     fig = Figure(figsize=(10, 1))
+     canvas = FigureCanvas(fig)
+     ax = fig.add_subplot(111)
+     time_index = int(frame_time)
+
+     if time_index < len(mse_embeddings) and time_index < len(mse_posture) and time_index < len(mse_voice):
+         mse_values = [mse_embeddings[time_index], mse_posture[time_index], mse_voice[time_index]]
+     else:
+         mse_values = [0, 0, 0]  # Default values if the index is out of bounds
+
+     ax.barh(['Face', 'Posture', 'Voice'], mse_values, color=['navy', 'purple', 'green'])
+     ax.set_xlim(0, 1)  # Normalize the MSE values
+
+     canvas.draw()
+     img = np.frombuffer(canvas.tostring_rgb(), dtype='uint8')
+     img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+     plt.close(fig)
+     return img
+
+ def create_video_with_heatmap(video_path, df, mse_embeddings, mse_posture, mse_voice, output_folder, fps, largest_cluster):
+     original_clip = VideoFileClip(video_path)
+     duration = original_clip.duration
+     heatmap_clips = []
+
+     for t in np.arange(0, duration, 1.0 / fps):
+         heatmap_img = create_heatmap(t, mse_embeddings, mse_posture, mse_voice)
+         heatmap_img_bgr = cv2.cvtColor(heatmap_img, cv2.COLOR_RGB2BGR)
+         heatmap_filename = os.path.join(output_folder, f"heatmap_{int(t * fps)}.png")
+         cv2.imwrite(heatmap_filename, heatmap_img_bgr)
+         heatmap_clips.append(ImageClip(heatmap_filename).set_duration(1.0 / fps).set_start(t).resize(height=100))
+
+     heatmap_clip = concatenate_videoclips(heatmap_clips, method="compose")
+     final_clip = CompositeVideoClip([original_clip, heatmap_clip.set_position(('center', 'bottom'))])
+     heatmap_video_path = os.path.join(output_folder, "heatmap_video.mp4")
+     final_clip.write_videofile(heatmap_video_path, codec='libx264', fps=fps, audio_codec='aac')
+
+     return heatmap_video_path

  # Function to create the correlation heatmap
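
For reference, a minimal usage sketch of the reworked create_video_with_heatmap. It assumes the three MSE arrays hold one value per second of video (create_heatmap indexes them with int(frame_time)) and are already scaled to roughly [0, 1] to match the fixed x-limit of the bar chart. The paths, array lengths, and fps value below are hypothetical; df and largest_cluster are accepted but not referenced by the body shown above, and the output folder must already exist because cv2.imwrite does not create directories.

import numpy as np

# Hypothetical inputs: one MSE value per second for a 120-second clip,
# pre-scaled to [0, 1] so the bars fit within ax.set_xlim(0, 1).
mse_embeddings = np.random.rand(120)
mse_posture = np.random.rand(120)
mse_voice = np.random.rand(120)

heatmap_video = create_video_with_heatmap(
    video_path="input/session.mp4",   # hypothetical path
    df=None,                          # not referenced by the new implementation
    mse_embeddings=mse_embeddings,
    mse_posture=mse_posture,
    mse_voice=mse_voice,
    output_folder="output",           # must already exist; heatmap PNGs and the mp4 land here
    fps=5,                            # a low fps keeps the number of intermediate ImageClips small
    largest_cluster=None,             # not referenced by the new implementation
)
print(f"Heatmap video written to: {heatmap_video}")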