Update visualization.py
visualization.py (+12 -6)
```diff
@@ -207,12 +207,13 @@ def plot_posture(df, posture_scores, color='blue', anomaly_threshold=3):
     return fig
 
 
-def create_video_with_heatmap(video_path, df, mse_embeddings, mse_posture, output_path, desired_fps, largest_cluster):
+def create_video_with_heatmap(video_path, df, mse_embeddings, mse_posture, mse_voice, output_path, desired_fps, largest_cluster):
     # Filter the DataFrame to only include frames from the largest cluster
     df_largest_cluster = df[df['Cluster'] == largest_cluster]
     mse_embeddings = mse_embeddings[df['Cluster'] == largest_cluster]
     mse_posture = mse_posture[df['Cluster'] == largest_cluster]
-
+    mse_voice = mse_voice[df['Cluster'] == largest_cluster]
+
     cap = cv2.VideoCapture(video_path)
     original_fps = cap.get(cv2.CAP_PROP_FPS)
     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
@@ -226,14 +227,19 @@ def create_video_with_heatmap(video_path, df, mse_embeddings, mse_posture, output_path, desired_fps, largest_cluster):
                                 np.arange(len(mse_embeddings)), mse_embeddings)
     mse_posture = np.interp(np.linspace(0, len(mse_posture) - 1, total_frames),
                             np.arange(len(mse_posture)), mse_posture)
+    mse_voice = np.interp(np.linspace(0, len(mse_voice) - 1, total_frames),
+                          np.arange(len(mse_voice)), mse_voice)
+
 
     mse_embeddings_norm = (mse_embeddings - np.min(mse_embeddings)) / (np.max(mse_embeddings) - np.min(mse_embeddings))
     mse_posture_norm = (mse_posture - np.min(mse_posture)) / (np.max(mse_posture) - np.min(mse_posture))
+    mse_voice_norm = (mse_voice - np.min(mse_voice)) / (np.max(mse_voice) - np.min(mse_voice))
 
     combined_mse = np.zeros((2, total_frames))
     combined_mse[0] = mse_embeddings_norm  # Use normalized MSE values for facial
     combined_mse[1] = mse_posture_norm  # Use normalized MSE values for posture
-
+    combined_mse[2] = mse_voice_norm
+
     # Custom colormap definition
     cdict = {
         'red': [(0.0, 0.5, 0.5),  # Low MSE: 50% red (gray)
@@ -248,9 +254,9 @@ def create_video_with_heatmap(video_path, df, mse_embeddings, mse_posture, output_path, desired_fps, largest_cluster):
 
     fig, ax = plt.subplots(figsize=(width/100, 2))
     # Use the custom colormap in the heatmap generation
-    im = ax.imshow(combined_mse, aspect='auto', cmap=custom_cmap, extent=[0, total_frames, 0, 2])
-    ax.set_yticks([0.5, 1.5])
-    ax.set_yticklabels(['Face', 'Posture'])
+    im = ax.imshow(combined_mse, aspect='auto', cmap=custom_cmap, extent=[0, total_frames, 0, 3])
+    ax.set_yticks([0.5, 1.5, 2.5])
+    ax.set_yticklabels(['Face', 'Posture', 'Voice'])
     ax.set_xticks([])
     plt.tight_layout()
 
```
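One spot the diff leaves untouched is the allocation of `combined_mse`: it is still created as `np.zeros((2, total_frames))`, yet the new code assigns a third row with `combined_mse[2] = mse_voice_norm`, which would raise an `IndexError` on a two-row array. A minimal sketch of the presumably intended fix, using stand-in arrays since only this fragment of `visualization.py` is shown:

```python
import numpy as np

# Stand-ins for the per-frame normalized MSE series built inside the function.
total_frames = 300  # hypothetical frame count, for illustration only
mse_embeddings_norm = np.random.rand(total_frames)
mse_posture_norm = np.random.rand(total_frames)
mse_voice_norm = np.random.rand(total_frames)

# Stacking the rows directly keeps the row count in sync with the number of
# modalities, instead of hand-maintaining the shape passed to np.zeros.
combined_mse = np.vstack([mse_embeddings_norm, mse_posture_norm, mse_voice_norm])
assert combined_mse.shape == (3, total_frames)
```

Equivalently, the existing allocation line could simply become `np.zeros((3, total_frames))`.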
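Also worth noting: `mse_voice` is inserted between `mse_posture` and `output_path` rather than appended, so any caller that passes arguments positionally would now hand its output path to `mse_voice`. A hedged call-site sketch, assuming `df`, the MSE arrays, and `largest_cluster` come from the upstream analysis steps; the file names and fps value are placeholders, not taken from the repository:

```python
# Hypothetical call site; paths, fps, and variable provenance are illustrative only.
create_video_with_heatmap(
    video_path="input.mp4",
    df=df,
    mse_embeddings=mse_embeddings,
    mse_posture=mse_posture,
    mse_voice=mse_voice,              # new argument introduced by this commit
    output_path="output_heatmap.mp4",
    desired_fps=15,
    largest_cluster=largest_cluster,
)
```

Calling with keyword arguments like this also guards against the same kind of silent argument shift if the signature changes again.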