Update app.py
app.py CHANGED

@@ -20,14 +20,12 @@ import tempfile
 import shutil
 import subprocess
 import fractions
-import spaces
 
 # Suppress TensorFlow warnings
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
 import tensorflow as tf
 tf.get_logger().setLevel('ERROR')
 
-
 # Initialize models and other global variables
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 
@@ -93,12 +91,6 @@ def extract_frames(video_path, output_folder, fps):
         '-vf', f'fps={fps}',
         f'{output_folder}/frame_%04d.jpg'
     ]
-
-    print(f"FFmpeg command: {command}")
-
-    if any(arg is None for arg in command):
-        raise ValueError(f"None value found in FFmpeg command: {command}")
-
     try:
         result = subprocess.run(command, check=True, capture_output=True, text=True)
         print(f"FFmpeg stdout: {result.stdout}")
@@ -339,6 +331,8 @@ def plot_emotion(df, emotion, num_anomalies, color):
     plt.tight_layout()
     return fig
 
+import base64
+
 def get_random_face_sample(organized_faces_folder, largest_cluster, output_folder):
     person_folder = os.path.join(organized_faces_folder, f"person_{largest_cluster}")
     face_files = [f for f in os.listdir(person_folder) if f.endswith('.jpg')]
@@ -349,22 +343,13 @@ def get_random_face_sample(organized_faces_folder, largest_cluster, output_folder):
 
         # Read the image and resize it to be smaller
         face_img = cv2.imread(face_path)
-        small_face = cv2.resize(face_img, (100, 100))  # Resize to
+        small_face = cv2.resize(face_img, (100, 100))  # Resize to NxN pixels
         cv2.imwrite(output_path, small_face)
 
         return output_path
     return None
 
 def process_video(video_path, num_anomalies, num_components, desired_fps, batch_size, progress=gr.Progress()):
-    print(f"Video path: {video_path}")
-    print(f"Num anomalies: {num_anomalies}")
-    print(f"Num components: {num_components}")
-    print(f"Desired FPS: {desired_fps}")
-    print(f"Batch size: {batch_size}")
-
-    if not os.path.exists(video_path):
-        raise FileNotFoundError(f"Video file not found: {video_path}")
-
     output_folder = "output"
     os.makedirs(output_folder, exist_ok=True)
 
@@ -405,7 +390,7 @@ def process_video(video_path, num_anomalies, num_components, desired_fps, batch_size, progress=gr.Progress()):
     try:
         anomalies_all, anomaly_scores_all, top_indices_all, anomalies_comp, anomaly_scores_comp, top_indices_comp, _ = lstm_anomaly_detection(X, feature_columns, num_anomalies=num_anomalies, batch_size=batch_size)
     except Exception as e:
-        print(f"Error
+        print(f"Error details: {str(e)}")
         print(f"X shape: {X.shape}")
         print(f"X dtype: {X.dtype}")
         return f"Error in anomaly detection: {str(e)}", None, None, None, None, None, None
@@ -425,6 +410,7 @@ def process_video(video_path, num_anomalies, num_components, desired_fps, batch_size, progress=gr.Progress()):
     except Exception as e:
         return f"Error generating plots: {str(e)}", None, None, None, None, None, None, None, None, None
 
+
     # Get a random face sample
     face_sample = get_random_face_sample(organized_faces_folder, largest_cluster, output_folder)
 
@@ -442,7 +428,7 @@ def process_video(video_path, num_anomalies, num_components, desired_fps, batch_size, progress=gr.Progress()):
     results += "\n".join([f"{df[emotion].iloc[i]:.4f} at {df['Timecode'].iloc[i]}" for i in top_indices])
 
     return results, face_sample, anomaly_plot_all, anomaly_plot_comp, *emotion_plots
-
+
 # Gradio interface
 iface = gr.Interface(
     fn=process_video,
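For reference, a minimal, self-contained sketch of the frame-extraction step touched by the second hunk, assuming ffmpeg is available on PATH. The fps filter, the frame_%04d.jpg output pattern, and the subprocess.run(check=True, capture_output=True, text=True) call are taken from the diff; the 'ffmpeg' executable name, the '-i' input flag, the directory creation, and the error handling are assumptions added to make the snippet runnable.

import os
import subprocess

def extract_frames(video_path, output_folder, fps):
    # Make sure the output directory exists before FFmpeg writes into it.
    os.makedirs(output_folder, exist_ok=True)
    # Build the FFmpeg command: sample the video at the desired fps and
    # write numbered JPEG frames using the pattern shown in the diff.
    command = [
        'ffmpeg',
        '-i', video_path,          # assumed input flag; not shown in the hunk
        '-vf', f'fps={fps}',
        f'{output_folder}/frame_%04d.jpg'
    ]
    try:
        result = subprocess.run(command, check=True, capture_output=True, text=True)
        print(f"FFmpeg stdout: {result.stdout}")
    except subprocess.CalledProcessError as e:
        # Surface FFmpeg's stderr so extraction failures are easy to diagnose.
        print(f"FFmpeg stderr: {e.stderr}")
        raise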