Update app.py
app.py CHANGED
@@ -27,6 +27,8 @@ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
 import tensorflow as tf
 tf.get_logger().setLevel('ERROR')
 
+@spaces.GPU(duration=500)
+
 # Initialize models and other global variables
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 
@@ -92,6 +94,12 @@ def extract_frames(video_path, output_folder, fps):
         '-vf', f'fps={fps}',
         f'{output_folder}/frame_%04d.jpg'
     ]
+
+    print(f"FFmpeg command: {command}")
+
+    if any(arg is None for arg in command):
+        raise ValueError(f"None value found in FFmpeg command: {command}")
+
     try:
         result = subprocess.run(command, check=True, capture_output=True, text=True)
         print(f"FFmpeg stdout: {result.stdout}")
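Note on the hunk above: the new check guards the FFmpeg call against a None sneaking into the argument list, which would otherwise surface as an opaque TypeError from subprocess. A minimal standalone sketch of the same pattern, assuming ffmpeg is on PATH; extract_frames_checked and its arguments are illustrative, not the app's actual code:

import subprocess

def extract_frames_checked(video_path, output_folder, fps):
    command = [
        'ffmpeg', '-i', video_path,
        '-vf', f'fps={fps}',
        f'{output_folder}/frame_%04d.jpg',
    ]
    # Fail fast with a readable message instead of letting subprocess raise on a None argument
    if any(arg is None for arg in command):
        raise ValueError(f"None value found in FFmpeg command: {command}")
    result = subprocess.run(command, check=True, capture_output=True, text=True)
    return result.stdout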
@@ -294,7 +302,7 @@ def lstm_anomaly_detection(X, feature_columns, num_anomalies=10, epochs=100, bat
     else:
         mse_comp = mse_all  # If no components, use all features
 
-
+    top_indices_comp = mse_comp.argsort()[-num_anomalies:][::-1]
     anomalies_comp = np.zeros(len(mse_comp), dtype=bool)
     anomalies_comp[top_indices_comp] = True
 
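For reference, the added line selects the num_anomalies largest reconstruction errors: argsort() sorts ascending, the [-k:] slice keeps the k largest, and [::-1] reverses them so the worst frame comes first; the boolean mask then flags those indices. A small NumPy-only sketch with illustrative names:

import numpy as np

def top_k_anomalies(mse, k):
    # Indices of the k largest errors, largest first
    top_idx = np.argsort(mse)[-k:][::-1]
    mask = np.zeros(len(mse), dtype=bool)
    mask[top_idx] = True
    return top_idx, mask

# Example: top_k_anomalies(np.array([0.1, 0.9, 0.3, 0.7]), 2)
# -> (array([1, 3]), array([False, True, False, True]))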
@@ -332,8 +340,6 @@ def plot_emotion(df, emotion, num_anomalies, color):
     plt.tight_layout()
     return fig
 
-import base64
-
 def get_random_face_sample(organized_faces_folder, largest_cluster, output_folder):
     person_folder = os.path.join(organized_faces_folder, f"person_{largest_cluster}")
     face_files = [f for f in os.listdir(person_folder) if f.endswith('.jpg')]
@@ -344,15 +350,22 @@ def get_random_face_sample(organized_faces_folder, largest_cluster, output_folde
 
         # Read the image and resize it to be smaller
         face_img = cv2.imread(face_path)
-        small_face = cv2.resize(face_img, (100, 100))  # Resize to
+        small_face = cv2.resize(face_img, (100, 100))  # Resize to 100x100 pixels
         cv2.imwrite(output_path, small_face)
 
         return output_path
     return None
 
-@spaces.GPU(duration=300)  # Set to 5 minutes (300 seconds)
-
 def process_video(video_path, num_anomalies, num_components, desired_fps, batch_size, progress=gr.Progress()):
+    print(f"Video path: {video_path}")
+    print(f"Num anomalies: {num_anomalies}")
+    print(f"Num components: {num_components}")
+    print(f"Desired FPS: {desired_fps}")
+    print(f"Batch size: {batch_size}")
+
+    if not os.path.exists(video_path):
+        raise FileNotFoundError(f"Video file not found: {video_path}")
+
     output_folder = "output"
     os.makedirs(output_folder, exist_ok=True)
 
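The process_video additions log the incoming parameters and fail fast when the uploaded file is missing, before any frame extraction starts. A minimal sketch of that guard, with a hypothetical helper name:

import os

def validate_video_input(video_path):
    # Raise early so the error is reported cleanly instead of as a downstream FFmpeg failure
    if not video_path or not os.path.exists(video_path):
        raise FileNotFoundError(f"Video file not found: {video_path}")
    return video_path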
@@ -393,7 +406,7 @@ def process_video(video_path, num_anomalies, num_components, desired_fps, batch_
     try:
         anomalies_all, anomaly_scores_all, top_indices_all, anomalies_comp, anomaly_scores_comp, top_indices_comp, _ = lstm_anomaly_detection(X, feature_columns, num_anomalies=num_anomalies, batch_size=batch_size)
     except Exception as e:
-        print(f"Error
+        print(f"Error in anomaly detection: {str(e)}")
         print(f"X shape: {X.shape}")
         print(f"X dtype: {X.dtype}")
         return f"Error in anomaly detection: {str(e)}", None, None, None, None, None, None
@@ -413,7 +426,6 @@ def process_video(video_path, num_anomalies, num_components, desired_fps, batch_
     except Exception as e:
         return f"Error generating plots: {str(e)}", None, None, None, None, None, None, None, None, None
 
-
     # Get a random face sample
     face_sample = get_random_face_sample(organized_faces_folder, largest_cluster, output_folder)
 
@@ -431,7 +443,7 @@ def process_video(video_path, num_anomalies, num_components, desired_fps, batch_
     results += "\n".join([f"{df[emotion].iloc[i]:.4f} at {df['Timecode'].iloc[i]}" for i in top_indices])
 
     return results, face_sample, anomaly_plot_all, anomaly_plot_comp, *emotion_plots
-
+
 # Gradio interface
 iface = gr.Interface(
     fn=process_video,