reab5555 committed on
Commit
edb037a
·
verified ·
1 Parent(s): 556ecd8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -7
app.py CHANGED
@@ -75,7 +75,10 @@ def on_button_click(video, threshold, fps):
75
  face_samples_most_frequent: results[13],
76
  heatmap_video: results[14],
77
  combined_mse_plot: results[15],
78
- correlation_heatmap_plot: results[16]
 
 
 
79
  }
80
 
81
  with gr.Blocks() as iface:
@@ -86,8 +89,7 @@ with gr.Blocks() as iface:
86
  It extracts faces, postures, and voice from video frames, and analyzes them to identify anomalies using time series analysis and a variational autoencoder (VAE) approach.
87
  """)
88
 
89
- with gr.Row():
90
- video_input = gr.Video()
91
 
92
  anomaly_threshold = gr.Slider(minimum=1, maximum=5, step=0.1, value=3, label="Anomaly Detection Threshold (Standard deviation)")
93
  fps_slider = gr.Slider(minimum=5, maximum=20, step=1, value=10, label="Frames Per Second (FPS)")
@@ -174,8 +176,7 @@ with gr.Blocks() as iface:
174
  ### High-Speed Cameras
175
 
176
  Effective capture of micro-expressions generally requires frame rates above 100 fps. High-speed video systems designed for micro-expression detection often operate at 118 fps or higher, with some systems reaching up to 200 fps.
177
- caution.
178
-
179
  ## Limitations
180
 
181
  - **Evaluation Challenges**: Since this is an unsupervised method, there is no labeled data to compare against. This makes it difficult to quantitatively evaluate the accuracy or effectiveness of the anomaly detection.
@@ -190,12 +191,13 @@ with gr.Blocks() as iface:
190
 
191
  ## Conclusion
192
  This tool offers solutions for detecting emotional, posture, and vocal anomalies in video-based facial expressions, body language, and speech, beneficial for both forensic analysis and HUMINT operations. However, users should be aware of its limitations and the challenges inherent in unsupervised anomaly detection methodologies. By leveraging advanced computer vision techniques and the power of autoencoders, it provides crucial insights into human behavior in a timely manner, but results should be interpreted with caution and, where possible, supplemented with additional context and expert analysis.
193
-
194
  """)
195
 
196
  with gr.TabItem("Results", id="results_tab", visible=False) as results_tab:
197
  with gr.Tabs():
198
  with gr.TabItem("Facial Features"):
 
199
  results_text = gr.TextArea(label="Faces Breakdown", lines=5)
200
  mse_features_plot = gr.Plot(label="MSE: Facial Features")
201
  mse_features_hist = gr.Plot(label="MSE Distribution: Facial Features")
@@ -204,12 +206,14 @@ with gr.Blocks() as iface:
204
  face_samples_most_frequent = gr.Gallery(label="Most Frequent Person Samples", columns=10, rows=2, height="auto")
205
 
206
  with gr.TabItem("Body Posture"):
 
207
  mse_posture_plot = gr.Plot(label="MSE: Body Posture")
208
  mse_posture_hist = gr.Plot(label="MSE Distribution: Body Posture")
209
  mse_posture_heatmap = gr.Plot(label="MSE Heatmap: Body Posture")
210
  anomaly_frames_posture = gr.Gallery(label="Anomaly Frames (Body Posture)", columns=6, rows=2, height="auto")
211
 
212
  with gr.TabItem("Voice"):
 
213
  mse_voice_plot = gr.Plot(label="MSE: Voice")
214
  mse_voice_hist = gr.Plot(label="MSE Distribution: Voice")
215
  mse_voice_heatmap = gr.Plot(label="MSE Heatmap: Voice")
@@ -229,7 +233,7 @@ with gr.Blocks() as iface:
229
  mse_features_heatmap, mse_posture_heatmap, mse_voice_heatmap,
230
  anomaly_frames_features, anomaly_frames_posture,
231
  face_samples_most_frequent, heatmap_video, combined_mse_plot,
232
- correlation_heatmap_plot
233
  ]
234
  )
235
 
 
75
  face_samples_most_frequent: results[13],
76
  heatmap_video: results[14],
77
  combined_mse_plot: results[15],
78
+ correlation_heatmap_plot: results[16],
79
+ video_display_facial: video,
80
+ video_display_body: video,
81
+ video_display_voice: video
82
  }
83
 
84
  with gr.Blocks() as iface:
 
89
  It extracts faces, postures, and voice from video frames, and analyzes them to identify anomalies using time series analysis and a variational autoencoder (VAE) approach.
90
  """)
91
 
92
+ video_input = gr.Video(label="Input Video", visible=True)
 
93
 
94
  anomaly_threshold = gr.Slider(minimum=1, maximum=5, step=0.1, value=3, label="Anomaly Detection Threshold (Standard deviation)")
95
  fps_slider = gr.Slider(minimum=5, maximum=20, step=1, value=10, label="Frames Per Second (FPS)")
 
176
  ### High-Speed Cameras
177
 
178
  Effective capture of micro-expressions generally requires frame rates above 100 fps. High-speed video systems designed for micro-expression detection often operate at 118 fps or higher, with some systems reaching up to 200 fps.
179
+
 
180
  ## Limitations
181
 
182
  - **Evaluation Challenges**: Since this is an unsupervised method, there is no labeled data to compare against. This makes it difficult to quantitatively evaluate the accuracy or effectiveness of the anomaly detection.
 
191
 
192
  ## Conclusion
193
  This tool offers solutions for detecting emotional, posture, and vocal anomalies in video-based facial expressions, body language, and speech, beneficial for both forensic analysis and HUMINT operations. However, users should be aware of its limitations and the challenges inherent in unsupervised anomaly detection methodologies. By leveraging advanced computer vision techniques and the power of autoencoders, it provides crucial insights into human behavior in a timely manner, but results should be interpreted with caution and, where possible, supplemented with additional context and expert analysis.
194
+ h caution.
195
  """)
196
 
197
  with gr.TabItem("Results", id="results_tab", visible=False) as results_tab:
198
  with gr.Tabs():
199
  with gr.TabItem("Facial Features"):
200
+ video_display_facial = gr.Video(label="Input Video")
201
  results_text = gr.TextArea(label="Faces Breakdown", lines=5)
202
  mse_features_plot = gr.Plot(label="MSE: Facial Features")
203
  mse_features_hist = gr.Plot(label="MSE Distribution: Facial Features")
 
206
  face_samples_most_frequent = gr.Gallery(label="Most Frequent Person Samples", columns=10, rows=2, height="auto")
207
 
208
  with gr.TabItem("Body Posture"):
209
+ video_display_body = gr.Video(label="Input Video")
210
  mse_posture_plot = gr.Plot(label="MSE: Body Posture")
211
  mse_posture_hist = gr.Plot(label="MSE Distribution: Body Posture")
212
  mse_posture_heatmap = gr.Plot(label="MSE Heatmap: Body Posture")
213
  anomaly_frames_posture = gr.Gallery(label="Anomaly Frames (Body Posture)", columns=6, rows=2, height="auto")
214
 
215
  with gr.TabItem("Voice"):
216
+ video_display_voice = gr.Video(label="Input Video")
217
  mse_voice_plot = gr.Plot(label="MSE: Voice")
218
  mse_voice_hist = gr.Plot(label="MSE Distribution: Voice")
219
  mse_voice_heatmap = gr.Plot(label="MSE Heatmap: Voice")
 
233
  mse_features_heatmap, mse_posture_heatmap, mse_voice_heatmap,
234
  anomaly_frames_features, anomaly_frames_posture,
235
  face_samples_most_frequent, heatmap_video, combined_mse_plot,
236
+ correlation_heatmap_plot, video_display_facial, video_display_body, video_display_voice
237
  ]
238
  )
239