David Driscoll committed
Commit 8947b35 · 1 Parent(s): 2553966

Output restoration

Files changed (1):
  1. app.py +23 -23

app.py CHANGED
@@ -10,7 +10,7 @@ from fer import FER  # Facial emotion recognition
 from concurrent.futures import ThreadPoolExecutor
 
 # -----------------------------
-# Global Asynchronous Executor & Caches
+# Asynchronous Processing Setup
 # -----------------------------
 executor = ThreadPoolExecutor(max_workers=4)
 latest_results = {
@@ -27,22 +27,24 @@ futures = {
 }
 
 def async_analyze(key, func, image):
-    """
-    Runs the heavy detection function 'func' in a background thread.
-    Returns the last computed result (if available) so that the output
-    FPS remains high even if the detection lags.
-    """
-    if futures[key] is None or futures[key].done():
-        futures[key] = executor.submit(func, image)
-    if futures[key].done():
+    # If a background task is done, update our cache.
+    if futures[key] is not None and futures[key].done():
         latest_results[key] = futures[key].result()
-    # Return latest result if available; otherwise, compute synchronously
-    return latest_results.get(key, func(image))
+        futures[key] = None
+    # If we already have a cached result, return it immediately and schedule a new update if none is running.
+    if latest_results[key] is not None:
+        if futures[key] is None:
+            futures[key] = executor.submit(func, image)
+        return latest_results[key]
+    # Otherwise, compute synchronously (blocking) to initialize the cache.
+    result = func(image)
+    latest_results[key] = result
+    futures[key] = executor.submit(func, image)
+    return result
 
 # -----------------------------
 # Initialize Models and Helpers
 # -----------------------------
-
 # MediaPipe Pose for posture analysis
 mp_pose = mp.solutions.pose
 pose = mp_pose.Pose()
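
Note on the hunk above: the new async_analyze is a stale-while-revalidate cache. Each streaming frame harvests any finished background task, returns the newest completed result immediately, and schedules a refresh; only the very first frame blocks. Assuming latest_results pre-seeds every key with None (its initializer is truncated above), the old `return latest_results.get(key, func(image))` would return that stored None rather than falling back to func(image), leaving the outputs blank, which is plausibly the breakage the commit title "Output restoration" refers to. A minimal, self-contained sketch of the same pattern, with a hypothetical slow_detect standing in for the heavy model calls:

    import time
    from concurrent.futures import ThreadPoolExecutor

    executor = ThreadPoolExecutor(max_workers=4)
    latest_results = {"demo": None}
    futures = {"demo": None}

    def slow_detect(frame):
        # Hypothetical stand-in for a heavy model call (pose/FER/YOLO).
        time.sleep(0.5)
        return f"analysis of frame {frame}"

    def async_analyze(key, func, image):
        # Harvest a finished background task into the cache.
        if futures[key] is not None and futures[key].done():
            latest_results[key] = futures[key].result()
            futures[key] = None
        # Serve the cached result; kick off a refresh if none is running.
        if latest_results[key] is not None:
            if futures[key] is None:
                futures[key] = executor.submit(func, image)
            return latest_results[key]
        # First call ever: block once so the caller never sees an empty output.
        result = func(image)
        latest_results[key] = result
        futures[key] = executor.submit(func, image)
        return result

    for frame in range(3):
        start = time.time()
        out = async_analyze("demo", slow_detect, frame)
        print(f"{out!r} in {time.time() - start:.2f}s")  # only the first call is slow

The trade-off is that the displayed result lags the live frame by roughly one detection cycle, in exchange for a responsive stream.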
@@ -65,9 +67,7 @@ emotion_detector = FER(mtcnn=True)
 # -----------------------------
 # Heavy (Synchronous) Analysis Functions
 # -----------------------------
-
 def _analyze_posture(image):
-    # Convert from PIL (RGB) to OpenCV BGR format
     frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
     output_frame = frame.copy()
     frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
@@ -132,18 +132,18 @@ def _analyze_faces(image):
     return annotated_image, f"Face Detection: {face_result}"
 
 # -----------------------------
-# Asynchronous (Fast) Analysis Functions
+# Asynchronous Wrappers for Each Analysis
 # -----------------------------
-def analyze_posture(image):
+def analyze_posture_async(image):
     return async_analyze("posture", _analyze_posture, image)
 
-def analyze_emotion(image):
+def analyze_emotion_async(image):
     return async_analyze("emotion", _analyze_emotion, image)
 
-def analyze_objects(image):
+def analyze_objects_async(image):
     return async_analyze("objects", _analyze_objects, image)
 
-def analyze_faces(image):
+def analyze_faces_async(image):
     return async_analyze("faces", _analyze_faces, image)
 
 # -----------------------------
@@ -183,7 +183,7 @@ body {
 # Create Individual Interfaces for Each Analysis
 # -----------------------------
 posture_interface = gr.Interface(
-    fn=analyze_posture,
+    fn=analyze_posture_async,
     inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture Your Posture"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Posture Analysis")],
     title="Posture Analysis",
@@ -192,7 +192,7 @@ posture_interface = gr.Interface(
 )
 
 emotion_interface = gr.Interface(
-    fn=analyze_emotion,
+    fn=analyze_emotion_async,
     inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture Your Face"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Emotion Analysis")],
     title="Emotion Analysis",
@@ -201,7 +201,7 @@ emotion_interface = gr.Interface(
 )
 
 objects_interface = gr.Interface(
-    fn=analyze_objects,
+    fn=analyze_objects_async,
     inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture the Scene"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Object Detection")],
     title="Object Detection",
@@ -210,7 +210,7 @@ objects_interface = gr.Interface(
 )
 
 faces_interface = gr.Interface(
-    fn=analyze_faces,
+    fn=analyze_faces_async,
     inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture Your Face"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Face Detection")],
     title="Face Detection",
 