Remove video and add examples and handedness results
Files changed:
- README.md (+1, -1)
- app.py (+29, -44)
- examples/example-01.jpg (added)
- examples/example-02.jpg (added)
- examples/example-03.jpg (added)
- requirements.txt (+1, -1)
README.md
CHANGED

@@ -4,7 +4,7 @@ emoji: 🙌
 colorFrom: pink
 colorTo: purple
 sdk: gradio
-sdk_version: 3.0.
+sdk_version: 3.0.26
 app_file: app.py
 pinned: false
 ---
app.py
CHANGED

@@ -1,12 +1,10 @@
-from os import stat
 import gradio as gr
-from matplotlib.pyplot import draw
 import mediapipe as mp
 import numpy as np
-import tempfile
-import mediapy as media
 import log_utils
 from functools import lru_cache
+import cv2
+from google.protobuf.json_format import MessageToDict
 
 logger = log_utils.get_logger()
 
@@ -34,13 +32,16 @@ def get_model(static_image_mode, max_num_hands, model_complexity, min_detection_
         min_tracking_confidence=min_tracking_conf,
     )
 
-def draw_landmarks(model, img, selected_connections, draw_background):
-    results = model.process(img)
-    output_img = img if draw_background else np.zeros_like(img)
+def draw_landmarks(model, img, selected_connections, draw_background, flip_image):
+    img_to_process = cv2.flip(img, 1) if flip_image else img
+    results = model.process(img_to_process)
+    output_img = img_to_process if draw_background else np.zeros_like(img_to_process)
     if results.multi_hand_landmarks:
         for hand_landmarks in results.multi_hand_landmarks:
-            mp_draw.draw_landmarks(output_img, hand_landmarks, connections[selected_connections])
-    return output_img
+            mp_draw.draw_landmarks(output_img, hand_landmarks, connections[selected_connections])
+    if flip_image:
+        output_img = cv2.flip(output_img, 1)
+    return output_img, [MessageToDict(h) for _, h in enumerate(results.multi_handedness or [])]
 
 def process_image(
     img,
@@ -51,31 +52,14 @@ def process_image(
     min_tracking_conf,
     selected_connections,
     draw_background,
+    flip_image,
 ):
     logger.info(f"Processing image with connections: {selected_connections}, draw background: {draw_background}")
     model = get_model(static_image_mode, max_num_hands, model_complexity, min_detection_conf, min_tracking_conf)
-    return draw_landmarks(model, img, selected_connections, draw_background)
-
-def process_video(
-    video_path,
-    static_image_mode,
-    max_num_hands,
-    model_complexity,
-    min_detection_conf,
-    min_tracking_conf,
-    selected_connections,
-    draw_background,
-):
-    logger.info(f"Processing video with connections: {selected_connections}, draw background: {draw_background}")
-    model = get_model(static_image_mode, max_num_hands, model_complexity, min_detection_conf, min_tracking_conf)
-    with tempfile.NamedTemporaryFile() as f:
-        out_path = f"{f.name}.{video_path.split('.')[-1]}"
-        with media.VideoReader(video_path) as r:
-            with media.VideoWriter(
-                out_path, shape=r.shape, fps=r.fps, bps=r.bps) as w:
-                for image in r:
-                    w.add_image(draw_landmarks(model, image, selected_connections, draw_background))
-    return out_path
+    img, multi_handedness = draw_landmarks(model, img, selected_connections, draw_background, flip_image)
+    left_hand_count = len([h for h in multi_handedness if h['classification'][0]['label'] == 'Left'])
+    right_hand_count = len(multi_handedness) - left_hand_count
+    return img, multi_handedness, left_hand_count, right_hand_count
 
 
 demo = gr.Blocks()
@@ -113,6 +97,7 @@ with demo:
     ## Step 2: Set processing parameters
     """)
     draw_background = gr.Checkbox(value=True, label="Draw background?")
+    flip_image = gr.Checkbox(value=True, label="Flip image? (Note that handedness is determined assuming the input image is mirrored, i.e., taken with a front-facing/selfie camera with images flipped horizontally. If it is not the case, please swap the handedness output in the application.)")
    connection_keys = list(connections.keys())
     selected_connections = gr.Dropdown(
         label="Select connections to draw",
@@ -121,27 +106,27 @@ with demo:
     )
 
     gr.Markdown("""
-    ## Step 3: Select an image
+    ## Step 3: Select an image
     """)
     with gr.Tabs():
         with gr.TabItem(label="Upload an image"):
             uploaded_image = gr.Image(type="numpy")
+            example_image = gr.Examples(examples=[['examples/example-01.jpg'], ['examples/example-02.jpg'], ['examples/example-03.jpg']], inputs=[uploaded_image])
             submit_uploaded_image = gr.Button(value="Process Image")
         with gr.TabItem(label="Take a picture"):
             camera_picture = gr.Image(source="webcam", type="numpy")
             submit_camera_picture = gr.Button(value="Process Image")
-        with gr.TabItem(label="Record a video"):
-            recorded_video = gr.Video(source="webcam", format="mp4")
-            submit_recorded_video = gr.Button(value="Process Video")
-        with gr.TabItem(label="Upload a video"):
-            uploaded_video = gr.Video(format="mp4")
-            submit_uploaded_video = gr.Button(value="Process Video")
 
     gr.Markdown("""
-    ## Step 4: View results
+    ## Step 4: View results
     """)
     with gr.Column():
-        processed_video = gr.Video()
+        with gr.Row():
+            with gr.Column():
+                left_hands = gr.Number(label="Left hands detected")
+            with gr.Column():
+                right_hands = gr.Number(label="Right hands detected")
+        multi_handedness = gr.JSON(label="Raw results")
         processed_image = gr.Image()
 
     gr.Markdown('<img id="visitor-badge" alt="visitor badge" src="https://visitor-badge.glitch.me/badge?page_id=kristyc.mediapipe-hands" />')
@@ -153,10 +138,10 @@ with demo:
         min_tracking_conf,
         selected_connections,
         draw_background,
+        flip_image,
     ]
-    submit_uploaded_image.click(fn=process_image, inputs=[uploaded_image, *setting_inputs], outputs=[processed_image])
-    submit_camera_picture.click(fn=process_image, inputs=[camera_picture, *setting_inputs], outputs=[processed_image])
-    submit_recorded_video.click(fn=process_video, inputs=[recorded_video, *setting_inputs], outputs=[processed_video])
-    submit_uploaded_video.click(fn=process_video, inputs=[recorded_video, *setting_inputs], outputs=[processed_video])
+    outputs = [processed_image, multi_handedness, left_hands, right_hands]
+    submit_uploaded_image.click(fn=process_image, inputs=[uploaded_image, *setting_inputs], outputs=outputs)
+    submit_camera_picture.click(fn=process_image, inputs=[camera_picture, *setting_inputs], outputs=outputs)
 
 demo.launch()
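
For context on the new return value: results.multi_handedness holds one classification entry per detected hand, and MessageToDict converts each entry to a plain dict. A minimal sketch (not part of the diff) of that structure and of the left/right counting used in process_image; the sample scores are illustrative:

# Illustrative sample, shaped like MessageToDict(results.multi_handedness[i])
sample_multi_handedness = [
    {"classification": [{"index": 0, "score": 0.97, "label": "Left"}]},
    {"classification": [{"index": 1, "score": 0.95, "label": "Right"}]},
]

# Same counting logic as process_image above
left_hand_count = len([h for h in sample_multi_handedness if h["classification"][0]["label"] == "Left"])
right_hand_count = len(sample_multi_handedness) - left_hand_count
print(left_hand_count, right_hand_count)  # -> 1 1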
examples/example-01.jpg
ADDED

examples/example-02.jpg
ADDED

examples/example-03.jpg
ADDED
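
The new example images can be sanity-checked outside Gradio with a short script. This is a sketch, assuming it runs from the repository root with the pinned mediapipe and opencv-contrib-python versions, and it mirrors the app's flip-then-detect flow:

import cv2
import mediapipe as mp
from google.protobuf.json_format import MessageToDict

# Load one of the new example images; MediaPipe expects RGB input.
img = cv2.cvtColor(cv2.imread("examples/example-01.jpg"), cv2.COLOR_BGR2RGB)
# Mirror the image, since handedness assumes a selfie-camera (flipped) view.
img = cv2.flip(img, 1)

with mp.solutions.hands.Hands(static_image_mode=True, max_num_hands=2) as hands:
    results = hands.process(img)

for handedness in results.multi_handedness or []:
    print(MessageToDict(handedness))  # e.g. {'classification': [{'label': 'Left', ...}]}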
requirements.txt
CHANGED

@@ -1,4 +1,4 @@
 mediapipe==0.8.10.1
-gradio==3.0.
+gradio==3.0.26
 opencv-contrib-python==4.6.0.66
 mediapy==1.0.3
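
The pinned gradio version now matches the sdk_version declared in README.md; a quick, illustrative check of an installed environment:

import gradio
# Should print 3.0.26, the same version as the Space's sdk_version.
print(gradio.__version__)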