Commit f240626 by narugo1992 (parent: ae08d38)

dev(narugo): add hand detection
app.py
CHANGED

@@ -4,6 +4,7 @@ import gradio as gr
 
 from censor import _CENSOR_MODELS, _DEFAULT_CENSOR_MODEL, _gr_detect_censors
 from face import _FACE_MODELS, _DEFAULT_FACE_MODEL, _gr_detect_faces
+from hand import _gr_detect_hands, _HAND_MODELS, _DEFAULT_HAND_MODEL
 from head import _gr_detect_heads, _HEAD_MODELS, _DEFAULT_HEAD_MODEL
 from manbits import _MANBIT_MODELS, _DEFAULT_MANBIT_MODEL, _gr_detect_manbits
 from person import _PERSON_MODELS, _DEFAULT_PERSON_MODEL, _gr_detect_person
@@ -83,6 +84,30 @@ if __name__ == '__main__':
                     outputs=[gr_person_output_image],
                 )
 
+            with gr.Tab('Hand Detection'):
+                with gr.Row():
+                    with gr.Column():
+                        gr_hand_input_image = gr.Image(type='pil', label='Original Image')
+                        gr_hand_model = gr.Dropdown(_HAND_MODELS, value=_DEFAULT_HAND_MODEL, label='Model')
+                        gr_hand_infer_size = gr.Slider(480, 960, value=640, step=32, label='Max Infer Size')
+                        with gr.Row():
+                            gr_hand_iou_threshold = gr.Slider(0.0, 1.0, 0.7, label='IOU Threshold')
+                            gr_hand_score_threshold = gr.Slider(0.0, 1.0, 0.35, label='Score Threshold')
+
+                        gr_hand_submit = gr.Button(value='Submit', variant='primary')
+
+                    with gr.Column():
+                        gr_hand_output_image = gr.Image(type='pil', label="Labeled")
+
+                gr_hand_submit.click(
+                    _gr_detect_hands,
+                    inputs=[
+                        gr_hand_input_image, gr_hand_model,
+                        gr_hand_infer_size, gr_hand_score_threshold, gr_hand_iou_threshold,
+                    ],
+                    outputs=[gr_hand_output_image],
+                )
+
             with gr.Tab('Censor Point Detection'):
                 with gr.Row():
                     with gr.Column():
@@ -107,7 +132,7 @@ if __name__ == '__main__':
                     outputs=[gr_censor_output_image],
                 )
 
-            with gr.Tab('Manbits Detection'):
+            with gr.Tab('Manbits Detection\n(Deprecated)'):
                 with gr.Row():
                     with gr.Column():
                         gr_manbit_input_image = gr.Image(type='pil', label='Original Image')
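For reference, note the argument order in gr_hand_submit.click(...): the inputs list passes gr_hand_score_threshold before gr_hand_iou_threshold, matching the conf_threshold, iou_threshold parameter order of _gr_detect_hands. Below is a minimal standalone sketch of the same wiring, assuming hand.py from this commit and its local helper modules (onnx_, plot, yolo_) are importable; everything else here follows the diff above.

    # Minimal standalone sketch of the Hand Detection tab added above.
    # Assumes hand.py (added in this commit) and its helper modules
    # (onnx_, plot, yolo_) are importable from the working directory.
    import gradio as gr

    from hand import _gr_detect_hands, _HAND_MODELS, _DEFAULT_HAND_MODEL

    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                gr_input = gr.Image(type='pil', label='Original Image')
                gr_model = gr.Dropdown(_HAND_MODELS, value=_DEFAULT_HAND_MODEL, label='Model')
                gr_size = gr.Slider(480, 960, value=640, step=32, label='Max Infer Size')
                gr_iou = gr.Slider(0.0, 1.0, 0.7, label='IOU Threshold')
                gr_score = gr.Slider(0.0, 1.0, 0.35, label='Score Threshold')
                gr_submit = gr.Button(value='Submit', variant='primary')
            with gr.Column():
                gr_output = gr.Image(type='pil', label='Labeled')

        # Positional order matters: the score slider feeds conf_threshold,
        # so it precedes the IOU slider, mirroring the click() call above.
        gr_submit.click(
            _gr_detect_hands,
            inputs=[gr_input, gr_model, gr_size, gr_score, gr_iou],
            outputs=[gr_output],
        )

    demo.launch()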
hand.py
ADDED

@@ -0,0 +1,48 @@
+from functools import lru_cache
+from typing import List, Tuple
+
+from huggingface_hub import hf_hub_download
+from imgutils.data import ImageTyping, load_image, rgb_encode
+
+from onnx_ import _open_onnx_model
+from plot import detection_visualize
+from yolo_ import _image_preprocess, _data_postprocess
+
+_HAND_MODELS = [
+    'hand_detect_v0.6_s',
+    'hand_detect_v0.5_s',
+    'hand_detect_v0.4_s',
+    'hand_detect_v0.3_s',
+    'hand_detect_v0.2_s',
+    'hand_detect_v0.1_s',
+    'hand_detect_v0.1_n',
+]
+_DEFAULT_HAND_MODEL = _HAND_MODELS[0]
+
+
+@lru_cache()
+def _open_hand_detect_model(model_name):
+    return _open_onnx_model(hf_hub_download(
+        f'deepghs/anime_hand_detection',
+        f'{model_name}/model.onnx'
+    ))
+
+
+_LABELS = ['hand']
+
+
+def detect_hands(image: ImageTyping, model_name: str, max_infer_size=640,
+                 conf_threshold: float = 0.35, iou_threshold: float = 0.7) \
+        -> List[Tuple[Tuple[int, int, int, int], str, float]]:
+    image = load_image(image, mode='RGB')
+    new_image, old_size, new_size = _image_preprocess(image, max_infer_size)
+
+    data = rgb_encode(new_image)[None, ...]
+    output, = _open_hand_detect_model(model_name).run(['output0'], {'images': data})
+    return _data_postprocess(output[0], conf_threshold, iou_threshold, old_size, new_size, _LABELS)
+
+
+def _gr_detect_hands(image: ImageTyping, model_name: str, max_infer_size=640,
+                     conf_threshold: float = 0.35, iou_threshold: float = 0.7):
+    ret = detect_hands(image, model_name, max_infer_size, conf_threshold, iou_threshold)
+    return detection_visualize(image, ret, _LABELS)