Update app.py
app.py CHANGED
@@ -9,11 +9,12 @@ except:
 
 import mmpose
 import gradio as gr
-import cv2
 from mmpose.apis import (inference_top_down_pose_model, init_pose_model,
                          vis_pose_result, process_mmdet_results)
 from mmdet.apis import inference_detector, init_detector
 from PIL import Image
+import cv2
+import numpy as np
 
 pose_config = 'configs/topdown_heatmap_hrnet_w48_coco_256x192.py'
 pose_checkpoint = 'hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth'
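The imports above are mmpose's 0.x top-down API, and the context lines show only the pose config and checkpoint. As a rough sketch of how these pieces are usually wired together (the det_config, det_checkpoint, and run_pose names below are hypothetical placeholders, not part of this change):

from mmdet.apis import inference_detector, init_detector
from mmpose.apis import (inference_top_down_pose_model, init_pose_model,
                         process_mmdet_results, vis_pose_result)

pose_config = 'configs/topdown_heatmap_hrnet_w48_coco_256x192.py'
pose_checkpoint = 'hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth'
det_config = 'det_config.py'            # hypothetical detector config path
det_checkpoint = 'det_checkpoint.pth'   # hypothetical detector weights path

pose_model = init_pose_model(pose_config, pose_checkpoint, device='cpu')
det_model = init_detector(det_config, det_checkpoint, device='cpu')

def run_pose(img):
    # detect people, keep the person class, then run top-down pose estimation
    mmdet_results = inference_detector(det_model, img)
    person_results = process_mmdet_results(mmdet_results, cat_id=1)
    pose_results, _ = inference_top_down_pose_model(
        pose_model, img, person_results, format='xyxy',
        dataset=pose_model.cfg.data.test.type)
    return pose_results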
@@ -43,13 +44,38 @@ def predict(img):
                                  pose_results,
                                  dataset=pose_model.cfg.data.test.type,
                                  show=False)
-
+
+    original_image = Image.open(img)
+    width, height = original_image.size
     #vis_result = cv2.resize(vis_result, dsize=None, fx=0.5, fy=0.5)
     print(f"POSE_RESULTS: {pose_results}")
-
-
-
-
+
+    # create a black image of the same size as the original image
+    black_img = np.zeros((height, width, 3), np.uint8)
+
+    # iterate through each person in the POSE_RESULTS data
+    for person in pose_results:
+        # get the keypoints for this person
+        keypoints = person['keypoints']
+
+        # draw lines between keypoints to form a skeleton
+        skeleton = [(0,1), (1,2), (2,3), (3,4), (1,5), (5,6), (6,7), (1,8), (8,9), (9,10), (10,11), (8,12), (12,13), (13,14), (0,15), (15,17), (0,16), (16,18)]
+        for i, j in skeleton:
+            pt1 = (int(keypoints[i][0]), int(keypoints[i][1]))
+            pt2 = (int(keypoints[j][0]), int(keypoints[j][1]))
+            cv2.line(black_img, pt1, pt2, (255, 255, 255), thickness=2, lineType=cv2.LINE_AA)
+
+        # draw circles at each keypoint
+        for i in range(keypoints.shape[0]):
+            pt = (int(keypoints[i][0]), int(keypoints[i][1]))
+            cv2.circle(black_img, pt, 3, (255, 255, 255), thickness=-1, lineType=cv2.LINE_AA)
+
+    # write black_img to a jpg file
+    cv2.imwrite("output.jpg", black_img)
+    cv2.waitKey(0)
+    cv2.destroyAllWindows()
+
+    return vis_result, "output.jpg"
 
 example_list = ['examples/demo2.png']
 title = "Pose estimation"
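For reference, a self-contained sketch of the skeleton-rendering step added above, assuming pose_results follows mmpose's top-down format (a list of dicts whose 'keypoints' entry is a (K, 3) array of x, y, score) and using the COCO-17 joint order predicted by the hrnet_w48_coco config; draw_skeleton, COCO_SKELETON, and kpt_thr are illustrative names, not part of the commit:

import cv2
import numpy as np

# COCO-17 limb pairs (0-indexed); an assumption matching the hrnet_w48_coco config
COCO_SKELETON = [(15, 13), (13, 11), (16, 14), (14, 12), (11, 12),
                 (5, 11), (6, 12), (5, 6), (5, 7), (6, 8),
                 (7, 9), (8, 10), (1, 2), (0, 1), (0, 2),
                 (1, 3), (2, 4), (3, 5), (4, 6)]

def draw_skeleton(pose_results, width, height, kpt_thr=0.3):
    """Render white joints and limbs on a black canvas of the given size."""
    canvas = np.zeros((height, width, 3), np.uint8)
    for person in pose_results:
        kpts = np.asarray(person['keypoints'])  # shape (17, 3): x, y, score
        for i, j in COCO_SKELETON:
            if kpts[i, 2] > kpt_thr and kpts[j, 2] > kpt_thr:
                pt1 = (int(kpts[i, 0]), int(kpts[i, 1]))
                pt2 = (int(kpts[j, 0]), int(kpts[j, 1]))
                cv2.line(canvas, pt1, pt2, (255, 255, 255), 2, cv2.LINE_AA)
        for x, y, score in kpts:
            if score > kpt_thr:
                cv2.circle(canvas, (int(x), int(y)), 3, (255, 255, 255), -1, cv2.LINE_AA)
    return canvas

Filtering on the keypoint score keeps limbs from being drawn through joints the model did not actually detect.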
@@ -59,7 +85,7 @@ article = ""
 # Create the Gradio demo
 demo = gr.Interface(fn=predict,
                     inputs=gr.Image(),
-                    outputs=[gr.Image(label='Prediction')],
+                    outputs=[gr.Image(label='Prediction'), gr.Image(label='Poses')],
                     examples=example_list,
                     title=title,
                     description=description,
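Because predict now returns two values, the Interface lists two output components and Gradio maps the returned tuple onto them in order. A minimal sketch of that wiring with a stand-in predict (the placeholder function below just echoes the input so the two-output layout can be exercised without the mmpose stack):

import gradio as gr
import numpy as np

def predict(img):
    # stand-in for the real predict(): returns (annotated image, skeleton image)
    overlay = img
    poses = np.zeros_like(img)
    return overlay, poses

demo = gr.Interface(fn=predict,
                    inputs=gr.Image(),
                    outputs=[gr.Image(label='Prediction'), gr.Image(label='Poses')])

if __name__ == "__main__":
    demo.launch()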