Delete pose
- pose/0 +0 -1
- pose/__init__.py +0 -2
- pose/pose_estimator.py +0 -280
- pose/pose_transfer.py +0 -118
- pose/pose_utils.py +0 -29
pose/0
DELETED
@@ -1 +0,0 @@
-u
pose/__init__.py
DELETED
@@ -1,2 +0,0 @@
-from .pose_estimator import PoseEstimator
-from .pose_transfer import PoseTransfer
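For reference, a minimal sketch of how this deleted package was driven, inferred from the constructors and get() methods in the files below; the image paths are hypothetical:

import cv2

from pose import PoseEstimator, PoseTransfer

# Both classes fetch their ONNX weights on first use via dofaker's
# download_file / get_model_url helpers.
estimator = PoseEstimator(name='openpose_body', root='weights/models')
transfer = PoseTransfer(name='pose_transfer',
                        root='weights/models',
                        pose_estimator=estimator)

source = cv2.imread('person_a.jpg')  # hypothetical input paths
target = cv2.imread('person_b.jpg')
# cv2.imread returns BGR, so pass image_format='bgr'.
result = transfer.get(source, target, image_format='bgr')
cv2.imwrite('result.jpg', result)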
pose/pose_estimator.py
DELETED
@@ -1,280 +0,0 @@
-import numpy as np
-
-import cv2
-from scipy.ndimage.filters import gaussian_filter
-
-from .pose_utils import _get_keypoints, _pad_image
-from insightface import model_zoo
-from dofaker.utils import download_file, get_model_url
-
-
-class PoseEstimator:
-
-    def __init__(self, name='openpose_body', root='weights/models'):
-        _, model_file = download_file(get_model_url(name),
-                                      save_dir=root,
-                                      overwrite=False)
-        providers = model_zoo.model_zoo.get_default_providers()
-        self.session = model_zoo.model_zoo.PickableInferenceSession(
-            model_file, providers=providers)
-
-        self.input_mean = 127.5
-        self.input_std = 255.0
-        inputs = self.session.get_inputs()
-        self.input_names = []
-        for inp in inputs:
-            self.input_names.append(inp.name)
-        outputs = self.session.get_outputs()
-        output_names = []
-        for out in outputs:
-            output_names.append(out.name)
-        self.output_names = output_names
-        assert len(
-            self.output_names
-        ) == 2, "The output number of PoseEstimator model should be 2, but got {}, please check your model.".format(
-            len(self.output_names))
-        output_shape = outputs[0].shape
-        input_cfg = inputs[0]
-        input_shape = input_cfg.shape
-        self.input_shape = input_shape
-        print('pose estimator shape:', self.input_shape)
-
-    def forward(self, image, image_format='rgb'):
-        if isinstance(image, str):
-            image = cv2.imread(image, 1)
-            image_format = 'bgr'
-        elif isinstance(image, np.ndarray):
-            if image_format == 'bgr':
-                pass
-            elif image_format == 'rgb':
-                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
-                image_format = 'bgr'
-            else:
-                raise UserWarning(
-                    "PoseEstimator not support image format {}".format(
-                        image_format))
-        else:
-            raise UserWarning(
-                "PoseEstimator input must be str or np.ndarray, but got {}.".
-                format(type(image)))
-
-        scales = [0.5]
-        stride = 8
-        bboxsize = 368
-        padvalue = 128
-        thresh_1 = 0.1
-        thresh_2 = 0.05
-
-        multipliers = [scale * bboxsize / image.shape[0] for scale in scales]
-        heatmap_avg = np.zeros((image.shape[0], image.shape[1], 19))
-        paf_avg = np.zeros((image.shape[0], image.shape[1], 38))
-
-        for scale in multipliers:
-            image_scaled = cv2.resize(image, (0, 0),
-                                      fx=scale,
-                                      fy=scale,
-                                      interpolation=cv2.INTER_CUBIC)
-            image_padded, pads = _pad_image(image_scaled, stride, padvalue)
-
-            image_tensor = np.expand_dims(np.transpose(image_padded, (2, 0, 1)),
-                                          0)
-            blob = (np.float32(image_tensor) - self.input_mean) / self.input_std
-
-            pred = self.session.run(self.output_names,
-                                    {self.input_names[0]: blob})
-            Mconv7_stage6_L1, Mconv7_stage6_L2 = pred[0], pred[1]
-
-            heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0))
-            heatmap = cv2.resize(heatmap, (0, 0),
-                                 fx=stride,
-                                 fy=stride,
-                                 interpolation=cv2.INTER_CUBIC)
-            heatmap = heatmap[:image_padded.shape[0] -
-                              pads[3], :image_padded.shape[1] - pads[2], :]
-            heatmap = cv2.resize(heatmap, (image.shape[1], image.shape[0]),
-                                 interpolation=cv2.INTER_CUBIC)
-
-            paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0))
-            paf = cv2.resize(paf, (0, 0),
-                             fx=stride,
-                             fy=stride,
-                             interpolation=cv2.INTER_CUBIC)
-            paf = paf[:image_padded.shape[0] - pads[3], :image_padded.shape[1] -
-                      pads[2], :]
-            paf = cv2.resize(paf, (image.shape[1], image.shape[0]),
-                             interpolation=cv2.INTER_CUBIC)
-
-            heatmap_avg += (heatmap / len(multipliers))
-            paf_avg += (paf / len(multipliers))
-
-        all_peaks = []
-        num_peaks = 0
-
-        for part in range(18):
-            map_orig = heatmap_avg[:, :, part]
-            map_filt = gaussian_filter(map_orig, sigma=3)
-
-            map_L = np.zeros_like(map_filt)
-            map_T = np.zeros_like(map_filt)
-            map_R = np.zeros_like(map_filt)
-            map_B = np.zeros_like(map_filt)
-            map_L[1:, :] = map_filt[:-1, :]
-            map_T[:, 1:] = map_filt[:, :-1]
-            map_R[:-1, :] = map_filt[1:, :]
-            map_B[:, :-1] = map_filt[:, 1:]
-
-            peaks_binary = np.logical_and.reduce(
-                (map_filt >= map_L, map_filt >= map_T, map_filt
-                 >= map_R, map_filt >= map_B, map_filt > thresh_1))
-            peaks = list(
-                zip(np.nonzero(peaks_binary)[1],
-                    np.nonzero(peaks_binary)[0]))
-            peaks_ids = range(num_peaks, num_peaks + len(peaks))
-            peaks_with_scores = [
-                peak + (map_orig[peak[1], peak[0]], ) for peak in peaks
-            ]
-            peaks_with_scores_and_ids = [peaks_with_scores[i] + (peaks_ids[i],) \
-                for i in range(len(peaks_ids))]
-            all_peaks.append(peaks_with_scores_and_ids)
-            num_peaks += len(peaks)
-
-        map_idx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44],
-                   [19, 20], [21, 22], [23, 24], [25, 26], [27, 28], [29, 30],
-                   [47, 48], [49, 50], [53, 54], [51, 52], [55, 56], [37, 38],
-                   [45, 46]]
-        limbseq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9],
-                   [9, 10], [10, 11], [2, 12], [12, 13], [13, 14], [2, 1],
-                   [1, 15], [15, 17], [1, 16], [16, 18], [3, 17], [6, 18]]
-
-        all_connections = []
-        spl_k = []
-        mid_n = 10
-
-        for k in range(len(map_idx)):
-            score_mid = paf_avg[:, :, [x - 19 for x in map_idx[k]]]
-            candidate_A = all_peaks[limbseq[k][0] - 1]
-            candidate_B = all_peaks[limbseq[k][1] - 1]
-            n_A = len(candidate_A)
-            n_B = len(candidate_B)
-            index_A, index_B = limbseq[k]
-            if n_A != 0 and n_B != 0:
-                connection_candidates = []
-                for i in range(n_A):
-                    for j in range(n_B):
-                        v = np.subtract(candidate_B[j][:2], candidate_A[i][:2])
-                        n = np.sqrt(v[0] * v[0] + v[1] * v[1])
-                        v = np.divide(v, n)
-
-                        ab = list(
-                            zip(
-                                np.linspace(candidate_A[i][0],
-                                            candidate_B[j][0],
-                                            num=mid_n),
-                                np.linspace(candidate_A[i][1],
-                                            candidate_B[j][1],
-                                            num=mid_n)))
-                        vx = np.array([
-                            score_mid[int(round(ab[x][1])),
-                                      int(round(ab[x][0])), 0]
-                            for x in range(len(ab))
-                        ])
-                        vy = np.array([
-                            score_mid[int(round(ab[x][1])),
-                                      int(round(ab[x][0])), 1]
-                            for x in range(len(ab))
-                        ])
-                        score_midpoints = np.multiply(vx, v[0]) + np.multiply(
-                            vy, v[1])
-                        score_with_dist_prior = sum(
-                            score_midpoints) / len(score_midpoints) + min(
-                                0.5 * image.shape[0] / n - 1, 0)
-                        criterion_1 = len(
-                            np.nonzero(score_midpoints > thresh_2)
-                            [0]) > 0.8 * len(score_midpoints)
-                        criterion_2 = score_with_dist_prior > 0
-                        if criterion_1 and criterion_2:
-                            connection_candidate = [
-                                i, j, score_with_dist_prior,
-                                score_with_dist_prior + candidate_A[i][2] +
-                                candidate_B[j][2]
-                            ]
-                            connection_candidates.append(connection_candidate)
-                connection_candidates = sorted(connection_candidates,
-                                               key=lambda x: x[2],
-                                               reverse=True)
-                connection = np.zeros((0, 5))
-                for candidate in connection_candidates:
-                    i, j, s = candidate[0:3]
-                    if i not in connection[:, 3] and j not in connection[:, 4]:
-                        connection = np.vstack([
-                            connection,
-                            [candidate_A[i][3], candidate_B[j][3], s, i, j]
-                        ])
-                        if len(connection) >= min(n_A, n_B):
-                            break
-                all_connections.append(connection)
-            else:
-                spl_k.append(k)
-                all_connections.append([])
-
-        candidate = np.array(
-            [item for sublist in all_peaks for item in sublist])
-        subset = np.ones((0, 20)) * -1
-
-        for k in range(len(map_idx)):
-            if k not in spl_k:
-                part_As = all_connections[k][:, 0]
-                part_Bs = all_connections[k][:, 1]
-                index_A, index_B = np.array(limbseq[k]) - 1
-                for i in range(len(all_connections[k])):
-                    found = 0
-                    subset_idx = [-1, -1]
-                    for j in range(len(subset)):
-                        if subset[j][index_A] == part_As[i] or subset[j][
-                                index_B] == part_Bs[i]:
-                            subset_idx[found] = j
-                            found += 1
-                    if found == 1:
-                        j = subset_idx[0]
-                        if subset[j][index_B] != part_Bs[i]:
-                            subset[j][index_B] = part_Bs[i]
-                            subset[j][-1] += 1
-                            subset[j][-2] += candidate[
-                                part_Bs[i].astype(int),
-                                2] + all_connections[k][i][2]
-                    elif found == 2:
-                        j1, j2 = subset_idx
-                        membership = ((subset[j1] >= 0).astype(int) +
-                                      (subset[j2] >= 0).astype(int))[:-2]
-                        if len(np.nonzero(membership == 2)[0]) == 0:
-                            subset[j1][:-2] += (subset[j2][:-2] + 1)
-                            subset[j1][-2:] += subset[j2][-2:]
-                            subset[j1][-2] += all_connections[k][i][2]
-                            subset = np.delete(subset, j2, 0)
-                        else:
-                            subset[j1][index_B] = part_Bs[i]
-                            subset[j1][-1] += 1
-                            subset[j1][-2] += candidate[
-                                part_Bs[i].astype(int),
-                                2] + all_connections[k][i][2]
-                    elif not found and k < 17:
-                        row = np.ones(20) * -1
-                        row[index_A] = part_As[i]
-                        row[index_B] = part_Bs[i]
-                        row[-1] = 2
-                        row[-2] = sum(
-                            candidate[all_connections[k][i, :2].astype(int),
-                                      2]) + all_connections[k][i][2]
-                        subset = np.vstack([subset, row])
-
-        del_idx = []
-
-        for i in range(len(subset)):
-            if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
-                del_idx.append(i)
-        subset = np.delete(subset, del_idx, axis=0)
-
-        return _get_keypoints(candidate, subset)
-
-    def get(self, image, image_format='rgb'):
-        return self.forward(image, image_format)
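The estimator above implements OpenPose-style post-processing: each of the 18 keypoint heatmaps is Gaussian-smoothed, pixels that dominate their 4-neighborhood and exceed thresh_1 become candidate joints, and joints are then linked via the part-affinity fields. A self-contained sketch of just the peak-finding step (the function name is illustrative, and the modern scipy.ndimage import replaces the deprecated scipy.ndimage.filters path used in the deleted file):

import numpy as np
from scipy.ndimage import gaussian_filter

def find_peaks(heatmap, thresh=0.1, sigma=3):
    """Return (x, y, score) for local maxima of one keypoint heatmap."""
    smoothed = gaussian_filter(heatmap, sigma=sigma)
    # Compare each pixel against its 4-neighborhood by shifting the map,
    # as the map_L/map_T/map_R/map_B block above does.
    up = np.zeros_like(smoothed)
    up[1:, :] = smoothed[:-1, :]
    left = np.zeros_like(smoothed)
    left[:, 1:] = smoothed[:, :-1]
    down = np.zeros_like(smoothed)
    down[:-1, :] = smoothed[1:, :]
    right = np.zeros_like(smoothed)
    right[:, :-1] = smoothed[:, 1:]
    is_peak = np.logical_and.reduce(
        (smoothed >= up, smoothed >= left, smoothed >= down,
         smoothed >= right, smoothed > thresh))
    ys, xs = np.nonzero(is_peak)
    # Score peaks with the unsmoothed map, as the deleted code does.
    return [(x, y, heatmap[y, x]) for x, y in zip(xs, ys)]

# Toy check: a single blurred spike yields exactly one peak at (x=12, y=20).
hm = np.zeros((64, 64), dtype=np.float32)
hm[20, 12] = 20.0
print(find_peaks(gaussian_filter(hm, 2)))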
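The inner double loop of forward() scores each candidate limb by sampling the PAF at mid_n points along the segment between two joints and dotting each sample with the unit limb direction; a connection is kept when more than 80% of the samples exceed thresh_2 and the mean score (after the distance prior min(0.5 * H / n - 1, 0)) stays positive. A condensed, standalone sketch of that scoring, omitting the distance prior:

import numpy as np

def score_connection(paf_x, paf_y, pt_a, pt_b, n_samples=10, thresh=0.05):
    """Integrate a 2-channel PAF along the segment pt_a -> pt_b, points as (x, y).
    Returns (mean alignment score, passes-criteria flag)."""
    v = np.subtract(pt_b, pt_a).astype(float)
    norm = np.hypot(v[0], v[1])
    if norm == 0:
        return 0.0, False
    v /= norm
    xs = np.linspace(pt_a[0], pt_b[0], n_samples).round().astype(int)
    ys = np.linspace(pt_a[1], pt_b[1], n_samples).round().astype(int)
    # Dot each sampled PAF vector with the unit limb direction.
    scores = paf_x[ys, xs] * v[0] + paf_y[ys, xs] * v[1]
    ok = (np.count_nonzero(scores > thresh) > 0.8 * n_samples
          and scores.mean() > 0)
    return scores.mean(), ok

# Toy PAF pointing right everywhere: a horizontal limb aligns perfectly.
paf_x = np.ones((64, 64))
paf_y = np.zeros((64, 64))
print(score_connection(paf_x, paf_y, (5, 30), (50, 30)))  # (1.0, True)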
pose/pose_transfer.py
DELETED
@@ -1,118 +0,0 @@
-import cv2
-import numpy as np
-from scipy.ndimage.filters import gaussian_filter
-
-from .pose_utils import _get_keypoints, _pad_image
-from insightface import model_zoo
-from dofaker.utils import download_file, get_model_url
-from dofaker.transforms import center_crop, pad
-
-
-class PoseTransfer:
-
-    def __init__(self,
-                 name='pose_transfer',
-                 root='weights/models',
-                 pose_estimator=None):
-        assert pose_estimator is not None, "The pose_estimator of PoseTransfer shouldn't be None"
-        self.pose_estimator = pose_estimator
-        _, model_file = download_file(get_model_url(name),
-                                      save_dir=root,
-                                      overwrite=False)
-        providers = model_zoo.model_zoo.get_default_providers()
-        self.session = model_zoo.model_zoo.PickableInferenceSession(
-            model_file, providers=providers)
-
-        self.input_mean = 127.5
-        self.input_std = 127.5
-        inputs = self.session.get_inputs()
-        self.input_names = []
-        for inp in inputs:
-            self.input_names.append(inp.name)
-        outputs = self.session.get_outputs()
-        output_names = []
-        for out in outputs:
-            output_names.append(out.name)
-        self.output_names = output_names
-        assert len(
-            self.output_names
-        ) == 1, "The output number of PoseTransfer model should be 1, but got {}, please check your model.".format(
-            len(self.output_names))
-        output_shape = outputs[0].shape
-        input_cfg = inputs[0]
-        input_shape = input_cfg.shape
-        self.input_shape = input_shape
-        print('pose transfer shape:', self.input_shape)
-
-    def forward(self, source_image, target_image, image_format='rgb'):
-        h, w, c = source_image.shape
-        if image_format == 'rgb':
-            pass
-        elif image_format == 'bgr':
-            source_image = cv2.cvtColor(source_image, cv2.COLOR_BGR2RGB)
-            target_image = cv2.cvtColor(target_image, cv2.COLOR_BGR2RGB)
-            image_format = 'rgb'
-        else:
-            raise UserWarning(
-                "PoseTransfer not support image format {}".format(image_format))
-        imgA = self._resize_and_pad_image(source_image)
-        kptA = self._estimate_keypoints(imgA, image_format=image_format)
-        mapA = self._keypoints2heatmaps(kptA)
-
-        imgB = self._resize_and_pad_image(target_image)
-        kptB = self._estimate_keypoints(imgB)
-        mapB = self._keypoints2heatmaps(kptB)
-
-        imgA_t = (imgA.astype('float32') - self.input_mean) / self.input_std
-        imgA_t = imgA_t.transpose([2, 0, 1])[None, ...]
-        mapA_t = mapA.transpose([2, 0, 1])[None, ...]
-        mapB_t = mapB.transpose([2, 0, 1])[None, ...]
-        mapAB_t = np.concatenate((mapA_t, mapB_t), axis=1)
-        pred = self.session.run(self.output_names, {
-            self.input_names[0]: imgA_t,
-            self.input_names[1]: mapAB_t
-        })[0]
-        target_image = pred.transpose((0, 2, 3, 1))[0]
-        bgr_target_image = np.clip(
-            self.input_std * target_image + self.input_mean, 0,
-            255).astype(np.uint8)[:, :, ::-1]
-        crop_size = (256,
-                     min((256 * target_image.shape[1] // target_image.shape[0]),
-                         176))
-        bgr_image = center_crop(bgr_target_image, crop_size)
-        bgr_image = cv2.resize(bgr_image, (w, h), interpolation=cv2.INTER_CUBIC)
-        return bgr_image
-
-    def get(self, source_image, target_image, image_format='rgb'):
-        return self.forward(source_image, target_image, image_format)
-
-    def _resize_and_pad_image(self, image: np.ndarray, size=256):
-        w = size * image.shape[1] // image.shape[0]
-        w_box = min(w, size * 11 // 16)
-        image = cv2.resize(image, (w, size), interpolation=cv2.INTER_CUBIC)
-        image = center_crop(image, (size, w_box))
-        image = pad(image,
-                    size - w_box,
-                    size - w_box,
-                    size - w_box,
-                    size - w_box,
-                    fill=255)
-        image = center_crop(image, (size, size))
-        return image
-
-    def _estimate_keypoints(self, image: np.ndarray, image_format='rgb'):
-        keypoints = self.pose_estimator.get(image, image_format)
-        keypoints = keypoints[0] if len(keypoints) > 0 else np.zeros(
-            (18, 3), dtype=np.int32)
-        keypoints[np.where(keypoints[:, 2] == 0), :2] = -1
-        keypoints = keypoints[:, :2]
-        return keypoints
-
-    def _keypoints2heatmaps(self, keypoints, size=256):
-        heatmaps = np.zeros((size, size, keypoints.shape[0]), dtype=np.float32)
-        for k in range(keypoints.shape[0]):
-            x, y = keypoints[k]
-            if x == -1 or y == -1:
-                continue
-            heatmaps[y, x, k] = 1.0
-        return heatmaps
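The transfer model consumes pose as one-hot keypoint heatmaps: an 18-channel 256x256 tensor with a single 1.0 at each visible joint, and the source and target maps concatenated channel-wise into a 36-channel conditioning input. A standalone sketch of that encoding and the resulting channel layout (variable names here are illustrative):

import numpy as np

def keypoints_to_heatmaps(keypoints, size=256):
    """keypoints: (18, 2) array of (x, y); (-1, -1) marks an undetected joint.
    Returns (size, size, 18) float32 maps with a one-hot spike per joint."""
    maps = np.zeros((size, size, keypoints.shape[0]), dtype=np.float32)
    for k, (x, y) in enumerate(keypoints):
        if x == -1 or y == -1:  # joint missing, channel stays all-zero
            continue
        maps[y, x, k] = 1.0
    return maps

kpt_a = np.full((18, 2), -1)
kpt_a[0] = (128, 40)  # only the first joint visible
kpt_b = np.full((18, 2), -1)
kpt_b[0] = (100, 60)
map_a = keypoints_to_heatmaps(kpt_a).transpose(2, 0, 1)[None]  # (1, 18, 256, 256)
map_b = keypoints_to_heatmaps(kpt_b).transpose(2, 0, 1)[None]
# The generator receives the normalized source image plus this 36-channel pose pair.
map_ab = np.concatenate([map_a, map_b], axis=1)
print(map_ab.shape, map_ab.sum())  # (1, 36, 256, 256) 2.0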
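_resize_and_pad_image above normalizes any input to the generator's 256x256 canvas: resize to height 256 keeping aspect ratio, center-crop the width to at most 176 (= 256 * 11 // 16), then pad with white back out to a square. A standalone re-creation of that geometry, with simplified stand-ins for dofaker.transforms.center_crop and pad inlined, so edge behavior for odd widths may differ slightly:

import cv2
import numpy as np

def resize_and_pad(image, size=256):
    """Approximate mirror of the deleted _resize_and_pad_image."""
    w = size * image.shape[1] // image.shape[0]  # width once height == size
    w_box = min(w, size * 11 // 16)              # content box, at most 176 px
    image = cv2.resize(image, (w, size), interpolation=cv2.INTER_CUBIC)
    x0 = (w - w_box) // 2                        # center-crop to w_box wide
    image = image[:, x0:x0 + w_box]
    pad_w = (size - w_box) // 2                  # white-pad back to a square
    canvas = np.full((size, size, 3), 255, dtype=image.dtype)
    canvas[:, pad_w:pad_w + w_box] = image
    return canvas

img = np.zeros((512, 384, 3), dtype=np.uint8)  # a 4:3 portrait frame
print(resize_and_pad(img).shape)               # (256, 256, 3)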
pose/pose_utils.py
DELETED
@@ -1,29 +0,0 @@
-import numpy as np
-
-
-def _pad_image(image, stride=1, padvalue=0):
-    assert len(image.shape) == 2 or len(image.shape) == 3
-    h, w = image.shape[:2]
-    pads = [None] * 4
-    pads[0] = 0  # left
-    pads[1] = 0  # top
-    pads[2] = 0 if (w % stride == 0) else stride - (w % stride)  # right
-    pads[3] = 0 if (h % stride == 0) else stride - (h % stride)  # bottom
-    num_channels = 1 if len(image.shape) == 2 else image.shape[2]
-    image_padded = np.ones(
-        (h + pads[3], w + pads[2], num_channels), dtype=np.uint8) * padvalue
-    image_padded = np.squeeze(image_padded)
-    image_padded[:h, :w] = image
-    return image_padded, pads
-
-
-def _get_keypoints(candidates, subsets):
-    k = subsets.shape[0]
-    keypoints = np.zeros((k, 18, 3), dtype=np.int32)
-    for i in range(k):
-        for j in range(18):
-            index = np.int32(subsets[i][j])
-            if index != -1:
-                x, y = np.int32(candidates[index][:2])
-                keypoints[i][j] = (x, y, 1)
-    return keypoints
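_pad_image pads only the right and bottom edges so both dimensions become multiples of the network stride, and _get_keypoints flattens each assembled subset into (x, y, visible) triplets. A quick standalone check of the padding invariant (the function name is illustrative):

import numpy as np

def pad_to_stride(image, stride=8, padvalue=128):
    """Right/bottom-pad an HxW(xC) image so both dims are multiples of stride,
    mirroring _pad_image's pads = [left, top, right, bottom] convention."""
    h, w = image.shape[:2]
    pad_r = (-w) % stride  # == 0 if w % stride == 0 else stride - w % stride
    pad_b = (-h) % stride
    out = np.full((h + pad_b, w + pad_r) + image.shape[2:], padvalue,
                  dtype=image.dtype)
    out[:h, :w] = image
    return out, [0, 0, pad_r, pad_b]

img = np.zeros((367, 500, 3), dtype=np.uint8)
padded, pads = pad_to_stride(img, stride=8)
print(padded.shape, pads)  # (368, 504, 3) [0, 0, 4, 1]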