lithiumice committed on
Commit 986d6bf · verified · 1 Parent(s): 94a3494

Upload 4 files

TR00_004_00_WO_accad.ini ADDED
@@ -0,0 +1,29 @@
+ [All]
+ adam_beta1 : 0.9
+ base_lr : 0.005
+ batch_size : 512
+ best_model_fname : None
+ cuda_id : 0
+ data_shape : [1, 21, 3]
+ dataset_dir : None
+ display_model_gender : male
+ expr_code : 004_00_WO_accad
+ fp_precision : 32
+ ip_avoid : False
+ kl_coef : 0.005
+ latentD : 32
+ log_every_epoch : 2
+ model_type : smpl
+ n_workers : 10
+ num_bodies_to_display : 10
+ num_epochs : 100
+ num_neurons : 512
+ reg_coef : 0.0001
+ remove_Zrot : True
+ seed : 4815
+ sm_coef : 0.01
+ test_only : False
+ try_num : 0
+ use_cont_repr : True
+ verbosity : 0
+ work_dir : None
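The [All] section above records the hyperparameters this VPoser snapshot was trained with (32-D latent space, 512-unit layers, 21 body joints, input of shape [1, 21, 3]). As a rough illustration only (the project loads this file through its own configuration tooling, not necessarily configparser), the values can be read back with the Python standard library:

    import ast
    from configparser import ConfigParser

    cfg = ConfigParser()
    cfg.read('TR00_004_00_WO_accad.ini')

    num_neurons = cfg.getint('All', 'num_neurons')                # 512
    latentD = cfg.getint('All', 'latentD')                        # 32
    use_cont_repr = cfg.getboolean('All', 'use_cont_repr')        # True
    data_shape = ast.literal_eval(cfg.get('All', 'data_shape'))   # [1, 21, 3]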
snapshots/TR00_E096.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e4ad40f922606989939d3fae6eadf82d1a8e98112dffb6e39d89d6471270d5c
+ size 2702962
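The .pt entry above is a Git LFS pointer, so the repository itself only stores this stub; the actual E096 snapshot (about 2.7 MB) is fetched by an LFS-aware clone or by running git lfs pull. A minimal loading sketch, assuming the snapshot is a plain state_dict saved with torch.save that matches the VPoser architecture in vposer_smpl.py (verify against the actual file before relying on this):

    import torch
    from vposer_smpl import VPoser

    # Hyperparameters taken from TR00_004_00_WO_accad.ini above.
    vposer = VPoser(num_neurons=512, latentD=32, data_shape=[1, 21, 3])
    state_dict = torch.load('snapshots/TR00_E096.pt', map_location='cpu')
    vposer.load_state_dict(state_dict)
    vposer.eval()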
version.txt ADDED
@@ -0,0 +1 @@
+ The codenames "TR00_004_00_WO_accad" and "TR00_E096" correspond to "VPoser Version 1.0".
vposer_smpl.py ADDED
@@ -0,0 +1,164 @@
+ # -*- coding: utf-8 -*-
+ #
+ # Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
+ # acting on behalf of its Max Planck Institute for Intelligent Systems and the
+ # Max Planck Institute for Biological Cybernetics. All rights reserved.
+ #
+ # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
+ # on this computer program. You can only use this computer program if you have closed a license agreement
+ # with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
+ # Any use of the computer program without a valid license is prohibited and liable to prosecution.
+ # Contact: [email protected]
+ #
+ #
+ # If you use this code in a research publication please consider citing the following:
+ #
+ # Expressive Body Capture: 3D Hands, Face, and Body from a Single Image <https://arxiv.org/abs/1904.05866>
+ # AMASS: Archive of Motion Capture as Surface Shapes <https://arxiv.org/abs/1904.03278>
+ #
+ #
+ # Code Developed by:
+ # Nima Ghorbani <https://www.linkedin.com/in/nghorbani/>
+ # Vassilis Choutas <https://ps.is.tuebingen.mpg.de/employees/vchoutas> for ContinousRotReprDecoder
+ #
+ # 2018.01.02
+
+ '''
+ A human body pose prior built with Auto-Encoding Variational Bayes
+ '''
+
+ __all__ = ['VPoser']
+
+ import os, sys, shutil
+
+ import torch
+
+ from torch import nn
+ from torch.nn import functional as F
+
+ import numpy as np
+
+ import torchgeometry as tgm
+
+ class ContinousRotReprDecoder(nn.Module):
+     def __init__(self):
+         super(ContinousRotReprDecoder, self).__init__()
+
+     def forward(self, module_input):
+         reshaped_input = module_input.view(-1, 3, 2)
+
+         b1 = F.normalize(reshaped_input[:, :, 0], dim=1)
+
+         dot_prod = torch.sum(b1 * reshaped_input[:, :, 1], dim=1, keepdim=True)
+         b2 = F.normalize(reshaped_input[:, :, 1] - dot_prod * b1, dim=-1)
+         b3 = torch.cross(b1, b2, dim=1)
+
+         return torch.stack([b1, b2, b3], dim=-1)
+
+
+ class VPoser(nn.Module):
+     def __init__(self, num_neurons, latentD, data_shape, use_cont_repr=True):
+         super(VPoser, self).__init__()
+
+         self.latentD = latentD
+         self.use_cont_repr = use_cont_repr
+
+         n_features = np.prod(data_shape)
+         self.num_joints = data_shape[1]
+
+         self.bodyprior_enc_bn1 = nn.BatchNorm1d(n_features)
+         self.bodyprior_enc_fc1 = nn.Linear(n_features, num_neurons)
+         self.bodyprior_enc_bn2 = nn.BatchNorm1d(num_neurons)
+         self.bodyprior_enc_fc2 = nn.Linear(num_neurons, num_neurons)
+         self.bodyprior_enc_mu = nn.Linear(num_neurons, latentD)
+         self.bodyprior_enc_logvar = nn.Linear(num_neurons, latentD)
+         self.dropout = nn.Dropout(p=.1, inplace=False)
+
+         self.bodyprior_dec_fc1 = nn.Linear(latentD, num_neurons)
+         self.bodyprior_dec_fc2 = nn.Linear(num_neurons, num_neurons)
+
+         if self.use_cont_repr:
+             self.rot_decoder = ContinousRotReprDecoder()
+
+         self.bodyprior_dec_out = nn.Linear(num_neurons, self.num_joints* 6)
+
+     def encode(self, Pin):
+         '''
+
+         :param Pin: Nx(numjoints*3)
+         :param rep_type: 'matrot'/'aa' for matrix rotations or axis-angle
+         :return:
+         '''
+         Xout = Pin.view(Pin.size(0), -1)  # flatten input
+         Xout = self.bodyprior_enc_bn1(Xout)
+
+         Xout = F.leaky_relu(self.bodyprior_enc_fc1(Xout), negative_slope=.2)
+         Xout = self.bodyprior_enc_bn2(Xout)
+         Xout = self.dropout(Xout)
+         Xout = F.leaky_relu(self.bodyprior_enc_fc2(Xout), negative_slope=.2)
+         return torch.distributions.normal.Normal(self.bodyprior_enc_mu(Xout), F.softplus(self.bodyprior_enc_logvar(Xout)))
+
+     def decode(self, Zin, output_type='matrot'):
+         assert output_type in ['matrot', 'aa']
+
+         Xout = F.leaky_relu(self.bodyprior_dec_fc1(Zin), negative_slope=.2)
+         Xout = self.dropout(Xout)
+         Xout = F.leaky_relu(self.bodyprior_dec_fc2(Xout), negative_slope=.2)
+         Xout = self.bodyprior_dec_out(Xout)
+         if self.use_cont_repr:
+             Xout = self.rot_decoder(Xout)
+         else:
+             Xout = torch.tanh(Xout)
+
+         Xout = Xout.view([-1, 1, self.num_joints, 9])
+         if output_type == 'aa': return VPoser.matrot2aa(Xout)
+         return Xout
+
+     def forward(self, Pin, input_type='matrot', output_type='matrot'):
+         '''
+
+         :param Pin: aa: Nx1xnum_jointsx3 / matrot: Nx1xnum_jointsx9
+         :param input_type: matrot / aa for matrix rotations or axis angles
+         :param output_type: matrot / aa
+         :return:
+         '''
+         assert output_type in ['matrot', 'aa']
+         # if input_type == 'aa': Pin = VPoser.aa2matrot(Pin)
+         q_z = self.encode(Pin)
+         q_z_sample = q_z.rsample()
+         Prec = self.decode(q_z_sample)
+         if output_type == 'aa': Prec = VPoser.matrot2aa(Prec)
+
+         #return Prec, q_z.mean, q_z.sigma
+         return {'pose':Prec, 'mean':q_z.mean, 'std':q_z.scale}
+
+     def sample_poses(self, num_poses, output_type='aa', seed=None):
+         np.random.seed(seed)
+         dtype = self.bodyprior_dec_fc1.weight.dtype
+         device = self.bodyprior_dec_fc1.weight.device
+         self.eval()
+         with torch.no_grad():
+             Zgen = torch.tensor(np.random.normal(0., 1., size=(num_poses, self.latentD)), dtype=dtype).to(device)
+         return self.decode(Zgen, output_type=output_type)
+
+     @staticmethod
+     def matrot2aa(pose_matrot):
+         '''
+         :param pose_matrot: Nx1xnum_jointsx9
+         :return: Nx1xnum_jointsx3
+         '''
+         batch_size = pose_matrot.size(0)
+         homogen_matrot = F.pad(pose_matrot.view(-1, 3, 3), [0,1])
+         pose = tgm.rotation_matrix_to_angle_axis(homogen_matrot).view(batch_size, 1, -1, 3).contiguous()
+         return pose
+
+     @staticmethod
+     def aa2matrot(pose):
+         '''
+         :param Nx1xnum_jointsx3
+         :return: pose_matrot: Nx1xnum_jointsx9
+         '''
+         batch_size = pose.size(0)
+         pose_body_matrot = tgm.angle_axis_to_rotation_matrix(pose.reshape(-1, 3))[:, :3, :3].contiguous().view(batch_size, 1, -1, 9)
+         return pose_body_matrot
+
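For context, a short usage sketch of the VPoser class defined above, assuming vposer holds an instance whose trained weights were restored as in the snapshot sketch earlier (tensor shapes follow the docstrings: N x 1 x 21 joints x 3 for axis-angle):

    import torch

    vposer.eval()
    with torch.no_grad():
        # Draw 10 random poses from the latent prior, returned as axis-angle: (10, 1, 21, 3).
        poses_aa = vposer.sample_poses(num_poses=10, output_type='aa', seed=4815)

        # encode() flattens the input to 21*3 = 63 features and returns a Normal
        # distribution over the 32-D latent space; decode() maps a latent back to poses.
        q_z = vposer.encode(poses_aa)
        poses_rec = vposer.decode(q_z.mean, output_type='aa')   # (10, 1, 21, 3)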