Spaces:
Running
on
Zero
Running
on
Zero
File size: 1,775 Bytes
8ed2f16 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 |
# PD-FGC motion encoder, modified from https://github.com/Dorniwang/PD-FGC-inference
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from .FAN_feature_extractor import FAN_use
class FanEncoder(nn.Module):
    """PD-FGC motion encoder built on a FAN feature backbone.

    Encodes an input image into four disentangled motion representations:
    a head-pose embedding, an eye embedding, an emotion embedding, and a
    mouth feature vector. Modified from
    https://github.com/Dorniwang/PD-FGC-inference.
    """

    def __init__(self):
        super().__init__()
        pose_dim = 6  # head-pose embedding size
        eye_dim = 6   # eye embedding size

        def branch_mlp():
            # Shared 512 -> 512 branch shape used by every motion head.
            return nn.Sequential(
                nn.Linear(512, 512),
                nn.ReLU(),
                nn.BatchNorm1d(512),
                nn.Linear(512, 512),
            )

        def embed_mlp(out_dim):
            # Final projection from a branch feature to its embedding.
            return nn.Sequential(nn.ReLU(), nn.Linear(512, out_dim))

        # NOTE: attribute names and creation order must stay fixed so that
        # pretrained state-dict keys keep matching.
        self.model = FAN_use()  # backbone producing a 512-d feature
        self.to_mouth = branch_mlp()
        self.mouth_embed = embed_mlp(512 - pose_dim - eye_dim)
        self.to_headpose = branch_mlp()
        self.headpose_embed = embed_mlp(pose_dim)
        self.to_eye = branch_mlp()
        self.eye_embed = embed_mlp(eye_dim)
        self.to_emo = branch_mlp()
        self.emo_embed = embed_mlp(30)

    def forward_feature(self, x):
        """Return the raw 512-d backbone feature for input ``x``."""
        return self.model(x)

    def forward(self, x):
        """Encode ``x`` and return (headpose, eye, emotion, mouth-feature).

        The mouth branch returns its intermediate 512-d feature (``to_mouth``
        output); ``mouth_embed`` is intentionally not applied here, matching
        the upstream implementation.
        """
        feat = self.model(x)
        mouth = self.to_mouth(feat)
        pose = self.headpose_embed(self.to_headpose(feat))
        eye = self.eye_embed(self.to_eye(feat))
        emo = self.emo_embed(self.to_emo(feat))
        return pose, eye, emo, mouth
|