fffiloni committed
Commit a6028c9 · verified · 1 Parent(s): 16048a2

Migrated from GitHub

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full list.
Files changed (50)
  1. .gitattributes +4 -0
  2. ORIGINAL_README.md +118 -0
  3. dataset_process/audio.py +156 -0
  4. dataset_process/croper.py +154 -0
  5. dataset_process/norm.npz +3 -0
  6. example/audio_driven/WDA_BenCardin1_000.wav +3 -0
  7. example/audio_driven/WRA_MarkwayneMullin_000.wav +3 -0
  8. example/audio_driven/WRA_MikeJohanns1_000.wav +3 -0
  9. example/source_image/WDA_BenCardin1_000.png +0 -0
  10. example/source_image/WRA_MarkwayneMullin_000.png +0 -0
  11. example/source_image/WRA_MikeJohanns1_000.png +0 -0
  12. inference.py +383 -0
  13. model/__init__.py +6 -0
  14. model/model.py +230 -0
  15. model/model_utils.py +33 -0
  16. model/point_model.py +38 -0
  17. model/temporaltrans/temptrans.py +267 -0
  18. model/temporaltrans/transformer_utils.py +147 -0
  19. requirements.txt +44 -0
  20. src/config/__init__.py +0 -0
  21. src/config/argument_config.py +48 -0
  22. src/config/base_config.py +29 -0
  23. src/config/crop_config.py +29 -0
  24. src/config/inference_config.py +52 -0
  25. src/config/models.yaml +43 -0
  26. src/gradio_pipeline.py +117 -0
  27. src/live_portrait_pipeline.py +285 -0
  28. src/live_portrait_wrapper.py +318 -0
  29. src/modules/__init__.py +0 -0
  30. src/modules/appearance_feature_extractor.py +48 -0
  31. src/modules/convnextv2.py +149 -0
  32. src/modules/dense_motion.py +104 -0
  33. src/modules/motion_extractor.py +35 -0
  34. src/modules/spade_generator.py +59 -0
  35. src/modules/stitching_retargeting_network.py +38 -0
  36. src/modules/util.py +441 -0
  37. src/modules/warping_network.py +77 -0
  38. src/utils/__init__.py +0 -0
  39. src/utils/camera.py +73 -0
  40. src/utils/crop.py +398 -0
  41. src/utils/cropper.py +196 -0
  42. src/utils/dependencies/insightface/__init__.py +20 -0
  43. src/utils/dependencies/insightface/app/__init__.py +1 -0
  44. src/utils/dependencies/insightface/app/common.py +49 -0
  45. src/utils/dependencies/insightface/app/face_analysis.py +110 -0
  46. src/utils/dependencies/insightface/data/__init__.py +2 -0
  47. src/utils/dependencies/insightface/data/image.py +27 -0
  48. src/utils/dependencies/insightface/data/images/Tom_Hanks_54745.png +0 -0
  49. src/utils/dependencies/insightface/data/images/mask_black.jpg +0 -0
  50. src/utils/dependencies/insightface/data/images/mask_blue.jpg +0 -0
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33   *.zip filter=lfs diff=lfs merge=lfs -text
34   *.zst filter=lfs diff=lfs merge=lfs -text
35   *tfevents* filter=lfs diff=lfs merge=lfs -text
36 + example/audio_driven/WDA_BenCardin1_000.wav filter=lfs diff=lfs merge=lfs -text
37 + example/audio_driven/WRA_MarkwayneMullin_000.wav filter=lfs diff=lfs merge=lfs -text
38 + example/audio_driven/WRA_MikeJohanns1_000.wav filter=lfs diff=lfs merge=lfs -text
39 + src/utils/dependencies/insightface/data/images/t1.jpg filter=lfs diff=lfs merge=lfs -text
ORIGINAL_README.md ADDED
@@ -0,0 +1,118 @@
1
+ <div align="center">
2
+
3
+ # Unlock Pose Diversity: Accurate and Efficient Implicit Keypoint-based Spatiotemporal Diffusion for Audio-driven Talking Portrait
4
+ [![arXiv](https://img.shields.io/badge/arXiv-KDTalker-9065CA.svg?logo=arXiv)](https://arxiv.org/abs/2503.12963)
5
+ [![License](https://img.shields.io/badge/license-CC--BY--NC%204.0-green)](https://creativecommons.org/licenses/by-nc/4.0/)
6
+ [![GitHub Stars](https://img.shields.io/github/stars/chaolongy/KDTalker?style=social)](https://github.com/chaolongy/KDTalker)
7
+
8
+ <div>
9
+ <a href='https://chaolongy.github.io/' target='_blank'>Chaolong Yang <sup>1,3*</sup> </a>&emsp;
10
+ <a href='https://kaiseem.github.io/' target='_blank'>Kai Yao <sup>2*</sup></a>&emsp;
11
+ <a href='https://scholar.xjtlu.edu.cn/en/persons/YuyaoYan' target='_blank'>Yuyao Yan <sup>3</sup> </a>&emsp;
12
+ <a href='https://scholar.google.com/citations?hl=zh-CN&user=HDO58yUAAAAJ' target='_blank'>Chenru Jiang <sup>4</sup> </a>&emsp;
13
+ <a href='https://weiguangzhao.github.io/' target='_blank'>Weiguang Zhao <sup>1,3</sup> </a>&emsp; </br>
14
+ <a href='https://scholar.google.com/citations?hl=zh-CN&user=c-x5M2QAAAAJ' target='_blank'>Jie Sun <sup>3</sup> </a>&emsp;
15
+ <a href='https://sites.google.com/view/guangliangcheng' target='_blank'>Guangliang Cheng <sup>1</sup> </a>&emsp;
16
+ <a href='https://scholar.google.com/schhp?hl=zh-CN' target='_blank'>Yifei Zhang <sup>5</sup> </a>&emsp;
17
+ <a href='https://scholar.google.com/citations?hl=zh-CN&user=JNRMVNYAAAAJ&view_op=list_works&sortby=pubdate' target='_blank'>Bin Dong <sup>4</sup> </a>&emsp;
18
+ <a href='https://sites.google.com/view/kaizhu-huang-homepage/home' target='_blank'>Kaizhu Huang <sup>4</sup> </a>&emsp;
19
+ </div>
20
+ <br>
21
+
22
+ <div>
23
+ <sup>1</sup> University of Liverpool &emsp; <sup>2</sup> Ant Group &emsp; <sup>3</sup> Xi’an Jiaotong-Liverpool University &emsp; </br>
24
+ <sup>4</sup> Duke Kunshan University &emsp; <sup>5</sup> Ricoh Software Research Center &emsp;
25
+ </div>
26
+
27
+
28
+ <div align="justify">
29
+
30
+ # Comparative videos
31
+ https://github.com/user-attachments/assets/08ebc6e0-41c5-4bf4-8ee8-2f7d317d92cd
32
+
33
+
34
+ # Demo
35
+ A Gradio demo is available at [`KDTalker`](https://kdtalker.com/). The model was trained on only 4,282 video clips from [`VoxCeleb`](https://www.robots.ox.ac.uk/~vgg/data/voxceleb/).
36
+
37
+ ![shot](https://github.com/user-attachments/assets/810e9dc8-ab66-4187-ab4f-bf92759621fa)
38
+
39
+ # To Do List
40
+ - [ ] Train a community version using more datasets
41
+ - [ ] Release training code
42
+
43
+
44
+ # Environment
45
+ Our KDTalker can run on a single RTX 4090 or RTX 3090.
46
+
47
+ ### 1. Clone the code and prepare the environment
48
+
49
+ **Note:** Make sure your system has [`git`](https://git-scm.com/), [`conda`](https://anaconda.org/anaconda/conda), and [`FFmpeg`](https://ffmpeg.org/download.html) installed.
50
+
51
+ ```
52
+ git clone https://github.com/chaolongy/KDTalker
53
+ cd KDTalker
54
+
55
+ # create env using conda
56
+ conda create -n KDTalker python=3.9
57
+ conda activate KDTalker
58
+
59
+ conda install pytorch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 pytorch-cuda=11.8 -c pytorch -c nvidia
60
+
61
+ pip install -r requirements.txt
62
+ ```
63
+
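+ To optionally confirm that the CUDA build of PyTorch is working before continuing (a quick sanity check; the exact versions printed depend on your installation), you can run:
+
+ ```
+ python -c "import torch; print(torch.__version__, torch.cuda.is_available())"
+ ```
+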
64
+ ### 2. Download pretrained weights
65
+
66
+ First, download all LivePortrait pretrained weights from [Google Drive](https://drive.google.com/drive/folders/1UtKgzKjFAOmZkhNK-OYT0caJ_w2XAnib). Unzip and place them in `./pretrained_weights`.
67
+ Ensure the directory structure is as follows:
68
+ ```text
69
+ pretrained_weights
70
+ ├── insightface
71
+ │ └── models
72
+ │ └── buffalo_l
73
+ │ ├── 2d106det.onnx
74
+ │ └── det_10g.onnx
75
+ └── liveportrait
76
+ ├── base_models
77
+ │ ├── appearance_feature_extractor.pth
78
+ │ ├── motion_extractor.pth
79
+ │ ├── spade_generator.pth
80
+ │ └── warping_module.pth
81
+ ├── landmark.onnx
82
+ └── retargeting_models
83
+ └── stitching_retargeting_module.pth
84
+ ```
85
+ You can download the weights for the face detector, audio extractor and KDTalker from [Google Drive](https://drive.google.com/drive/folders/1OkfiFArUCsnkF_0tI2SCEAwVCBLSjzd6?hl=zh-CN). Put them in `./ckpts`.
86
+
87
+ Alternatively, you can download all of the above weights from [Hugging Face](https://huggingface.co/ChaolongYang/KDTalker/tree/main).
88
+
89
+
90
+
91
+ # Inference
92
+ ```
93
+ python inference.py -source_image ./example/source_image/WDA_BenCardin1_000.png -driven_audio ./example/audio_driven/WDA_BenCardin1_000.wav -output ./results/output.mp4
94
+ ```
95
+
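+ If you prefer to drive the pipeline from Python rather than the CLI, the sketch below shows one way to call the `Inferencer` class from `inference.py`. This is a minimal sketch, assuming you run it from the repository root with all checkpoints in place; the output directory is created up front because the script writes intermediate files next to the output path.
+
+ ```
+ import os
+ from inference import Inferencer
+
+ os.makedirs("results", exist_ok=True)
+ infer = Inferencer()  # loads KDTalker.pth, the wav2lip encoder, and the LivePortrait weights
+ infer.generate_with_audio_img(
+     image_path="example/source_image/WDA_BenCardin1_000.png",
+     audio_path="example/audio_driven/WDA_BenCardin1_000.wav",
+     save_path="results/output.mp4",
+ )
+ ```
+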
96
+
97
+ # Contact
98
+ Our code is under the CC-BY-NC 4.0 license and intended solely for research purposes. If you have any questions or wish to use it for commercial purposes, please contact us at [email protected]
99
+
100
+
101
+ # Citation
102
+ If you find this code helpful for your research, please cite:
103
+ ```
104
+ @misc{yang2025kdtalker,
105
+ title={Unlock Pose Diversity: Accurate and Efficient Implicit Keypoint-based Spatiotemporal Diffusion for Audio-driven Talking Portrait},
106
+ author={Chaolong Yang and Kai Yao and Yuyao Yan and Chenru Jiang and Weiguang Zhao and Jie Sun and Guangliang Cheng and Yifei Zhang and Bin Dong and Kaizhu Huang},
107
+ year={2025},
108
+ eprint={2503.12963},
109
+ archivePrefix={arXiv},
110
+ primaryClass={cs.CV},
111
+ url={https://arxiv.org/abs/2503.12963},
112
+ }
113
+ ```
114
+
115
+
116
+ # Acknowledge
117
+ We acknowledge these works for their public code and selfless help: [SadTalker](https://github.com/OpenTalker/SadTalker), [LivePortrait](https://github.com/KwaiVGI/LivePortrait), [Wav2Lip](https://github.com/Rudrabha/Wav2Lip), [Face-vid2vid](https://github.com/zhanglonghao1992/One-Shot_Free-View_Neural_Talking_Head_Synthesis) etc.
118
+ </div>
dataset_process/audio.py ADDED
@@ -0,0 +1,156 @@
1
+ import librosa
2
+ import librosa.filters
3
+ import numpy as np
4
+ # import tensorflow as tf
5
+ from scipy import signal
6
+ from scipy.io import wavfile
7
+ from src.utils.hparams import hparams as hp
8
+
9
+
10
+ def load_wav(path, sr):
11
+ return librosa.core.load(path, sr=sr)[0]
12
+
13
+
14
+ def save_wav(wav, path, sr):
15
+ wav *= 32767 / max(0.01, np.max(np.abs(wav)))
16
+ # proposed by @dsmiller
17
+ wavfile.write(path, sr, wav.astype(np.int16))
18
+
19
+
20
+ def save_wavenet_wav(wav, path, sr):
21
+ librosa.output.write_wav(path, wav, sr=sr)
22
+
23
+
24
+ def preemphasis(wav, k, preemphasize=True):
25
+ if preemphasize:
26
+ return signal.lfilter([1, -k], [1], wav)
27
+ return wav
28
+
29
+
30
+ def inv_preemphasis(wav, k, inv_preemphasize=True):
31
+ if inv_preemphasize:
32
+ return signal.lfilter([1], [1, -k], wav)
33
+ return wav
34
+
35
+
36
+ def get_hop_size():
37
+ hop_size = hp.hop_size
38
+ if hop_size is None:
39
+ assert hp.frame_shift_ms is not None
40
+ hop_size = int(hp.frame_shift_ms / 1000 * hp.sample_rate)
41
+ return hop_size
42
+
43
+
44
+ def linearspectrogram(wav):
45
+ D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
46
+ S = _amp_to_db(np.abs(D)) - hp.ref_level_db
47
+
48
+ if hp.signal_normalization:
49
+ return _normalize(S)
50
+ return S
51
+
52
+
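+ # melspectrogram: pre-emphasis -> STFT -> mel filterbank -> dB -> optional normalization;
+ # it returns an array of shape (num_mels, num_frames), which inference.py transposes to (num_frames, num_mels).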
53
+ def melspectrogram(wav):
54
+ D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
55
+ S = _amp_to_db(_linear_to_mel(np.abs(D))) - hp.ref_level_db
56
+
57
+ if hp.signal_normalization:
58
+ return _normalize(S)
59
+ return S
60
+
61
+
62
+ def _lws_processor():
63
+ import lws
64
+ return lws.lws(hp.n_fft, get_hop_size(), fftsize=hp.win_size, mode="speech")
65
+
66
+
67
+ def _stft(y):
68
+ if hp.use_lws:
69
+ return _lws_processor().stft(y).T
70
+ else:
71
+ return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=get_hop_size(), win_length=hp.win_size)
72
+
73
+
74
+ ##########################################################
75
+ # Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)
76
+ def num_frames(length, fsize, fshift):
77
+ """Compute number of time frames of spectrogram
78
+ """
79
+ pad = (fsize - fshift)
80
+ if length % fshift == 0:
81
+ M = (length + pad * 2 - fsize) // fshift + 1
82
+ else:
83
+ M = (length + pad * 2 - fsize) // fshift + 2
84
+ return M
85
+
86
+
87
+ def pad_lr(x, fsize, fshift):
88
+ """Compute left and right padding
89
+ """
90
+ M = num_frames(len(x), fsize, fshift)
91
+ pad = (fsize - fshift)
92
+ T = len(x) + 2 * pad
93
+ r = (M - 1) * fshift + fsize - T
94
+ return pad, pad + r
95
+
96
+
97
+ ##########################################################
98
+ # Librosa correct padding
99
+ def librosa_pad_lr(x, fsize, fshift):
100
+ return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]
101
+
102
+
103
+ # Conversions
104
+ _mel_basis = None
105
+
106
+
107
+ def _linear_to_mel(spectogram):
108
+ global _mel_basis
109
+ if _mel_basis is None:
110
+ _mel_basis = _build_mel_basis()
111
+ return np.dot(_mel_basis, spectogram)
112
+
113
+
114
+ def _build_mel_basis():
115
+ assert hp.fmax <= hp.sample_rate // 2
116
+ return librosa.filters.mel(sr=hp.sample_rate, n_fft=hp.n_fft, n_mels=hp.num_mels,
117
+ fmin=hp.fmin, fmax=hp.fmax)
118
+
119
+
120
+ def _amp_to_db(x):
121
+ min_level = np.exp(hp.min_level_db / 20 * np.log(10))
122
+ return 20 * np.log10(np.maximum(min_level, x))
123
+
124
+
125
+ def _db_to_amp(x):
126
+ return np.power(10.0, (x) * 0.05)
127
+
128
+
129
+ def _normalize(S):
130
+ if hp.allow_clipping_in_normalization:
131
+ if hp.symmetric_mels:
132
+ return np.clip((2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value,
133
+ -hp.max_abs_value, hp.max_abs_value)
134
+ else:
135
+ return np.clip(hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db)), 0, hp.max_abs_value)
136
+
137
+ assert S.max() <= 0 and S.min() - hp.min_level_db >= 0
138
+ if hp.symmetric_mels:
139
+ return (2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value
140
+ else:
141
+ return hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db))
142
+
143
+
144
+ def _denormalize(D):
145
+ if hp.allow_clipping_in_normalization:
146
+ if hp.symmetric_mels:
147
+ return (((np.clip(D, -hp.max_abs_value,
148
+ hp.max_abs_value) + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value))
149
+ + hp.min_level_db)
150
+ else:
151
+ return ((np.clip(D, 0, hp.max_abs_value) * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
152
+
153
+ if hp.symmetric_mels:
154
+ return (((D + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value)) + hp.min_level_db)
155
+ else:
156
+ return ((D * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
dataset_process/croper.py ADDED
@@ -0,0 +1,154 @@
1
+ import cv2
2
+
3
+ """
4
+ brief: face alignment with FFHQ method (https://github.com/NVlabs/ffhq-dataset)
5
+ author: lzhbrian (https://lzhbrian.me)
6
+ date: 2020.1.5
7
+ note: code is heavily borrowed from
8
+ https://github.com/NVlabs/ffhq-dataset
9
+ http://dlib.net/face_landmark_detection.py.html
10
+ requirements:
11
+ apt install cmake
12
+ conda install Pillow numpy scipy
13
+ pip install dlib
14
+ # download face landmark model from:
15
+ # http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
16
+ """
17
+
18
+ import numpy as np
19
+ from PIL import Image
20
+ import dlib
21
+
22
+
23
+ class Croper:
24
+ def __init__(self, path_of_lm):
25
+ # download model from: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
26
+ self.predictor = dlib.shape_predictor(path_of_lm)
27
+
28
+ def get_landmark(self, img_np):
29
+ """get landmark with dlib
30
+ :return: np.array shape=(68, 2)
31
+ """
32
+ detector = dlib.get_frontal_face_detector()
33
+ dets = detector(img_np, 1)
34
+ # print("Number of faces detected: {}".format(len(dets)))
35
+ # for k, d in enumerate(dets):
36
+ if len(dets) == 0:
37
+ return None
38
+ d = dets[0]
39
+ # Get the landmarks/parts for the face in box d.
40
+ shape = self.predictor(img_np, d)
41
+ # print("Part 0: {}, Part 1: {} ...".format(shape.part(0), shape.part(1)))
42
+ t = list(shape.parts())
43
+ a = []
44
+ for tt in t:
45
+ a.append([tt.x, tt.y])
46
+ lm = np.array(a)
47
+ # lm is a shape=(68,2) np.array
48
+ return lm
49
+
50
+ def align_face(self, img, lm, output_size=1024):
51
+ """
52
+ :param img: PIL.Image; lm: np.array of shape (68, 2) with dlib landmarks
53
+ :return: (rsize, crop, quad) with the resized image size, outer crop box, and inner quad box
54
+ """
55
+ lm_chin = lm[0: 17] # left-right
56
+ lm_eyebrow_left = lm[17: 22] # left-right
57
+ lm_eyebrow_right = lm[22: 27] # left-right
58
+ lm_nose = lm[27: 31] # top-down
59
+ lm_nostrils = lm[31: 36] # top-down
60
+ lm_eye_left = lm[36: 42] # left-clockwise
61
+ lm_eye_right = lm[42: 48] # left-clockwise
62
+ lm_mouth_outer = lm[48: 60] # left-clockwise
63
+ lm_mouth_inner = lm[60: 68] # left-clockwise
64
+
65
+ # Calculate auxiliary vectors.
66
+ eye_left = np.mean(lm_eye_left, axis=0)
67
+ eye_right = np.mean(lm_eye_right, axis=0)
68
+ eye_avg = (eye_left + eye_right) * 0.5
69
+ eye_to_eye = eye_right - eye_left
70
+ mouth_left = lm_mouth_outer[0]
71
+ mouth_right = lm_mouth_outer[6]
72
+ mouth_avg = (mouth_left + mouth_right) * 0.5
73
+ eye_to_mouth = mouth_avg - eye_avg
74
+
75
+ # Choose oriented crop rectangle.
76
+ x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]  # combine the inter-ocular vector with the rotated eye-to-mouth vector to orient the crop
77
+ x /= np.hypot(*x)
78
+ x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
79
+ y = np.flipud(x) * [-1, 1]
80
+ c = eye_avg + eye_to_mouth * 0.1
81
+ quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
82
+ qsize = np.hypot(*x) * 2
83
+
84
+ # Shrink.
85
+ shrink = int(np.floor(qsize / output_size * 0.5))
86
+ if shrink > 1:
87
+ rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
88
+ img = img.resize(rsize, Image.ANTIALIAS)
89
+ quad /= shrink
90
+ qsize /= shrink
91
+ else:
92
+ rsize = (int(np.rint(float(img.size[0]))), int(np.rint(float(img.size[1]))))
93
+
94
+ # Crop.
95
+ border = max(int(np.rint(qsize * 0.1)), 3)
96
+ crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
97
+ int(np.ceil(max(quad[:, 1]))))
98
+ crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
99
+ min(crop[3] + border, img.size[1]))
100
+ if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
101
+ # img = img.crop(crop)
102
+ quad -= crop[0:2]
103
+
104
+ # Pad.
105
+ pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
106
+ int(np.ceil(max(quad[:, 1]))))
107
+ pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
108
+ max(pad[3] - img.size[1] + border, 0))
109
+ # if enable_padding and max(pad) > border - 4:
110
+ # pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
111
+ # img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
112
+ # h, w, _ = img.shape
113
+ # y, x, _ = np.ogrid[:h, :w, :1]
114
+ # mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
115
+ # 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
116
+ # blur = qsize * 0.02
117
+ # img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
118
+ # img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
119
+ # img = Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
120
+ # quad += pad[:2]
121
+
122
+ # Transform.
123
+ quad = (quad + 0.5).flatten()
124
+ lx = max(min(quad[0], quad[2]), 0)
125
+ ly = max(min(quad[1], quad[7]), 0)
126
+ rx = min(max(quad[4], quad[6]), img.size[0])
127
+ ry = min(max(quad[3], quad[5]), img.size[1])  # clamp the bottom edge to the image height
128
+ # img = img.transform((transform_size, transform_size), Image.QUAD, (quad + 0.5).flatten(),
129
+ # Image.BILINEAR)
130
+ # if output_size < transform_size:
131
+ # img = img.resize((output_size, output_size), Image.ANTIALIAS)
132
+
133
+ # Save aligned image.
134
+ return rsize, crop, [lx, ly, rx, ry]
135
+
136
+ def crop(self, img_np_list, still=False, xsize=512):  # the crop is computed on the first frame and applied to every frame
137
+ img_np = img_np_list[0]
138
+ lm = self.get_landmark(img_np)
139
+ if lm is None:
140
+ raise ValueError('cannot detect a face landmark in the source image')
141
+ rsize, crop, quad = self.align_face(img=Image.fromarray(img_np), lm=lm, output_size=xsize)
142
+ clx, cly, crx, cry = crop
143
+ lx, ly, rx, ry = quad
144
+ lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)
145
+ for _i in range(len(img_np_list)):
146
+ _inp = img_np_list[_i]
147
+ _inp = cv2.resize(_inp, (rsize[0], rsize[1]))
148
+ _inp = _inp[cly:cry, clx:crx]
149
+ # cv2.imwrite('test1.jpg', _inp)
150
+ if not still:
151
+ _inp = _inp[ly:ry, lx:rx]
152
+ # cv2.imwrite('test2.jpg', _inp)
153
+ img_np_list[_i] = _inp
154
+ return img_np_list, crop, quad
dataset_process/norm.npz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9422e503e75df9d1bd455d8e0f9f5e2826b12956cdedbb5566097c0151bddafb
3
+ size 5580
example/audio_driven/WDA_BenCardin1_000.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46cba9e5aa26d94ce13ff5eeef3e40e8086337e07f6c3d553497ea1b9f8a5e23
3
+ size 512774
example/audio_driven/WRA_MarkwayneMullin_000.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:791104d8663ca5af3d11dde1c042cf3f42957c6356c044d6bd8b8ee311442fc5
3
+ size 512774
example/audio_driven/WRA_MikeJohanns1_000.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e9fc02acff776d4bd59bad02fbb773d1201948616e279ffc853b83753f4f4f2b
3
+ size 512774
example/source_image/WDA_BenCardin1_000.png ADDED
example/source_image/WRA_MarkwayneMullin_000.png ADDED
example/source_image/WRA_MikeJohanns1_000.png ADDED
inference.py ADDED
@@ -0,0 +1,383 @@
1
+ # -*- coding: UTF-8 -*-
2
+ import os
3
+ os.environ['HYDRA_FULL_ERROR']='1'
4
+ os.environ['CUDA_VISIBLE_DEVICES'] = '0'
5
+
6
+ import argparse
7
+ import shutil
8
+ import uuid
9
+ import os
10
+ import numpy as np
11
+ from tqdm import tqdm
12
+ import cv2
13
+ from rich.progress import track
14
+ import tyro
15
+
16
+
17
+ from PIL import Image
18
+ import time
19
+ import torch
20
+ import torch.nn.functional as F
21
+ from torch import nn
22
+ import imageio
23
+ from pydub import AudioSegment
24
+ from pykalman import KalmanFilter
25
+
26
+
27
+ from src.config.argument_config import ArgumentConfig
28
+ from src.config.inference_config import InferenceConfig
29
+ from src.config.crop_config import CropConfig
30
+ from src.live_portrait_pipeline import LivePortraitPipeline
31
+ from src.utils.camera import get_rotation_matrix
32
+ from dataset_process import audio
33
+
34
+ from dataset_process.croper import Croper
35
+
36
+
37
+ def parse_audio_length(audio_length, sr, fps):
38
+ bit_per_frames = sr / fps
39
+ num_frames = int(audio_length / bit_per_frames)
40
+ audio_length = int(num_frames * bit_per_frames)
41
+ return audio_length, num_frames
42
+
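+ # With the settings used below (sr=16000, fps=25) each video frame corresponds to 640 audio samples,
+ # so crop_pad_audio trims or zero-pads the waveform to exactly num_frames * 640 samples.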
43
+ def crop_pad_audio(wav, audio_length):
44
+ if len(wav) > audio_length:
45
+ wav = wav[:audio_length]
46
+ elif len(wav) < audio_length:
47
+ wav = np.pad(wav, [0, audio_length - len(wav)], mode='constant', constant_values=0)
48
+ return wav
49
+
50
+ class Conv2d(nn.Module):
51
+ def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, use_act=True, *args, **kwargs):
52
+ super().__init__(*args, **kwargs)
53
+ self.conv_block = nn.Sequential(
54
+ nn.Conv2d(cin, cout, kernel_size, stride, padding),
55
+ nn.BatchNorm2d(cout)
56
+ )
57
+ self.act = nn.ReLU()
58
+ self.residual = residual
59
+ self.use_act = use_act
60
+
61
+ def forward(self, x):
62
+ out = self.conv_block(x)
63
+ if self.residual:
64
+ out += x
65
+
66
+ if self.use_act:
67
+ return self.act(out)
68
+ else:
69
+ return out
70
+
71
+ class AudioEncoder(nn.Module):
72
+ def __init__(self, wav2lip_checkpoint, device):
73
+ super(AudioEncoder, self).__init__()
74
+
75
+ self.audio_encoder = nn.Sequential(
76
+ Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
77
+ Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),
78
+ Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),
79
+
80
+ Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1),
81
+ Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),
82
+ Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),
83
+
84
+ Conv2d(64, 128, kernel_size=3, stride=3, padding=1),
85
+ Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),
86
+ Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),
87
+
88
+ Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1),
89
+ Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True),
90
+
91
+ Conv2d(256, 512, kernel_size=3, stride=1, padding=0),
92
+ Conv2d(512, 512, kernel_size=1, stride=1, padding=0),)
93
+
94
+ #### load the pre-trained audio_encoder
95
+ wav2lip_state_dict = torch.load(wav2lip_checkpoint, map_location=torch.device(device))['state_dict']
96
+ state_dict = self.audio_encoder.state_dict()
97
+
98
+ for k,v in wav2lip_state_dict.items():
99
+ if 'audio_encoder' in k:
100
+ state_dict[k.replace('module.audio_encoder.', '')] = v
101
+ self.audio_encoder.load_state_dict(state_dict)
102
+
103
+ def forward(self, audio_sequences):
104
+ B = audio_sequences.size(0)
105
+
106
+ audio_sequences = torch.cat([audio_sequences[:, i] for i in range(audio_sequences.size(1))], dim=0)
107
+
108
+ audio_embedding = self.audio_encoder(audio_sequences) # B, 512, 1, 1
109
+ dim = audio_embedding.shape[1]
110
+ audio_embedding = audio_embedding.reshape((B, -1, dim, 1, 1))
111
+
112
+ return audio_embedding.squeeze(-1).squeeze(-1) #B seq_len+1 512
113
+
114
+ def partial_fields(target_class, kwargs):
115
+ return target_class(**{k: v for k, v in kwargs.items() if hasattr(target_class, k)})
116
+
117
+ def dct2device(dct: dict, device):
118
+ for key in dct:
119
+ dct[key] = torch.tensor(dct[key]).to(device)
120
+ return dct
121
+
122
+ def save_video_with_watermark(video, audio, save_path):
123
+ temp_file = str(uuid.uuid4())+'.mp4'
124
+ cmd = r'ffmpeg -y -i "%s" -i "%s" -vcodec copy "%s"' % (video, audio, temp_file)
125
+ os.system(cmd)
126
+ shutil.move(temp_file, save_path)
127
+
128
+ class Inferencer(object):
129
+ def __init__(self):
130
+ st=time.time()
131
+ print('#'*25+'Start initialization'+'#'*25)
132
+ self.device = 'cuda'
133
+
134
+ from model import get_model
135
+ self.point_diffusion = get_model()
136
+ ckpt = torch.load('KDTalker.pth')
137
+
138
+ self.point_diffusion.load_state_dict(ckpt['model'])
139
+ self.point_diffusion.eval()
140
+ self.point_diffusion.to(self.device)
141
+
142
+ lm_croper_checkpoint = 'ckpts/shape_predictor_68_face_landmarks.dat'
143
+ self.croper = Croper(lm_croper_checkpoint)
144
+
145
+ self.norm_info = dict(np.load('dataset_process/norm.npz'))
146
+
147
+ wav2lip_checkpoint = 'ckpts/wav2lip.pth'
148
+ self.wav2lip_model = AudioEncoder(wav2lip_checkpoint, 'cuda')
149
+ self.wav2lip_model.cuda()
150
+ self.wav2lip_model.eval()
151
+
152
+ # set tyro theme
153
+ tyro.extras.set_accent_color("bright_cyan")
154
+ args = tyro.cli(ArgumentConfig)
155
+
156
+ # specify configs for inference
157
+ self.inf_cfg = partial_fields(InferenceConfig, args.__dict__) # use attribute of args to initial InferenceConfig
158
+ self.crop_cfg = partial_fields(CropConfig, args.__dict__) # use attribute of args to initial CropConfig
159
+
160
+ self.live_portrait_pipeline = LivePortraitPipeline(inference_cfg=self.inf_cfg, crop_cfg=self.crop_cfg)
161
+
162
+ def _norm(self, data_dict):
163
+ for k in data_dict.keys():
164
+ if k in ['yaw', 'pitch', 'roll', 't', 'exp', 'scale', 'kp', ]:
165
+ v=data_dict[k]
166
+ data_dict[k] = (v - self.norm_info[k+'_mean'])/self.norm_info[k+'_std']
167
+ return data_dict
168
+
169
+ def _denorm(self, data_dict):
170
+ for k in data_dict.keys():
171
+ if k in ['yaw', 'pitch', 'roll', 't', 'exp', 'scale', 'kp']:
172
+ v=data_dict[k]
173
+ data_dict[k] = v * self.norm_info[k+'_std'] + self.norm_info[k+'_mean']
174
+ return data_dict
175
+
176
+ def output_to_dict(self, data):
177
+ output = {}
178
+ output['scale'] = data[:, 0]
179
+ output['yaw'] = data[:, 1, None]
180
+ output['pitch'] = data[:, 2, None]
181
+ output['roll'] = data[:, 3, None]
182
+ output['t'] = data[:, 4:7]
183
+ output['exp'] = data[:, 7:]
184
+ return output
185
+
186
+ def extract_mel_from_audio(self, audio_file_path):
187
+ syncnet_mel_step_size = 16
188
+ fps = 25
189
+ wav = audio.load_wav(audio_file_path, 16000)
190
+ wav_length, num_frames = parse_audio_length(len(wav), 16000, 25)
191
+ wav = crop_pad_audio(wav, wav_length)
192
+ orig_mel = audio.melspectrogram(wav).T
193
+ spec = orig_mel.copy()
194
+ indiv_mels = []
195
+
196
+ for i in tqdm(range(num_frames), 'mel:'):
197
+ start_frame_num = i - 2
198
+ start_idx = int(80. * (start_frame_num / float(fps)))
199
+ end_idx = start_idx + syncnet_mel_step_size
200
+ seq = list(range(start_idx, end_idx))
201
+ seq = [min(max(item, 0), orig_mel.shape[0] - 1) for item in seq]
202
+ m = spec[seq, :]
203
+ indiv_mels.append(m.T)
204
+ indiv_mels = np.asarray(indiv_mels) # T 80 16
205
+ return indiv_mels
206
+
207
+ def extract_wav2lip_from_audio(self, audio_file_path):
208
+ asd_mel = self.extract_mel_from_audio(audio_file_path)
209
+ asd_mel = torch.FloatTensor(asd_mel).cuda().unsqueeze(0).unsqueeze(2)
210
+ with torch.no_grad():
211
+ hidden = self.wav2lip_model(asd_mel)
212
+ return hidden[0].cpu().detach().numpy()
213
+
214
+ def headpose_pred_to_degree(self, pred):
215
+ device = pred.device
216
+ idx_tensor = [idx for idx in range(66)]
217
+ idx_tensor = torch.FloatTensor(idx_tensor).to(device)
218
+ pred = F.softmax(pred, dim=1)
219
+ degree = torch.sum(pred * idx_tensor, 1) * 3 - 99
220
+ return degree
221
+
222
+ @torch.no_grad()
223
+ def generate_with_audio_img(self, image_path, audio_path, save_path):
224
+ image = np.array(Image.open(image_path).convert('RGB'))
225
+ cropped_image, crop, quad = self.croper.crop([image], still=False, xsize=512)
226
+ input_image = cv2.resize(cropped_image[0], (256, 256))
227
+
228
+ I_s = torch.FloatTensor(input_image.transpose((2, 0, 1))).unsqueeze(0).cuda() / 255
229
+
230
+ x_s_info = self.live_portrait_pipeline.live_portrait_wrapper.get_kp_info(I_s)
231
+ x_c_s = x_s_info['kp'].reshape(1, 21, -1)
232
+ R_s = get_rotation_matrix(x_s_info['pitch'], x_s_info['yaw'], x_s_info['roll'])
233
+ f_s = self.live_portrait_pipeline.live_portrait_wrapper.extract_feature_3d(I_s)
234
+ x_s = self.live_portrait_pipeline.live_portrait_wrapper.transform_keypoint(x_s_info)
235
+
236
+ ######## process driving info ########
237
+ kp_info = {}
238
+ for k in x_s_info.keys():
239
+ kp_info[k] = x_s_info[k].cpu().numpy()
240
+
241
+ kp_info = self._norm(kp_info)
242
+
243
+ ori_kp = torch.cat([torch.zeros([1, 7]), torch.Tensor(kp_info['kp'])], -1).cuda()
244
+
245
+ input_x = np.concatenate([kp_info[k] for k in ['scale', 'yaw', 'pitch', 'roll', 't', 'exp']], 1)
246
+ input_x = np.expand_dims(input_x, -1)
247
+ input_x = np.expand_dims(input_x, 0)
248
+ input_x = np.concatenate([input_x, input_x, input_x], -1)
249
+
250
+ aud_feat = self.extract_wav2lip_from_audio(audio_path)
251
+
252
+ sample_frame = 64
253
+ padding_size = (sample_frame - aud_feat.shape[0] % sample_frame) % sample_frame
254
+
255
+ if padding_size > 0:
256
+ aud_feat = np.concatenate((aud_feat, aud_feat[:padding_size, :]), axis=0)
257
+ else:
258
+ aud_feat = aud_feat
259
+
260
+ outputs = [input_x]
261
+
262
+ sample_frame = 64
263
+ for i in range(0, aud_feat.shape[0] - 1, sample_frame):
264
+ input_mel = torch.Tensor(aud_feat[i: i + sample_frame]).unsqueeze(0).cuda()
265
+ kp0 = torch.Tensor(outputs[-1])[:, -1].cuda()
266
+ pred_kp = self.point_diffusion.forward_sample(70, ref_kps=kp0, ori_kps=ori_kp, aud_feat=input_mel,
267
+ scheduler='ddim', num_inference_steps=50)
268
+ outputs.append(pred_kp.cpu().numpy())
269
+
270
+ outputs = np.mean(np.concatenate(outputs, 1)[0, 1:aud_feat.shape[0] - padding_size + 1], -1)
271
+ output_dict = self.output_to_dict(outputs)
272
+ output_dict = self._denorm(output_dict)
273
+
274
+ num_frame = output_dict['yaw'].shape[0]
275
+ x_d_info = {}
276
+ for key in output_dict:
277
+ x_d_info[key] = torch.tensor(output_dict[key]).cuda()
278
+
279
+ # Smooth the predicted pose, translation, and expression sequences with a Kalman smoother to reduce temporal jitter
280
+ def smooth(sequence, n_dim_state=1):
281
+ kf = KalmanFilter(initial_state_mean=sequence[0],
282
+ transition_covariance=0.05 * np.eye(n_dim_state),
283
+ observation_covariance=0.001 * np.eye(n_dim_state))
284
+ state_means, _ = kf.smooth(sequence)
285
+ return state_means
286
+
287
+ yaw_data = x_d_info['yaw'].cpu().numpy()
288
+ pitch_data = x_d_info['pitch'].cpu().numpy()
289
+ roll_data = x_d_info['roll'].cpu().numpy()
290
+ t_data = x_d_info['t'].cpu().numpy()
291
+ exp_data = x_d_info['exp'].cpu().numpy()
292
+
293
+ smoothed_pitch = smooth(pitch_data, n_dim_state=1)
294
+ smoothed_yaw = smooth(yaw_data, n_dim_state=1)
295
+ smoothed_roll = smooth(roll_data, n_dim_state=1)
296
+ smoothed_t = smooth(t_data, n_dim_state=3)
297
+ smoothed_exp = smooth(exp_data, n_dim_state=63)
298
+
299
+ x_d_info['pitch'] = torch.Tensor(smoothed_pitch).cuda()
300
+ x_d_info['yaw'] = torch.Tensor(smoothed_yaw).cuda()
301
+ x_d_info['roll'] = torch.Tensor(smoothed_roll).cuda()
302
+ x_d_info['t'] = torch.Tensor(smoothed_t).cuda()
303
+ x_d_info['exp'] = torch.Tensor(smoothed_exp).cuda()
304
+
305
+ template_dct = {'motion': [], 'c_d_eyes_lst': [], 'c_d_lip_lst': []}
306
+ for i in track(range(num_frame), description='Making motion templates...', total=num_frame):
307
+ x_d_i_info = x_d_info
308
+ R_d_i = get_rotation_matrix(x_d_i_info['pitch'][i], x_d_i_info['yaw'][i], x_d_i_info['roll'][i])
309
+
310
+ item_dct = {
311
+ 'scale': x_d_i_info['scale'][i].cpu().numpy().astype(np.float32),
312
+ 'R_d': R_d_i.cpu().numpy().astype(np.float32),
313
+ 'exp': x_d_i_info['exp'][i].reshape(1, 21, -1).cpu().numpy().astype(np.float32),
314
+ 't': x_d_i_info['t'][i].cpu().numpy().astype(np.float32),
315
+ }
316
+
317
+ template_dct['motion'].append(item_dct)
318
+
319
+ I_p_lst = []
320
+ R_d_0, x_d_0_info = None, None
321
+
322
+ for i in track(range(num_frame), description='🚀Animating...', total=num_frame):
323
+ x_d_i_info = template_dct['motion'][i]
324
+ for key in x_d_i_info:
325
+ x_d_i_info[key] = torch.tensor(x_d_i_info[key]).cuda()
326
+ R_d_i = x_d_i_info['R_d']
327
+
328
+ if i == 0:
329
+ R_d_0 = R_d_i
330
+ x_d_0_info = x_d_i_info
331
+
332
+ if self.inf_cfg.flag_relative_motion:
333
+ R_new = (R_d_i @ R_d_0.permute(0, 2, 1)) @ R_s
334
+ delta_new = x_s_info['exp'].reshape(1, 21, -1) + (x_d_i_info['exp'] - x_d_0_info['exp'])
335
+ scale_new = x_s_info['scale'] * (x_d_i_info['scale'] / x_d_0_info['scale'])
336
+ t_new = x_s_info['t'] + (x_d_i_info['t'] - x_d_0_info['t'])
337
+ else:
338
+ R_new = R_d_i
339
+ delta_new = x_d_i_info['exp']
340
+ scale_new = x_s_info['scale']
341
+ t_new = x_d_i_info['t']
342
+
343
+ t_new[..., 2].fill_(0)
344
+ x_d_i_new = scale_new * (x_c_s @ R_new + delta_new) + t_new
345
+
346
+ out = self.live_portrait_pipeline.live_portrait_wrapper.warp_decode(f_s, x_s, x_d_i_new)
347
+ I_p_i = self.live_portrait_pipeline.live_portrait_wrapper.parse_output(out['out'])[0]
348
+ I_p_lst.append(I_p_i)
349
+
350
+ video_name = save_path.split('/')[-1]
351
+ video_save_dir = os.path.dirname(save_path)
352
+ path = os.path.join(video_save_dir, 'temp_' + video_name)
353
+
354
+ imageio.mimsave(path, I_p_lst, fps=float(25))
355
+
356
+ audio_name = audio_path.split('/')[-1]
357
+ new_audio_path = os.path.join(video_save_dir, audio_name)
358
+ start_time = 0
359
+ sound = AudioSegment.from_file(audio_path)
360
+ end_time = start_time + num_frame * 1 / 25 * 1000
361
+ word1 = sound.set_frame_rate(16000)
362
+ word = word1[start_time:end_time]
363
+ word.export(new_audio_path, format="wav")
364
+
365
+ save_video_with_watermark(path, new_audio_path, save_path)
366
+ print(f'The generated video is named {video_save_dir}/{video_name}')
367
+
368
+ os.remove(path)
369
+ os.remove(new_audio_path)
370
+
371
+
372
+ if __name__ == '__main__':
373
+ parser = argparse.ArgumentParser()
374
+ parser.add_argument("-source_image", type=str, default="example/source_image/WDA_BenCardin1_000.png",
375
+ help="source image")
376
+ parser.add_argument("-driven_audio", type=str, default="example/audio_driven/WDA_BenCardin1_000.wav",
377
+ help="driving audio")
378
+ parser.add_argument("-output", type=str, default="results/output.mp4", help="output video file name", )
379
+
380
+ args = parser.parse_args()
381
+
382
+ Infer = Inferencer()
383
+ Infer.generate_with_audio_img(args.source_image, args.driven_audio, args.output)
model/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ from .model import ConditionalPointCloudDiffusionModel
2
+
3
+ def get_model():
4
+ model = ConditionalPointCloudDiffusionModel()
5
+ return model
6
+
model/model.py ADDED
@@ -0,0 +1,230 @@
1
+ import inspect
2
+ from typing import Optional
3
+ from einops import rearrange
4
+ import torch
5
+ import torch.nn.functional as F
6
+ from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
7
+ from diffusers.schedulers.scheduling_ddim import DDIMScheduler
8
+ from diffusers.schedulers.scheduling_pndm import PNDMScheduler
9
+
10
+ from torch import Tensor
11
+ from tqdm import tqdm
12
+ from diffusers import ModelMixin
13
+ from .model_utils import get_custom_betas
14
+ from .point_model import PointModel
15
+ import copy
16
+ import torch.nn as nn
17
+
18
+ class TemporalSmoothnessLoss(nn.Module):
19
+ def __init__(self):
20
+ super(TemporalSmoothnessLoss, self).__init__()
21
+
22
+ def forward(self, input):
23
+ # Calculate the difference between consecutive frames
24
+ diff = input[:, 1:, :] - input[:, :-1, :]
25
+
26
+ # Compute the L2 norm (squared) of the differences
27
+ smoothness_loss = torch.mean(torch.sum(diff ** 2, dim=2))
28
+
29
+ return smoothness_loss
30
+
31
+ class ConditionalPointCloudDiffusionModel(ModelMixin):
32
+ def __init__(
33
+ self,
34
+ beta_start: float = 1e-5,
35
+ beta_end: float = 8e-3,
36
+ beta_schedule: str = 'linear',
37
+ point_cloud_model: str = 'simple',
38
+ point_cloud_model_embed_dim: int = 64,
39
+ ):
40
+ super().__init__()
41
+ self.in_channels = 70  # 1 scale + 3 rotation + 3 translation + 63 expression dims
42
+ self.out_channels = 70
43
+
44
+ # Checks
45
+ # Create diffusion model schedulers which define the sampling timesteps
46
+ scheduler_kwargs = {}
47
+ if beta_schedule == 'custom':
48
+ scheduler_kwargs.update(dict(trained_betas=get_custom_betas(beta_start=beta_start, beta_end=beta_end)))
49
+ else:
50
+ scheduler_kwargs.update(dict(beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule))
51
+ self.schedulers_map = {
52
+ 'ddpm': DDPMScheduler(**scheduler_kwargs, clip_sample=False),
53
+ 'ddim': DDIMScheduler(**scheduler_kwargs, clip_sample=False),
54
+ 'pndm': PNDMScheduler(**scheduler_kwargs),
55
+ }
56
+ self.scheduler = self.schedulers_map['ddim'] # this can be changed for inference
57
+
58
+ # Create point cloud model for processing point cloud at each diffusion step
59
+ self.point_model = PointModel(
60
+ model_type=point_cloud_model,
61
+ embed_dim=point_cloud_model_embed_dim,
62
+ in_channels=self.in_channels,
63
+ out_channels=self.out_channels,
64
+ )
65
+
66
+ def forward_train(
67
+ self,
68
+ pc: Optional[Tensor],
69
+ ref_kps: Optional[Tensor],
70
+ ori_kps: Optional[Tensor],
71
+ aud_feat: Optional[Tensor],
72
+ mode: str = 'train',
73
+ return_intermediate_steps: bool = False
74
+ ):
75
+
76
+ # Ground-truth keypoint/motion sequence
77
+ x_0 = pc
78
+ B, Nf, Np, D = x_0.shape  # batch, num frames, num points, 3
79
+
80
+
81
+ x_0 = x_0[:, :, :, 0]  # batch, num frames, 70 (keep only the first channel)
82
+
83
+ # Sample random noise
84
+ noise = torch.randn_like(x_0)
85
+
86
+ # Sample random timesteps for each point_cloud
87
+ timestep = torch.randint(0, self.scheduler.num_train_timesteps, (B,),
88
+ device=self.device, dtype=torch.long)
89
+
90
+ # Add noise to points
91
+ x_t = self.scheduler.add_noise(x_0, noise, timestep)
92
+
93
+ # Conditioning
94
+ ref_kps = ref_kps[:, :, 0]
95
+
96
+ x_t_input = torch.cat([ori_kps.unsqueeze(1), ref_kps.unsqueeze(1), x_t], dim=1)
97
+
98
+ aud_feat = torch.cat([torch.zeros(B, 2, 512).cuda(), aud_feat], 1)
99
+
100
+ # Augmentation for audio feature
101
+ if mode == 'train':
102
+ if torch.rand(1) > 0.3:
103
+ mean = torch.mean(aud_feat)
104
+ std = torch.std(aud_feat)
105
+ sample = torch.normal(mean=torch.full(aud_feat.shape, mean), std=torch.full(aud_feat.shape, std)).cuda()
106
+ aud_feat = sample + aud_feat
107
+ else:
108
+ pass
109
+ else:
110
+ pass
111
+
112
+ # Forward
113
+ noise_pred = self.point_model(x_t_input, timestep, context=aud_feat) #torch.cat([mel_feat,style_embed],-1))
114
+ noise_pred = noise_pred[:, 2:]
115
+
116
+ # Check
117
+ if not noise_pred.shape == noise.shape:
118
+ raise ValueError(f'{noise_pred.shape=} and {noise.shape=}')
119
+
120
+ loss = F.mse_loss(noise_pred, noise)
121
+
122
+ loss_pose = F.mse_loss(noise_pred[:, :, 1:7], noise[:, :, 1:7])
123
+ loss_exp = F.mse_loss(noise_pred[:, :, 7:], noise[:, :, 7:])
124
+
125
+
126
+ # Whether to return intermediate steps
127
+ if return_intermediate_steps:
128
+ return loss, (x_0, x_t, noise, noise_pred)
129
+
130
+ return loss, loss_exp, loss_pose
131
+
132
+ @torch.no_grad()
133
+ def forward_sample(
134
+ self,
135
+ num_points: int,
136
+ ref_kps: Optional[Tensor],
137
+ ori_kps: Optional[Tensor],
138
+ aud_feat: Optional[Tensor],
139
+ # Optional overrides
140
+ scheduler: Optional[str] = 'ddpm',
141
+ # Inference parameters
142
+ num_inference_steps: Optional[int] = 50,
143
+ eta: Optional[float] = 0.0, # for DDIM
144
+ # Whether to return all the intermediate steps in generation
145
+ return_sample_every_n_steps: int = -1,
146
+ # Whether to disable tqdm
147
+ disable_tqdm: bool = False,
148
+ ):
149
+
150
+ # Get scheduler from mapping, or use self.scheduler if None
151
+ scheduler = self.scheduler if scheduler is None else self.schedulers_map[scheduler]
152
+
153
+ # Get the size of the noise
154
+ Np = num_points
155
+ Nf = aud_feat.size(1)
156
+ B = 1
157
+ D = 3
158
+ device = self.device
159
+
160
+ # Sample noise
161
+ x_t = torch.randn(B, Nf, Np, D, device=device)
162
+
163
+ x_t = x_t[:, :, :, 0]
164
+
165
+ ref_kps = ref_kps[:,:,0]
166
+
167
+ # Set timesteps
168
+ accepts_offset = "offset" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
169
+ extra_set_kwargs = {"offset": 1} if accepts_offset else {}
170
+ scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
171
+
172
+ accepts_eta = "eta" in set(inspect.signature(scheduler.step).parameters.keys())
173
+ extra_step_kwargs = {"eta": eta} if accepts_eta else {}
174
+
175
+ # Loop over timesteps
176
+ all_outputs = []
177
+ return_all_outputs = (return_sample_every_n_steps > 0)
178
+ progress_bar = tqdm(scheduler.timesteps.to(device), desc=f'Sampling ({x_t.shape})', disable=disable_tqdm)
179
+
180
+ aud_feat = torch.cat([torch.zeros(B, 2, 512).cuda(), aud_feat], 1)
181
+
182
+ for i, t in enumerate(progress_bar):
183
+ x_t_input = torch.cat([ori_kps.unsqueeze(1).detach(),ref_kps.unsqueeze(1).detach(), x_t], dim=1)
184
+
185
+ # Forward
186
+ noise_pred = self.point_model(x_t_input, t.reshape(1).expand(B), context=aud_feat)[:, 2:]
187
+
188
+ # Step
189
+ x_t = scheduler.step(noise_pred, t, x_t, **extra_step_kwargs).prev_sample
190
+
191
+ # Append to output list if desired
192
+ if (return_all_outputs and (i % return_sample_every_n_steps == 0 or i == len(scheduler.timesteps) - 1)):
193
+ all_outputs.append(x_t)
194
+
195
+ # Stack the sampled 70-dim vectors into three identical channels to match the (B, Nf, Np, 3) output layout
196
+ output = x_t
197
+ output = torch.stack([output,output,output],-1)
198
+ if return_all_outputs:
199
+ all_outputs = torch.stack(all_outputs, dim=1) # (B, sample_steps, N, D)
200
+ return (output, all_outputs) if return_all_outputs else output
201
+
202
+ def forward(self, batch: dict, mode: str = 'train', **kwargs):
203
+ """A wrapper around the forward method for training and inference"""
204
+
205
+ if mode == 'train':
206
+ return self.forward_train(
207
+ pc=batch['sequence_keypoints'],
208
+ ref_kps=batch['ref_keypoint'],
209
+ ori_kps=batch['ori_keypoint'],
210
+ aud_feat=batch['aud_feat'],
211
+ mode='train',
212
+ **kwargs)
213
+ elif mode == 'val':
214
+ return self.forward_train(
215
+ pc=batch['sequence_keypoints'],
216
+ ref_kps=batch['ref_keypoint'],
217
+ ori_kps=batch['ori_keypoint'],
218
+ aud_feat=batch['aud_feat'],
219
+ mode='val',
220
+ **kwargs)
221
+ elif mode == 'sample':
222
+ num_points = 70
223
+ return self.forward_sample(
224
+ num_points=num_points,
225
+ ref_kps=batch['ref_keypoint'],
226
+ ori_kps=batch['ori_keypoint'],
227
+ aud_feat=batch['aud_feat'],
228
+ **kwargs)
229
+ else:
230
+ raise NotImplementedError()
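+
+ # Usage sketch (mirrors how inference.py calls the sampler; the shape notes are inferred from the
+ # code above and should be treated as assumptions rather than guarantees):
+ #
+ #   model = get_model()                          # ConditionalPointCloudDiffusionModel()
+ #   pred = model.forward_sample(
+ #       70,                                      # 1 scale + 3 rotation + 3 translation + 63 expression dims
+ #       ref_kps=kp0,                             # (B, 70, 3); only channel 0 is used
+ #       ori_kps=ori_kp,                          # (B, 70): 7 zeros followed by the flattened canonical keypoints
+ #       aud_feat=input_mel,                      # (B, num_frames, 512) wav2lip audio features
+ #       scheduler='ddim', num_inference_steps=50)
+ #   # pred: (B, num_frames, 70, 3); the last dimension repeats the sampled 70-dim motion vector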
model/model_utils.py ADDED
@@ -0,0 +1,33 @@
1
+ import cv2
2
+ import numpy as np
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+ def set_requires_grad(module: nn.Module, requires_grad: bool):
7
+ for p in module.parameters():
8
+ p.requires_grad_(requires_grad)
9
+
10
+
11
+ def compute_distance_transform(mask: torch.Tensor):
12
+ image_size = mask.shape[-1]
13
+ distance_transform = torch.stack([
14
+ torch.from_numpy(cv2.distanceTransform(
15
+ (1 - m), distanceType=cv2.DIST_L2, maskSize=cv2.DIST_MASK_3
16
+ ) / (image_size / 2))
17
+ for m in mask.squeeze(1).detach().cpu().numpy().astype(np.uint8)
18
+ ]).unsqueeze(1).clip(0, 1).to(mask.device)
19
+ return distance_transform
20
+
21
+
22
+ def default(x, d):
23
+ return d if x is None else x
24
+
25
+ def get_custom_betas(beta_start: float, beta_end: float, warmup_frac: float = 0.3, num_train_timesteps: int = 1000):
26
+ """Custom beta schedule"""
27
+ betas = np.linspace(beta_start, beta_end, num_train_timesteps, dtype=np.float32)
28
+ warmup_frac = 0.3
29
+ warmup_time = int(num_train_timesteps * warmup_frac)
30
+ warmup_steps = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64)
31
+ warmup_time = min(warmup_time, num_train_timesteps)
32
+ betas[:warmup_time] = warmup_steps[:warmup_time]
33
+ return betas
model/point_model.py ADDED
@@ -0,0 +1,38 @@
1
+ import torch
2
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
3
+ from diffusers import ModelMixin
4
+ from torch import Tensor
5
+
6
+ from .temporaltrans.temptrans import SimpleTransModel
7
+
8
+ class PointModel(ModelMixin, ConfigMixin):
9
+ @register_to_config
10
+ def __init__(
11
+ self,
12
+ model_type: str = 'pvcnn',
13
+ in_channels: int = 3,
14
+ out_channels: int = 3,
15
+ embed_dim: int = 64,
16
+ dropout: float = 0.1,
17
+ width_multiplier: int = 1,
18
+ voxel_resolution_multiplier: int = 1,
19
+ ):
20
+ super().__init__()
21
+ self.model_type = model_type
22
+ if self.model_type == 'simple':
23
+ self.autocast_context = torch.autocast('cuda', dtype=torch.float32)
24
+ self.model = SimpleTransModel(
25
+ embed_dim=embed_dim,
26
+ num_classes=out_channels,
27
+ extra_feature_channels=(in_channels - 3),
28
+ )
29
+ self.model.output_projection.bias.data.normal_(0, 1e-6)
30
+ self.model.output_projection.weight.data.normal_(0, 1e-6)
31
+ else:
32
+ raise NotImplementedError()
33
+
34
+ def forward(self, inputs: Tensor, t: Tensor, context=None) -> Tensor:
35
+ """ Receives input of shape (B, N, in_channels) and returns output
36
+ of shape (B, N, out_channels) """
37
+ with self.autocast_context:
38
+ return self.model(inputs, t, context)
model/temporaltrans/temptrans.py ADDED
@@ -0,0 +1,267 @@
1
+ import torch
2
+ import torch.nn.functional as F
3
+ from torch import nn
4
+ from einops import rearrange
5
+ from .transformer_utils import BaseTemperalPointModel
6
+ import math
7
+ from einops_exts import check_shape, rearrange_many
8
+ from functools import partial
9
+ from rotary_embedding_torch import RotaryEmbedding
10
+
11
+ def exists(x):
12
+ return x is not None
13
+
14
+ class SinusoidalPosEmb(nn.Module):
15
+ def __init__(self, dim):
16
+ super().__init__()
17
+ self.dim = dim
18
+
19
+ def forward(self, x):
20
+ device = x.device
21
+ half_dim = self.dim // 2
22
+ emb = math.log(10000) / (half_dim - 1)
23
+ emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
24
+ emb = x[:, None] * emb[None, :]
25
+ emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
26
+ return emb
27
+
28
+
29
+ class RelativePositionBias(nn.Module):
30
+ def __init__(
31
+ self,
32
+ heads = 8,
33
+ num_buckets = 32,
34
+ max_distance = 128
35
+ ):
36
+ super().__init__()
37
+ self.num_buckets = num_buckets
38
+ self.max_distance = max_distance
39
+ self.relative_attention_bias = nn.Embedding(num_buckets, heads)
40
+
41
+ @staticmethod
42
+ def _relative_position_bucket(relative_position, num_buckets = 32, max_distance = 128):
43
+ ret = 0
44
+ n = -relative_position
45
+
46
+ num_buckets //= 2
47
+ ret += (n < 0).long() * num_buckets
48
+ n = torch.abs(n)
49
+
50
+ max_exact = num_buckets // 2
51
+ is_small = n < max_exact
52
+
53
+ val_if_large = max_exact + (
54
+ torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
55
+ ).long()
56
+ val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
57
+
58
+ ret += torch.where(is_small, n, val_if_large)
59
+ return ret
60
+
61
+ def forward(self, n, device):
62
+ q_pos = torch.arange(n, dtype = torch.long, device = device)
63
+ k_pos = torch.arange(n, dtype = torch.long, device = device)
64
+ rel_pos = rearrange(k_pos, 'j -> 1 j') - rearrange(q_pos, 'i -> i 1')
65
+ rp_bucket = self._relative_position_bucket(rel_pos, num_buckets = self.num_buckets, max_distance = self.max_distance)
66
+ values = self.relative_attention_bias(rp_bucket)
67
+ return rearrange(values, 'i j h -> h i j')
68
+
69
+
70
+ class Residual(nn.Module):
71
+ def __init__(self, fn):
72
+ super().__init__()
73
+ self.fn = fn
74
+
75
+ def forward(self, x, *args, **kwargs):
76
+ return self.fn(x, *args, **kwargs) + x
77
+
78
+
79
+ class LayerNorm(nn.Module):
80
+ def __init__(self, dim, eps = 1e-5):
81
+ super().__init__()
82
+ self.eps = eps
83
+ self.gamma = nn.Parameter(torch.ones(1, 1, dim))
84
+ self.beta = nn.Parameter(torch.zeros(1, 1, dim))
85
+
86
+ def forward(self, x):
87
+ var = torch.var(x, dim = -1, unbiased = False, keepdim = True)
88
+ mean = torch.mean(x, dim = -1, keepdim = True)
89
+ return (x - mean) / (var + self.eps).sqrt() * self.gamma + self.beta
90
+
91
+
92
+ class PreNorm(nn.Module):
93
+ def __init__(self, dim, fn):
94
+ super().__init__()
95
+ self.fn = fn
96
+ self.norm = LayerNorm(dim)
97
+
98
+ def forward(self, x, **kwargs):
99
+ x = self.norm(x)
100
+ return self.fn(x, **kwargs)
101
+
102
+
103
+ class EinopsToAndFrom(nn.Module):
104
+ def __init__(self, from_einops, to_einops, fn):
105
+ super().__init__()
106
+ self.from_einops = from_einops
107
+ self.to_einops = to_einops
108
+ self.fn = fn
109
+
110
+ def forward(self, x, **kwargs):
111
+ shape = x.shape
112
+ reconstitute_kwargs = dict(tuple(zip(self.from_einops.split(' '), shape)))
113
+ x = rearrange(x, f'{self.from_einops} -> {self.to_einops}')
114
+ x = self.fn(x, **kwargs)
115
+ x = rearrange(x, f'{self.to_einops} -> {self.from_einops}', **reconstitute_kwargs)
116
+ return x
117
+
118
+
119
+ class Attention(nn.Module):
120
+ def __init__(
121
+ self, dim, heads=4, attn_head_dim=None, casual_attn=False,rotary_emb = None):
122
+ super().__init__()
123
+ self.num_heads = heads
124
+ head_dim = dim // heads
125
+ self.casual_attn = casual_attn
126
+
127
+ if attn_head_dim is not None:
128
+ head_dim = attn_head_dim
129
+
130
+ all_head_dim = head_dim * self.num_heads
131
+ self.scale = head_dim ** -0.5
132
+ self.to_qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
133
+ self.proj = nn.Linear(all_head_dim, dim)
134
+ self.rotary_emb = rotary_emb
135
+
136
+ def forward(self, x, pos_bias = None):
137
+ N, device = x.shape[-2], x.device
138
+ qkv = self.to_qkv(x).chunk(3, dim = -1)
139
+
140
+ q, k, v = rearrange_many(qkv, '... n (h d) -> ... h n d', h=self.num_heads)
141
+
142
+ q = q * self.scale
143
+
144
+ if exists(self.rotary_emb):
145
+ q = self.rotary_emb.rotate_queries_or_keys(q)
146
+ k = self.rotary_emb.rotate_queries_or_keys(k)
147
+
148
+ sim = torch.einsum('... h i d, ... h j d -> ... h i j', q, k)
149
+
150
+ if exists(pos_bias):
151
+ sim = sim + pos_bias
152
+
153
+ if self.casual_attn:
154
+ mask = torch.tril(torch.ones(sim.size(-1), sim.size(-2))).to(device)
155
+ sim = sim.masked_fill(mask[..., :, :] == 0, float('-inf'))
156
+
157
+ attn = sim.softmax(dim = -1)
158
+ x = torch.einsum('... h i j, ... h j d -> ... h i d', attn, v)
159
+ x = rearrange(x, '... h n d -> ... n (h d)')
160
+ x = self.proj(x)
161
+ return x
162
+
163
+
164
+ class Block(nn.Module):
165
+ def __init__(self, dim, dim_out):
166
+ super().__init__()
167
+ self.proj = nn.Linear(dim, dim_out)
168
+ self.norm = LayerNorm(dim)
169
+ self.act = nn.SiLU()
170
+
171
+ def forward(self, x, scale_shift=None):
172
+ x = self.proj(x)
173
+
174
+ if exists(scale_shift):
175
+ x = self.norm(x)
176
+ scale, shift = scale_shift
177
+ x = x * (scale + 1) + shift
178
+ return self.act(x)
179
+
180
+
181
+ class ResnetBlock(nn.Module):
182
+ def __init__(self, dim, dim_out, cond_dim=None):
183
+ super().__init__()
184
+ self.mlp = nn.Sequential(
185
+ nn.SiLU(),
186
+ nn.Linear(cond_dim, dim_out * 2)
187
+ ) if exists(cond_dim) else None
188
+
189
+ self.block1 = Block(dim, dim_out)
190
+ self.block2 = Block(dim_out, dim_out)
191
+
192
+ def forward(self, x, cond_emb=None):
193
+ scale_shift = None
194
+ if exists(self.mlp):
195
+ assert exists(cond_emb), 'time emb must be passed in'
196
+ cond_emb = self.mlp(cond_emb)
197
+ #cond_emb = rearrange(cond_emb, 'b f c -> b f 1 c')
198
+ scale_shift = cond_emb.chunk(2, dim=-1)
199
+
200
+ h = self.block1(x, scale_shift=scale_shift)
201
+ h = self.block2(h)
202
+ return h + x
203
+
204
+ class SimpleTransModel(BaseTemperalPointModel):
205
+ """
206
+ A simple model that processes a point cloud by applying a series of MLPs to each point
207
+ individually, along with some pooled global features.
208
+ """
209
+
210
+ def get_layers(self):
211
+ self.input_projection = nn.Linear(
212
+ in_features=70,
213
+ out_features=self.dim
214
+ )
215
+
216
+ cond_dim = 512 + self.timestep_embed_dim
217
+
218
+ num_head = self.dim//64
219
+
220
+ rotary_emb = RotaryEmbedding(min(32, num_head))
221
+
222
+ self.time_rel_pos_bias = RelativePositionBias(heads=num_head, max_distance=128) # realistically will not be able to generate that many frames of video... yet
223
+
224
+ temporal_casual_attn = lambda dim: Attention(dim, heads=num_head, casual_attn=False, rotary_emb=rotary_emb)
225
+
226
+ cond_block = partial(ResnetBlock, cond_dim=cond_dim)
227
+
228
+ layers = nn.ModuleList([])
229
+
230
+ for _ in range(self.num_layers):
231
+ layers.append(nn.ModuleList([
232
+ cond_block(self.dim, self.dim),
233
+ cond_block(self.dim, self.dim),
234
+ Residual(PreNorm(self.dim, temporal_casual_attn(self.dim)))
235
+ ]))
236
+
237
+ return layers
238
+
239
+ def forward(self, inputs: torch.Tensor, timesteps: torch.Tensor, context=None):
240
+ """
241
+ Apply the model to an input batch.
242
+ :param inputs: a [B x F x C] tensor of per-frame motion features.
243
+ :param timesteps: a 1-D batch of timesteps.
244
+ :param context: per-frame conditioning features, concatenated with the timestep embedding
245
+ """
246
+ # Prepare inputs
247
+
248
+ batch, num_frames, channels = inputs.size()
249
+
250
+ device = inputs.device
251
+ x = self.input_projection(inputs)
252
+
253
+ t_emb = self.time_mlp(timesteps) if exists(self.time_mlp) else None
254
+ t_emb = t_emb[:,None,:].expand(-1, num_frames, -1) # b f c
255
+ if context is not None:
256
+ t_emb = torch.cat([t_emb, context],-1)
257
+
258
+ time_rel_pos_bias = self.time_rel_pos_bias(num_frames, device=device)
259
+
260
+ for block1, block2, temporal_attn in self.layers:
261
+ x = block1(x, t_emb)
262
+ x = block2(x, t_emb)
263
+ x = temporal_attn(x, pos_bias=time_rel_pos_bias)
264
+
265
+ # Project
266
+ x = self.output_projection(x)
267
+ return x
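
For orientation, a minimal smoke test of the denoising model defined above; all sizes here (70-dim motion vectors, 512-dim conditioning, dim=512) are illustrative assumptions, not values taken from the repo's training configs.

```python
# Hypothetical shapes for illustration only: 70-dim per-frame motion features,
# 512-dim per-frame conditioning (cond_dim = 512 + timestep_embed_dim in get_layers).
import torch
from model.temporaltrans.temptrans import SimpleTransModel

model = SimpleTransModel(
    num_classes=70,            # output dim; mirrors the 70-dim input projection
    embed_dim=512,             # required by the base class signature
    extra_feature_channels=0,
    dim=512,                   # num_head = 512 // 64 = 8 attention heads
    num_layers=6,
)
x = torch.randn(2, 32, 70)          # (batch, frames, motion channels)
t = torch.randint(0, 1000, (2,))    # one diffusion timestep per sample
ctx = torch.randn(2, 32, 512)       # per-frame conditioning, e.g. audio features
out = model(x, t, context=ctx)      # -> (2, 32, 70)
```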
model/temporaltrans/transformer_utils.py ADDED
@@ -0,0 +1,147 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ from torch import nn
5
+ from einops import rearrange
6
+ import math
7
+ from einops_exts import check_shape, rearrange_many
8
+ from torch import Size, Tensor, nn
9
+
10
+ class SinusoidalPosEmb(nn.Module):
11
+ def __init__(self, dim):
12
+ super().__init__()
13
+ self.dim = dim
14
+
15
+ def forward(self, x):
16
+ device = x.device
17
+ half_dim = self.dim // 2
18
+ emb = math.log(10000) / (half_dim - 1)
19
+ emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
20
+ emb = x[:, None] * emb[None, :]
21
+ emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
22
+ return emb
23
+
24
+
25
+ def map_positional_encoding(v: Tensor, freq_bands: Tensor) -> Tensor:
26
+ """Map v to positional encoding representation phi(v)
27
+
28
+ Arguments:
29
+ v (Tensor): input features (B, IFeatures)
30
+ freq_bands (Tensor): frequency bands (N_freqs, )
31
+
32
+ Returns:
33
+ phi(v) (Tensor): Fourier features (B, 3 + (2 * N_freqs) * 3)
34
+ """
35
+ pe = [v]
36
+ for freq in freq_bands:
37
+ fv = freq * v
38
+ pe += [torch.sin(fv), torch.cos(fv)]
39
+ return torch.cat(pe, dim=-1)
40
+
41
+ class FeatureMapping(nn.Module):
42
+ """FeatureMapping nn.Module
43
+
44
+ Maps v to features following transformation phi(v)
45
+
46
+ Arguments:
47
+ i_dim (int): input dimensions
48
+ o_dim (int): output dimensions
49
+ """
50
+
51
+ def __init__(self, i_dim: int, o_dim: int) -> None:
52
+ super().__init__()
53
+ self.i_dim = i_dim
54
+ self.o_dim = o_dim
55
+
56
+ def forward(self, v: Tensor) -> Tensor:
57
+ """FeatureMapping forward pass
58
+
59
+ Arguments:
60
+ v (Tensor): input features (B, IFeatures)
61
+
62
+ Returns:
63
+ phi(v) (Tensor): mapped features (B, OFeatures)
64
+ """
65
+ raise NotImplementedError("Forward pass not implemented yet!")
66
+
67
+ class PositionalEncoding(FeatureMapping):
68
+ """PositionalEncoding module
69
+
70
+ Maps v to positional encoding representation phi(v)
71
+
72
+ Arguments:
73
+ i_dim (int): input dimension for v
74
+ N_freqs (int): #frequency to sample (default: 10)
75
+ """
76
+
77
+ def __init__(
78
+ self,
79
+ i_dim: int,
80
+ N_freqs: int = 10,
81
+ ) -> None:
82
+ super().__init__(i_dim, 3 + (2 * N_freqs) * 3)
83
+ self.N_freqs = N_freqs
84
+
85
+ a, b = 1, self.N_freqs - 1
86
+ freq_bands = 2 ** torch.linspace(a, b, self.N_freqs)
87
+ self.register_buffer("freq_bands", freq_bands)
88
+
89
+ def forward(self, v: Tensor) -> Tensor:
90
+ """Map v to positional encoding representation phi(v)
91
+
92
+ Arguments:
93
+ v (Tensor): input features (B, IFeatures)
94
+
95
+ Returns:
96
+ phi(v) (Tensor): Fourier features (B, 3 + (2 * N_freqs) * 3)
97
+ """
98
+ return map_positional_encoding(v, self.freq_bands)
99
+
100
+ class BaseTemperalPointModel(nn.Module):
101
+ """ A base class providing useful methods for point cloud processing. """
102
+
103
+ def __init__(
104
+ self,
105
+ *,
106
+ num_classes,
107
+ embed_dim,
108
+ extra_feature_channels,
109
+ dim: int = 768,
110
+ num_layers: int = 6
111
+ ):
112
+ super().__init__()
113
+
114
+ self.extra_feature_channels = extra_feature_channels
115
+ self.timestep_embed_dim = 256
116
+ self.output_dim = num_classes
117
+ self.dim = dim
118
+ self.num_layers = num_layers
119
+
120
+
121
+ self.time_mlp = nn.Sequential(
122
+ SinusoidalPosEmb(dim),
123
+ nn.Linear(dim, self.timestep_embed_dim ),
124
+ nn.SiLU(),
125
+ nn.Linear(self.timestep_embed_dim , self.timestep_embed_dim )
126
+ )
127
+
128
+ self.positional_encoding = PositionalEncoding(i_dim=3, N_freqs=10)
129
+ positional_encoding_d_out = 3 + (2 * 10) * 3
130
+
131
+ # Input projection (point coords, point coord encodings, other features, and timestep embeddings)
132
+
133
+ self.input_projection = nn.Linear(
134
+ in_features=(3 + positional_encoding_d_out),
135
+ out_features=self.dim
136
+ )#b f p c
137
+
138
+ # Transformer layers
139
+ self.layers = self.get_layers()
140
+
141
+ # Output projection
142
+ self.output_projection = nn.Linear(self.dim, self.output_dim)
143
+ def get_layers(self):
144
+ raise NotImplementedError('This method should be implemented by subclasses')
145
+
146
+ def forward(self, inputs: torch.Tensor, t: torch.Tensor):
147
+ raise NotImplementedError('This method should be implemented by subclasses')
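
A quick sanity check of the encoding dimensions (a sketch; the batch size and inputs are made up): a 3-D input with N_freqs = 10 maps to 3 + (2 * 10) * 3 = 63 features.

```python
import torch
from model.temporaltrans.transformer_utils import PositionalEncoding

pe = PositionalEncoding(i_dim=3, N_freqs=10)
v = torch.randn(4, 3)    # batch of 3-D points
phi = pe(v)              # [v, sin(f*v), cos(f*v) for each of the 10 frequency bands]
assert phi.shape == (4, 63)
```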
requirements.txt ADDED
@@ -0,0 +1,44 @@
1
+ onnxruntime-gpu==1.18.0
2
+ transformers==4.33.0
3
+ pyyaml==6.0.1
4
+ scipy==1.10.0
5
+ imageio==2.34.2
6
+ lmdb==1.4.1
7
+ tqdm==4.64.1
8
+ rich==13.7.1
9
+ ffmpeg-python==0.2.0
10
+ protobuf==3.20.2
11
+ onnx==1.16.1
12
+ scikit-image==0.24.0
13
+ scikit-learn==1.3.2
14
+ albumentations==1.4.10
15
+ matplotlib==3.7.0
16
+ imageio-ffmpeg==0.5.1
17
+ tyro==0.8.5
18
+ pykalman==0.9.7
19
+ pillow>=10.2.0
20
+ pytorch_fid
21
+ cpbd
22
+
23
+ wandb==0.17.5
24
+ accelerate==0.23.0
25
+ basicsr==1.4.2
26
+ diffusers==0.10.2
27
+ einops==0.6.0
28
+ einops_exts==0.0.4
29
+ hydra-core==1.3.2
30
+ librosa==0.10.0.post2
31
+ lws==1.2.7
32
+ moviepy==1.0.3
33
+ omegaconf==2.3.0
34
+ opencv_python_headless>=4.9.0.80
35
+ pydub==0.25.1
36
+ PyYAML==6.0.1
37
+ realesrgan==0.3.0
38
+ rotary_embedding_torch==0.3.0
39
+ timm==0.4.12
40
+ torch_ema==0.3
41
+ warmup_scheduler==0.3
42
+ yacs==0.1.8
43
+ numpy==1.24.4
44
+ dlib==19.24.99
src/config/__init__.py ADDED
File without changes
src/config/argument_config.py ADDED
@@ -0,0 +1,48 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ All configs for user
5
+ """
6
+
7
+ from dataclasses import dataclass
8
+ import tyro
9
+ from typing_extensions import Annotated
10
+ from typing import Optional
11
+ from .base_config import PrintableConfig, make_abs_path
12
+
13
+
14
+ @dataclass(repr=False) # use repr from PrintableConfig
15
+ class ArgumentConfig(PrintableConfig):
16
+ ########## input arguments ##########
17
+ source_image: Annotated[str, tyro.conf.arg(aliases=["-s"])] = make_abs_path('../../assets/examples/source/s6.jpg') # path to the source portrait
18
+ driving_info: Annotated[str, tyro.conf.arg(aliases=["-d"])] = make_abs_path('../../assets/examples/driving/d12.mp4') # path to driving video or template (.pkl format)
19
+ output_dir: Annotated[str, tyro.conf.arg(aliases=["-o"])] = 'animations/' # directory to save output video
20
+
21
+ ########## inference arguments ##########
22
+ flag_use_half_precision: bool = False # whether to use half precision (FP16). If black boxes appear, it might be due to GPU incompatibility; set to False.
23
+ flag_crop_driving_video: bool = False # whether to crop the driving video, if the given driving info is a video
24
+ device_id: int = 0 # gpu device id
25
+ flag_force_cpu: bool = False # force cpu inference, WIP!
26
+ flag_lip_zero: bool = False # whether to set the lip to a closed state before animation; only takes effect when flag_eye_retargeting and flag_lip_retargeting are False
27
+ flag_eye_retargeting: bool = False # not recommended to set True, WIP
28
+ flag_lip_retargeting: bool = False # not recommended to set True, WIP
29
+ flag_stitching: bool = False # recommended True if head movement is small, False if head movement is large
30
+ flag_relative_motion: bool = False # whether to use relative motion
31
+ flag_pasteback: bool = False # whether to paste-back/stitch the animated face cropping from the face-cropping space to the original image space
32
+ flag_do_crop: bool = False # whether to crop the source portrait to the face-cropping space
33
+ flag_do_rot: bool = False # whether to conduct the rotation when flag_do_crop is True
34
+
35
+ ########## crop arguments ##########
36
+ scale: float = 2.3 # crop scale factor; a larger scale yields a smaller face-area ratio in the crop
37
+ vx_ratio: float = 0 # the ratio to move the face to left or right in cropping space
38
+ vy_ratio: float = -0.125 # the ratio to move the face to up or down in cropping space
39
+
40
+ scale_crop_video: float = 2.2 # scale factor for cropping video
41
+ vx_ratio_crop_video: float = 0. # adjust x offset
42
+ vy_ratio_crop_video: float = -0.1 # adjust y offset
43
+
44
+ ########## gradio arguments ##########
45
+ server_port: Annotated[int, tyro.conf.arg(aliases=["-p"])] = 8890 # port for gradio server
46
+ share: bool = False # whether to share the server publicly
47
+ server_name: Optional[str] = "127.0.0.1" # set the local server name, "0.0.0.0" to broadcast all
48
+ flag_do_torch_compile: bool = False # whether to use torch.compile to accelerate generation
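
For context, this dataclass is the kind of object tyro turns into a command-line interface; the snippet below is a hedged sketch of that wiring (the repo's actual entry point may differ), assuming it is run from the repository root.

```python
import tyro
from src.config.argument_config import ArgumentConfig

if __name__ == "__main__":
    # e.g. `python demo.py -s my_face.png -d driving.mp4 -o animations/`
    args = tyro.cli(ArgumentConfig)
    print(args)   # PrintableConfig.__repr__ pretty-prints all fields
```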
src/config/base_config.py ADDED
@@ -0,0 +1,29 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ pretty printing class
5
+ """
6
+
7
+ from __future__ import annotations
8
+ import os.path as osp
9
+ from typing import Tuple
10
+
11
+
12
+ def make_abs_path(fn):
13
+ return osp.join(osp.dirname(osp.realpath(__file__)), fn)
14
+
15
+
16
+ class PrintableConfig: # pylint: disable=too-few-public-methods
17
+ """Printable Config defining str function"""
18
+
19
+ def __repr__(self):
20
+ lines = [self.__class__.__name__ + ":"]
21
+ for key, val in vars(self).items():
22
+ if isinstance(val, Tuple):
23
+ flattened_val = "["
24
+ for item in val:
25
+ flattened_val += str(item) + "\n"
26
+ flattened_val = flattened_val.rstrip("\n")
27
+ val = flattened_val + "]"
28
+ lines += f"{key}: {str(val)}".split("\n")
29
+ return "\n ".join(lines)
src/config/crop_config.py ADDED
@@ -0,0 +1,29 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ parameters used for crop faces
5
+ """
6
+
7
+ from dataclasses import dataclass
8
+
9
+ from .base_config import PrintableConfig
10
+
11
+
12
+ @dataclass(repr=False) # use repr from PrintableConfig
13
+ class CropConfig(PrintableConfig):
14
+ insightface_root: str = "../../pretrained_weights/insightface"
15
+ landmark_ckpt_path: str = "../../pretrained_weights/liveportrait/landmark.onnx"
16
+ device_id: int = 0 # gpu device id
17
+ flag_force_cpu: bool = False # force cpu inference, WIP
18
+ ########## source image cropping option ##########
19
+ dsize: int = 512 # crop size
20
+ scale: float = 2.0 # scale factor
21
+ vx_ratio: float = 0 # vx ratio
22
+ vy_ratio: float = -0.125 # vy ratio +up, -down
23
+ max_face_num: int = 0 # max face number, 0 means no limit
24
+
25
+ ########## driving video auto cropping option ##########
26
+ scale_crop_video: float = 2.2 # 2.0 # scale factor for cropping video
27
+ vx_ratio_crop_video: float = 0.0 # adjust x offset
28
+ vy_ratio_crop_video: float = -0.1 # adjust y offset
29
+ direction: str = "large-small" # direction of cropping
src/config/inference_config.py ADDED
@@ -0,0 +1,52 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ config dataclass used for inference
5
+ """
6
+
7
+ import os.path as osp
8
+ import cv2
9
+ from numpy import ndarray
10
+ from dataclasses import dataclass
11
+ from typing import Literal, Tuple
12
+ from .base_config import PrintableConfig, make_abs_path
13
+
14
+
15
+ @dataclass(repr=False) # use repr from PrintableConfig
16
+ class InferenceConfig(PrintableConfig):
17
+ # MODEL CONFIG, NOT EXPORTED PARAMS
18
+ models_config: str = make_abs_path('./models.yaml') # portrait animation config
19
+ checkpoint_F: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/appearance_feature_extractor.pth') # path to checkpoint of F
20
+ checkpoint_M: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/motion_extractor.pth') # path to checkpoint of M
21
+ checkpoint_G: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/spade_generator.pth') # path to checkpoint of G
22
+ checkpoint_W: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/warping_module.pth') # path to checkpoint of W
23
+ checkpoint_S: str = make_abs_path('../../pretrained_weights/liveportrait/retargeting_models/stitching_retargeting_module.pth') # path to checkpoint of S and R_eyes, R_lip
24
+
25
+ # EXPORTED PARAMS
26
+ flag_use_half_precision: bool = True
27
+ flag_crop_driving_video: bool = False
28
+ device_id: int = 0
29
+ flag_lip_zero: bool = False
30
+ flag_eye_retargeting: bool = False
31
+ flag_lip_retargeting: bool = False
32
+ flag_stitching: bool = False
33
+ flag_relative_motion: bool = False
34
+ flag_pasteback: bool = False
35
+ flag_do_crop: bool = False
36
+ flag_do_rot: bool = False
37
+ flag_force_cpu: bool = False
38
+ flag_do_torch_compile: bool = False
39
+
40
+ # NOT EXPORTED PARAMS
41
+ lip_zero_threshold: float = 0.03 # threshold for flag_lip_zero
42
+ anchor_frame: int = 0 # TO IMPLEMENT
43
+
44
+ input_shape: Tuple[int, int] = (256, 256) # input shape
45
+ output_format: Literal['mp4', 'gif'] = 'mp4' # output video format
46
+ crf: int = 15 # crf for output video
47
+ output_fps: int = 25 # default output fps
48
+
49
+ mask_crop: ndarray = cv2.imread(make_abs_path('../utils/resources/mask_template.png'), cv2.IMREAD_COLOR)
50
+ size_gif: int = 256 # default gif size, TO IMPLEMENT
51
+ source_max_dim: int = 1280 # the max dim of height and width of source image
52
+ source_division: int = 2 # make sure the height and width of source image can be divided by this number
src/config/models.yaml ADDED
@@ -0,0 +1,43 @@
1
+ model_params:
2
+ appearance_feature_extractor_params: # the F in the paper
3
+ image_channel: 3
4
+ block_expansion: 64
5
+ num_down_blocks: 2
6
+ max_features: 512
7
+ reshape_channel: 32
8
+ reshape_depth: 16
9
+ num_resblocks: 6
10
+ motion_extractor_params: # the M in the paper
11
+ num_kp: 21
12
+ backbone: convnextv2_tiny
13
+ warping_module_params: # the W in the paper
14
+ num_kp: 21
15
+ block_expansion: 64
16
+ max_features: 512
17
+ num_down_blocks: 2
18
+ reshape_channel: 32
19
+ estimate_occlusion_map: True
20
+ dense_motion_params:
21
+ block_expansion: 32
22
+ max_features: 1024
23
+ num_blocks: 5
24
+ reshape_depth: 16
25
+ compress: 4
26
+ spade_generator_params: # the G in the paper
27
+ upscale: 2 # represents upsample factor 256x256 -> 512x512
28
+ block_expansion: 64
29
+ max_features: 512
30
+ num_down_blocks: 2
31
+ stitching_retargeting_module_params: # the S in the paper
32
+ stitching:
33
+ input_size: 126 # (21*3)*2
34
+ hidden_sizes: [128, 128, 64]
35
+ output_size: 65 # (21*3)+2(tx,ty)
36
+ lip:
37
+ input_size: 65 # (21*3)+2
38
+ hidden_sizes: [128, 128, 64]
39
+ output_size: 63 # (21*3)
40
+ eye:
41
+ input_size: 66 # (21*3)+3
42
+ hidden_sizes: [256, 256, 128, 128, 64]
43
+ output_size: 63 # (21*3)
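
The sizes in this YAML are all derived from num_kp = 21; a small sketch (assuming it is run from the repository root) makes the arithmetic explicit.

```python
import yaml

with open("src/config/models.yaml") as f:
    cfg = yaml.safe_load(f)["model_params"]

num_kp = cfg["motion_extractor_params"]["num_kp"]                 # 21
stitch = cfg["stitching_retargeting_module_params"]["stitching"]
assert stitch["input_size"] == 2 * num_kp * 3                     # source + driving keypoints = 126
assert stitch["output_size"] == num_kp * 3 + 2                    # keypoint deltas + (tx, ty) = 65
```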
src/gradio_pipeline.py ADDED
@@ -0,0 +1,117 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ Pipeline for gradio
5
+ """
6
+ import gradio as gr
7
+
8
+ from .config.argument_config import ArgumentConfig
9
+ from .live_portrait_pipeline import LivePortraitPipeline
10
+ from .utils.io import load_img_online
11
+ from .utils.rprint import rlog as log
12
+ from .utils.crop import prepare_paste_back, paste_back
13
+ from .utils.camera import get_rotation_matrix
14
+
15
+
16
+ def update_args(args, user_args):
17
+ """update the args according to user inputs
18
+ """
19
+ for k, v in user_args.items():
20
+ if hasattr(args, k):
21
+ setattr(args, k, v)
22
+ return args
23
+
24
+
25
+ class GradioPipeline(LivePortraitPipeline):
26
+
27
+ def __init__(self, inference_cfg, crop_cfg, args: ArgumentConfig):
28
+ super().__init__(inference_cfg, crop_cfg)
29
+ # self.live_portrait_wrapper = self.live_portrait_wrapper
30
+ self.args = args
31
+
32
+ def execute_video(
33
+ self,
34
+ input_image_path,
35
+ input_video_path,
36
+ flag_relative_input,
37
+ flag_do_crop_input,
38
+ flag_remap_input,
39
+ flag_crop_driving_video_input
40
+ ):
41
+ """ for video-driven portrait animation
42
+ """
43
+ if input_image_path is not None and input_video_path is not None:
44
+ args_user = {
45
+ 'source_image': input_image_path,
46
+ 'driving_info': input_video_path,
47
+ 'flag_relative': flag_relative_input,
48
+ 'flag_do_crop': flag_do_crop_input,
49
+ 'flag_pasteback': flag_remap_input,
50
+ 'flag_crop_driving_video': flag_crop_driving_video_input
51
+ }
52
+ # update config from user input
53
+ self.args = update_args(self.args, args_user)
54
+ self.live_portrait_wrapper.update_config(self.args.__dict__)
55
+ self.cropper.update_config(self.args.__dict__)
56
+ # video driven animation
57
+ video_path, video_path_concat = self.execute(self.args)
58
+ gr.Info("Run successfully!", duration=2)
59
+ return video_path, video_path_concat,
60
+ else:
61
+ raise gr.Error("The input source portrait or driving video hasn't been prepared yet 💥!", duration=5)
62
+
63
+ def execute_image(self, input_eye_ratio: float, input_lip_ratio: float, input_image, flag_do_crop=True):
64
+ """ for single image retargeting
65
+ """
66
+ # disposable feature
67
+ f_s_user, x_s_user, source_lmk_user, crop_M_c2o, mask_ori, img_rgb = \
68
+ self.prepare_retargeting(input_image, flag_do_crop)
69
+
70
+ if input_eye_ratio is None or input_lip_ratio is None:
71
+ raise gr.Error("Invalid ratio input 💥!", duration=5)
72
+ else:
73
+ inference_cfg = self.live_portrait_wrapper.inference_cfg
74
+ x_s_user = x_s_user.to(self.live_portrait_wrapper.device)
75
+ f_s_user = f_s_user.to(self.live_portrait_wrapper.device)
76
+ # ∆_eyes,i = R_eyes(x_s; c_s,eyes, c_d,eyes,i)
77
+ combined_eye_ratio_tensor = self.live_portrait_wrapper.calc_combined_eye_ratio([[input_eye_ratio]], source_lmk_user)
78
+ eyes_delta = self.live_portrait_wrapper.retarget_eye(x_s_user, combined_eye_ratio_tensor)
79
+ # ∆_lip,i = R_lip(x_s; c_s,lip, c_d,lip,i)
80
+ combined_lip_ratio_tensor = self.live_portrait_wrapper.calc_combined_lip_ratio([[input_lip_ratio]], source_lmk_user)
81
+ lip_delta = self.live_portrait_wrapper.retarget_lip(x_s_user, combined_lip_ratio_tensor)
82
+ num_kp = x_s_user.shape[1]
83
+ # default: use x_s
84
+ x_d_new = x_s_user + eyes_delta.reshape(-1, num_kp, 3) + lip_delta.reshape(-1, num_kp, 3)
85
+ # D(W(f_s; x_s, x′_d))
86
+ out = self.live_portrait_wrapper.warp_decode(f_s_user, x_s_user, x_d_new)
87
+ out = self.live_portrait_wrapper.parse_output(out['out'])[0]
88
+ out_to_ori_blend = paste_back(out, crop_M_c2o, img_rgb, mask_ori)
89
+ gr.Info("Run successfully!", duration=2)
90
+ return out, out_to_ori_blend
91
+
92
+ def prepare_retargeting(self, input_image, flag_do_crop=True):
93
+ """ for single image retargeting
94
+ """
95
+ if input_image is not None:
96
+ # gr.Info("Upload successfully!", duration=2)
97
+ inference_cfg = self.live_portrait_wrapper.inference_cfg
98
+ ######## process source portrait ########
99
+ img_rgb = load_img_online(input_image, mode='rgb', max_dim=1280, n=16)
100
+ log(f"Load source image from {input_image}.")
101
+ crop_info = self.cropper.crop_source_image(img_rgb, self.cropper.crop_cfg)
102
+ if flag_do_crop:
103
+ I_s = self.live_portrait_wrapper.prepare_source(crop_info['img_crop_256x256'])
104
+ else:
105
+ I_s = self.live_portrait_wrapper.prepare_source(img_rgb)
106
+ x_s_info = self.live_portrait_wrapper.get_kp_info(I_s)
107
+ R_s = get_rotation_matrix(x_s_info['pitch'], x_s_info['yaw'], x_s_info['roll'])
108
+ ############################################
109
+ f_s_user = self.live_portrait_wrapper.extract_feature_3d(I_s)
110
+ x_s_user = self.live_portrait_wrapper.transform_keypoint(x_s_info)
111
+ source_lmk_user = crop_info['lmk_crop']
112
+ crop_M_c2o = crop_info['M_c2o']
113
+ mask_ori = prepare_paste_back(inference_cfg.mask_crop, crop_info['M_c2o'], dsize=(img_rgb.shape[1], img_rgb.shape[0]))
114
+ return f_s_user, x_s_user, source_lmk_user, crop_M_c2o, mask_ori, img_rgb
115
+ else:
116
+ # when press the clear button, go here
117
+ raise gr.Error("The retargeting input hasn't been prepared yet 💥!", duration=5)
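
A hypothetical, heavily simplified way to expose execute_image in a Gradio UI; the component choices and slider ranges are assumptions, and it requires the pretrained weights to be downloaded first.

```python
import gradio as gr
from src.config.argument_config import ArgumentConfig
from src.config.crop_config import CropConfig
from src.config.inference_config import InferenceConfig
from src.gradio_pipeline import GradioPipeline

pipeline = GradioPipeline(InferenceConfig(), CropConfig(), ArgumentConfig())
demo = gr.Interface(
    fn=pipeline.execute_image,
    inputs=[
        gr.Slider(0.0, 0.8, value=0.4, label="target eye-open ratio"),
        gr.Slider(0.0, 0.8, value=0.3, label="target lip-open ratio"),
        gr.Image(type="filepath", label="source portrait"),
    ],
    outputs=[gr.Image(label="crop-space result"), gr.Image(label="pasted-back result")],
)
demo.launch()
```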
src/live_portrait_pipeline.py ADDED
@@ -0,0 +1,285 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ Pipeline of LivePortrait
5
+ """
6
+
7
+ import torch
8
+ torch.backends.cudnn.benchmark = True # disable CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR warning
9
+
10
+ import cv2; cv2.setNumThreads(0); cv2.ocl.setUseOpenCL(False)
11
+ import numpy as np
12
+ import os
13
+ import os.path as osp
14
+ from rich.progress import track
15
+
16
+ from .config.argument_config import ArgumentConfig
17
+ from .config.inference_config import InferenceConfig
18
+ from .config.crop_config import CropConfig
19
+ from .utils.cropper import Cropper
20
+ from .utils.camera import get_rotation_matrix
21
+ from .utils.video import images2video, concat_frames, get_fps, add_audio_to_video, has_audio_stream
22
+ from .utils.crop import prepare_paste_back, paste_back
23
+ from .utils.io import load_image_rgb, load_driving_info, resize_to_limit, dump, load
24
+ from .utils.helper import mkdir, basename, dct2device, is_video, is_template, remove_suffix
25
+ from .utils.rprint import rlog as log
26
+ # from .utils.viz import viz_lmk
27
+ from .live_portrait_wrapper import LivePortraitWrapper
28
+
29
+
30
+ def make_abs_path(fn):
31
+ return osp.join(osp.dirname(osp.realpath(__file__)), fn)
32
+
33
+
34
+ class LivePortraitPipeline(object):
35
+
36
+ def __init__(self, inference_cfg: InferenceConfig, crop_cfg: CropConfig):
37
+ self.live_portrait_wrapper: LivePortraitWrapper = LivePortraitWrapper(inference_cfg=inference_cfg)
38
+ self.cropper: Cropper = Cropper(crop_cfg=crop_cfg)
39
+
40
+ def execute(self, args: ArgumentConfig):
41
+ # for convenience
42
+ inf_cfg = self.live_portrait_wrapper.inference_cfg
43
+ device = self.live_portrait_wrapper.device
44
+ crop_cfg = self.cropper.crop_cfg
45
+
46
+ ######## process source portrait ########
47
+ img_rgb = load_image_rgb(args.source_image)
48
+ img_rgb = resize_to_limit(img_rgb, inf_cfg.source_max_dim, inf_cfg.source_division)
49
+ log(f"Load source image from {args.source_image}")
50
+
51
+ crop_info = self.cropper.crop_source_image(img_rgb, crop_cfg)
52
+ if crop_info is None:
53
+ raise Exception("No face detected in the source image!")
54
+ source_lmk = crop_info['lmk_crop']
55
+ img_crop, img_crop_256x256 = crop_info['img_crop'], crop_info['img_crop_256x256']
56
+
57
+ if inf_cfg.flag_do_crop:
58
+ I_s = self.live_portrait_wrapper.prepare_source(img_crop_256x256)
59
+ else:
60
+ img_crop_256x256 = cv2.resize(img_rgb, (256, 256)) # force to resize to 256x256
61
+ I_s = self.live_portrait_wrapper.prepare_source(img_crop_256x256)
62
+ x_s_info = self.live_portrait_wrapper.get_kp_info(I_s)
63
+ x_c_s = x_s_info['kp']
64
+ R_s = get_rotation_matrix(x_s_info['pitch'], x_s_info['yaw'], x_s_info['roll'])
65
+ f_s = self.live_portrait_wrapper.extract_feature_3d(I_s)
66
+ x_s = self.live_portrait_wrapper.transform_keypoint(x_s_info)
67
+
68
+ flag_lip_zero = inf_cfg.flag_lip_zero # not overwrite
69
+ if flag_lip_zero:
70
+ # let lip-open scalar to be 0 at first
71
+ c_d_lip_before_animation = [0.]
72
+ combined_lip_ratio_tensor_before_animation = self.live_portrait_wrapper.calc_combined_lip_ratio(c_d_lip_before_animation, source_lmk)
73
+ if combined_lip_ratio_tensor_before_animation[0][0] < inf_cfg.lip_zero_threshold:
74
+ flag_lip_zero = False
75
+ else:
76
+ lip_delta_before_animation = self.live_portrait_wrapper.retarget_lip(x_s, combined_lip_ratio_tensor_before_animation)
77
+ ############################################
78
+
79
+ ######## process driving info ########
80
+ flag_load_from_template = is_template(args.driving_info)
81
+ driving_rgb_crop_256x256_lst = None
82
+ wfp_template = None
83
+
84
+ if flag_load_from_template:
85
+ # NOTE: load from template, it is fast, but the cropping video is None
86
+ log(f"Load from template: {args.driving_info} (not a video), so the cropped driving video and audio are both unavailable.", style='bold green')
87
+ template_dct = load(args.driving_info)
88
+ n_frames = template_dct['n_frames']
89
+
90
+ # set output_fps
91
+ output_fps = template_dct.get('output_fps', inf_cfg.output_fps)
92
+ log(f'The FPS of template: {output_fps}')
93
+
94
+ if args.flag_crop_driving_video:
95
+ log("Warning: flag_crop_driving_video is True, but the driving info is a template, so it is ignored.")
96
+
97
+ elif osp.exists(args.driving_info) and is_video(args.driving_info):
98
+ # load from video file, AND make motion template
99
+ log(f"Load video: {args.driving_info}")
100
+ if osp.isdir(args.driving_info):
101
+ output_fps = inf_cfg.output_fps
102
+ else:
103
+ output_fps = int(get_fps(args.driving_info))
104
+ log(f'The FPS of {args.driving_info} is: {output_fps}')
105
+
106
+ log(f"Load video file (mp4 mov avi etc...): {args.driving_info}")
107
+ driving_rgb_lst = load_driving_info(args.driving_info)
108
+
109
+ ######## make motion template ########
110
+ log("Start making motion template...")
111
+ if inf_cfg.flag_crop_driving_video:
112
+ ret = self.cropper.crop_driving_video(driving_rgb_lst)
113
+ log(f'Driving video is cropped, {len(ret["frame_crop_lst"])} frames are processed.')
114
+ driving_rgb_crop_lst, driving_lmk_crop_lst = ret['frame_crop_lst'], ret['lmk_crop_lst']
115
+ driving_rgb_crop_256x256_lst = [cv2.resize(_, (256, 256)) for _ in driving_rgb_crop_lst]
116
+ else:
117
+ driving_lmk_crop_lst = self.cropper.calc_lmks_from_cropped_video(driving_rgb_lst)
118
+ driving_rgb_crop_256x256_lst = [cv2.resize(_, (256, 256)) for _ in driving_rgb_lst] # force to resize to 256x256
119
+
120
+ c_d_eyes_lst, c_d_lip_lst = self.live_portrait_wrapper.calc_driving_ratio(driving_lmk_crop_lst)
121
+ # save the motion template
122
+ I_d_lst = self.live_portrait_wrapper.prepare_driving_videos(driving_rgb_crop_256x256_lst)
123
+ template_dct = self.make_motion_template(I_d_lst, c_d_eyes_lst, c_d_lip_lst, output_fps=output_fps)
124
+
125
+ wfp_template = remove_suffix(args.driving_info) + '.pkl'
126
+ dump(wfp_template, template_dct)
127
+ log(f"Dump motion template to {wfp_template}")
128
+
129
+ n_frames = I_d_lst.shape[0]
130
+ else:
131
+ raise Exception(f"{args.driving_info} not exists or unsupported driving info types!")
132
+ #########################################
133
+
134
+ ######## prepare for pasteback ########
135
+ I_p_pstbk_lst = None
136
+ if inf_cfg.flag_pasteback and inf_cfg.flag_do_crop and inf_cfg.flag_stitching:
137
+ mask_ori_float = prepare_paste_back(inf_cfg.mask_crop, crop_info['M_c2o'], dsize=(img_rgb.shape[1], img_rgb.shape[0]))
138
+ I_p_pstbk_lst = []
139
+ log("Prepared pasteback mask done.")
140
+ #########################################
141
+
142
+ I_p_lst = []
143
+ R_d_0, x_d_0_info = None, None
144
+
145
+ for i in track(range(n_frames), description='🚀Animating...', total=n_frames):
146
+ x_d_i_info = template_dct['motion'][i]
147
+ x_d_i_info = dct2device(x_d_i_info, device)
148
+ R_d_i = x_d_i_info['R_d']
149
+
150
+ if i == 0:
151
+ R_d_0 = R_d_i
152
+ x_d_0_info = x_d_i_info
153
+
154
+ if inf_cfg.flag_relative_motion:
155
+ R_new = (R_d_i @ R_d_0.permute(0, 2, 1)) @ R_s
156
+ delta_new = x_s_info['exp'] + (x_d_i_info['exp'] - x_d_0_info['exp'])
157
+ scale_new = x_s_info['scale'] * (x_d_i_info['scale'] / x_d_0_info['scale'])
158
+ t_new = x_s_info['t'] + (x_d_i_info['t'] - x_d_0_info['t'])
159
+ else:
160
+ R_new = R_d_i
161
+ delta_new = x_d_i_info['exp']
162
+ scale_new = x_s_info['scale']
163
+ t_new = x_d_i_info['t']
164
+
165
+ t_new[..., 2].fill_(0) # zero tz
166
+ x_d_i_new = scale_new * (x_c_s @ R_new + delta_new) + t_new
167
+
168
+ # Algorithm 1:
169
+ if not inf_cfg.flag_stitching and not inf_cfg.flag_eye_retargeting and not inf_cfg.flag_lip_retargeting:
170
+ # without stitching or retargeting
171
+ if flag_lip_zero:
172
+ x_d_i_new += lip_delta_before_animation.reshape(-1, x_s.shape[1], 3)
173
+ else:
174
+ pass
175
+ elif inf_cfg.flag_stitching and not inf_cfg.flag_eye_retargeting and not inf_cfg.flag_lip_retargeting:
176
+ # with stitching and without retargeting
177
+ if flag_lip_zero:
178
+ x_d_i_new = self.live_portrait_wrapper.stitching(x_s, x_d_i_new) + lip_delta_before_animation.reshape(-1, x_s.shape[1], 3)
179
+ else:
180
+ x_d_i_new = self.live_portrait_wrapper.stitching(x_s, x_d_i_new)
181
+ else:
182
+ eyes_delta, lip_delta = None, None
183
+ if inf_cfg.flag_eye_retargeting:
184
+ c_d_eyes_i = c_d_eyes_lst[i]
185
+ combined_eye_ratio_tensor = self.live_portrait_wrapper.calc_combined_eye_ratio(c_d_eyes_i, source_lmk)
186
+ # ∆_eyes,i = R_eyes(x_s; c_s,eyes, c_d,eyes,i)
187
+ eyes_delta = self.live_portrait_wrapper.retarget_eye(x_s, combined_eye_ratio_tensor)
188
+ if inf_cfg.flag_lip_retargeting:
189
+ c_d_lip_i = c_d_lip_lst[i]
190
+ combined_lip_ratio_tensor = self.live_portrait_wrapper.calc_combined_lip_ratio(c_d_lip_i, source_lmk)
191
+ # ∆_lip,i = R_lip(x_s; c_s,lip, c_d,lip,i)
192
+ lip_delta = self.live_portrait_wrapper.retarget_lip(x_s, combined_lip_ratio_tensor)
193
+
194
+ if inf_cfg.flag_relative_motion: # use x_s
195
+ x_d_i_new = x_s + \
196
+ (eyes_delta.reshape(-1, x_s.shape[1], 3) if eyes_delta is not None else 0) + \
197
+ (lip_delta.reshape(-1, x_s.shape[1], 3) if lip_delta is not None else 0)
198
+ else: # use x_d,i
199
+ x_d_i_new = x_d_i_new + \
200
+ (eyes_delta.reshape(-1, x_s.shape[1], 3) if eyes_delta is not None else 0) + \
201
+ (lip_delta.reshape(-1, x_s.shape[1], 3) if lip_delta is not None else 0)
202
+
203
+ if inf_cfg.flag_stitching:
204
+ x_d_i_new = self.live_portrait_wrapper.stitching(x_s, x_d_i_new)
205
+
206
+ out = self.live_portrait_wrapper.warp_decode(f_s, x_s, x_d_i_new)
207
+ I_p_i = self.live_portrait_wrapper.parse_output(out['out'])[0]
208
+ I_p_lst.append(I_p_i)
209
+
210
+ if inf_cfg.flag_pasteback and inf_cfg.flag_do_crop and inf_cfg.flag_stitching:
211
+ # TODO: pasteback is slow, considering optimize it using multi-threading or GPU
212
+ I_p_pstbk = paste_back(I_p_i, crop_info['M_c2o'], img_rgb, mask_ori_float)
213
+ I_p_pstbk_lst.append(I_p_pstbk)
214
+
215
+ mkdir(args.output_dir)
216
+ wfp_concat = None
217
+ flag_has_audio = (not flag_load_from_template) and has_audio_stream(args.driving_info)
218
+
219
+ ######### build final concat result #########
220
+ # driving frame | source image | generation, or source image | generation
221
+ frames_concatenated = concat_frames(driving_rgb_crop_256x256_lst, img_crop_256x256, I_p_lst)
222
+ wfp_concat = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}_concat.mp4')
223
+ images2video(frames_concatenated, wfp=wfp_concat, fps=output_fps)
224
+
225
+ if flag_has_audio:
226
+ # final result with concat
227
+ wfp_concat_with_audio = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}_concat_with_audio.mp4')
228
+ add_audio_to_video(wfp_concat, args.driving_info, wfp_concat_with_audio)
229
+ os.replace(wfp_concat_with_audio, wfp_concat)
230
+ log(f"Replace {wfp_concat} with {wfp_concat_with_audio}")
231
+
232
+ # save driven result
233
+ wfp = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}.mp4')
234
+ if I_p_pstbk_lst is not None and len(I_p_pstbk_lst) > 0:
235
+ images2video(I_p_pstbk_lst, wfp=wfp, fps=output_fps)
236
+ else:
237
+ images2video(I_p_lst, wfp=wfp, fps=output_fps)
238
+
239
+ ######### build final result #########
240
+ if flag_has_audio:
241
+ wfp_with_audio = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}_with_audio.mp4')
242
+ add_audio_to_video(wfp, args.driving_info, wfp_with_audio)
243
+ os.replace(wfp_with_audio, wfp)
244
+ log(f"Replace {wfp} with {wfp_with_audio}")
245
+
246
+ # final log
247
+ if wfp_template not in (None, ''):
248
+ log(f'Animated template: {wfp_template}, you can specify the `-d` argument with this template path next time to skip video cropping and motion-template making, and to protect privacy.', style='bold green')
249
+ log(f'Animated video: {wfp}')
250
+ log(f'Animated video with concat: {wfp_concat}')
251
+
252
+ return wfp, wfp_concat
253
+
254
+ def make_motion_template(self, I_d_lst, c_d_eyes_lst, c_d_lip_lst, **kwargs):
255
+ n_frames = I_d_lst.shape[0]
256
+ template_dct = {
257
+ 'n_frames': n_frames,
258
+ 'output_fps': kwargs.get('output_fps', 25),
259
+ 'motion': [],
260
+ 'c_d_eyes_lst': [],
261
+ 'c_d_lip_lst': [],
262
+ }
263
+
264
+ for i in track(range(n_frames), description='Making motion templates...', total=n_frames):
265
+ # collect s_d, R_d, δ_d and t_d for inference
266
+ I_d_i = I_d_lst[i]
267
+ x_d_i_info = self.live_portrait_wrapper.get_kp_info(I_d_i)
268
+ R_d_i = get_rotation_matrix(x_d_i_info['pitch'], x_d_i_info['yaw'], x_d_i_info['roll'])
269
+
270
+ item_dct = {
271
+ 'scale': x_d_i_info['scale'].cpu().numpy().astype(np.float32),
272
+ 'R_d': R_d_i.cpu().numpy().astype(np.float32),
273
+ 'exp': x_d_i_info['exp'].cpu().numpy().astype(np.float32),
274
+ 't': x_d_i_info['t'].cpu().numpy().astype(np.float32),
275
+ }
276
+
277
+ template_dct['motion'].append(item_dct)
278
+
279
+ c_d_eyes = c_d_eyes_lst[i].astype(np.float32)
280
+ template_dct['c_d_eyes_lst'].append(c_d_eyes)
281
+
282
+ c_d_lip = c_d_lip_lst[i].astype(np.float32)
283
+ template_dct['c_d_lip_lst'].append(c_d_lip)
284
+
285
+ return template_dct
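
The relative-motion branch above composes rotations as R_new = (R_d,i · R_d,0^T) · R_s and offsets expression, scale and translation against frame 0, so a driving frame identical to frame 0 reproduces the source pose. A toy check of that property (shapes follow the (bs, 3, 3) convention of get_rotation_matrix):

```python
import math
import torch

c, s = math.cos(0.3), math.sin(0.3)
R_s   = torch.tensor([[c, -s, 0.], [s, c, 0.], [0., 0., 1.]]).unsqueeze(0)  # some source rotation
R_d_0 = torch.tensor([[c, 0., s], [0., 1., 0.], [-s, 0., c]]).unsqueeze(0)  # first driving frame
R_d_i = R_d_0.clone()                                                       # current frame == frame 0

R_new = (R_d_i @ R_d_0.permute(0, 2, 1)) @ R_s
assert torch.allclose(R_new, R_s, atol=1e-6)   # relative motion w.r.t. frame 0 is identity
```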
src/live_portrait_wrapper.py ADDED
@@ -0,0 +1,318 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ Wrapper for LivePortrait core functions
5
+ """
6
+
7
+ import os.path as osp
8
+ import numpy as np
9
+ import cv2
10
+ import torch
11
+ import yaml
12
+
13
+ from .utils.timer import Timer
14
+ from .utils.helper import load_model, concat_feat
15
+ from .utils.camera import headpose_pred_to_degree, get_rotation_matrix
16
+ from .utils.retargeting_utils import calc_eye_close_ratio, calc_lip_close_ratio
17
+ from .config.inference_config import InferenceConfig
18
+ from .utils.rprint import rlog as log
19
+
20
+
21
+ class LivePortraitWrapper(object):
22
+
23
+ def __init__(self, inference_cfg: InferenceConfig):
24
+
25
+ self.inference_cfg = inference_cfg
26
+ self.device_id = inference_cfg.device_id
27
+ self.compile = inference_cfg.flag_do_torch_compile
28
+ if inference_cfg.flag_force_cpu:
29
+ self.device = 'cpu'
30
+ else:
31
+ self.device = 'cuda:' + str(self.device_id)
32
+
33
+ model_config = yaml.load(open(inference_cfg.models_config, 'r'), Loader=yaml.SafeLoader)
34
+ # init F
35
+ self.appearance_feature_extractor = load_model(inference_cfg.checkpoint_F, model_config, self.device, 'appearance_feature_extractor')
36
+ log(f'Load appearance_feature_extractor done.')
37
+ # init M
38
+ self.motion_extractor = load_model(inference_cfg.checkpoint_M, model_config, self.device, 'motion_extractor')
39
+ log(f'Load motion_extractor done.')
40
+ # init W
41
+ self.warping_module = load_model(inference_cfg.checkpoint_W, model_config, self.device, 'warping_module')
42
+ log(f'Load warping_module done.')
43
+ # init G
44
+ self.spade_generator = load_model(inference_cfg.checkpoint_G, model_config, self.device, 'spade_generator')
45
+ log(f'Load spade_generator done.')
46
+ # init S and R
47
+ if inference_cfg.checkpoint_S is not None and osp.exists(inference_cfg.checkpoint_S):
48
+ self.stitching_retargeting_module = load_model(inference_cfg.checkpoint_S, model_config, self.device, 'stitching_retargeting_module')
49
+ log(f'Load stitching_retargeting_module done.')
50
+ else:
51
+ self.stitching_retargeting_module = None
52
+ # Optimize for inference
53
+ if self.compile:
54
+ self.warping_module = torch.compile(self.warping_module, mode='max-autotune')
55
+ self.spade_generator = torch.compile(self.spade_generator, mode='max-autotune')
56
+
57
+ self.timer = Timer()
58
+
59
+ def update_config(self, user_args):
60
+ for k, v in user_args.items():
61
+ if hasattr(self.inference_cfg, k):
62
+ setattr(self.inference_cfg, k, v)
63
+
64
+ def prepare_source(self, img: np.ndarray) -> torch.Tensor:
65
+ """ construct the input as standard
66
+ img: HxWx3, uint8, 256x256
67
+ """
68
+ h, w = img.shape[:2]
69
+ if h != self.inference_cfg.input_shape[0] or w != self.inference_cfg.input_shape[1]:
70
+ x = cv2.resize(img, (self.inference_cfg.input_shape[0], self.inference_cfg.input_shape[1]))
71
+ else:
72
+ x = img.copy()
73
+
74
+ if x.ndim == 3:
75
+ x = x[np.newaxis].astype(np.float32) / 255. # HxWx3 -> 1xHxWx3, normalized to 0~1
76
+ elif x.ndim == 4:
77
+ x = x.astype(np.float32) / 255. # BxHxWx3, normalized to 0~1
78
+ else:
79
+ raise ValueError(f'img ndim should be 3 or 4: {x.ndim}')
80
+ x = np.clip(x, 0, 1) # clip to 0~1
81
+ x = torch.from_numpy(x).permute(0, 3, 1, 2) # 1xHxWx3 -> 1x3xHxW
82
+ x = x.to(self.device)
83
+ return x
84
+
85
+ def prepare_driving_videos(self, imgs) -> torch.Tensor:
86
+ """ construct the input as standard
87
+ imgs: NxBxHxWx3, uint8
88
+ """
89
+ if isinstance(imgs, list):
90
+ _imgs = np.array(imgs)[..., np.newaxis] # TxHxWx3x1
91
+ elif isinstance(imgs, np.ndarray):
92
+ _imgs = imgs
93
+ else:
94
+ raise ValueError(f'imgs type error: {type(imgs)}')
95
+
96
+ y = _imgs.astype(np.float32) / 255.
97
+ y = np.clip(y, 0, 1) # clip to 0~1
98
+ y = torch.from_numpy(y).permute(0, 4, 3, 1, 2) # TxHxWx3x1 -> Tx1x3xHxW
99
+ y = y.to(self.device)
100
+
101
+ return y
102
+
103
+ def extract_feature_3d(self, x: torch.Tensor) -> torch.Tensor:
104
+ """ get the appearance feature of the image by F
105
+ x: Bx3xHxW, normalized to 0~1
106
+ """
107
+ with torch.no_grad():
108
+ with torch.autocast(device_type=self.device[:4], dtype=torch.float16, enabled=self.inference_cfg.flag_use_half_precision):
109
+ feature_3d = self.appearance_feature_extractor(x)
110
+
111
+ return feature_3d.float()
112
+
113
+ def get_kp_info(self, x: torch.Tensor, **kwargs) -> dict:
114
+ """ get the implicit keypoint information
115
+ x: Bx3xHxW, normalized to 0~1
116
+ flag_refine_info: whether to transform the pose predictions to degrees and reshape the keypoint dimensions
117
+ return: A dict contains keys: 'pitch', 'yaw', 'roll', 't', 'exp', 'scale', 'kp'
118
+ """
119
+ with torch.no_grad():
120
+ with torch.autocast(device_type=self.device[:4], dtype=torch.float16, enabled=self.inference_cfg.flag_use_half_precision):
121
+ kp_info = self.motion_extractor(x)
122
+
123
+ if self.inference_cfg.flag_use_half_precision:
124
+ # float the dict
125
+ for k, v in kp_info.items():
126
+ if isinstance(v, torch.Tensor):
127
+ kp_info[k] = v.float()
128
+
129
+ flag_refine_info: bool = kwargs.get('flag_refine_info', True)
130
+ if flag_refine_info:
131
+ bs = kp_info['kp'].shape[0]
132
+ kp_info['pitch'] = headpose_pred_to_degree(kp_info['pitch'])[:, None] # Bx1
133
+ kp_info['yaw'] = headpose_pred_to_degree(kp_info['yaw'])[:, None] # Bx1
134
+ kp_info['roll'] = headpose_pred_to_degree(kp_info['roll'])[:, None] # Bx1
135
+ kp_info['kp'] = kp_info['kp'].reshape(bs, -1) # B,Nx3
136
+ kp_info['exp'] = kp_info['exp'].reshape(bs, -1) # B,Nx3
137
+
138
+ return kp_info
139
+
140
+ def get_pose_dct(self, kp_info: dict) -> dict:
141
+ pose_dct = dict(
142
+ pitch=headpose_pred_to_degree(kp_info['pitch']).item(),
143
+ yaw=headpose_pred_to_degree(kp_info['yaw']).item(),
144
+ roll=headpose_pred_to_degree(kp_info['roll']).item(),
145
+ )
146
+ return pose_dct
147
+
148
+ def get_fs_and_kp_info(self, source_prepared, driving_first_frame):
149
+
150
+ # get the canonical keypoints of source image by M
151
+ source_kp_info = self.get_kp_info(source_prepared, flag_refine_info=True)
152
+ source_rotation = get_rotation_matrix(source_kp_info['pitch'], source_kp_info['yaw'], source_kp_info['roll'])
153
+
154
+ # get the canonical keypoints of first driving frame by M
155
+ driving_first_frame_kp_info = self.get_kp_info(driving_first_frame, flag_refine_info=True)
156
+ driving_first_frame_rotation = get_rotation_matrix(
157
+ driving_first_frame_kp_info['pitch'],
158
+ driving_first_frame_kp_info['yaw'],
159
+ driving_first_frame_kp_info['roll']
160
+ )
161
+
162
+ # get feature volume by F
163
+ source_feature_3d = self.extract_feature_3d(source_prepared)
164
+
165
+ return source_kp_info, source_rotation, source_feature_3d, driving_first_frame_kp_info, driving_first_frame_rotation
166
+
167
+ def transform_keypoint(self, kp_info: dict):
168
+ """
169
+ transform the implicit keypoints with the pose, shift, and expression deformation
170
+ kp: BxNx3
171
+ """
172
+ kp = kp_info['kp'] # (bs, k, 3)
173
+ pitch, yaw, roll = kp_info['pitch'], kp_info['yaw'], kp_info['roll']
174
+
175
+ t, exp = kp_info['t'], kp_info['exp']
176
+ scale = kp_info['scale']
177
+
178
+ pitch = headpose_pred_to_degree(pitch)
179
+ yaw = headpose_pred_to_degree(yaw)
180
+ roll = headpose_pred_to_degree(roll)
181
+
182
+ bs = kp.shape[0]
183
+ if kp.ndim == 2:
184
+ num_kp = kp.shape[1] // 3 # Bx(num_kpx3)
185
+ else:
186
+ num_kp = kp.shape[1] # Bxnum_kpx3
187
+
188
+ rot_mat = get_rotation_matrix(pitch, yaw, roll) # (bs, 3, 3)
189
+
190
+ # Eqn.2: s * (R * x_c,s + exp) + t
191
+ kp_transformed = kp.view(bs, num_kp, 3) @ rot_mat + exp.view(bs, num_kp, 3)
192
+ kp_transformed *= scale[..., None] # (bs, k, 3) * (bs, 1, 1) = (bs, k, 3)
193
+ kp_transformed[:, :, 0:2] += t[:, None, 0:2] # remove z, only apply tx ty
194
+ # kp_transformed[:, :, :] += t[:, None, :]
195
+
196
+ return kp_transformed
197
+
198
+ def retarget_eye(self, kp_source: torch.Tensor, eye_close_ratio: torch.Tensor) -> torch.Tensor:
199
+ """
200
+ kp_source: BxNx3
201
+ eye_close_ratio: Bx3
202
+ Return: Bx(3*num_kp+2)
203
+ """
204
+ feat_eye = concat_feat(kp_source, eye_close_ratio)
205
+
206
+ with torch.no_grad():
207
+ delta = self.stitching_retargeting_module['eye'](feat_eye)
208
+
209
+ return delta
210
+
211
+ def retarget_lip(self, kp_source: torch.Tensor, lip_close_ratio: torch.Tensor) -> torch.Tensor:
212
+ """
213
+ kp_source: BxNx3
214
+ lip_close_ratio: Bx2
215
+ """
216
+ feat_lip = concat_feat(kp_source, lip_close_ratio)
217
+
218
+ with torch.no_grad():
219
+ delta = self.stitching_retargeting_module['lip'](feat_lip)
220
+
221
+ return delta
222
+
223
+ def stitch(self, kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
224
+ """
225
+ kp_source: BxNx3
226
+ kp_driving: BxNx3
227
+ Return: Bx(3*num_kp+2)
228
+ """
229
+ feat_stiching = concat_feat(kp_source, kp_driving)
230
+
231
+ with torch.no_grad():
232
+ delta = self.stitching_retargeting_module['stitching'](feat_stiching)
233
+
234
+ return delta
235
+
236
+ def stitching(self, kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
237
+ """ conduct the stitching
238
+ kp_source: Bxnum_kpx3
239
+ kp_driving: Bxnum_kpx3
240
+ """
241
+
242
+ if self.stitching_retargeting_module is not None:
243
+
244
+ bs, num_kp = kp_source.shape[:2]
245
+
246
+ kp_driving_new = kp_driving.clone()
247
+ delta = self.stitch(kp_source, kp_driving_new)
248
+
249
+ delta_exp = delta[..., :3*num_kp].reshape(bs, num_kp, 3) # 1x20x3
250
+ delta_tx_ty = delta[..., 3*num_kp:3*num_kp+2].reshape(bs, 1, 2) # 1x1x2
251
+
252
+ kp_driving_new += delta_exp
253
+ kp_driving_new[..., :2] += delta_tx_ty
254
+
255
+ return kp_driving_new
256
+
257
+ return kp_driving
258
+
259
+ def warp_decode(self, feature_3d: torch.Tensor, kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
260
+ """ get the image after the warping of the implicit keypoints
261
+ feature_3d: Bx32x16x64x64, feature volume
262
+ kp_source: BxNx3
263
+ kp_driving: BxNx3
264
+ """
265
+ # The line 18 in Algorithm 1: D(W(f_s; x_s, x′_d,i))
266
+ with torch.no_grad():
267
+ with torch.autocast(device_type=self.device[:4], dtype=torch.float16, enabled=self.inference_cfg.flag_use_half_precision):
268
+ if self.compile:
269
+ # Mark the beginning of a new CUDA Graph step
270
+ torch.compiler.cudagraph_mark_step_begin()
271
+ # get decoder input
272
+ ret_dct = self.warping_module(feature_3d, kp_source=kp_source, kp_driving=kp_driving)
273
+ # decode
274
+ ret_dct['out'] = self.spade_generator(feature=ret_dct['out'])
275
+
276
+ # float the dict
277
+ if self.inference_cfg.flag_use_half_precision:
278
+ for k, v in ret_dct.items():
279
+ if isinstance(v, torch.Tensor):
280
+ ret_dct[k] = v.float()
281
+
282
+ return ret_dct
283
+
284
+ def parse_output(self, out: torch.Tensor) -> np.ndarray:
285
+ """ construct the output as standard
286
+ return: 1xHxWx3, uint8
287
+ """
288
+ out = np.transpose(out.data.cpu().numpy(), [0, 2, 3, 1]) # 1x3xHxW -> 1xHxWx3
289
+ out = np.clip(out, 0, 1) # clip to 0~1
290
+ out = np.clip(out * 255, 0, 255).astype(np.uint8) # 0~1 -> 0~255
291
+
292
+ return out
293
+
294
+ def calc_driving_ratio(self, driving_lmk_lst):
295
+ input_eye_ratio_lst = []
296
+ input_lip_ratio_lst = []
297
+ for lmk in driving_lmk_lst:
298
+ # for eyes retargeting
299
+ input_eye_ratio_lst.append(calc_eye_close_ratio(lmk[None]))
300
+ # for lip retargeting
301
+ input_lip_ratio_lst.append(calc_lip_close_ratio(lmk[None]))
302
+ return input_eye_ratio_lst, input_lip_ratio_lst
303
+
304
+ def calc_combined_eye_ratio(self, c_d_eyes_i, source_lmk):
305
+ c_s_eyes = calc_eye_close_ratio(source_lmk[None])
306
+ c_s_eyes_tensor = torch.from_numpy(c_s_eyes).float().to(self.device)
307
+ c_d_eyes_i_tensor = torch.Tensor([c_d_eyes_i[0][0]]).reshape(1, 1).to(self.device)
308
+ # [c_s,eyes, c_d,eyes,i]
309
+ combined_eye_ratio_tensor = torch.cat([c_s_eyes_tensor, c_d_eyes_i_tensor], dim=1)
310
+ return combined_eye_ratio_tensor
311
+
312
+ def calc_combined_lip_ratio(self, c_d_lip_i, source_lmk):
313
+ c_s_lip = calc_lip_close_ratio(source_lmk[None])
314
+ c_s_lip_tensor = torch.from_numpy(c_s_lip).float().to(self.device)
315
+ c_d_lip_i_tensor = torch.Tensor([c_d_lip_i[0]]).to(self.device).reshape(1, 1) # 1x1
316
+ # [c_s,lip, c_d,lip,i]
317
+ combined_lip_ratio_tensor = torch.cat([c_s_lip_tensor, c_d_lip_i_tensor], dim=1) # 1x2
318
+ return combined_lip_ratio_tensor
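
transform_keypoint implements Eqn. 2, x_s = s · (x_c,s R + δ) + t, with tz dropped. A toy version with made-up values (num_kp = 21 as in models.yaml):

```python
import torch

bs, num_kp = 1, 21
kp    = torch.randn(bs, num_kp, 3)        # canonical keypoints x_c,s
R     = torch.eye(3).expand(bs, 3, 3)     # rotation built from (pitch, yaw, roll)
exp   = torch.zeros(bs, num_kp, 3)        # expression deformation delta
scale = torch.ones(bs, 1)
t     = torch.tensor([[0.1, -0.2, 0.5]])  # translation; tz is ignored below

x_s = kp @ R + exp                        # rotate + deform
x_s = x_s * scale[..., None]              # per-sample scale
x_s[:, :, 0:2] += t[:, None, 0:2]         # apply tx, ty only, as in the wrapper
```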
src/modules/__init__.py ADDED
File without changes
src/modules/appearance_feature_extractor.py ADDED
@@ -0,0 +1,48 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ Appearance extractor(F) defined in paper, which maps the source image s to a 3D appearance feature volume.
5
+ """
6
+
7
+ import torch
8
+ from torch import nn
9
+ from .util import SameBlock2d, DownBlock2d, ResBlock3d
10
+
11
+
12
+ class AppearanceFeatureExtractor(nn.Module):
13
+
14
+ def __init__(self, image_channel, block_expansion, num_down_blocks, max_features, reshape_channel, reshape_depth, num_resblocks):
15
+ super(AppearanceFeatureExtractor, self).__init__()
16
+ self.image_channel = image_channel
17
+ self.block_expansion = block_expansion
18
+ self.num_down_blocks = num_down_blocks
19
+ self.max_features = max_features
20
+ self.reshape_channel = reshape_channel
21
+ self.reshape_depth = reshape_depth
22
+
23
+ self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(3, 3), padding=(1, 1))
24
+
25
+ down_blocks = []
26
+ for i in range(num_down_blocks):
27
+ in_features = min(max_features, block_expansion * (2 ** i))
28
+ out_features = min(max_features, block_expansion * (2 ** (i + 1)))
29
+ down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
30
+ self.down_blocks = nn.ModuleList(down_blocks)
31
+
32
+ self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1)
33
+
34
+ self.resblocks_3d = torch.nn.Sequential()
35
+ for i in range(num_resblocks):
36
+ self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1))
37
+
38
+ def forward(self, source_image):
39
+ out = self.first(source_image) # Bx3x256x256 -> Bx64x256x256
40
+
41
+ for i in range(len(self.down_blocks)):
42
+ out = self.down_blocks[i](out)
43
+ out = self.second(out)
44
+ bs, c, h, w = out.shape # ->Bx512x64x64
45
+
46
+ f_s = out.view(bs, self.reshape_channel, self.reshape_depth, h, w) # ->Bx32x16x64x64
47
+ f_s = self.resblocks_3d(f_s) # ->Bx32x16x64x64
48
+ return f_s
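
With the appearance_feature_extractor_params from models.yaml, the forward pass maps a 256x256 source image to a 32x16x64x64 feature volume; a minimal shape check (a sketch, assuming the repo root is on PYTHONPATH):

```python
import torch
from src.modules.appearance_feature_extractor import AppearanceFeatureExtractor

F_net = AppearanceFeatureExtractor(
    image_channel=3, block_expansion=64, num_down_blocks=2, max_features=512,
    reshape_channel=32, reshape_depth=16, num_resblocks=6,
)
f_s = F_net(torch.rand(1, 3, 256, 256))   # source image normalized to [0, 1]
assert f_s.shape == (1, 32, 16, 64, 64)
```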
src/modules/convnextv2.py ADDED
@@ -0,0 +1,149 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ This module adapts ConvNeXtV2 for the extraction of implicit keypoints, poses, and expression deformation.
5
+ """
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ # from timm.models.layers import trunc_normal_, DropPath
10
+ from .util import LayerNorm, DropPath, trunc_normal_, GRN
11
+
12
+ __all__ = ['convnextv2_tiny']
13
+
14
+
15
+ class Block(nn.Module):
16
+ """ ConvNeXtV2 Block.
17
+
18
+ Args:
19
+ dim (int): Number of input channels.
20
+ drop_path (float): Stochastic depth rate. Default: 0.0
21
+ """
22
+
23
+ def __init__(self, dim, drop_path=0.):
24
+ super().__init__()
25
+ self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
26
+ self.norm = LayerNorm(dim, eps=1e-6)
27
+ self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
28
+ self.act = nn.GELU()
29
+ self.grn = GRN(4 * dim)
30
+ self.pwconv2 = nn.Linear(4 * dim, dim)
31
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
32
+
33
+ def forward(self, x):
34
+ input = x
35
+ x = self.dwconv(x)
36
+ x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
37
+ x = self.norm(x)
38
+ x = self.pwconv1(x)
39
+ x = self.act(x)
40
+ x = self.grn(x)
41
+ x = self.pwconv2(x)
42
+ x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
43
+
44
+ x = input + self.drop_path(x)
45
+ return x
46
+
47
+
48
+ class ConvNeXtV2(nn.Module):
49
+ """ ConvNeXt V2
50
+
51
+ Args:
52
+ in_chans (int): Number of input image channels. Default: 3
53
+ num_classes (int): Number of classes for classification head. Default: 1000
54
+ depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
55
+ dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
56
+ drop_path_rate (float): Stochastic depth rate. Default: 0.
57
+ head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
58
+ """
59
+
60
+ def __init__(
61
+ self,
62
+ in_chans=3,
63
+ depths=[3, 3, 9, 3],
64
+ dims=[96, 192, 384, 768],
65
+ drop_path_rate=0.,
66
+ **kwargs
67
+ ):
68
+ super().__init__()
69
+ self.depths = depths
70
+ self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers
71
+ stem = nn.Sequential(
72
+ nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
73
+ LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
74
+ )
75
+ self.downsample_layers.append(stem)
76
+ for i in range(3):
77
+ downsample_layer = nn.Sequential(
78
+ LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
79
+ nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
80
+ )
81
+ self.downsample_layers.append(downsample_layer)
82
+
83
+ self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks
84
+ dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
85
+ cur = 0
86
+ for i in range(4):
87
+ stage = nn.Sequential(
88
+ *[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])]
89
+ )
90
+ self.stages.append(stage)
91
+ cur += depths[i]
92
+
93
+ self.norm = nn.LayerNorm(dims[-1], eps=1e-6) # final norm layer
94
+
95
+ # NOTE: the output semantic items
96
+ num_bins = kwargs.get('num_bins', 66)
97
+ num_kp = kwargs.get('num_kp', 24) # the number of implicit keypoints
98
+ self.fc_kp = nn.Linear(dims[-1], 3 * num_kp) # implicit keypoints
99
+
100
+ # print('dims[-1]: ', dims[-1])
101
+ self.fc_scale = nn.Linear(dims[-1], 1) # scale
102
+ self.fc_pitch = nn.Linear(dims[-1], num_bins) # pitch bins
103
+ self.fc_yaw = nn.Linear(dims[-1], num_bins) # yaw bins
104
+ self.fc_roll = nn.Linear(dims[-1], num_bins) # roll bins
105
+ self.fc_t = nn.Linear(dims[-1], 3) # translation
106
+ self.fc_exp = nn.Linear(dims[-1], 3 * num_kp) # expression / delta
107
+
108
+ def _init_weights(self, m):
109
+ if isinstance(m, (nn.Conv2d, nn.Linear)):
110
+ trunc_normal_(m.weight, std=.02)
111
+ nn.init.constant_(m.bias, 0)
112
+
113
+ def forward_features(self, x):
114
+ for i in range(4):
115
+ x = self.downsample_layers[i](x)
116
+ x = self.stages[i](x)
117
+ return self.norm(x.mean([-2, -1])) # global average pooling, (N, C, H, W) -> (N, C)
118
+
119
+ def forward(self, x):
120
+ x = self.forward_features(x)
121
+
122
+ # implicit keypoints
123
+ kp = self.fc_kp(x)
124
+
125
+ # pose and expression deformation
126
+ pitch = self.fc_pitch(x)
127
+ yaw = self.fc_yaw(x)
128
+ roll = self.fc_roll(x)
129
+ t = self.fc_t(x)
130
+ exp = self.fc_exp(x)
131
+ scale = self.fc_scale(x)
132
+
133
+ ret_dct = {
134
+ 'pitch': pitch,
135
+ 'yaw': yaw,
136
+ 'roll': roll,
137
+ 't': t,
138
+ 'exp': exp,
139
+ 'scale': scale,
140
+
141
+ 'kp': kp, # canonical keypoint
142
+ }
143
+
144
+ return ret_dct
145
+
146
+
147
+ def convnextv2_tiny(**kwargs):
148
+ model = ConvNeXtV2(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
149
+ return model
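
And the corresponding motion-extractor head sizes with num_kp = 21 (again a sketch; num_bins keeps its default of 66):

```python
import torch
from src.modules.convnextv2 import convnextv2_tiny

M_net = convnextv2_tiny(num_kp=21)
out = M_net(torch.rand(1, 3, 256, 256))
assert out['kp'].shape == (1, 63) and out['exp'].shape == (1, 63)   # 21 keypoints * 3
assert out['pitch'].shape == (1, 66) and out['t'].shape == (1, 3)   # pose bins, translation
```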
src/modules/dense_motion.py ADDED
@@ -0,0 +1,104 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ The module that predicting a dense motion from sparse motion representation given by kp_source and kp_driving
5
+ """
6
+
7
+ from torch import nn
8
+ import torch.nn.functional as F
9
+ import torch
10
+ from .util import Hourglass, make_coordinate_grid, kp2gaussian
11
+
12
+
13
+ class DenseMotionNetwork(nn.Module):
14
+ def __init__(self, block_expansion, num_blocks, max_features, num_kp, feature_channel, reshape_depth, compress, estimate_occlusion_map=True):
15
+ super(DenseMotionNetwork, self).__init__()
16
+ self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(compress+1), max_features=max_features, num_blocks=num_blocks) # ~60+G
17
+
18
+ self.mask = nn.Conv3d(self.hourglass.out_filters, num_kp + 1, kernel_size=7, padding=3) # 65G! NOTE: computation cost is large
19
+ self.compress = nn.Conv3d(feature_channel, compress, kernel_size=1) # 0.8G
20
+ self.norm = nn.BatchNorm3d(compress, affine=True)
21
+ self.num_kp = num_kp
22
+ self.flag_estimate_occlusion_map = estimate_occlusion_map
23
+
24
+ if self.flag_estimate_occlusion_map:
25
+ self.occlusion = nn.Conv2d(self.hourglass.out_filters*reshape_depth, 1, kernel_size=7, padding=3)
26
+ else:
27
+ self.occlusion = None
28
+
29
+ def create_sparse_motions(self, feature, kp_driving, kp_source):
30
+ bs, _, d, h, w = feature.shape # (bs, 4, 16, 64, 64)
31
+ identity_grid = make_coordinate_grid((d, h, w), ref=kp_source) # (16, 64, 64, 3)
32
+ identity_grid = identity_grid.view(1, 1, d, h, w, 3) # (1, 1, d=16, h=64, w=64, 3)
33
+ coordinate_grid = identity_grid - kp_driving.view(bs, self.num_kp, 1, 1, 1, 3)
34
+
35
+ k = coordinate_grid.shape[1]
36
+
37
+ # NOTE: the first-order (Jacobian) flow term is omitted here
38
+ driving_to_source = coordinate_grid + kp_source.view(bs, self.num_kp, 1, 1, 1, 3) # (bs, num_kp, d, h, w, 3)
39
+
40
+ # adding background feature
41
+ identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1, 1)
42
+ sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1) # (bs, 1+num_kp, d, h, w, 3)
43
+ return sparse_motions
44
+
45
+ def create_deformed_feature(self, feature, sparse_motions):
46
+ bs, _, d, h, w = feature.shape
47
+ feature_repeat = feature.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp+1, 1, 1, 1, 1, 1) # (bs, num_kp+1, 1, c, d, h, w)
48
+ feature_repeat = feature_repeat.view(bs * (self.num_kp+1), -1, d, h, w) # (bs*(num_kp+1), c, d, h, w)
49
+ sparse_motions = sparse_motions.view((bs * (self.num_kp+1), d, h, w, -1)) # (bs*(num_kp+1), d, h, w, 3)
50
+ sparse_deformed = F.grid_sample(feature_repeat, sparse_motions, align_corners=False)
51
+ sparse_deformed = sparse_deformed.view((bs, self.num_kp+1, -1, d, h, w)) # (bs, num_kp+1, c, d, h, w)
52
+
53
+ return sparse_deformed
54
+
55
+ def create_heatmap_representations(self, feature, kp_driving, kp_source):
56
+ spatial_size = feature.shape[3:] # (d=16, h=64, w=64)
57
+ gaussian_driving = kp2gaussian(kp_driving, spatial_size=spatial_size, kp_variance=0.01) # (bs, num_kp, d, h, w)
58
+ gaussian_source = kp2gaussian(kp_source, spatial_size=spatial_size, kp_variance=0.01) # (bs, num_kp, d, h, w)
59
+ heatmap = gaussian_driving - gaussian_source # (bs, num_kp, d, h, w)
60
+
61
+ # adding background feature
62
+ zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1], spatial_size[2]).type(heatmap.type()).to(heatmap.device)
63
+ heatmap = torch.cat([zeros, heatmap], dim=1)
64
+ heatmap = heatmap.unsqueeze(2) # (bs, 1+num_kp, 1, d, h, w)
65
+ return heatmap
66
+
67
+ def forward(self, feature, kp_driving, kp_source):
68
+ bs, _, d, h, w = feature.shape # (bs, 32, 16, 64, 64)
69
+
70
+ feature = self.compress(feature) # (bs, 4, 16, 64, 64)
71
+ feature = self.norm(feature) # (bs, 4, 16, 64, 64)
72
+ feature = F.relu(feature) # (bs, 4, 16, 64, 64)
73
+
74
+ out_dict = dict()
75
+
76
+ # 1. deform 3d feature
77
+ sparse_motion = self.create_sparse_motions(feature, kp_driving, kp_source) # (bs, 1+num_kp, d, h, w, 3)
78
+ deformed_feature = self.create_deformed_feature(feature, sparse_motion) # (bs, 1+num_kp, c=4, d=16, h=64, w=64)
79
+
80
+ # 2. (bs, 1+num_kp, d, h, w)
81
+ heatmap = self.create_heatmap_representations(deformed_feature, kp_driving, kp_source) # (bs, 1+num_kp, 1, d, h, w)
82
+
83
+ input = torch.cat([heatmap, deformed_feature], dim=2) # (bs, 1+num_kp, c=5, d=16, h=64, w=64)
84
+ input = input.view(bs, -1, d, h, w) # (bs, (1+num_kp)*c=105, d=16, h=64, w=64)
85
+
86
+ prediction = self.hourglass(input)
87
+
88
+ mask = self.mask(prediction)
89
+ mask = F.softmax(mask, dim=1) # (bs, 1+num_kp, d=16, h=64, w=64)
90
+ out_dict['mask'] = mask
91
+ mask = mask.unsqueeze(2) # (bs, num_kp+1, 1, d, h, w)
92
+ sparse_motion = sparse_motion.permute(0, 1, 5, 2, 3, 4) # (bs, num_kp+1, 3, d, h, w)
93
+ deformation = (sparse_motion * mask).sum(dim=1) # (bs, 3, d, h, w) mask take effect in this place
94
+ deformation = deformation.permute(0, 2, 3, 4, 1) # (bs, d, h, w, 3)
95
+
96
+ out_dict['deformation'] = deformation
97
+
98
+ if self.flag_estimate_occlusion_map:
99
+ bs, _, d, h, w = prediction.shape
100
+ prediction_reshape = prediction.view(bs, -1, h, w)
101
+ occlusion_map = torch.sigmoid(self.occlusion(prediction_reshape)) # Bx1x64x64
102
+ out_dict['occlusion_map'] = occlusion_map
103
+
104
+ return out_dict
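A minimal sketch of exercising this network on dummy tensors; the constructor values below are illustrative only (not the project's actual configuration) and the import path is assumed:

import torch
from src.modules.dense_motion import DenseMotionNetwork  # assumed import path

net = DenseMotionNetwork(
    block_expansion=16, num_blocks=2, max_features=64,
    num_kp=5, feature_channel=8, reshape_depth=4, compress=4,
    estimate_occlusion_map=True,
)
feature_3d = torch.randn(1, 8, 4, 16, 16)       # (bs, C, D, H, W) source feature volume
kp_source = torch.rand(1, 5, 3) * 2 - 1         # keypoints in the normalized [-1, 1] cube
kp_driving = torch.rand(1, 5, 3) * 2 - 1
out = net(feature_3d, kp_driving=kp_driving, kp_source=kp_source)
print(out['deformation'].shape)                 # (1, 4, 16, 16, 3) dense flow field
print(out['occlusion_map'].shape)               # (1, 1, 16, 16)

The softmax mask blends the identity grid with the num_kp per-keypoint flows, which is how the sparse keypoint motion becomes a dense deformation.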
src/modules/motion_extractor.py ADDED
@@ -0,0 +1,35 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ Motion extractor (M), which directly predicts the canonical keypoints, head pose, and expression deformation of the input image
5
+ """
6
+
7
+ from torch import nn
8
+ import torch
9
+
10
+ from .convnextv2 import convnextv2_tiny
11
+ from .util import filter_state_dict
12
+
13
+ model_dict = {
14
+ 'convnextv2_tiny': convnextv2_tiny,
15
+ }
16
+
17
+
18
+ class MotionExtractor(nn.Module):
19
+ def __init__(self, **kwargs):
20
+ super(MotionExtractor, self).__init__()
21
+
22
+ # default backbone is convnextv2_tiny
23
+ backbone = kwargs.get('backbone', 'convnextv2_tiny')
24
+ self.detector = model_dict.get(backbone)(**kwargs)
25
+
26
+ def load_pretrained(self, init_path: str):
27
+ if init_path not in (None, ''):
28
+ state_dict = torch.load(init_path, map_location=lambda storage, loc: storage)['model']
29
+ state_dict = filter_state_dict(state_dict, remove_name='head')
30
+ ret = self.detector.load_state_dict(state_dict, strict=False)
31
+ print(f'Load pretrained model from {init_path}, ret: {ret}')
32
+
33
+ def forward(self, x):
34
+ out = self.detector(x)
35
+ return out
src/modules/spade_generator.py ADDED
@@ -0,0 +1,59 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ SPADE decoder (G) defined in the paper, which takes the warped feature as input and generates the animated image.
5
+ """
6
+
7
+ import torch
8
+ from torch import nn
9
+ import torch.nn.functional as F
10
+ from .util import SPADEResnetBlock
11
+
12
+
13
+ class SPADEDecoder(nn.Module):
14
+ def __init__(self, upscale=1, max_features=256, block_expansion=64, out_channels=64, num_down_blocks=2):
15
+ super().__init__()
16
+ self.upscale = upscale
17
+ for i in range(num_down_blocks):
18
+ input_channels = min(max_features, block_expansion * (2 ** (i + 1)))
19
+ norm_G = 'spadespectralinstance'
20
+ label_num_channels = input_channels # 256
21
+
22
+ self.fc = nn.Conv2d(input_channels, 2 * input_channels, 3, padding=1)
23
+ self.G_middle_0 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
24
+ self.G_middle_1 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
25
+ self.G_middle_2 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
26
+ self.G_middle_3 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
27
+ self.G_middle_4 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
28
+ self.G_middle_5 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
29
+ self.up_0 = SPADEResnetBlock(2 * input_channels, input_channels, norm_G, label_num_channels)
30
+ self.up_1 = SPADEResnetBlock(input_channels, out_channels, norm_G, label_num_channels)
31
+ self.up = nn.Upsample(scale_factor=2)
32
+
33
+ if self.upscale is None or self.upscale <= 1:
34
+ self.conv_img = nn.Conv2d(out_channels, 3, 3, padding=1)
35
+ else:
36
+ self.conv_img = nn.Sequential(
37
+ nn.Conv2d(out_channels, 3 * (2 * 2), kernel_size=3, padding=1),
38
+ nn.PixelShuffle(upscale_factor=2)
39
+ )
40
+
41
+ def forward(self, feature):
42
+ seg = feature # Bx256x64x64
43
+ x = self.fc(feature) # Bx512x64x64
44
+ x = self.G_middle_0(x, seg)
45
+ x = self.G_middle_1(x, seg)
46
+ x = self.G_middle_2(x, seg)
47
+ x = self.G_middle_3(x, seg)
48
+ x = self.G_middle_4(x, seg)
49
+ x = self.G_middle_5(x, seg)
50
+
51
+ x = self.up(x) # Bx512x64x64 -> Bx512x128x128
52
+ x = self.up_0(x, seg) # Bx512x128x128 -> Bx256x128x128
53
+ x = self.up(x) # Bx256x128x128 -> Bx256x256x256
54
+ x = self.up_1(x, seg) # Bx256x256x256 -> Bx64x256x256
55
+
56
+ x = self.conv_img(F.leaky_relu(x, 2e-1)) # Bx64x256x256 -> Bx3xHxW
57
+ x = torch.sigmoid(x) # Bx3xHxW
58
+
59
+ return x
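A rough sketch of driving the decoder with a dummy warped feature; the default arguments imply a 256-channel 64x64 input, and upscale=2 is assumed here just to exercise the pixel-shuffle branch (import path assumed):

import torch
from src.modules.spade_generator import SPADEDecoder  # assumed import path

decoder = SPADEDecoder(upscale=2).eval()   # defaults: max_features=256, block_expansion=64, num_down_blocks=2
feature = torch.randn(1, 256, 64, 64)      # dummy warped feature, Bx256x64x64
with torch.no_grad():
    img = decoder(feature)
print(img.shape)                           # torch.Size([1, 3, 512, 512]); values in [0, 1] after the sigmoid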
src/modules/stitching_retargeting_network.py ADDED
@@ -0,0 +1,38 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ Stitching module(S) and two retargeting modules(R) defined in the paper.
5
+
6
+ - The stitching module pastes the animated portrait back into the original image space without pixel misalignment, such as in
7
+ the stitching region.
8
+
9
+ - The eyes retargeting module is designed to address the issue of incomplete eye closure during cross-id reenactment, especially
10
+ when a person with small eyes drives a person with larger eyes.
11
+
12
+ - The lip retargeting module is designed similarly to the eye retargeting module, and can also normalize the input by ensuring that
13
+ the lips are in a closed state, which facilitates better animation driving.
14
+ """
15
+ from torch import nn
16
+
17
+
18
+ class StitchingRetargetingNetwork(nn.Module):
19
+ def __init__(self, input_size, hidden_sizes, output_size):
20
+ super(StitchingRetargetingNetwork, self).__init__()
21
+ layers = []
22
+ for i in range(len(hidden_sizes)):
23
+ if i == 0:
24
+ layers.append(nn.Linear(input_size, hidden_sizes[i]))
25
+ else:
26
+ layers.append(nn.Linear(hidden_sizes[i - 1], hidden_sizes[i]))
27
+ layers.append(nn.ReLU(inplace=True))
28
+ layers.append(nn.Linear(hidden_sizes[-1], output_size))
29
+ self.mlp = nn.Sequential(*layers)
30
+
31
+ def initialize_weights_to_zero(self):
32
+ for m in self.modules():
33
+ if isinstance(m, nn.Linear):
34
+ nn.init.zeros_(m.weight)
35
+ nn.init.zeros_(m.bias)
36
+
37
+ def forward(self, x):
38
+ return self.mlp(x)
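For illustration, the module is a plain MLP; the sizes below are made up (they are not necessarily the dimensions used by the released checkpoints) and the import path is assumed:

import torch
from src.modules.stitching_retargeting_network import StitchingRetargetingNetwork  # assumed import path

net = StitchingRetargetingNetwork(input_size=126, hidden_sizes=[128, 128, 64], output_size=65)
net.initialize_weights_to_zero()     # zero init => zero delta before training / loading weights
x = torch.randn(2, 126)              # e.g. flattened source + driving keypoint features (illustrative)
print(net(x).shape)                  # torch.Size([2, 65])
print(net(x).abs().max().item())     # 0.0 with zero-initialized weights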
src/modules/util.py ADDED
@@ -0,0 +1,441 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ This file defines various neural network modules and utility functions, including convolutional and residual blocks,
5
+ normalizations, and functions for spatial transformation and tensor manipulation.
6
+ """
7
+
8
+ from torch import nn
9
+ import torch.nn.functional as F
10
+ import torch
11
+ import torch.nn.utils.spectral_norm as spectral_norm
12
+ import math
13
+ import warnings
14
+
15
+
16
+ def kp2gaussian(kp, spatial_size, kp_variance):
17
+ """
18
+ Transform a keypoint into a gaussian-like representation
19
+ """
20
+ mean = kp
21
+
22
+ coordinate_grid = make_coordinate_grid(spatial_size, mean)
23
+ number_of_leading_dimensions = len(mean.shape) - 1
24
+ shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape
25
+ coordinate_grid = coordinate_grid.view(*shape)
26
+ repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1, 1)
27
+ coordinate_grid = coordinate_grid.repeat(*repeats)
28
+
29
+ # Preprocess kp shape
30
+ shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 1, 3)
31
+ mean = mean.view(*shape)
32
+
33
+ mean_sub = (coordinate_grid - mean)
34
+
35
+ out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance)
36
+
37
+ return out
38
+
39
+
40
+ def make_coordinate_grid(spatial_size, ref, **kwargs):
41
+ d, h, w = spatial_size
42
+ x = torch.arange(w).type(ref.dtype).to(ref.device)
43
+ y = torch.arange(h).type(ref.dtype).to(ref.device)
44
+ z = torch.arange(d).type(ref.dtype).to(ref.device)
45
+
46
+ # NOTE: must be right-down-in
47
+ x = (2 * (x / (w - 1)) - 1) # the x axis faces to the right
48
+ y = (2 * (y / (h - 1)) - 1) # the y axis faces to the bottom
49
+ z = (2 * (z / (d - 1)) - 1) # the z axis faces to the inner
50
+
51
+ yy = y.view(1, -1, 1).repeat(d, 1, w)
52
+ xx = x.view(1, 1, -1).repeat(d, h, 1)
53
+ zz = z.view(-1, 1, 1).repeat(1, h, w)
54
+
55
+ meshed = torch.cat([xx.unsqueeze_(3), yy.unsqueeze_(3), zz.unsqueeze_(3)], 3)
56
+
57
+ return meshed
58
+
59
+
60
+ class ConvT2d(nn.Module):
61
+ """
62
+ Upsampling block for use in decoder.
63
+ """
64
+
65
+ def __init__(self, in_features, out_features, kernel_size=3, stride=2, padding=1, output_padding=1):
66
+ super(ConvT2d, self).__init__()
67
+
68
+ self.convT = nn.ConvTranspose2d(in_features, out_features, kernel_size=kernel_size, stride=stride,
69
+ padding=padding, output_padding=output_padding)
70
+ self.norm = nn.InstanceNorm2d(out_features)
71
+
72
+ def forward(self, x):
73
+ out = self.convT(x)
74
+ out = self.norm(out)
75
+ out = F.leaky_relu(out)
76
+ return out
77
+
78
+
79
+ class ResBlock3d(nn.Module):
80
+ """
81
+ Res block, preserve spatial resolution.
82
+ """
83
+
84
+ def __init__(self, in_features, kernel_size, padding):
85
+ super(ResBlock3d, self).__init__()
86
+ self.conv1 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size, padding=padding)
87
+ self.conv2 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size, padding=padding)
88
+ self.norm1 = nn.BatchNorm3d(in_features, affine=True)
89
+ self.norm2 = nn.BatchNorm3d(in_features, affine=True)
90
+
91
+ def forward(self, x):
92
+ out = self.norm1(x)
93
+ out = F.relu(out)
94
+ out = self.conv1(out)
95
+ out = self.norm2(out)
96
+ out = F.relu(out)
97
+ out = self.conv2(out)
98
+ out += x
99
+ return out
100
+
101
+
102
+ class UpBlock3d(nn.Module):
103
+ """
104
+ Upsampling block for use in decoder.
105
+ """
106
+
107
+ def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
108
+ super(UpBlock3d, self).__init__()
109
+
110
+ self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
111
+ padding=padding, groups=groups)
112
+ self.norm = nn.BatchNorm3d(out_features, affine=True)
113
+
114
+ def forward(self, x):
115
+ out = F.interpolate(x, scale_factor=(1, 2, 2))
116
+ out = self.conv(out)
117
+ out = self.norm(out)
118
+ out = F.relu(out)
119
+ return out
120
+
121
+
122
+ class DownBlock2d(nn.Module):
123
+ """
124
+ Downsampling block for use in encoder.
125
+ """
126
+
127
+ def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
128
+ super(DownBlock2d, self).__init__()
129
+ self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, padding=padding, groups=groups)
130
+ self.norm = nn.BatchNorm2d(out_features, affine=True)
131
+ self.pool = nn.AvgPool2d(kernel_size=(2, 2))
132
+
133
+ def forward(self, x):
134
+ out = self.conv(x)
135
+ out = self.norm(out)
136
+ out = F.relu(out)
137
+ out = self.pool(out)
138
+ return out
139
+
140
+
141
+ class DownBlock3d(nn.Module):
142
+ """
143
+ Downsampling block for use in encoder.
144
+ """
145
+
146
+ def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
147
+ super(DownBlock3d, self).__init__()
148
+ '''
149
+ self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
150
+ padding=padding, groups=groups, stride=(1, 2, 2))
151
+ '''
152
+ self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
153
+ padding=padding, groups=groups)
154
+ self.norm = nn.BatchNorm3d(out_features, affine=True)
155
+ self.pool = nn.AvgPool3d(kernel_size=(1, 2, 2))
156
+
157
+ def forward(self, x):
158
+ out = self.conv(x)
159
+ out = self.norm(out)
160
+ out = F.relu(out)
161
+ out = self.pool(out)
162
+ return out
163
+
164
+
165
+ class SameBlock2d(nn.Module):
166
+ """
167
+ Simple block, preserve spatial resolution.
168
+ """
169
+
170
+ def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1, lrelu=False):
171
+ super(SameBlock2d, self).__init__()
172
+ self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, padding=padding, groups=groups)
173
+ self.norm = nn.BatchNorm2d(out_features, affine=True)
174
+ if lrelu:
175
+ self.ac = nn.LeakyReLU()
176
+ else:
177
+ self.ac = nn.ReLU()
178
+
179
+ def forward(self, x):
180
+ out = self.conv(x)
181
+ out = self.norm(out)
182
+ out = self.ac(out)
183
+ return out
184
+
185
+
186
+ class Encoder(nn.Module):
187
+ """
188
+ Hourglass Encoder
189
+ """
190
+
191
+ def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
192
+ super(Encoder, self).__init__()
193
+
194
+ down_blocks = []
195
+ for i in range(num_blocks):
196
+ down_blocks.append(DownBlock3d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)), min(max_features, block_expansion * (2 ** (i + 1))), kernel_size=3, padding=1))
197
+ self.down_blocks = nn.ModuleList(down_blocks)
198
+
199
+ def forward(self, x):
200
+ outs = [x]
201
+ for down_block in self.down_blocks:
202
+ outs.append(down_block(outs[-1]))
203
+ return outs
204
+
205
+
206
+ class Decoder(nn.Module):
207
+ """
208
+ Hourglass Decoder
209
+ """
210
+
211
+ def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
212
+ super(Decoder, self).__init__()
213
+
214
+ up_blocks = []
215
+
216
+ for i in range(num_blocks)[::-1]:
217
+ in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1)))
218
+ out_filters = min(max_features, block_expansion * (2 ** i))
219
+ up_blocks.append(UpBlock3d(in_filters, out_filters, kernel_size=3, padding=1))
220
+
221
+ self.up_blocks = nn.ModuleList(up_blocks)
222
+ self.out_filters = block_expansion + in_features
223
+
224
+ self.conv = nn.Conv3d(in_channels=self.out_filters, out_channels=self.out_filters, kernel_size=3, padding=1)
225
+ self.norm = nn.BatchNorm3d(self.out_filters, affine=True)
226
+
227
+ def forward(self, x):
228
+ out = x.pop()
229
+ for up_block in self.up_blocks:
230
+ out = up_block(out)
231
+ skip = x.pop()
232
+ out = torch.cat([out, skip], dim=1)
233
+ out = self.conv(out)
234
+ out = self.norm(out)
235
+ out = F.relu(out)
236
+ return out
237
+
238
+
239
+ class Hourglass(nn.Module):
240
+ """
241
+ Hourglass architecture.
242
+ """
243
+
244
+ def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
245
+ super(Hourglass, self).__init__()
246
+ self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)
247
+ self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)
248
+ self.out_filters = self.decoder.out_filters
249
+
250
+ def forward(self, x):
251
+ return self.decoder(self.encoder(x))
252
+
253
+
254
+ class SPADE(nn.Module):
255
+ def __init__(self, norm_nc, label_nc):
256
+ super().__init__()
257
+
258
+ self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
259
+ nhidden = 128
260
+
261
+ self.mlp_shared = nn.Sequential(
262
+ nn.Conv2d(label_nc, nhidden, kernel_size=3, padding=1),
263
+ nn.ReLU())
264
+ self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1)
265
+ self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1)
266
+
267
+ def forward(self, x, segmap):
268
+ normalized = self.param_free_norm(x)
269
+ segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
270
+ actv = self.mlp_shared(segmap)
271
+ gamma = self.mlp_gamma(actv)
272
+ beta = self.mlp_beta(actv)
273
+ out = normalized * (1 + gamma) + beta
274
+ return out
275
+
276
+
277
+ class SPADEResnetBlock(nn.Module):
278
+ def __init__(self, fin, fout, norm_G, label_nc, use_se=False, dilation=1):
279
+ super().__init__()
280
+ # Attributes
281
+ self.learned_shortcut = (fin != fout)
282
+ fmiddle = min(fin, fout)
283
+ self.use_se = use_se
284
+ # create conv layers
285
+ self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=dilation, dilation=dilation)
286
+ self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=dilation, dilation=dilation)
287
+ if self.learned_shortcut:
288
+ self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
289
+ # apply spectral norm if specified
290
+ if 'spectral' in norm_G:
291
+ self.conv_0 = spectral_norm(self.conv_0)
292
+ self.conv_1 = spectral_norm(self.conv_1)
293
+ if self.learned_shortcut:
294
+ self.conv_s = spectral_norm(self.conv_s)
295
+ # define normalization layers
296
+ self.norm_0 = SPADE(fin, label_nc)
297
+ self.norm_1 = SPADE(fmiddle, label_nc)
298
+ if self.learned_shortcut:
299
+ self.norm_s = SPADE(fin, label_nc)
300
+
301
+ def forward(self, x, seg1):
302
+ x_s = self.shortcut(x, seg1)
303
+ dx = self.conv_0(self.actvn(self.norm_0(x, seg1)))
304
+ dx = self.conv_1(self.actvn(self.norm_1(dx, seg1)))
305
+ out = x_s + dx
306
+ return out
307
+
308
+ def shortcut(self, x, seg1):
309
+ if self.learned_shortcut:
310
+ x_s = self.conv_s(self.norm_s(x, seg1))
311
+ else:
312
+ x_s = x
313
+ return x_s
314
+
315
+ def actvn(self, x):
316
+ return F.leaky_relu(x, 2e-1)
317
+
318
+
319
+ def filter_state_dict(state_dict, remove_name='fc'):
320
+ new_state_dict = {}
321
+ for key in state_dict:
322
+ if remove_name in key:
323
+ continue
324
+ new_state_dict[key] = state_dict[key]
325
+ return new_state_dict
326
+
327
+
328
+ class GRN(nn.Module):
329
+ """ GRN (Global Response Normalization) layer
330
+ """
331
+
332
+ def __init__(self, dim):
333
+ super().__init__()
334
+ self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim))
335
+ self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim))
336
+
337
+ def forward(self, x):
338
+ Gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True)
339
+ Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-6)
340
+ return self.gamma * (x * Nx) + self.beta + x
341
+
342
+
343
+ class LayerNorm(nn.Module):
344
+ r""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
345
+ The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
346
+ shape (batch_size, height, width, channels) while channels_first corresponds to inputs
347
+ with shape (batch_size, channels, height, width).
348
+ """
349
+
350
+ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
351
+ super().__init__()
352
+ self.weight = nn.Parameter(torch.ones(normalized_shape))
353
+ self.bias = nn.Parameter(torch.zeros(normalized_shape))
354
+ self.eps = eps
355
+ self.data_format = data_format
356
+ if self.data_format not in ["channels_last", "channels_first"]:
357
+ raise NotImplementedError
358
+ self.normalized_shape = (normalized_shape, )
359
+
360
+ def forward(self, x):
361
+ if self.data_format == "channels_last":
362
+ return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
363
+ elif self.data_format == "channels_first":
364
+ u = x.mean(1, keepdim=True)
365
+ s = (x - u).pow(2).mean(1, keepdim=True)
366
+ x = (x - u) / torch.sqrt(s + self.eps)
367
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
368
+ return x
369
+
370
+
371
+ def _no_grad_trunc_normal_(tensor, mean, std, a, b):
372
+ # Cut & paste from PyTorch official master until it's in a few official releases - RW
373
+ # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
374
+ def norm_cdf(x):
375
+ # Computes standard normal cumulative distribution function
376
+ return (1. + math.erf(x / math.sqrt(2.))) / 2.
377
+
378
+ if (mean < a - 2 * std) or (mean > b + 2 * std):
379
+ warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
380
+ "The distribution of values may be incorrect.",
381
+ stacklevel=2)
382
+
383
+ with torch.no_grad():
384
+ # Values are generated by using a truncated uniform distribution and
385
+ # then using the inverse CDF for the normal distribution.
386
+ # Get upper and lower cdf values
387
+ l = norm_cdf((a - mean) / std)
388
+ u = norm_cdf((b - mean) / std)
389
+
390
+ # Uniformly fill tensor with values from [l, u], then translate to
391
+ # [2l-1, 2u-1].
392
+ tensor.uniform_(2 * l - 1, 2 * u - 1)
393
+
394
+ # Use inverse cdf transform for normal distribution to get truncated
395
+ # standard normal
396
+ tensor.erfinv_()
397
+
398
+ # Transform to proper mean, std
399
+ tensor.mul_(std * math.sqrt(2.))
400
+ tensor.add_(mean)
401
+
402
+ # Clamp to ensure it's in the proper range
403
+ tensor.clamp_(min=a, max=b)
404
+ return tensor
405
+
406
+
407
+ def drop_path(x, drop_prob=0., training=False, scale_by_keep=True):
408
+ """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
409
+
410
+ This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
411
+ the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
412
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
413
+ changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
414
+ 'survival rate' as the argument.
415
+
416
+ """
417
+ if drop_prob == 0. or not training:
418
+ return x
419
+ keep_prob = 1 - drop_prob
420
+ shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
421
+ random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
422
+ if keep_prob > 0.0 and scale_by_keep:
423
+ random_tensor.div_(keep_prob)
424
+ return x * random_tensor
425
+
426
+
427
+ class DropPath(nn.Module):
428
+ """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
429
+ """
430
+
431
+ def __init__(self, drop_prob=None, scale_by_keep=True):
432
+ super(DropPath, self).__init__()
433
+ self.drop_prob = drop_prob
434
+ self.scale_by_keep = scale_by_keep
435
+
436
+ def forward(self, x):
437
+ return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
438
+
439
+
440
+ def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
441
+ return _no_grad_trunc_normal_(tensor, mean, std, a, b)
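A small sketch of the coordinate convention used above: the grid is normalized to [-1, 1] along each axis, and kp2gaussian places a Gaussian bump around each keypoint (import path assumed):

import torch
from src.modules.util import make_coordinate_grid, kp2gaussian  # assumed import path

ref = torch.zeros(1)                          # only dtype/device are taken from ref
grid = make_coordinate_grid((4, 8, 8), ref)   # (d, h, w) -> (4, 8, 8, 3), values in [-1, 1]
print(grid.shape, grid.min().item(), grid.max().item())

kp = torch.zeros(1, 1, 3)                     # a single keypoint at the volume center
heat = kp2gaussian(kp, spatial_size=(4, 8, 8), kp_variance=0.01)
print(heat.shape)                             # (1, 1, 4, 8, 8), peak near the center voxel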
src/modules/warping_network.py ADDED
@@ -0,0 +1,77 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ Warping field estimator(W) defined in the paper, which generates a warping field using the implicit
5
+ keypoint representations x_s and x_d, and employs this flow field to warp the source feature volume f_s.
6
+ """
7
+
8
+ from torch import nn
9
+ import torch.nn.functional as F
10
+ from .util import SameBlock2d
11
+ from .dense_motion import DenseMotionNetwork
12
+
13
+
14
+ class WarpingNetwork(nn.Module):
15
+ def __init__(
16
+ self,
17
+ num_kp,
18
+ block_expansion,
19
+ max_features,
20
+ num_down_blocks,
21
+ reshape_channel,
22
+ estimate_occlusion_map=False,
23
+ dense_motion_params=None,
24
+ **kwargs
25
+ ):
26
+ super(WarpingNetwork, self).__init__()
27
+
28
+ self.upscale = kwargs.get('upscale', 1)
29
+ self.flag_use_occlusion_map = kwargs.get('flag_use_occlusion_map', True)
30
+
31
+ if dense_motion_params is not None:
32
+ self.dense_motion_network = DenseMotionNetwork(
33
+ num_kp=num_kp,
34
+ feature_channel=reshape_channel,
35
+ estimate_occlusion_map=estimate_occlusion_map,
36
+ **dense_motion_params
37
+ )
38
+ else:
39
+ self.dense_motion_network = None
40
+
41
+ self.third = SameBlock2d(max_features, block_expansion * (2 ** num_down_blocks), kernel_size=(3, 3), padding=(1, 1), lrelu=True)
42
+ self.fourth = nn.Conv2d(in_channels=block_expansion * (2 ** num_down_blocks), out_channels=block_expansion * (2 ** num_down_blocks), kernel_size=1, stride=1)
43
+
44
+ self.estimate_occlusion_map = estimate_occlusion_map
45
+
46
+ def deform_input(self, inp, deformation):
47
+ return F.grid_sample(inp, deformation, align_corners=False)
48
+
49
+ def forward(self, feature_3d, kp_driving, kp_source):
50
+ if self.dense_motion_network is not None:
51
+ # Feature warper, Transforming feature representation according to deformation and occlusion
52
+ dense_motion = self.dense_motion_network(
53
+ feature=feature_3d, kp_driving=kp_driving, kp_source=kp_source
54
+ )
55
+ if 'occlusion_map' in dense_motion:
56
+ occlusion_map = dense_motion['occlusion_map'] # Bx1x64x64
57
+ else:
58
+ occlusion_map = None
59
+
60
+ deformation = dense_motion['deformation'] # Bx16x64x64x3
61
+ out = self.deform_input(feature_3d, deformation) # Bx32x16x64x64
62
+
63
+ bs, c, d, h, w = out.shape # Bx32x16x64x64
64
+ out = out.view(bs, c * d, h, w) # -> Bx512x64x64
65
+ out = self.third(out) # -> Bx256x64x64
66
+ out = self.fourth(out) # -> Bx256x64x64
67
+
68
+ if self.flag_use_occlusion_map and (occlusion_map is not None):
69
+ out = out * occlusion_map
70
+
71
+ ret_dct = {
72
+ 'occlusion_map': occlusion_map,
73
+ 'deformation': deformation,
74
+ 'out': out,
75
+ }
76
+
77
+ return ret_dct
src/utils/__init__.py ADDED
File without changes
src/utils/camera.py ADDED
@@ -0,0 +1,73 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ functions for processing and transforming 3D facial keypoints
5
+ """
6
+
7
+ import numpy as np
8
+ import torch
9
+ import torch.nn.functional as F
10
+
11
+ PI = np.pi
12
+
13
+
14
+ def headpose_pred_to_degree(pred):
15
+ """
16
+ pred: (bs, 66) or (bs, 1) or others
17
+ """
18
+ if pred.ndim > 1 and pred.shape[1] == 66:
19
+ # NOTE: the mean offset is changed to 97.5 here
20
+ device = pred.device
21
+ idx_tensor = [idx for idx in range(0, 66)]
22
+ idx_tensor = torch.FloatTensor(idx_tensor).to(device)
23
+ pred = F.softmax(pred, dim=1)
24
+ degree = torch.sum(pred*idx_tensor, axis=1) * 3 - 97.5
25
+
26
+ return degree
27
+
28
+ return pred
29
+
30
+
31
+ def get_rotation_matrix(pitch_, yaw_, roll_):
32
+ """ the input is in degree
33
+ """
34
+ # transform to radian
35
+ pitch = pitch_ / 180 * PI
36
+ yaw = yaw_ / 180 * PI
37
+ roll = roll_ / 180 * PI
38
+
39
+ device = pitch.device
40
+
41
+ if pitch.ndim == 1:
42
+ pitch = pitch.unsqueeze(1)
43
+ if yaw.ndim == 1:
44
+ yaw = yaw.unsqueeze(1)
45
+ if roll.ndim == 1:
46
+ roll = roll.unsqueeze(1)
47
+
48
+ # calculate the euler matrix
49
+ bs = pitch.shape[0]
50
+ ones = torch.ones([bs, 1]).to(device)
51
+ zeros = torch.zeros([bs, 1]).to(device)
52
+ x, y, z = pitch, yaw, roll
53
+
54
+ rot_x = torch.cat([
55
+ ones, zeros, zeros,
56
+ zeros, torch.cos(x), -torch.sin(x),
57
+ zeros, torch.sin(x), torch.cos(x)
58
+ ], dim=1).reshape([bs, 3, 3])
59
+
60
+ rot_y = torch.cat([
61
+ torch.cos(y), zeros, torch.sin(y),
62
+ zeros, ones, zeros,
63
+ -torch.sin(y), zeros, torch.cos(y)
64
+ ], dim=1).reshape([bs, 3, 3])
65
+
66
+ rot_z = torch.cat([
67
+ torch.cos(z), -torch.sin(z), zeros,
68
+ torch.sin(z), torch.cos(z), zeros,
69
+ zeros, zeros, ones
70
+ ], dim=1).reshape([bs, 3, 3])
71
+
72
+ rot = rot_z @ rot_y @ rot_x
73
+ return rot.permute(0, 2, 1) # transpose
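A quick numerical check of the two helpers above (import path assumed): each of the 66 bins maps to roughly 3*i - 97.5 degrees, and zero angles give an identity rotation:

import torch
from src.utils.camera import headpose_pred_to_degree, get_rotation_matrix  # assumed import path

logits = torch.full((1, 66), -10.0)
logits[0, 33] = 10.0                                   # near one-hot on bin 33
print(headpose_pred_to_degree(logits))                 # ~= 33 * 3 - 97.5 = 1.5 degrees

R = get_rotation_matrix(torch.zeros(1), torch.zeros(1), torch.zeros(1))
print(torch.allclose(R, torch.eye(3).unsqueeze(0)))    # True: zero pitch/yaw/roll -> identity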
src/utils/crop.py ADDED
@@ -0,0 +1,398 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ cropping functions and the related preprocessing utilities
5
+ """
6
+
7
+ import numpy as np
8
+ import os.path as osp
9
+ from math import sin, cos, acos, degrees
10
+ import cv2; cv2.setNumThreads(0); cv2.ocl.setUseOpenCL(False) # NOTE: enforce single thread
11
+ from .rprint import rprint as print
12
+
13
+ DTYPE = np.float32
14
+ CV2_INTERP = cv2.INTER_LINEAR
15
+
16
+ def make_abs_path(fn):
17
+ return osp.join(osp.dirname(osp.realpath(__file__)), fn)
18
+
19
+ def _transform_img(img, M, dsize, flags=CV2_INTERP, borderMode=None):
20
+ """ conduct similarity or affine transformation to the image, do not do border operation!
21
+ img:
22
+ M: 2x3 matrix or 3x3 matrix
23
+ dsize: target shape (width, height)
24
+ """
25
+ if isinstance(dsize, tuple) or isinstance(dsize, list):
26
+ _dsize = tuple(dsize)
27
+ else:
28
+ _dsize = (dsize, dsize)
29
+
30
+ if borderMode is not None:
31
+ return cv2.warpAffine(img, M[:2, :], dsize=_dsize, flags=flags, borderMode=borderMode, borderValue=(0, 0, 0))
32
+ else:
33
+ return cv2.warpAffine(img, M[:2, :], dsize=_dsize, flags=flags)
34
+
35
+
36
+ def _transform_pts(pts, M):
37
+ """ conduct similarity or affine transformation to the pts
38
+ pts: Nx2 ndarray
39
+ M: 2x3 matrix or 3x3 matrix
40
+ return: Nx2
41
+ """
42
+ return pts @ M[:2, :2].T + M[:2, 2]
43
+
44
+
45
+ def parse_pt2_from_pt101(pt101, use_lip=True):
46
+ """
47
+ parse 2 reference points from the 101 landmarks; they are used to cancel the roll angle
48
+ """
49
+ # the former version use the eye center, but it is not robust, now use interpolation
50
+ pt_left_eye = np.mean(pt101[[39, 42, 45, 48]], axis=0) # left eye center
51
+ pt_right_eye = np.mean(pt101[[51, 54, 57, 60]], axis=0) # right eye center
52
+
53
+ if use_lip:
54
+ # use lip
55
+ pt_center_eye = (pt_left_eye + pt_right_eye) / 2
56
+ pt_center_lip = (pt101[75] + pt101[81]) / 2
57
+ pt2 = np.stack([pt_center_eye, pt_center_lip], axis=0)
58
+ else:
59
+ pt2 = np.stack([pt_left_eye, pt_right_eye], axis=0)
60
+ return pt2
61
+
62
+
63
+ def parse_pt2_from_pt106(pt106, use_lip=True):
64
+ """
65
+ parse 2 reference points from the 106 landmarks; they are used to cancel the roll angle
66
+ """
67
+ pt_left_eye = np.mean(pt106[[33, 35, 40, 39]], axis=0) # left eye center
68
+ pt_right_eye = np.mean(pt106[[87, 89, 94, 93]], axis=0) # right eye center
69
+
70
+ if use_lip:
71
+ # use lip
72
+ pt_center_eye = (pt_left_eye + pt_right_eye) / 2
73
+ pt_center_lip = (pt106[52] + pt106[61]) / 2
74
+ pt2 = np.stack([pt_center_eye, pt_center_lip], axis=0)
75
+ else:
76
+ pt2 = np.stack([pt_left_eye, pt_right_eye], axis=0)
77
+ return pt2
78
+
79
+
80
+ def parse_pt2_from_pt203(pt203, use_lip=True):
81
+ """
82
+ parse 2 reference points from the 203 landmarks; they are used to cancel the roll angle
83
+ """
84
+ pt_left_eye = np.mean(pt203[[0, 6, 12, 18]], axis=0) # left eye center
85
+ pt_right_eye = np.mean(pt203[[24, 30, 36, 42]], axis=0) # right eye center
86
+ if use_lip:
87
+ # use lip
88
+ pt_center_eye = (pt_left_eye + pt_right_eye) / 2
89
+ pt_center_lip = (pt203[48] + pt203[66]) / 2
90
+ pt2 = np.stack([pt_center_eye, pt_center_lip], axis=0)
91
+ else:
92
+ pt2 = np.stack([pt_left_eye, pt_right_eye], axis=0)
93
+ return pt2
94
+
95
+
96
+ def parse_pt2_from_pt68(pt68, use_lip=True):
97
+ """
98
+ parse 2 reference points from the 68 landmarks; they are used to cancel the roll angle
99
+ """
100
+ lm_idx = np.array([31, 37, 40, 43, 46, 49, 55], dtype=np.int32) - 1
101
+ if use_lip:
102
+ pt5 = np.stack([
103
+ np.mean(pt68[lm_idx[[1, 2]], :], 0), # left eye
104
+ np.mean(pt68[lm_idx[[3, 4]], :], 0), # right eye
105
+ pt68[lm_idx[0], :], # nose
106
+ pt68[lm_idx[5], :], # lip
107
+ pt68[lm_idx[6], :] # lip
108
+ ], axis=0)
109
+
110
+ pt2 = np.stack([
111
+ (pt5[0] + pt5[1]) / 2,
112
+ (pt5[3] + pt5[4]) / 2
113
+ ], axis=0)
114
+ else:
115
+ pt2 = np.stack([
116
+ np.mean(pt68[lm_idx[[1, 2]], :], 0), # left eye
117
+ np.mean(pt68[lm_idx[[3, 4]], :], 0), # right eye
118
+ ], axis=0)
119
+
120
+ return pt2
121
+
122
+
123
+ def parse_pt2_from_pt5(pt5, use_lip=True):
124
+ """
125
+ parse 2 reference points from the 5 landmarks; they are used to cancel the roll angle
126
+ """
127
+ if use_lip:
128
+ pt2 = np.stack([
129
+ (pt5[0] + pt5[1]) / 2,
130
+ (pt5[3] + pt5[4]) / 2
131
+ ], axis=0)
132
+ else:
133
+ pt2 = np.stack([
134
+ pt5[0],
135
+ pt5[1]
136
+ ], axis=0)
137
+ return pt2
138
+
139
+
140
+ def parse_pt2_from_pt_x(pts, use_lip=True):
141
+ if pts.shape[0] == 101:
142
+ pt2 = parse_pt2_from_pt101(pts, use_lip=use_lip)
143
+ elif pts.shape[0] == 106:
144
+ pt2 = parse_pt2_from_pt106(pts, use_lip=use_lip)
145
+ elif pts.shape[0] == 68:
146
+ pt2 = parse_pt2_from_pt68(pts, use_lip=use_lip)
147
+ elif pts.shape[0] == 5:
148
+ pt2 = parse_pt2_from_pt5(pts, use_lip=use_lip)
149
+ elif pts.shape[0] == 203:
150
+ pt2 = parse_pt2_from_pt203(pts, use_lip=use_lip)
151
+ elif pts.shape[0] > 101:
152
+ # take the first 101 points
153
+ pt2 = parse_pt2_from_pt101(pts[:101], use_lip=use_lip)
154
+ else:
155
+ raise Exception(f'Unknown shape: {pts.shape}')
156
+
157
+ if not use_lip:
158
+ # NOTE: to be compatible with the code below, pt2 needs to be rotated 90 degrees clockwise manually
159
+ v = pt2[1] - pt2[0]
160
+ pt2[1, 0] = pt2[0, 0] - v[1]
161
+ pt2[1, 1] = pt2[0, 1] + v[0]
162
+
163
+ return pt2
164
+
165
+
166
+ def parse_rect_from_landmark(
167
+ pts,
168
+ scale=1.5,
169
+ need_square=True,
170
+ vx_ratio=0,
171
+ vy_ratio=0,
172
+ use_deg_flag=False,
173
+ **kwargs
174
+ ):
175
+ """parsing center, size, angle from 101/68/5/x landmarks
176
+ vx_ratio: the offset ratio along the pupil axis x-axis, multiplied by size
177
+ vy_ratio: the offset ratio along the pupil axis y-axis, multiplied by size, which is used to contain more forehead area
178
+
179
+ judge with pts.shape
180
+ """
181
+ pt2 = parse_pt2_from_pt_x(pts, use_lip=kwargs.get('use_lip', True))
182
+
183
+ uy = pt2[1] - pt2[0]
184
+ l = np.linalg.norm(uy)
185
+ if l <= 1e-3:
186
+ uy = np.array([0, 1], dtype=DTYPE)
187
+ else:
188
+ uy /= l
189
+ ux = np.array((uy[1], -uy[0]), dtype=DTYPE)
190
+
191
+ # the rotation degree of the x-axis, the clockwise is positive, the counterclockwise is negative (image coordinate system)
192
+ # print(uy)
193
+ # print(ux)
194
+ angle = acos(ux[0])
195
+ if ux[1] < 0:
196
+ angle = -angle
197
+
198
+ # rotation matrix
199
+ M = np.array([ux, uy])
200
+
201
+ # calculate the size which contains the angle degree of the bbox, and the center
202
+ center0 = np.mean(pts, axis=0)
203
+ rpts = (pts - center0) @ M.T # (M @ P.T).T = P @ M.T
204
+ lt_pt = np.min(rpts, axis=0)
205
+ rb_pt = np.max(rpts, axis=0)
206
+ center1 = (lt_pt + rb_pt) / 2
207
+
208
+ size = rb_pt - lt_pt
209
+ if need_square:
210
+ m = max(size[0], size[1])
211
+ size[0] = m
212
+ size[1] = m
213
+
214
+ size *= scale # scale size
215
+ center = center0 + ux * center1[0] + uy * center1[1] # counterclockwise rotation, equivalent to M.T @ center1.T
216
+ center = center + ux * (vx_ratio * size) + uy * \
217
+ (vy_ratio * size) # considering the offset in vx and vy direction
218
+
219
+ if use_deg_flag:
220
+ angle = degrees(angle)
221
+
222
+ return center, size, angle
223
+
224
+
225
+ def parse_bbox_from_landmark(pts, **kwargs):
226
+ center, size, angle = parse_rect_from_landmark(pts, **kwargs)
227
+ cx, cy = center
228
+ w, h = size
229
+
230
+ # calculate the vertex positions before rotation
231
+ bbox = np.array([
232
+ [cx-w/2, cy-h/2], # left, top
233
+ [cx+w/2, cy-h/2],
234
+ [cx+w/2, cy+h/2], # right, bottom
235
+ [cx-w/2, cy+h/2]
236
+ ], dtype=DTYPE)
237
+
238
+ # construct rotation matrix
239
+ bbox_rot = bbox.copy()
240
+ R = np.array([
241
+ [np.cos(angle), -np.sin(angle)],
242
+ [np.sin(angle), np.cos(angle)]
243
+ ], dtype=DTYPE)
244
+
245
+ # calculate the relative position of each vertex from the rotation center, then rotate these positions, and finally add the coordinates of the rotation center
246
+ bbox_rot = (bbox_rot - center) @ R.T + center
247
+
248
+ return {
249
+ 'center': center, # 2x1
250
+ 'size': size, # scalar
251
+ 'angle': angle, # rad, counterclockwise
252
+ 'bbox': bbox, # 4x2
253
+ 'bbox_rot': bbox_rot, # 4x2
254
+ }
255
+
256
+
257
+ def crop_image_by_bbox(img, bbox, lmk=None, dsize=512, angle=None, flag_rot=False, **kwargs):
258
+ left, top, right, bot = bbox
259
+ if int(right - left) != int(bot - top):
260
+ print(f'right-left {right-left} != bot-top {bot-top}')
261
+ size = right - left
262
+
263
+ src_center = np.array([(left + right) / 2, (top + bot) / 2], dtype=DTYPE)
264
+ tgt_center = np.array([dsize / 2, dsize / 2], dtype=DTYPE)
265
+
266
+ s = dsize / size # scale
267
+ if flag_rot and angle is not None:
268
+ costheta, sintheta = cos(angle), sin(angle)
269
+ cx, cy = src_center[0], src_center[1] # ori center
270
+ tcx, tcy = tgt_center[0], tgt_center[1] # target center
271
+ # need to infer
272
+ M_o2c = np.array(
273
+ [[s * costheta, s * sintheta, tcx - s * (costheta * cx + sintheta * cy)],
274
+ [-s * sintheta, s * costheta, tcy - s * (-sintheta * cx + costheta * cy)]],
275
+ dtype=DTYPE
276
+ )
277
+ else:
278
+ M_o2c = np.array(
279
+ [[s, 0, tgt_center[0] - s * src_center[0]],
280
+ [0, s, tgt_center[1] - s * src_center[1]]],
281
+ dtype=DTYPE
282
+ )
283
+
284
+ # if flag_rot and angle is None:
285
+ # print('angle is None, but flag_rotate is True', style="bold yellow")
286
+
287
+ img_crop = _transform_img(img, M_o2c, dsize=dsize, borderMode=kwargs.get('borderMode', None))
288
+ lmk_crop = _transform_pts(lmk, M_o2c) if lmk is not None else None
289
+
290
+ M_o2c = np.vstack([M_o2c, np.array([0, 0, 1], dtype=DTYPE)])
291
+ M_c2o = np.linalg.inv(M_o2c)
292
+
293
+ # cv2.imwrite('crop.jpg', img_crop)
294
+
295
+ return {
296
+ 'img_crop': img_crop,
297
+ 'lmk_crop': lmk_crop,
298
+ 'M_o2c': M_o2c,
299
+ 'M_c2o': M_c2o,
300
+ }
301
+
302
+
303
+ def _estimate_similar_transform_from_pts(
304
+ pts,
305
+ dsize,
306
+ scale=1.5,
307
+ vx_ratio=0,
308
+ vy_ratio=-0.1,
309
+ flag_do_rot=True,
310
+ **kwargs
311
+ ):
312
+ """ calculate the affine matrix of the cropped image from sparse points, the original image to the cropped image, the inverse is the cropped image to the original image
313
+ pts: landmark, 101 or 68 points or other points, Nx2
314
+ scale: the larger scale factor, the smaller face ratio
315
+ vx_ratio: x shift
316
+ vy_ratio: y shift, the smaller the y shift, the lower the face region
317
+ flag_do_rot: if True, conduct roll correction
318
+ """
319
+ center, size, angle = parse_rect_from_landmark(
320
+ pts, scale=scale, vx_ratio=vx_ratio, vy_ratio=vy_ratio,
321
+ use_lip=kwargs.get('use_lip', True)
322
+ )
323
+
324
+ s = dsize / size[0] # scale
325
+ tgt_center = np.array([dsize / 2, dsize / 2], dtype=DTYPE) # center of dsize
326
+
327
+ if flag_do_rot:
328
+ costheta, sintheta = cos(angle), sin(angle)
329
+ cx, cy = center[0], center[1] # ori center
330
+ tcx, tcy = tgt_center[0], tgt_center[1] # target center
331
+ # need to infer
332
+ M_INV = np.array(
333
+ [[s * costheta, s * sintheta, tcx - s * (costheta * cx + sintheta * cy)],
334
+ [-s * sintheta, s * costheta, tcy - s * (-sintheta * cx + costheta * cy)]],
335
+ dtype=DTYPE
336
+ )
337
+ else:
338
+ M_INV = np.array(
339
+ [[s, 0, tgt_center[0] - s * center[0]],
340
+ [0, s, tgt_center[1] - s * center[1]]],
341
+ dtype=DTYPE
342
+ )
343
+
344
+ M_INV_H = np.vstack([M_INV, np.array([0, 0, 1])])
345
+ M = np.linalg.inv(M_INV_H)
346
+
347
+ # M_INV is from the original image to the cropped image, M is from the cropped image to the original image
348
+ return M_INV, M[:2, ...]
349
+
350
+
351
+ def crop_image(img, pts: np.ndarray, **kwargs):
352
+ dsize = kwargs.get('dsize', 224)
353
+ scale = kwargs.get('scale', 1.5) # 1.5 | 1.6
354
+ vy_ratio = kwargs.get('vy_ratio', -0.1) # -0.0625 | -0.1
355
+
356
+ M_INV, _ = _estimate_similar_transform_from_pts(
357
+ pts,
358
+ dsize=dsize,
359
+ scale=scale,
360
+ vy_ratio=vy_ratio,
361
+ flag_do_rot=kwargs.get('flag_do_rot', True),
362
+ )
363
+
364
+ img_crop = _transform_img(img, M_INV, dsize) # origin to crop
365
+ pt_crop = _transform_pts(pts, M_INV)
366
+
367
+ M_o2c = np.vstack([M_INV, np.array([0, 0, 1], dtype=DTYPE)])
368
+ M_c2o = np.linalg.inv(M_o2c)
369
+
370
+ ret_dct = {
371
+ 'M_o2c': M_o2c, # from the original image to the cropped image 3x3
372
+ 'M_c2o': M_c2o, # from the cropped image to the original image 3x3
373
+ 'img_crop': img_crop, # the cropped image
374
+ 'pt_crop': pt_crop, # the landmarks of the cropped image
375
+ }
376
+
377
+ return ret_dct
378
+
379
+ def average_bbox_lst(bbox_lst):
380
+ if len(bbox_lst) == 0:
381
+ return None
382
+ bbox_arr = np.array(bbox_lst)
383
+ return np.mean(bbox_arr, axis=0).tolist()
384
+
385
+ def prepare_paste_back(mask_crop, crop_M_c2o, dsize):
386
+ """prepare mask for later image paste back
387
+ """
388
+ mask_ori = _transform_img(mask_crop, crop_M_c2o, dsize)
389
+ mask_ori = mask_ori.astype(np.float32) / 255.
390
+ return mask_ori
391
+
392
+ def paste_back(img_crop, M_c2o, img_ori, mask_ori):
393
+ """paste back the image
394
+ """
395
+ dsize = (img_ori.shape[1], img_ori.shape[0])
396
+ result = _transform_img(img_crop, M_c2o, dsize=dsize)
397
+ result = np.clip(mask_ori * result + (1 - mask_ori) * img_ori, 0, 255).astype(np.uint8)
398
+ return result
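An end-to-end sketch of the crop / paste-back helpers on synthetic data; the landmark coordinates and the all-white mask are made up (in the actual pipeline the mask comes from a template image), and the import path is assumed:

import numpy as np
from src.utils.crop import crop_image, prepare_paste_back, paste_back  # assumed import path

img = np.random.randint(0, 255, (512, 512, 3), dtype=np.uint8)
# five illustrative landmarks: left eye, right eye, nose, left / right mouth corner
pts = np.array([[200, 220], [310, 220], [255, 280], [215, 340], [295, 340]], dtype=np.float32)

ret = crop_image(img, pts, dsize=224, scale=1.5, vy_ratio=-0.1)
print(ret['img_crop'].shape)                              # (224, 224, 3)

mask_crop = np.full((224, 224, 3), 255, dtype=np.uint8)   # placeholder mask (a template image in practice)
mask_ori = prepare_paste_back(mask_crop, ret['M_c2o'], dsize=(512, 512))
blended = paste_back(ret['img_crop'], ret['M_c2o'], img, mask_ori)
print(blended.shape, blended.dtype)                       # (512, 512, 3) uint8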
src/utils/cropper.py ADDED
@@ -0,0 +1,196 @@
1
+ # coding: utf-8
2
+
3
+ import os.path as osp
4
+ from dataclasses import dataclass, field
5
+ from typing import List, Tuple, Union
6
+
7
+ import cv2; cv2.setNumThreads(0); cv2.ocl.setUseOpenCL(False)
8
+ import numpy as np
9
+
10
+ from ..config.crop_config import CropConfig
11
+ from .crop import (
12
+ average_bbox_lst,
13
+ crop_image,
14
+ crop_image_by_bbox,
15
+ parse_bbox_from_landmark,
16
+ )
17
+ from .io import contiguous
18
+ from .rprint import rlog as log
19
+ from .face_analysis_diy import FaceAnalysisDIY
20
+ from .landmark_runner import LandmarkRunner
21
+
22
+
23
+ def make_abs_path(fn):
24
+ return osp.join(osp.dirname(osp.realpath(__file__)), fn)
25
+
26
+
27
+ @dataclass
28
+ class Trajectory:
29
+ start: int = -1 # start frame
30
+ end: int = -1 # end frame
31
+ lmk_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list) # lmk list
32
+ bbox_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list) # bbox list
33
+
34
+ frame_rgb_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list) # frame list
35
+ lmk_crop_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list) # lmk list
36
+ frame_rgb_crop_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list) # frame crop list
37
+
38
+
39
+ class Cropper(object):
40
+ def __init__(self, **kwargs) -> None:
41
+ self.crop_cfg: CropConfig = kwargs.get("crop_cfg", None)
42
+ device_id = kwargs.get("device_id", 0)
43
+ flag_force_cpu = kwargs.get("flag_force_cpu", False)
44
+ if flag_force_cpu:
45
+ device = "cpu"
46
+ face_analysis_wrapper_provider = ["CPUExecutionProvider"]
47
+ else:
48
+ device = "cuda"
49
+ face_analysis_wrapper_provider = ["CUDAExecutionProvider"]
50
+ self.landmark_runner = LandmarkRunner(
51
+ ckpt_path=make_abs_path(self.crop_cfg.landmark_ckpt_path),
52
+ onnx_provider=device,
53
+ device_id=device_id,
54
+ )
55
+ self.landmark_runner.warmup()
56
+
57
+ self.face_analysis_wrapper = FaceAnalysisDIY(
58
+ name="buffalo_l",
59
+ root=make_abs_path(self.crop_cfg.insightface_root),
60
+ providers=face_analysis_wrapper_provider,
61
+ )
62
+ self.face_analysis_wrapper.prepare(ctx_id=device_id, det_size=(512, 512))
63
+ self.face_analysis_wrapper.warmup()
64
+
65
+ def update_config(self, user_args):
66
+ for k, v in user_args.items():
67
+ if hasattr(self.crop_cfg, k):
68
+ setattr(self.crop_cfg, k, v)
69
+
70
+ def crop_source_image(self, img_rgb_: np.ndarray, crop_cfg: CropConfig):
71
+ # crop a source image and get the necessary information
72
+ img_rgb = img_rgb_.copy() # copy it
73
+
74
+ img_bgr = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
75
+ src_face = self.face_analysis_wrapper.get(
76
+ img_bgr,
77
+ flag_do_landmark_2d_106=True,
78
+ direction=crop_cfg.direction,
79
+ max_face_num=crop_cfg.max_face_num,
80
+ )
81
+
82
+ if len(src_face) == 0:
83
+ log("No face detected in the source image.")
84
+ return None
85
+ elif len(src_face) > 1:
86
+ log(f"More than one face detected in the image, only pick one face by rule {crop_cfg.direction}.")
87
+
88
+ # NOTE: temporarily only pick the first face, to support multiple face in the future
89
+ src_face = src_face[0]
90
+ lmk = src_face.landmark_2d_106 # this is the 106 landmarks from insightface
91
+
92
+ # crop the face
93
+ ret_dct = crop_image(
94
+ img_rgb, # ndarray
95
+ lmk, # 106x2 or Nx2
96
+ dsize=crop_cfg.dsize,
97
+ scale=crop_cfg.scale,
98
+ vx_ratio=crop_cfg.vx_ratio,
99
+ vy_ratio=crop_cfg.vy_ratio,
100
+ )
101
+
102
+ lmk = self.landmark_runner.run(img_rgb, lmk)
103
+ ret_dct["lmk_crop"] = lmk
104
+
105
+ # update a 256x256 version for network input
106
+ ret_dct["img_crop_256x256"] = cv2.resize(ret_dct["img_crop"], (256, 256), interpolation=cv2.INTER_AREA)
107
+ ret_dct["lmk_crop_256x256"] = ret_dct["lmk_crop"] * 256 / crop_cfg.dsize
108
+
109
+ return ret_dct
110
+
111
+ def crop_driving_video(self, driving_rgb_lst, **kwargs):
112
+ """Tracking based landmarks/alignment and cropping"""
113
+ trajectory = Trajectory()
114
+ direction = kwargs.get("direction", "large-small")
115
+ for idx, frame_rgb in enumerate(driving_rgb_lst):
116
+ if idx == 0 or trajectory.start == -1:
117
+ src_face = self.face_analysis_wrapper.get(
118
+ contiguous(frame_rgb[..., ::-1]),
119
+ flag_do_landmark_2d_106=True,
120
+ direction=direction,
121
+ )
122
+ if len(src_face) == 0:
123
+ log(f"No face detected in the frame #{idx}")
124
+ continue
125
+ elif len(src_face) > 1:
126
+ log(f"More than one face detected in the driving frame_{idx}, only pick one face by rule {direction}.")
127
+ src_face = src_face[0]
128
+ lmk = src_face.landmark_2d_106
129
+ lmk = self.landmark_runner.run(frame_rgb, lmk)
130
+ trajectory.start, trajectory.end = idx, idx
131
+ else:
132
+ lmk = self.landmark_runner.run(frame_rgb, trajectory.lmk_lst[-1])
133
+ trajectory.end = idx
134
+
135
+ trajectory.lmk_lst.append(lmk)
136
+ ret_bbox = parse_bbox_from_landmark(
137
+ lmk,
138
+ scale=self.crop_cfg.scale_crop_video,
139
+ vx_ratio_crop_video=self.crop_cfg.vx_ratio_crop_video,
140
+ vy_ratio=self.crop_cfg.vy_ratio_crop_video,
141
+ )["bbox"]
142
+ bbox = [
143
+ ret_bbox[0, 0],
144
+ ret_bbox[0, 1],
145
+ ret_bbox[2, 0],
146
+ ret_bbox[2, 1],
147
+ ] # 4,
148
+ trajectory.bbox_lst.append(bbox) # bbox
149
+ trajectory.frame_rgb_lst.append(frame_rgb)
150
+
151
+ global_bbox = average_bbox_lst(trajectory.bbox_lst)
152
+
153
+ for idx, (frame_rgb, lmk) in enumerate(zip(trajectory.frame_rgb_lst, trajectory.lmk_lst)):
154
+ ret_dct = crop_image_by_bbox(
155
+ frame_rgb,
156
+ global_bbox,
157
+ lmk=lmk,
158
+ dsize=kwargs.get("dsize", 512),
159
+ flag_rot=False,
160
+ borderValue=(0, 0, 0),
161
+ )
162
+ trajectory.frame_rgb_crop_lst.append(ret_dct["img_crop"])
163
+ trajectory.lmk_crop_lst.append(ret_dct["lmk_crop"])
164
+
165
+ return {
166
+ "frame_crop_lst": trajectory.frame_rgb_crop_lst,
167
+ "lmk_crop_lst": trajectory.lmk_crop_lst,
168
+ }
169
+
170
+ def calc_lmks_from_cropped_video(self, driving_rgb_crop_lst, **kwargs):
171
+ """Tracking based landmarks/alignment"""
172
+ trajectory = Trajectory()
173
+ direction = kwargs.get("direction", "large-small")
174
+
175
+ for idx, frame_rgb_crop in enumerate(driving_rgb_crop_lst):
176
+ if idx == 0 or trajectory.start == -1:
177
+ src_face = self.face_analysis_wrapper.get(
178
+ contiguous(frame_rgb_crop[..., ::-1]), # convert to BGR
179
+ flag_do_landmark_2d_106=True,
180
+ direction=direction,
181
+ )
182
+ if len(src_face) == 0:
183
+ log(f"No face detected in the frame #{idx}")
184
+ raise Exception(f"No face detected in the frame #{idx}")
185
+ elif len(src_face) > 1:
186
+ log(f"More than one face detected in the driving frame_{idx}, only pick one face by rule {direction}.")
187
+ src_face = src_face[0]
188
+ lmk = src_face.landmark_2d_106
189
+ lmk = self.landmark_runner.run(frame_rgb_crop, lmk)
190
+ trajectory.start, trajectory.end = idx, idx
191
+ else:
192
+ lmk = self.landmark_runner.run(frame_rgb_crop, trajectory.lmk_lst[-1])
193
+ trajectory.end = idx
194
+
195
+ trajectory.lmk_lst.append(lmk)
196
+ return trajectory.lmk_lst
src/utils/dependencies/insightface/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ # coding: utf-8
2
+ # pylint: disable=wrong-import-position
3
+ """InsightFace: A Face Analysis Toolkit."""
4
+ from __future__ import absolute_import
5
+
6
+ try:
7
+ #import mxnet as mx
8
+ import onnxruntime
9
+ except ImportError:
10
+ raise ImportError(
11
+ "Unable to import dependency onnxruntime. "
12
+ )
13
+
14
+ __version__ = '0.7.3'
15
+
16
+ from . import model_zoo
17
+ from . import utils
18
+ from . import app
19
+ from . import data
20
+
src/utils/dependencies/insightface/app/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .face_analysis import *
src/utils/dependencies/insightface/app/common.py ADDED
@@ -0,0 +1,49 @@
1
+ import numpy as np
2
+ from numpy.linalg import norm as l2norm
3
+ #from easydict import EasyDict
4
+
5
+ class Face(dict):
6
+
7
+ def __init__(self, d=None, **kwargs):
8
+ if d is None:
9
+ d = {}
10
+ if kwargs:
11
+ d.update(**kwargs)
12
+ for k, v in d.items():
13
+ setattr(self, k, v)
14
+ # Class attributes
15
+ #for k in self.__class__.__dict__.keys():
16
+ # if not (k.startswith('__') and k.endswith('__')) and not k in ('update', 'pop'):
17
+ # setattr(self, k, getattr(self, k))
18
+
19
+ def __setattr__(self, name, value):
20
+ if isinstance(value, (list, tuple)):
21
+ value = [self.__class__(x)
22
+ if isinstance(x, dict) else x for x in value]
23
+ elif isinstance(value, dict) and not isinstance(value, self.__class__):
24
+ value = self.__class__(value)
25
+ super(Face, self).__setattr__(name, value)
26
+ super(Face, self).__setitem__(name, value)
27
+
28
+ __setitem__ = __setattr__
29
+
30
+ def __getattr__(self, name):
31
+ return None
32
+
33
+ @property
34
+ def embedding_norm(self):
35
+ if self.embedding is None:
36
+ return None
37
+ return l2norm(self.embedding)
38
+
39
+ @property
40
+ def normed_embedding(self):
41
+ if self.embedding is None:
42
+ return None
43
+ return self.embedding / self.embedding_norm
44
+
45
+ @property
46
+ def sex(self):
47
+ if self.gender is None:
48
+ return None
49
+ return 'M' if self.gender==1 else 'F'
src/utils/dependencies/insightface/app/face_analysis.py ADDED
@@ -0,0 +1,110 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Organization : insightface.ai
3
+ # @Author : Jia Guo
4
+ # @Time : 2021-05-04
5
+ # @Function :
6
+
7
+
8
+ from __future__ import division
9
+
10
+ import glob
11
+ import os.path as osp
12
+
13
+ import numpy as np
14
+ import onnxruntime
15
+ from numpy.linalg import norm
16
+
17
+ from ..model_zoo import model_zoo
18
+ from ..utils import ensure_available
19
+ from .common import Face
20
+
21
+
22
+ DEFAULT_MP_NAME = 'buffalo_l'
23
+ __all__ = ['FaceAnalysis']
24
+
25
+ class FaceAnalysis:
26
+ def __init__(self, name=DEFAULT_MP_NAME, root='~/.insightface', allowed_modules=None, **kwargs):
27
+ onnxruntime.set_default_logger_severity(3)
28
+ self.models = {}
29
+ self.model_dir = ensure_available('models', name, root=root)
30
+ onnx_files = glob.glob(osp.join(self.model_dir, '*.onnx'))
31
+ onnx_files = sorted(onnx_files)
32
+ for onnx_file in onnx_files:
33
+ model = model_zoo.get_model(onnx_file, **kwargs)
34
+ if model is None:
35
+ print('model not recognized:', onnx_file)
36
+ elif allowed_modules is not None and model.taskname not in allowed_modules:
37
+ print('model ignore:', onnx_file, model.taskname)
38
+ del model
39
+ elif model.taskname not in self.models and (allowed_modules is None or model.taskname in allowed_modules):
40
+ # print('find model:', onnx_file, model.taskname, model.input_shape, model.input_mean, model.input_std)
41
+ self.models[model.taskname] = model
42
+ else:
43
+ print('duplicated model task type, ignore:', onnx_file, model.taskname)
44
+ del model
45
+ assert 'detection' in self.models
46
+ self.det_model = self.models['detection']
47
+
48
+
49
+ def prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640)):
50
+ self.det_thresh = det_thresh
51
+ assert det_size is not None
52
+ # print('set det-size:', det_size)
53
+ self.det_size = det_size
54
+ for taskname, model in self.models.items():
55
+ if taskname=='detection':
56
+ model.prepare(ctx_id, input_size=det_size, det_thresh=det_thresh)
57
+ else:
58
+ model.prepare(ctx_id)
59
+
60
+ def get(self, img, max_num=0):
61
+ bboxes, kpss = self.det_model.detect(img,
62
+ max_num=max_num,
63
+ metric='default')
64
+ if bboxes.shape[0] == 0:
65
+ return []
66
+ ret = []
67
+ for i in range(bboxes.shape[0]):
68
+ bbox = bboxes[i, 0:4]
69
+ det_score = bboxes[i, 4]
70
+ kps = None
71
+ if kpss is not None:
72
+ kps = kpss[i]
73
+ face = Face(bbox=bbox, kps=kps, det_score=det_score)
74
+ for taskname, model in self.models.items():
75
+ if taskname=='detection':
76
+ continue
77
+ model.get(img, face)
78
+ ret.append(face)
79
+ return ret
80
+
81
+ def draw_on(self, img, faces):
82
+ import cv2
83
+ dimg = img.copy()
84
+ for i in range(len(faces)):
85
+ face = faces[i]
86
+ box = face.bbox.astype(np.int)
87
+ color = (0, 0, 255)
88
+ cv2.rectangle(dimg, (box[0], box[1]), (box[2], box[3]), color, 2)
89
+ if face.kps is not None:
90
+ kps = face.kps.astype(np.int)
91
+ #print(landmark.shape)
92
+ for l in range(kps.shape[0]):
93
+ color = (0, 0, 255)
94
+ if l == 0 or l == 3:
95
+ color = (0, 255, 0)
96
+ cv2.circle(dimg, (kps[l][0], kps[l][1]), 1, color,
97
+ 2)
98
+ if face.gender is not None and face.age is not None:
99
+ cv2.putText(dimg,'%s,%d'%(face.sex,face.age), (box[0]-1, box[1]-4),cv2.FONT_HERSHEY_COMPLEX,0.7,(0,255,0),1)
100
+
101
+ #for key, value in face.items():
102
+ # if key.startswith('landmark_3d'):
103
+ # print(key, value.shape)
104
+ # print(value[0:10,:])
105
+ # lmk = np.round(value).astype(np.int)
106
+ # for l in range(lmk.shape[0]):
107
+ # color = (255, 0, 0)
108
+ # cv2.circle(dimg, (lmk[l][0], lmk[l][1]), 1, color,
109
+ # 2)
110
+ return dimg
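The usual call sequence for `FaceAnalysis` is construct → `prepare()` → `get()`, which is also how the cropper in this repo drives it. A minimal, hedged example (assuming the vendored package is importable as `insightface`, or adjust the import to the `src.utils.dependencies` path; weights for the `buffalo_l` pack are downloaded to `~/.insightface` on first use):

    import cv2
    from insightface.app import FaceAnalysis

    app = FaceAnalysis(name='buffalo_l', allowed_modules=['detection'])
    app.prepare(ctx_id=0, det_size=(640, 640))   # ctx_id=0 -> first GPU; use -1 for CPU
    img = cv2.imread('example/source_image/WDA_BenCardin1_000.png')  # BGR, as get() expects
    faces = app.get(img)
    print(len(faces), 'face(s) detected')
    cv2.imwrite('faces_vis.jpg', app.draw_on(img, faces))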
src/utils/dependencies/insightface/data/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .image import get_image
+ from .pickle_object import get_object
src/utils/dependencies/insightface/data/image.py ADDED
@@ -0,0 +1,27 @@
+ import cv2
+ import os
+ import os.path as osp
+ from pathlib import Path
+
+ class ImageCache:
+     data = {}
+
+ def get_image(name, to_rgb=False):
+     key = (name, to_rgb)
+     if key in ImageCache.data:
+         return ImageCache.data[key]
+     images_dir = osp.join(Path(__file__).parent.absolute(), 'images')
+     ext_names = ['.jpg', '.png', '.jpeg']
+     image_file = None
+     for ext_name in ext_names:
+         _image_file = osp.join(images_dir, "%s%s"%(name, ext_name))
+         if osp.exists(_image_file):
+             image_file = _image_file
+             break
+     assert image_file is not None, '%s not found'%name
+     img = cv2.imread(image_file)
+     if to_rgb:
+         img = img[:,:,::-1]
+     ImageCache.data[key] = img
+     return img
+
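`get_image` resolves a bundled sample by basename (trying `.jpg`, `.png`, `.jpeg` under `data/images`), caches the decoded array, and returns it in BGR order unless `to_rgb=True`. For instance, with the `Tom_Hanks_54745.png` asset added below (again assuming the vendored package is importable as `insightface`):

    from insightface.data import get_image

    img_bgr = get_image('Tom_Hanks_54745')               # cv2-style BGR array
    img_rgb = get_image('Tom_Hanks_54745', to_rgb=True)  # channels reversed to RGB
    print(img_bgr.shape)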
src/utils/dependencies/insightface/data/images/Tom_Hanks_54745.png ADDED
src/utils/dependencies/insightface/data/images/mask_black.jpg ADDED
src/utils/dependencies/insightface/data/images/mask_blue.jpg ADDED