diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..2fff30bd39acafec860467a322b02527e853e457 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+example/audio_driven/WDA_BenCardin1_000.wav filter=lfs diff=lfs merge=lfs -text
+example/audio_driven/WRA_MarkwayneMullin_000.wav filter=lfs diff=lfs merge=lfs -text
+example/audio_driven/WRA_MikeJohanns1_000.wav filter=lfs diff=lfs merge=lfs -text
+src/utils/dependencies/insightface/data/images/t1.jpg filter=lfs diff=lfs merge=lfs -text
diff --git a/ORIGINAL_README.md b/ORIGINAL_README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3b2b445b980fc48b2470c19b376bde43250d8403
--- /dev/null
+++ b/ORIGINAL_README.md
@@ -0,0 +1,118 @@
+
+
+# Unlock Pose Diversity: Accurate and Efficient Implicit Keypoint-based Spatiotemporal Diffusion for Audio-driven Talking Portrait
+[arXiv](https://arxiv.org/abs/2503.12963)
+[License: CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/)
+[GitHub](https://github.com/chaolongy/KDTalker)
+
+
+
+
+    1 University of Liverpool   2 Ant Group   3 Xi’an Jiaotong-Liverpool University   
+    4 Duke Kunshan University   5 Ricoh Software Research Center  
+
+
+
+
+
+# Comparative videos
+https://github.com/user-attachments/assets/08ebc6e0-41c5-4bf4-8ee8-2f7d317d92cd
+
+
+# Demo
+A Gradio demo is available at [`KDTalker`](https://kdtalker.com/). The model was trained using only 4,282 video clips from [`VoxCeleb`](https://www.robots.ox.ac.uk/~vgg/data/voxceleb/).
+
+
+
+# To Do List
+- [ ] Train a community version using more datasets
+- [ ] Release training code
+
+
+# Environment
+KDTalker runs on a single RTX 4090 or RTX 3090.
+
+### 1. Clone the code and prepare the environment
+
+**Note:** Make sure your system has [`git`](https://git-scm.com/), [`conda`](https://anaconda.org/anaconda/conda), and [`FFmpeg`](https://ffmpeg.org/download.html) installed.
+
+```
+git clone https://github.com/chaolongy/KDTalker
+cd KDTalker
+
+# create env using conda
+conda create -n KDTalker python=3.9
+conda activate KDTalker
+
+conda install pytorch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 pytorch-cuda=11.8 -c pytorch -c nvidia
+
+pip install -r requirements.txt
+```
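+
+Optionally, verify that PyTorch was installed with CUDA support before moving on (a quick, illustrative check):
+```
+import torch
+
+# Expect the installed version and True on an RTX 3090/4090 machine.
+print(torch.__version__, torch.cuda.is_available())
+```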
+
+### 2. Download pretrained weights
+
+First, download all LivePortrait pretrained weights from [Google Drive](https://drive.google.com/drive/folders/1UtKgzKjFAOmZkhNK-OYT0caJ_w2XAnib). Unzip and place them in `./pretrained_weights`,
+ensuring the directory structure is as follows:
+```text
+pretrained_weights
+├── insightface
+│   └── models
+│       └── buffalo_l
+│           ├── 2d106det.onnx
+│           └── det_10g.onnx
+└── liveportrait
+    ├── base_models
+    │   ├── appearance_feature_extractor.pth
+    │   ├── motion_extractor.pth
+    │   ├── spade_generator.pth
+    │   └── warping_module.pth
+    ├── landmark.onnx
+    └── retargeting_models
+        └── stitching_retargeting_module.pth
+```
+You can download the weights for the face detector, audio extractor, and KDTalker from [Google Drive](https://drive.google.com/drive/folders/1OkfiFArUCsnkF_0tI2SCEAwVCBLSjzd6?hl=zh-CN). Put them in `./ckpts`.
+
+Alternatively, you can download all of the above weights from [Hugging Face](https://huggingface.co/ChaolongYang/KDTalker/tree/main).
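+
+For example, the whole Hugging Face repository can be fetched with `huggingface_hub` (an illustrative snippet; it assumes the package is installed, and `local_dir` should be adjusted to match the layout above):
+```
+from huggingface_hub import snapshot_download
+
+# Download every file of the KDTalker Hugging Face repo into the current directory.
+snapshot_download(repo_id="ChaolongYang/KDTalker", local_dir="./")
+```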
+
+
+
+# Inference
+```
+python inference.py -source_image ./example/source_image/WDA_BenCardin1_000.png -driven_audio ./example/audio_driven/WDA_BenCardin1_000.wav -output ./results/output.mp4
+```
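+
+The same pipeline can also be driven from Python. Below is a minimal sketch based on the `Inferencer` class in `inference.py` (the paths point to the bundled examples; replace them with your own files):
+```
+from inference import Inferencer
+
+infer = Inferencer()  # loads the KDTalker, LivePortrait, and Wav2Lip weights
+infer.generate_with_audio_img(
+    "example/source_image/WDA_BenCardin1_000.png",  # source portrait
+    "example/audio_driven/WDA_BenCardin1_000.wav",  # driving audio
+    "results/output.mp4",                           # output video path
+)
+```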
+
+
+# Contact
+Our code is released under the CC-BY-NC 4.0 license and is intended solely for research purposes. If you have any questions or wish to use it for commercial purposes, please contact us at chaolong.yang@liverpool.ac.uk.
+
+
+# Citation
+If you find this code helpful for your research, please cite:
+```
+@misc{yang2025kdtalker,
+      title={Unlock Pose Diversity: Accurate and Efficient Implicit Keypoint-based Spatiotemporal Diffusion for Audio-driven Talking Portrait}, 
+      author={Chaolong Yang and Kai Yao and Yuyao Yan and Chenru Jiang and Weiguang Zhao and Jie Sun and Guangliang Cheng and Yifei Zhang and Bin Dong and Kaizhu Huang},
+      year={2025},
+      eprint={2503.12963},
+      archivePrefix={arXiv},
+      primaryClass={cs.CV},
+      url={https://arxiv.org/abs/2503.12963}, 
+}
+```
+
+
+# Acknowledgements
+We thank the following works for their publicly available code and generous help: [SadTalker](https://github.com/OpenTalker/SadTalker), [LivePortrait](https://github.com/KwaiVGI/LivePortrait), [Wav2Lip](https://github.com/Rudrabha/Wav2Lip), [Face-vid2vid](https://github.com/zhanglonghao1992/One-Shot_Free-View_Neural_Talking_Head_Synthesis), etc.
+
diff --git a/dataset_process/audio.py b/dataset_process/audio.py
new file mode 100644
index 0000000000000000000000000000000000000000..653cdd88738ccd95a89039650f1342df17564dec
--- /dev/null
+++ b/dataset_process/audio.py
@@ -0,0 +1,156 @@
+import librosa
+import librosa.filters
+import numpy as np
+# import tensorflow as tf
+from scipy import signal
+from scipy.io import wavfile
+from src.utils.hparams import hparams as hp
+
+
+def load_wav(path, sr):
+    return librosa.core.load(path, sr=sr)[0]
+
+
+def save_wav(wav, path, sr):
+    wav *= 32767 / max(0.01, np.max(np.abs(wav)))
+    # proposed by @dsmiller
+    wavfile.write(path, sr, wav.astype(np.int16))
+
+
+def save_wavenet_wav(wav, path, sr):
+    # librosa.output.write_wav was removed in librosa >= 0.8; soundfile provides the same functionality
+    import soundfile as sf
+    sf.write(path, wav, sr)
+
+
+def preemphasis(wav, k, preemphasize=True):
+    if preemphasize:
+        return signal.lfilter([1, -k], [1], wav)
+    return wav
+
+
+def inv_preemphasis(wav, k, inv_preemphasize=True):
+    if inv_preemphasize:
+        return signal.lfilter([1], [1, -k], wav)
+    return wav
+
+
+def get_hop_size():
+    hop_size = hp.hop_size
+    if hop_size is None:
+        assert hp.frame_shift_ms is not None
+        hop_size = int(hp.frame_shift_ms / 1000 * hp.sample_rate)
+    return hop_size
+
+
+def linearspectrogram(wav):
+    D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
+    S = _amp_to_db(np.abs(D)) - hp.ref_level_db
+
+    if hp.signal_normalization:
+        return _normalize(S)
+    return S
+
+
+def melspectrogram(wav):
+    D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
+    S = _amp_to_db(_linear_to_mel(np.abs(D))) - hp.ref_level_db
+
+    if hp.signal_normalization:
+        return _normalize(S)
+    return S
+
+
+def _lws_processor():
+    import lws
+    return lws.lws(hp.n_fft, get_hop_size(), fftsize=hp.win_size, mode="speech")
+
+
+def _stft(y):
+    if hp.use_lws:
+        return _lws_processor().stft(y).T
+    else:
+        return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=get_hop_size(), win_length=hp.win_size)
+
+
+##########################################################
+# Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)
+def num_frames(length, fsize, fshift):
+    """Compute number of time frames of spectrogram
+    """
+    pad = (fsize - fshift)
+    if length % fshift == 0:
+        M = (length + pad * 2 - fsize) // fshift + 1
+    else:
+        M = (length + pad * 2 - fsize) // fshift + 2
+    return M
+
+
+def pad_lr(x, fsize, fshift):
+    """Compute left and right padding
+    """
+    M = num_frames(len(x), fsize, fshift)
+    pad = (fsize - fshift)
+    T = len(x) + 2 * pad
+    r = (M - 1) * fshift + fsize - T
+    return pad, pad + r
+
+
+##########################################################
+# Librosa correct padding
+def librosa_pad_lr(x, fsize, fshift):
+    return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]
+
+
+# Conversions
+_mel_basis = None
+
+
+def _linear_to_mel(spectogram):
+    global _mel_basis
+    if _mel_basis is None:
+        _mel_basis = _build_mel_basis()
+    return np.dot(_mel_basis, spectogram)
+
+
+def _build_mel_basis():
+    assert hp.fmax <= hp.sample_rate // 2
+    return librosa.filters.mel(sr=hp.sample_rate, n_fft=hp.n_fft, n_mels=hp.num_mels,
+                               fmin=hp.fmin, fmax=hp.fmax)
+
+
+def _amp_to_db(x):
+    min_level = np.exp(hp.min_level_db / 20 * np.log(10))
+    return 20 * np.log10(np.maximum(min_level, x))
+
+
+def _db_to_amp(x):
+    return np.power(10.0, (x) * 0.05)
+
+
+def _normalize(S):
+    if hp.allow_clipping_in_normalization:
+        if hp.symmetric_mels:
+            return np.clip((2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value,
+                           -hp.max_abs_value, hp.max_abs_value)
+        else:
+            return np.clip(hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db)), 0, hp.max_abs_value)
+
+    assert S.max() <= 0 and S.min() - hp.min_level_db >= 0
+    if hp.symmetric_mels:
+        return (2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value
+    else:
+        return hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db))
+
+
+def _denormalize(D):
+    if hp.allow_clipping_in_normalization:
+        if hp.symmetric_mels:
+            return (((np.clip(D, -hp.max_abs_value,
+                              hp.max_abs_value) + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value))
+                    + hp.min_level_db)
+        else:
+            return ((np.clip(D, 0, hp.max_abs_value) * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
+
+    if hp.symmetric_mels:
+        return (((D + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value)) + hp.min_level_db)
+    else:
+        return ((D * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
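+
+
+# Illustrative usage sketch (assumes the hparams in src/utils/hparams are configured for
+# 16 kHz audio, as in inference.py):
+#   wav = load_wav('example/audio_driven/WDA_BenCardin1_000.wav', sr=16000)
+#   mel = melspectrogram(wav)  # (num_mels, T) normalized mel spectrogram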
diff --git a/dataset_process/croper.py b/dataset_process/croper.py
new file mode 100644
index 0000000000000000000000000000000000000000..639ec5206960b8a3940bbb882e4ab6ec97770684
--- /dev/null
+++ b/dataset_process/croper.py
@@ -0,0 +1,154 @@
+import cv2
+
+"""
+brief: face alignment with FFHQ method (https://github.com/NVlabs/ffhq-dataset)
+author: lzhbrian (https://lzhbrian.me)
+date: 2020.1.5
+note: code is heavily borrowed from 
+    https://github.com/NVlabs/ffhq-dataset
+    http://dlib.net/face_landmark_detection.py.html
+requirements:
+    apt install cmake
+    conda install Pillow numpy scipy
+    pip install dlib
+    # download face landmark model from: 
+    # http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
+"""
+
+import numpy as np
+from PIL import Image
+import dlib
+
+
+class Croper:
+    def __init__(self, path_of_lm):
+        # download model from: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
+        self.predictor = dlib.shape_predictor(path_of_lm)
+
+    def get_landmark(self, img_np):
+        """get landmark with dlib
+        :return: np.array shape=(68, 2)
+        """
+        detector = dlib.get_frontal_face_detector()
+        dets = detector(img_np, 1)
+        #     print("Number of faces detected: {}".format(len(dets)))
+        #     for k, d in enumerate(dets):
+        if len(dets) == 0:
+            return None
+        d = dets[0]
+        # Get the landmarks/parts for the face in box d.
+        shape = self.predictor(img_np, d)
+        #         print("Part 0: {}, Part 1: {} ...".format(shape.part(0), shape.part(1)))
+        t = list(shape.parts())
+        a = []
+        for tt in t:
+            a.append([tt.x, tt.y])
+        lm = np.array(a)
+        # lm is a shape=(68,2) np.array
+        return lm
+
+    def align_face(self, img, lm, output_size=1024):
+        """
+        :param filepath: str
+        :return: PIL Image
+        """
+        lm_chin = lm[0: 17]  # left-right
+        lm_eyebrow_left = lm[17: 22]  # left-right
+        lm_eyebrow_right = lm[22: 27]  # left-right
+        lm_nose = lm[27: 31]  # top-down
+        lm_nostrils = lm[31: 36]  # top-down
+        lm_eye_left = lm[36: 42]  # left-clockwise
+        lm_eye_right = lm[42: 48]  # left-clockwise
+        lm_mouth_outer = lm[48: 60]  # left-clockwise
+        lm_mouth_inner = lm[60: 68]  # left-clockwise
+
+        # Calculate auxiliary vectors.
+        eye_left = np.mean(lm_eye_left, axis=0)
+        eye_right = np.mean(lm_eye_right, axis=0)
+        eye_avg = (eye_left + eye_right) * 0.5
+        eye_to_eye = eye_right - eye_left
+        mouth_left = lm_mouth_outer[0]
+        mouth_right = lm_mouth_outer[6]
+        mouth_avg = (mouth_left + mouth_right) * 0.5
+        eye_to_mouth = mouth_avg - eye_avg
+
+        # Choose oriented crop rectangle.
+        x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]  # Addition of binocular difference and double mouth difference
+        x /= np.hypot(*x)
+        x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
+        y = np.flipud(x) * [-1, 1]
+        c = eye_avg + eye_to_mouth * 0.1
+        quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
+        qsize = np.hypot(*x) * 2
+
+        # Shrink.
+        shrink = int(np.floor(qsize / output_size * 0.5))
+        if shrink > 1:
+            rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
+            img = img.resize(rsize, Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10
+            quad /= shrink
+            qsize /= shrink
+        else:
+            rsize = (int(np.rint(float(img.size[0]))), int(np.rint(float(img.size[1]))))
+
+        # Crop.
+        border = max(int(np.rint(qsize * 0.1)), 3)
+        crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
+                int(np.ceil(max(quad[:, 1]))))
+        crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
+                min(crop[3] + border, img.size[1]))
+        if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
+            # img = img.crop(crop)
+            quad -= crop[0:2]
+
+        # Pad.
+        pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
+               int(np.ceil(max(quad[:, 1]))))
+        pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
+               max(pad[3] - img.size[1] + border, 0))
+        # if enable_padding and max(pad) > border - 4:
+        #     pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
+        #     img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
+        #     h, w, _ = img.shape
+        #     y, x, _ = np.ogrid[:h, :w, :1]
+        #     mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
+        #                       1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
+        #     blur = qsize * 0.02
+        #     img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
+        #     img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
+        #     img = Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
+        #     quad += pad[:2]
+
+        # Transform.
+        quad = (quad + 0.5).flatten()
+        lx = max(min(quad[0], quad[2]), 0)
+        ly = max(min(quad[1], quad[7]), 0)
+        rx = min(max(quad[4], quad[6]), img.size[0])
+        ry = min(max(quad[3], quad[5]), img.size[0])
+        # img = img.transform((transform_size, transform_size), Image.QUAD, (quad + 0.5).flatten(),
+        #                     Image.BILINEAR)
+        # if output_size < transform_size:
+        #     img = img.resize((output_size, output_size), Image.ANTIALIAS)
+
+        # Save aligned image.
+        return rsize, crop, [lx, ly, rx, ry]
+    
+    def crop(self, img_np_list, still=False, xsize=512):    # first frame for all video
+        img_np = img_np_list[0]
+        lm = self.get_landmark(img_np)
+        if lm is None:
+            raise ValueError('cannot detect facial landmarks in the source image')
+        rsize, crop, quad = self.align_face(img=Image.fromarray(img_np), lm=lm, output_size=xsize)
+        clx, cly, crx, cry = crop
+        lx, ly, rx, ry = quad
+        lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)
+        for _i in range(len(img_np_list)):
+            _inp = img_np_list[_i]
+            _inp = cv2.resize(_inp, (rsize[0], rsize[1]))
+            _inp = _inp[cly:cry, clx:crx]
+            # cv2.imwrite('test1.jpg', _inp)
+            if not still:
+                _inp = _inp[ly:ry, lx:rx]
+            # cv2.imwrite('test2.jpg', _inp)
+            img_np_list[_i] = _inp
+        return img_np_list, crop, quad
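+
+
+# Illustrative usage sketch (the dlib landmark model is the one loaded from ckpts/ in
+# inference.py; see the download link in the note at the top of this file):
+#   croper = Croper('ckpts/shape_predictor_68_face_landmarks.dat')
+#   frames, crop, quad = croper.crop([np.array(Image.open('portrait.png').convert('RGB'))], xsize=512)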
diff --git a/dataset_process/norm.npz b/dataset_process/norm.npz
new file mode 100644
index 0000000000000000000000000000000000000000..09d73394389ba43100ca7e4355130aac7fe4a4f4
--- /dev/null
+++ b/dataset_process/norm.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9422e503e75df9d1bd455d8e0f9f5e2826b12956cdedbb5566097c0151bddafb
+size 5580
diff --git a/example/audio_driven/WDA_BenCardin1_000.wav b/example/audio_driven/WDA_BenCardin1_000.wav
new file mode 100644
index 0000000000000000000000000000000000000000..2fbd5be4cda05e7a52d69c89f656d82ae46e627a
--- /dev/null
+++ b/example/audio_driven/WDA_BenCardin1_000.wav
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46cba9e5aa26d94ce13ff5eeef3e40e8086337e07f6c3d553497ea1b9f8a5e23
+size 512774
diff --git a/example/audio_driven/WRA_MarkwayneMullin_000.wav b/example/audio_driven/WRA_MarkwayneMullin_000.wav
new file mode 100644
index 0000000000000000000000000000000000000000..6ddf75087dc80fdebc113ad174788d941acc1c89
--- /dev/null
+++ b/example/audio_driven/WRA_MarkwayneMullin_000.wav
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:791104d8663ca5af3d11dde1c042cf3f42957c6356c044d6bd8b8ee311442fc5
+size 512774
diff --git a/example/audio_driven/WRA_MikeJohanns1_000.wav b/example/audio_driven/WRA_MikeJohanns1_000.wav
new file mode 100644
index 0000000000000000000000000000000000000000..ee2173db1fbedad055615ecde7ca287f6c8a9246
--- /dev/null
+++ b/example/audio_driven/WRA_MikeJohanns1_000.wav
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9fc02acff776d4bd59bad02fbb773d1201948616e279ffc853b83753f4f4f2b
+size 512774
diff --git a/example/source_image/WDA_BenCardin1_000.png b/example/source_image/WDA_BenCardin1_000.png
new file mode 100644
index 0000000000000000000000000000000000000000..dcb91cbce53e5ef7bb97f505b9f774f7513b9d32
Binary files /dev/null and b/example/source_image/WDA_BenCardin1_000.png differ
diff --git a/example/source_image/WRA_MarkwayneMullin_000.png b/example/source_image/WRA_MarkwayneMullin_000.png
new file mode 100644
index 0000000000000000000000000000000000000000..803f2dfb687cb099c4c7993b8f9f0e5cfbfcfcf8
Binary files /dev/null and b/example/source_image/WRA_MarkwayneMullin_000.png differ
diff --git a/example/source_image/WRA_MikeJohanns1_000.png b/example/source_image/WRA_MikeJohanns1_000.png
new file mode 100644
index 0000000000000000000000000000000000000000..0970330745b3eee97ea8d8cce78e2da1261353fe
Binary files /dev/null and b/example/source_image/WRA_MikeJohanns1_000.png differ
diff --git a/inference.py b/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..b995b9889585147d44a57551f3cd9b506aa6592f
--- /dev/null
+++ b/inference.py
@@ -0,0 +1,383 @@
+# -*- coding: UTF-8 -*-
+import os
+os.environ['HYDRA_FULL_ERROR']='1'
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+import argparse
+import shutil
+import uuid
+import os
+import numpy as np
+from tqdm import tqdm
+import cv2
+from rich.progress import track
+import tyro
+
+
+from PIL import Image
+import time
+import torch
+import torch.nn.functional as F
+from torch import nn
+import imageio
+from pydub import AudioSegment
+from pykalman import KalmanFilter
+
+
+from src.config.argument_config import ArgumentConfig
+from src.config.inference_config import InferenceConfig
+from src.config.crop_config import CropConfig
+from src.live_portrait_pipeline import LivePortraitPipeline
+from src.utils.camera import get_rotation_matrix
+from dataset_process import audio
+
+from dataset_process.croper import Croper
+
+
+def parse_audio_length(audio_length, sr, fps):
+    bit_per_frames = sr / fps
+    num_frames = int(audio_length / bit_per_frames)
+    audio_length = int(num_frames * bit_per_frames)
+    return audio_length, num_frames
+
+def crop_pad_audio(wav, audio_length):
+    if len(wav) > audio_length:
+        wav = wav[:audio_length]
+    elif len(wav) < audio_length:
+        wav = np.pad(wav, [0, audio_length - len(wav)], mode='constant', constant_values=0)
+    return wav
+
+class Conv2d(nn.Module):
+    def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, use_act=True, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.conv_block = nn.Sequential(
+            nn.Conv2d(cin, cout, kernel_size, stride, padding),
+            nn.BatchNorm2d(cout)
+        )
+        self.act = nn.ReLU()
+        self.residual = residual
+        self.use_act = use_act
+
+    def forward(self, x):
+        out = self.conv_block(x)
+        if self.residual:
+            out += x
+
+        if self.use_act:
+            return self.act(out)
+        else:
+            return out
+
+class AudioEncoder(nn.Module):
+    def __init__(self, wav2lip_checkpoint, device):
+        super(AudioEncoder, self).__init__()
+
+        self.audio_encoder = nn.Sequential(
+            Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
+            Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),
+            Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),
+
+            Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1),
+            Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),
+            Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),
+
+            Conv2d(64, 128, kernel_size=3, stride=3, padding=1),
+            Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),
+            Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),
+
+            Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1),
+            Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True),
+
+            Conv2d(256, 512, kernel_size=3, stride=1, padding=0),
+            Conv2d(512, 512, kernel_size=1, stride=1, padding=0),)
+
+        #### load the pre-trained audio_encoder
+        wav2lip_state_dict = torch.load(wav2lip_checkpoint, map_location=torch.device(device))['state_dict']
+        state_dict = self.audio_encoder.state_dict()
+
+        for k,v in wav2lip_state_dict.items():
+            if 'audio_encoder' in k:
+                state_dict[k.replace('module.audio_encoder.', '')] = v
+        self.audio_encoder.load_state_dict(state_dict)
+
+    def forward(self, audio_sequences):
+        B = audio_sequences.size(0)
+
+        audio_sequences = torch.cat([audio_sequences[:, i] for i in range(audio_sequences.size(1))], dim=0)
+
+        audio_embedding = self.audio_encoder(audio_sequences) # B, 512, 1, 1
+        dim = audio_embedding.shape[1]
+        audio_embedding = audio_embedding.reshape((B, -1, dim, 1, 1))
+
+        return audio_embedding.squeeze(-1).squeeze(-1) #B seq_len+1 512
+
+def partial_fields(target_class, kwargs):
+    return target_class(**{k: v for k, v in kwargs.items() if hasattr(target_class, k)})
+
+def dct2device(dct: dict, device):
+    for key in dct:
+        dct[key] = torch.tensor(dct[key]).to(device)
+    return dct
+
+def save_video_with_watermark(video, audio, save_path):
+    temp_file = str(uuid.uuid4())+'.mp4'
+    cmd = r'ffmpeg -y -i "%s" -i "%s" -vcodec copy "%s"' % (video, audio, temp_file)
+    os.system(cmd)
+    shutil.move(temp_file, save_path)
+
+class Inferencer(object):
+    def __init__(self):
+        st=time.time()
+        print('#'*25+'Start initialization'+'#'*25)
+        self.device = 'cuda'
+
+        from model import get_model
+        self.point_diffusion = get_model()
+        ckpt = torch.load('ckpts/KDTalker.pth')
+
+        self.point_diffusion.load_state_dict(ckpt['model'])
+        self.point_diffusion.eval()
+        self.point_diffusion.to(self.device)
+
+        lm_croper_checkpoint = 'ckpts/shape_predictor_68_face_landmarks.dat'
+        self.croper = Croper(lm_croper_checkpoint)
+
+        self.norm_info = dict(np.load('dataset_process/norm.npz'))
+
+        wav2lip_checkpoint = 'ckpts/wav2lip.pth'
+        self.wav2lip_model = AudioEncoder(wav2lip_checkpoint, 'cuda')
+        self.wav2lip_model.cuda()
+        self.wav2lip_model.eval()
+
+        # set tyro theme
+        tyro.extras.set_accent_color("bright_cyan")
+        args = tyro.cli(ArgumentConfig)
+
+        # specify configs for inference
+        self.inf_cfg = partial_fields(InferenceConfig, args.__dict__)  # use attribute of args to initial InferenceConfig
+        self.crop_cfg = partial_fields(CropConfig, args.__dict__)  # use attribute of args to initial CropConfig
+
+        self.live_portrait_pipeline = LivePortraitPipeline(inference_cfg=self.inf_cfg, crop_cfg=self.crop_cfg)
+
+    def _norm(self, data_dict):
+        for k in data_dict.keys():
+            if k in ['yaw', 'pitch', 'roll', 't', 'exp', 'scale', 'kp', ]:
+                v=data_dict[k]
+                data_dict[k] = (v - self.norm_info[k+'_mean'])/self.norm_info[k+'_std']
+        return data_dict
+
+    def _denorm(self, data_dict):
+        for k in data_dict.keys():
+            if k in ['yaw', 'pitch', 'roll', 't', 'exp', 'scale', 'kp']:
+                v=data_dict[k]
+                data_dict[k] = v * self.norm_info[k+'_std'] + self.norm_info[k+'_mean']
+        return data_dict
+
+    def output_to_dict(self, data):
+        output = {}
+        output['scale'] = data[:, 0]
+        output['yaw'] = data[:, 1, None]
+        output['pitch'] = data[:, 2, None]
+        output['roll'] = data[:, 3, None]
+        output['t'] = data[:, 4:7]
+        output['exp'] = data[:, 7:]
+        return output
+
+    def extract_mel_from_audio(self, audio_file_path):
+        syncnet_mel_step_size = 16
+        fps = 25
+        wav = audio.load_wav(audio_file_path, 16000)
+        wav_length, num_frames = parse_audio_length(len(wav), 16000, 25)
+        wav = crop_pad_audio(wav, wav_length)
+        orig_mel = audio.melspectrogram(wav).T
+        spec = orig_mel.copy()
+        indiv_mels = []
+
+        for i in tqdm(range(num_frames), 'mel:'):
+            start_frame_num = i - 2
+            start_idx = int(80. * (start_frame_num / float(fps)))
+            end_idx = start_idx + syncnet_mel_step_size
+            seq = list(range(start_idx, end_idx))
+            seq = [min(max(item, 0), orig_mel.shape[0] - 1) for item in seq]
+            m = spec[seq, :]
+            indiv_mels.append(m.T)
+        indiv_mels = np.asarray(indiv_mels)  # T 80 16
+        return indiv_mels
+
+    def extract_wav2lip_from_audio(self, audio_file_path):
+        asd_mel = self.extract_mel_from_audio(audio_file_path)
+        asd_mel = torch.FloatTensor(asd_mel).cuda().unsqueeze(0).unsqueeze(2)
+        with torch.no_grad():
+            hidden = self.wav2lip_model(asd_mel)
+        return hidden[0].cpu().detach().numpy()
+
+    def headpose_pred_to_degree(self, pred):
+        device = pred.device
+        idx_tensor = [idx for idx in range(66)]
+        idx_tensor = torch.FloatTensor(idx_tensor).to(device)
+        pred = F.softmax(pred, dim=1)
+        degree = torch.sum(pred * idx_tensor, 1) * 3 - 99
+        return degree
+
+    @torch.no_grad()
+    def generate_with_audio_img(self, image_path, audio_path, save_path):
+        image = np.array(Image.open(image_path).convert('RGB'))
+        cropped_image, crop, quad = self.croper.crop([image], still=False, xsize=512)
+        input_image = cv2.resize(cropped_image[0], (256, 256))
+
+        I_s = torch.FloatTensor(input_image.transpose((2, 0, 1))).unsqueeze(0).cuda() / 255
+
+        x_s_info = self.live_portrait_pipeline.live_portrait_wrapper.get_kp_info(I_s)
+        x_c_s = x_s_info['kp'].reshape(1, 21, -1)
+        R_s = get_rotation_matrix(x_s_info['pitch'], x_s_info['yaw'], x_s_info['roll'])
+        f_s = self.live_portrait_pipeline.live_portrait_wrapper.extract_feature_3d(I_s)
+        x_s = self.live_portrait_pipeline.live_portrait_wrapper.transform_keypoint(x_s_info)
+
+        ######## process driving info ########
+        kp_info = {}
+        for k in x_s_info.keys():
+            kp_info[k] = x_s_info[k].cpu().numpy()
+
+        kp_info = self._norm(kp_info)
+
+        ori_kp = torch.cat([torch.zeros([1, 7]), torch.Tensor(kp_info['kp'])], -1).cuda()
+
+        input_x = np.concatenate([kp_info[k] for k in ['scale', 'yaw', 'pitch', 'roll', 't', 'exp']], 1)
+        input_x = np.expand_dims(input_x, -1)
+        input_x = np.expand_dims(input_x, 0)
+        input_x = np.concatenate([input_x, input_x, input_x], -1)
+
+        aud_feat = self.extract_wav2lip_from_audio(audio_path)
+
+        sample_frame = 64
+        padding_size = (sample_frame - aud_feat.shape[0] % sample_frame) % sample_frame
+
+        if padding_size > 0:
+            aud_feat = np.concatenate((aud_feat, aud_feat[:padding_size, :]), axis=0)
+        else:
+            aud_feat = aud_feat
+
+        outputs = [input_x]
+
+        sample_frame = 64
+        for i in range(0, aud_feat.shape[0] - 1, sample_frame):
+            input_mel = torch.Tensor(aud_feat[i: i + sample_frame]).unsqueeze(0).cuda()
+            kp0 = torch.Tensor(outputs[-1])[:, -1].cuda()
+            pred_kp = self.point_diffusion.forward_sample(70, ref_kps=kp0, ori_kps=ori_kp, aud_feat=input_mel,
+                                                          scheduler='ddim', num_inference_steps=50)
+            outputs.append(pred_kp.cpu().numpy())
+
+        outputs = np.mean(np.concatenate(outputs, 1)[0, 1:aud_feat.shape[0] - padding_size + 1], -1)
+        output_dict = self.output_to_dict(outputs)
+        output_dict = self._denorm(output_dict)
+
+        num_frame = output_dict['yaw'].shape[0]
+        x_d_info = {}
+        for key in output_dict:
+            x_d_info[key] = torch.tensor(output_dict[key]).cuda()
+
+        # smooth
+        def smooth(sequence, n_dim_state=1):
+            kf = KalmanFilter(initial_state_mean=sequence[0],
+                              transition_covariance=0.05 * np.eye(n_dim_state),
+                              observation_covariance=0.001 * np.eye(n_dim_state))
+            state_means, _ = kf.smooth(sequence)
+            return state_means
+
+        yaw_data = x_d_info['yaw'].cpu().numpy()
+        pitch_data = x_d_info['pitch'].cpu().numpy()
+        roll_data = x_d_info['roll'].cpu().numpy()
+        t_data = x_d_info['t'].cpu().numpy()
+        exp_data = x_d_info['exp'].cpu().numpy()
+
+        smoothed_pitch = smooth(pitch_data, n_dim_state=1)
+        smoothed_yaw = smooth(yaw_data, n_dim_state=1)
+        smoothed_roll = smooth(roll_data, n_dim_state=1)
+        smoothed_t = smooth(t_data, n_dim_state=3)
+        smoothed_exp = smooth(exp_data, n_dim_state=63)
+
+        x_d_info['pitch'] = torch.Tensor(smoothed_pitch).cuda()
+        x_d_info['yaw'] = torch.Tensor(smoothed_yaw).cuda()
+        x_d_info['roll'] = torch.Tensor(smoothed_roll).cuda()
+        x_d_info['t'] = torch.Tensor(smoothed_t).cuda()
+        x_d_info['exp'] = torch.Tensor(smoothed_exp).cuda()
+
+        template_dct = {'motion': [], 'c_d_eyes_lst': [], 'c_d_lip_lst': []}
+        for i in track(range(num_frame), description='Making motion templates...', total=num_frame):
+            x_d_i_info = x_d_info
+            R_d_i = get_rotation_matrix(x_d_i_info['pitch'][i], x_d_i_info['yaw'][i], x_d_i_info['roll'][i])
+
+            item_dct = {
+                'scale': x_d_i_info['scale'][i].cpu().numpy().astype(np.float32),
+                'R_d': R_d_i.cpu().numpy().astype(np.float32),
+                'exp': x_d_i_info['exp'][i].reshape(1, 21, -1).cpu().numpy().astype(np.float32),
+                't': x_d_i_info['t'][i].cpu().numpy().astype(np.float32),
+            }
+
+            template_dct['motion'].append(item_dct)
+
+        I_p_lst = []
+        R_d_0, x_d_0_info = None, None
+
+        for i in track(range(num_frame), description='🚀Animating...', total=num_frame):
+            x_d_i_info = template_dct['motion'][i]
+            for key in x_d_i_info:
+                x_d_i_info[key] = torch.tensor(x_d_i_info[key]).cuda()
+            R_d_i = x_d_i_info['R_d']
+
+            if i == 0:
+                R_d_0 = R_d_i
+                x_d_0_info = x_d_i_info
+
+            if self.inf_cfg.flag_relative_motion:
+                R_new = (R_d_i @ R_d_0.permute(0, 2, 1)) @ R_s
+                delta_new = x_s_info['exp'].reshape(1, 21, -1) + (x_d_i_info['exp'] - x_d_0_info['exp'])
+                scale_new = x_s_info['scale'] * (x_d_i_info['scale'] / x_d_0_info['scale'])
+                t_new = x_s_info['t'] + (x_d_i_info['t'] - x_d_0_info['t'])
+            else:
+                R_new = R_d_i
+                delta_new = x_d_i_info['exp']
+                scale_new = x_s_info['scale']
+                t_new = x_d_i_info['t']
+
+            t_new[..., 2].fill_(0)
+            x_d_i_new = scale_new * (x_c_s @ R_new + delta_new) + t_new
+
+            out = self.live_portrait_pipeline.live_portrait_wrapper.warp_decode(f_s, x_s, x_d_i_new)
+            I_p_i = self.live_portrait_pipeline.live_portrait_wrapper.parse_output(out['out'])[0]
+            I_p_lst.append(I_p_i)
+
+        video_name = save_path.split('/')[-1]
+        video_save_dir = os.path.dirname(save_path)
+        path = os.path.join(video_save_dir, 'temp_' + video_name)
+
+        imageio.mimsave(path, I_p_lst, fps=float(25))
+
+        audio_name = audio_path.split('/')[-1]
+        new_audio_path = os.path.join(video_save_dir, audio_name)
+        start_time = 0
+        sound = AudioSegment.from_file(audio_path)
+        end_time = start_time + num_frame * 1 / 25 * 1000
+        word1 = sound.set_frame_rate(16000)
+        word = word1[start_time:end_time]
+        word.export(new_audio_path, format="wav")
+
+        save_video_with_watermark(path, new_audio_path, save_path)
+        print(f'The generated video is named {video_save_dir}/{video_name}')
+
+        os.remove(path)
+        os.remove(new_audio_path)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-source_image", type=str, default="example/source_image/WDA_BenCardin1_000.png",
+                        help="source image")
+    parser.add_argument("-driven_audio", type=str, default="example/driven_audio/WDA_BenCardin1_000.wav",
+                        help="driving audio")
+    parser.add_argument("-output", type=str, default="results/output.mp4", help="output video file name", )
+
+    args = parser.parse_args()
+
+    Infer = Inferencer()
+    Infer.generate_with_audio_img(args.source_image, args.driven_audio, args.output)
\ No newline at end of file
diff --git a/model/__init__.py b/model/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1775c3f81c2aa1c64e3664b4f05e19380c89975
--- /dev/null
+++ b/model/__init__.py
@@ -0,0 +1,6 @@
+from .model import ConditionalPointCloudDiffusionModel
+
+def get_model():
+    model = ConditionalPointCloudDiffusionModel()
+    return model
+
diff --git a/model/model.py b/model/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..9be3352de0aada9056102dee8cba7901faf398f1
--- /dev/null
+++ b/model/model.py
@@ -0,0 +1,230 @@
+import inspect
+from typing import Optional
+from einops import rearrange
+import torch
+import torch.nn.functional as F
+from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
+from diffusers.schedulers.scheduling_ddim import DDIMScheduler
+from diffusers.schedulers.scheduling_pndm import PNDMScheduler
+
+from torch import Tensor
+from tqdm import tqdm
+from diffusers import ModelMixin
+from .model_utils import get_custom_betas
+from .point_model import PointModel
+import copy
+import torch.nn as nn
+
+class TemporalSmoothnessLoss(nn.Module):
+    def __init__(self):
+        super(TemporalSmoothnessLoss, self).__init__()
+
+    def forward(self, input):
+        # Calculate the difference between consecutive frames
+        diff = input[:, 1:, :] - input[:, :-1, :]
+
+        # Compute the L2 norm (squared) of the differences
+        smoothness_loss = torch.mean(torch.sum(diff ** 2, dim=2))
+
+        return smoothness_loss
+
+class ConditionalPointCloudDiffusionModel(ModelMixin):
+    def __init__(
+        self,
+        beta_start: float = 1e-5,
+        beta_end: float = 8e-3,
+        beta_schedule: str = 'linear',
+        point_cloud_model: str = 'simple',
+        point_cloud_model_embed_dim: int = 64,
+    ):
+        super().__init__()
+        self.in_channels = 70  # 1 (scale) + 3 (yaw/pitch/roll) + 3 (translation) + 63 (expression)
+        self.out_channels = 70
+
+        # Checks
+        # Create diffusion model schedulers which define the sampling timesteps
+        scheduler_kwargs = {}
+        if beta_schedule == 'custom':
+            scheduler_kwargs.update(dict(trained_betas=get_custom_betas(beta_start=beta_start, beta_end=beta_end)))
+        else:
+            scheduler_kwargs.update(dict(beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule))
+        self.schedulers_map = {
+            'ddpm': DDPMScheduler(**scheduler_kwargs, clip_sample=False),
+            'ddim': DDIMScheduler(**scheduler_kwargs, clip_sample=False), 
+            'pndm': PNDMScheduler(**scheduler_kwargs), 
+        }
+        self.scheduler = self.schedulers_map['ddim']  # this can be changed for inference
+
+        # Create point cloud model for processing point cloud at each diffusion step
+        self.point_model = PointModel(
+            model_type=point_cloud_model,
+            embed_dim=point_cloud_model_embed_dim,
+            in_channels=self.in_channels,
+            out_channels=self.out_channels,
+        )
+
+    def forward_train(
+        self,
+        pc: Optional[Tensor],
+        ref_kps: Optional[Tensor],
+        ori_kps: Optional[Tensor],
+        aud_feat: Optional[Tensor],
+        mode: str = 'train',
+        return_intermediate_steps: bool = False
+    ):
+
+        # Normalize colors and convert to tensor
+        x_0 = pc
+        B, Nf, Np, D = x_0.shape  # batch, num frames, num points, 3
+
+        x_0 = x_0[:, :, :, 0]  # batch, num frames, 70
+
+        # Sample random noise
+        noise = torch.randn_like(x_0)
+
+        # Sample random timesteps for each point_cloud
+        timestep = torch.randint(0, self.scheduler.num_train_timesteps, (B,),
+            device=self.device, dtype=torch.long)
+
+        # Add noise to points
+        x_t = self.scheduler.add_noise(x_0, noise, timestep)
+
+        # Conditioning
+        ref_kps = ref_kps[:, :, 0]
+
+        x_t_input = torch.cat([ori_kps.unsqueeze(1), ref_kps.unsqueeze(1), x_t], dim=1)
+
+        aud_feat = torch.cat([torch.zeros(B, 2, 512).cuda(), aud_feat], 1)
+
+        # Augmentation for audio feature
+        if mode == 'train':
+            if torch.rand(1) > 0.3:
+                mean = torch.mean(aud_feat)
+                std = torch.std(aud_feat)
+                sample = torch.normal(mean=torch.full(aud_feat.shape, mean), std=torch.full(aud_feat.shape, std)).cuda()
+                aud_feat = sample + aud_feat
+            else:
+                pass
+        else:
+            pass
+
+        # Forward
+        noise_pred = self.point_model(x_t_input, timestep, context=aud_feat)    #torch.cat([mel_feat,style_embed],-1))
+        noise_pred = noise_pred[:, 2:]
+
+        # Check
+        if not noise_pred.shape == noise.shape:
+            raise ValueError(f'{noise_pred.shape=} and {noise.shape=}')
+
+        loss = F.mse_loss(noise_pred, noise)
+
+        loss_pose = F.mse_loss(noise_pred[:, :, 1:7], noise[:, :, 1:7])
+        loss_exp = F.mse_loss(noise_pred[:, :, 7:], noise[:, :, 7:])
+
+
+        # Whether to return intermediate steps
+        if return_intermediate_steps:
+            return loss, (x_0, x_t, noise, noise_pred)
+
+        return loss, loss_exp, loss_pose
+
+    @torch.no_grad()
+    def forward_sample(
+        self,
+        num_points: int,
+        ref_kps: Optional[Tensor],
+        ori_kps: Optional[Tensor],
+        aud_feat: Optional[Tensor],
+        # Optional overrides
+        scheduler: Optional[str] = 'ddpm',
+        # Inference parameters
+        num_inference_steps: Optional[int] = 50,
+        eta: Optional[float] = 0.0,  # for DDIM
+        # Whether to return all the intermediate steps in generation
+        return_sample_every_n_steps: int = -1,
+        # Whether to disable tqdm
+        disable_tqdm: bool = False,
+    ):
+
+        # Get scheduler from mapping, or use self.scheduler if None
+        scheduler = self.scheduler if scheduler is None else self.schedulers_map[scheduler]
+
+        # Get the size of the noise
+        Np = num_points
+        Nf = aud_feat.size(1)
+        B = 1
+        D = 3
+        device = self.device
+
+        # Sample noise
+        x_t = torch.randn(B, Nf, Np, D, device=device)
+
+        x_t = x_t[:, :, :, 0]
+
+        ref_kps = ref_kps[:,:,0]
+
+        # Set timesteps
+        accepts_offset = "offset" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        extra_set_kwargs = {"offset": 1} if accepts_offset else {}
+        scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
+
+        accepts_eta = "eta" in set(inspect.signature(scheduler.step).parameters.keys())
+        extra_step_kwargs = {"eta": eta} if accepts_eta else {}
+
+        # Loop over timesteps
+        all_outputs = []
+        return_all_outputs = (return_sample_every_n_steps > 0)
+        progress_bar = tqdm(scheduler.timesteps.to(device), desc=f'Sampling ({x_t.shape})', disable=disable_tqdm)
+
+        aud_feat = torch.cat([torch.zeros(B, 2, 512).cuda(), aud_feat], 1)
+
+        for i, t in enumerate(progress_bar):
+            x_t_input = torch.cat([ori_kps.unsqueeze(1).detach(),ref_kps.unsqueeze(1).detach(), x_t], dim=1)
+
+            # Forward
+            noise_pred = self.point_model(x_t_input, t.reshape(1).expand(B), context=aud_feat)[:, 2:]
+
+            # Step
+            x_t = scheduler.step(noise_pred, t, x_t, **extra_step_kwargs).prev_sample
+
+            # Append to output list if desired
+            if (return_all_outputs and (i % return_sample_every_n_steps == 0 or i == len(scheduler.timesteps) - 1)):
+                all_outputs.append(x_t)
+
+        # Convert output back into a point cloud, undoing normalization and scaling
+        output = x_t
+        output = torch.stack([output,output,output],-1)
+        if return_all_outputs:
+            all_outputs = torch.stack(all_outputs, dim=1)  # (B, sample_steps, N, D)
+        return (output, all_outputs) if return_all_outputs else output
+
+    def forward(self, batch: dict, mode: str = 'train', **kwargs):
+        """A wrapper around the forward method for training and inference"""
+
+        if mode == 'train':
+            return self.forward_train(
+                pc=batch['sequence_keypoints'],
+                ref_kps=batch['ref_keypoint'],
+                ori_kps=batch['ori_keypoint'],
+                aud_feat=batch['aud_feat'],
+                mode='train',
+                **kwargs)
+        elif mode == 'val':
+            return self.forward_train(
+                pc=batch['sequence_keypoints'],
+                ref_kps=batch['ref_keypoint'],
+                ori_kps=batch['ori_keypoint'],
+                aud_feat=batch['aud_feat'],
+                mode='val',
+                **kwargs)
+        elif mode == 'sample':
+            num_points = 70
+            return self.forward_sample(
+                num_points=num_points,
+                ref_kps=batch['ref_keypoint'],
+                ori_kps=batch['ori_keypoint'],
+                aud_feat=batch['aud_feat'],
+                **kwargs) 
+        else:
+            raise NotImplementedError()
\ No newline at end of file
diff --git a/model/model_utils.py b/model/model_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e1e0c8ac665bc49f91c62ca2004243de6871a70
--- /dev/null
+++ b/model/model_utils.py
@@ -0,0 +1,33 @@
+import cv2
+import numpy as np
+import torch
+import torch.nn as nn
+
+def set_requires_grad(module: nn.Module, requires_grad: bool):
+    for p in module.parameters():
+        p.requires_grad_(requires_grad)
+
+
+def compute_distance_transform(mask: torch.Tensor):
+    image_size = mask.shape[-1]
+    distance_transform = torch.stack([
+        torch.from_numpy(cv2.distanceTransform(
+            (1 - m), distanceType=cv2.DIST_L2, maskSize=cv2.DIST_MASK_3
+        ) / (image_size / 2))
+        for m in mask.squeeze(1).detach().cpu().numpy().astype(np.uint8)
+    ]).unsqueeze(1).clip(0, 1).to(mask.device)
+    return distance_transform
+
+
+def default(x, d):
+    return d if x is None else x
+
+def get_custom_betas(beta_start: float, beta_end: float, warmup_frac: float = 0.3, num_train_timesteps: int = 1000):
+    """Custom beta schedule"""
+    betas = np.linspace(beta_start, beta_end, num_train_timesteps, dtype=np.float32)
+    warmup_time = int(num_train_timesteps * warmup_frac)
+    warmup_steps = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64)
+    warmup_time = min(warmup_time, num_train_timesteps)
+    betas[:warmup_time] = warmup_steps[:warmup_time]
+    return betas
diff --git a/model/point_model.py b/model/point_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..36526f9ca52e4add43fae6ae1b59e3340501ae23
--- /dev/null
+++ b/model/point_model.py
@@ -0,0 +1,38 @@
+import torch
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers import ModelMixin
+from torch import Tensor
+
+from .temporaltrans.temptrans import SimpleTransModel
+
+class PointModel(ModelMixin, ConfigMixin):
+    @register_to_config
+    def __init__(
+        self,
+        model_type: str = 'pvcnn',
+        in_channels: int = 3,
+        out_channels: int = 3,
+        embed_dim: int = 64,
+        dropout: float = 0.1,
+        width_multiplier: int = 1,
+        voxel_resolution_multiplier: int = 1,
+    ):
+        super().__init__()
+        self.model_type = model_type
+        if self.model_type == 'simple':
+            self.autocast_context = torch.autocast('cuda', dtype=torch.float32)
+            self.model = SimpleTransModel(
+                embed_dim=embed_dim,
+                num_classes=out_channels,
+                extra_feature_channels=(in_channels - 3),
+            )
+            self.model.output_projection.bias.data.normal_(0, 1e-6)
+            self.model.output_projection.weight.data.normal_(0, 1e-6)
+        else:
+            raise NotImplementedError()
+
+    def forward(self, inputs: Tensor, t: Tensor, context=None) -> Tensor:
+        """ Receives input of shape (B, N, in_channels) and returns output
+            of shape (B, N, out_channels) """
+        with self.autocast_context:
+            return self.model(inputs, t, context)
diff --git a/model/temporaltrans/temptrans.py b/model/temporaltrans/temptrans.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ab5798dadfe12b7c40d14ea40f8d881d1728beb
--- /dev/null
+++ b/model/temporaltrans/temptrans.py
@@ -0,0 +1,267 @@
+import torch
+import torch.nn.functional as F
+from torch import nn
+from einops import rearrange
+from .transformer_utils import BaseTemperalPointModel
+import math
+from einops_exts import check_shape, rearrange_many
+from functools import partial
+from rotary_embedding_torch import RotaryEmbedding
+
+def exists(x):
+    return x is not None
+
+class SinusoidalPosEmb(nn.Module):
+    def __init__(self, dim):
+        super().__init__()
+        self.dim = dim
+
+    def forward(self, x):
+        device = x.device
+        half_dim = self.dim // 2
+        emb = math.log(10000) / (half_dim - 1)
+        emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
+        emb = x[:, None] * emb[None, :]
+        emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
+        return emb
+
+
+class RelativePositionBias(nn.Module):
+    def __init__(
+        self,
+        heads = 8,
+        num_buckets = 32,
+        max_distance = 128
+    ):
+        super().__init__()
+        self.num_buckets = num_buckets
+        self.max_distance = max_distance
+        self.relative_attention_bias = nn.Embedding(num_buckets, heads)
+
+    @staticmethod
+    def _relative_position_bucket(relative_position, num_buckets = 32, max_distance = 128):
+        ret = 0
+        n = -relative_position
+
+        num_buckets //= 2
+        ret += (n < 0).long() * num_buckets
+        n = torch.abs(n)
+
+        max_exact = num_buckets // 2
+        is_small = n < max_exact
+
+        val_if_large = max_exact + (
+            torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
+        ).long()
+        val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
+
+        ret += torch.where(is_small, n, val_if_large)
+        return ret
+
+    def forward(self, n, device):
+        q_pos = torch.arange(n, dtype = torch.long, device = device)
+        k_pos = torch.arange(n, dtype = torch.long, device = device)
+        rel_pos = rearrange(k_pos, 'j -> 1 j') - rearrange(q_pos, 'i -> i 1')
+        rp_bucket = self._relative_position_bucket(rel_pos, num_buckets = self.num_buckets, max_distance = self.max_distance)
+        values = self.relative_attention_bias(rp_bucket)
+        return rearrange(values, 'i j h -> h i j')
+
+
+class Residual(nn.Module):
+    def __init__(self, fn):
+        super().__init__()
+        self.fn = fn
+
+    def forward(self, x, *args, **kwargs):
+        return self.fn(x, *args, **kwargs) + x
+
+
+class LayerNorm(nn.Module):
+    def __init__(self, dim, eps = 1e-5):
+        super().__init__()
+        self.eps = eps
+        self.gamma = nn.Parameter(torch.ones(1, 1, dim))
+        self.beta = nn.Parameter(torch.zeros(1, 1, dim))
+
+    def forward(self, x):
+        var = torch.var(x, dim = -1, unbiased = False, keepdim = True)
+        mean = torch.mean(x, dim = -1, keepdim = True)
+        return (x - mean) / (var + self.eps).sqrt() * self.gamma + self.beta
+
+
+class PreNorm(nn.Module):
+    def __init__(self, dim, fn):
+        super().__init__()
+        self.fn = fn
+        self.norm = LayerNorm(dim)
+
+    def forward(self, x, **kwargs):
+        x = self.norm(x)
+        return self.fn(x, **kwargs)
+
+
+class EinopsToAndFrom(nn.Module):
+    def __init__(self, from_einops, to_einops, fn):
+        super().__init__()
+        self.from_einops = from_einops
+        self.to_einops = to_einops
+        self.fn = fn
+
+    def forward(self, x, **kwargs):
+        shape = x.shape
+        reconstitute_kwargs = dict(tuple(zip(self.from_einops.split(' '), shape)))
+        x = rearrange(x, f'{self.from_einops} -> {self.to_einops}')
+        x = self.fn(x, **kwargs)
+        x = rearrange(x, f'{self.to_einops} -> {self.from_einops}', **reconstitute_kwargs)
+        return x
+
+
+class Attention(nn.Module):
+    def __init__(
+            self, dim, heads=4, attn_head_dim=None, casual_attn=False,rotary_emb = None):
+        super().__init__()
+        self.num_heads = heads
+        head_dim = dim // heads
+        self.casual_attn = casual_attn
+
+        if attn_head_dim is not None:
+            head_dim = attn_head_dim
+
+        all_head_dim = head_dim * self.num_heads
+        self.scale = head_dim ** -0.5
+        self.to_qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
+        self.proj = nn.Linear(all_head_dim, dim)
+        self.rotary_emb = rotary_emb
+
+    def forward(self, x, pos_bias = None):
+        N, device = x.shape[-2], x.device
+        qkv = self.to_qkv(x).chunk(3, dim = -1)
+
+        q, k, v = rearrange_many(qkv, '... n (h d) -> ... h n d', h=self.num_heads)
+
+        q = q * self.scale
+
+        if exists(self.rotary_emb):
+            q = self.rotary_emb.rotate_queries_or_keys(q)
+            k = self.rotary_emb.rotate_queries_or_keys(k)
+
+        sim = torch.einsum('... h i d, ... h j d -> ... h i j', q, k)
+
+        if exists(pos_bias):
+            sim = sim + pos_bias
+
+        if self.casual_attn:
+            mask = torch.tril(torch.ones(sim.size(-1), sim.size(-2))).to(device)
+            sim = sim.masked_fill(mask[..., :, :] == 0, float('-inf'))
+
+        attn = sim.softmax(dim = -1)
+        x = torch.einsum('... h i j, ... h j d -> ... h i d', attn, v)
+        x = rearrange(x, '... h n d -> ... n (h d)')
+        x = self.proj(x)
+        return x
+
+
+class Block(nn.Module):
+    def __init__(self, dim, dim_out):
+        super().__init__()
+        self.proj = nn.Linear(dim, dim_out)
+        self.norm = LayerNorm(dim)
+        self.act = nn.SiLU()
+
+    def forward(self, x, scale_shift=None):
+        x = self.proj(x)
+
+        if exists(scale_shift):
+            x = self.norm(x)
+            scale, shift = scale_shift
+            x = x * (scale + 1) + shift
+        return self.act(x)
+
+
+class ResnetBlock(nn.Module):
+    def __init__(self, dim, dim_out, cond_dim=None):
+        super().__init__()
+        self.mlp = nn.Sequential(
+            nn.SiLU(),
+            nn.Linear(cond_dim, dim_out * 2)
+        ) if exists(cond_dim) else None
+
+        self.block1 = Block(dim, dim_out)
+        self.block2 = Block(dim_out, dim_out)
+
+    def forward(self, x, cond_emb=None):
+        scale_shift = None
+        if exists(self.mlp):
+            assert exists(cond_emb), 'time emb must be passed in'
+            cond_emb = self.mlp(cond_emb)
+            #cond_emb = rearrange(cond_emb, 'b f c -> b f 1 c')
+            scale_shift = cond_emb.chunk(2, dim=-1)
+
+        h = self.block1(x, scale_shift=scale_shift)
+        h = self.block2(h)
+        return h + x
+
+class SimpleTransModel(BaseTemperalPointModel):
+    """
+    A simple model that processes a point cloud by applying a series of MLPs to each point
+    individually, along with some pooled global features.
+    """
+
+    def get_layers(self):
+        self.input_projection = nn.Linear(
+            in_features=70,
+            out_features=self.dim
+        )
+
+        cond_dim = 512 + self.timestep_embed_dim
+
+        num_head = self.dim//64
+
+        rotary_emb = RotaryEmbedding(min(32, num_head))
+
+        self.time_rel_pos_bias = RelativePositionBias(heads=num_head, max_distance=128)  # realistically will not be able to generate that many frames of video... yet
+
+        temporal_casual_attn = lambda dim: Attention(dim, heads=num_head, casual_attn=False,rotary_emb=rotary_emb)
+
+        cond_block = partial(ResnetBlock, cond_dim=cond_dim)
+
+        layers = nn.ModuleList([])
+
+        for _ in range(self.num_layers):
+            layers.append(nn.ModuleList([
+                cond_block(self.dim, self.dim),
+                cond_block(self.dim, self.dim),
+                Residual(PreNorm(self.dim, temporal_casual_attn(self.dim)))
+            ]))
+
+        return layers
+
+    def forward(self, inputs: torch.Tensor, timesteps: torch.Tensor, context=None):
+        """
+         Apply the model to an input batch.
+         :param inputs: a [B x F x C] Tensor of per-frame inputs.
+         :param timesteps: a 1-D batch of diffusion timesteps.
+         :param context: conditioning features concatenated with the timestep embedding.
+         """
+        # Prepare inputs
+
+        batch, num_frames, channels = inputs.size()
+
+        device = inputs.device
+        x = self.input_projection(inputs)
+
+        t_emb = self.time_mlp(timesteps) if exists(self.time_mlp) else None
+        t_emb = t_emb[:,None,:].expand(-1, num_frames, -1)  # b f c
+        if context is not None:
+            t_emb = torch.cat([t_emb, context],-1)
+
+        time_rel_pos_bias = self.time_rel_pos_bias(num_frames, device=device)
+
+        for block1, block2,  temporal_attn in self.layers:
+            x = block1(x, t_emb)
+            x = block2(x, t_emb)
+            x = temporal_attn(x, pos_bias=time_rel_pos_bias)
+
+        # Project
+        x = self.output_projection(x)
+        return x
\ No newline at end of file
diff --git a/model/temporaltrans/transformer_utils.py b/model/temporaltrans/transformer_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..97af01dd96288c3e8382255eec11d9474cd325d9
--- /dev/null
+++ b/model/temporaltrans/transformer_utils.py
@@ -0,0 +1,147 @@
+import math
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch import Tensor
+from einops import rearrange
+from einops_exts import check_shape, rearrange_many
+
+class SinusoidalPosEmb(nn.Module):
+    def __init__(self, dim):
+        super().__init__()
+        self.dim = dim
+
+    def forward(self, x):
+        device = x.device
+        half_dim = self.dim // 2
+        emb = math.log(10000) / (half_dim - 1)
+        emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
+        emb = x[:, None] * emb[None, :]
+        emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
+        return emb
+
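+# Example (illustrative, not part of the original module): embedding a batch of
+# diffusion timesteps into `dim`-dimensional sinusoidal features:
+#   emb_layer = SinusoidalPosEmb(dim=256)
+#   t = torch.randint(0, 1000, (8,)).float()
+#   e = emb_layer(t)   # -> (8, 256): 128 sine channels followed by 128 cosine channels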
+
+def map_positional_encoding(v: Tensor, freq_bands: Tensor) -> Tensor:
+    """Map v to positional encoding representation phi(v)
+
+    Arguments:
+        v (Tensor): input features (B, IFeatures)
+        freq_bands (Tensor): frequency bands (N_freqs, )
+
+    Returns:
+        phi(v) (Tensor): Fourier features (B, 3 + (2 * N_freqs) * 3)
+    """
+    pe = [v]
+    for freq in freq_bands:
+        fv = freq * v
+        pe += [torch.sin(fv), torch.cos(fv)]
+    return torch.cat(pe, dim=-1)
+
+class FeatureMapping(nn.Module):
+    """FeatureMapping nn.Module
+
+    Maps v to features following transformation phi(v)
+
+    Arguments:
+        i_dim (int): input dimensions
+        o_dim (int): output dimensions
+    """
+
+    def __init__(self, i_dim: int, o_dim: int) -> None:
+        super().__init__()
+        self.i_dim = i_dim
+        self.o_dim = o_dim
+
+    def forward(self, v: Tensor) -> Tensor:
+        """FeratureMapping forward pass
+
+        Arguments:
+            v (Tensor): input features (B, IFeatures)
+
+        Returns:
+            phi(v) (Tensor): mapped features (B, OFeatures)
+        """
+        raise NotImplementedError("Forward pass not implemented yet!")
+
+class PositionalEncoding(FeatureMapping):
+    """PositionalEncoding module
+
+    Maps v to positional encoding representation phi(v)
+
+    Arguments:
+        i_dim (int): input dimension for v
+        N_freqs (int): #frequency to sample (default: 10)
+    """
+
+    def __init__(
+        self,
+        i_dim: int,
+        N_freqs: int = 10,
+    ) -> None:
+        super().__init__(i_dim, 3 + (2 * N_freqs) * 3)
+        self.N_freqs = N_freqs
+
+        a, b = 1, self.N_freqs - 1
+        freq_bands = 2 ** torch.linspace(a, b, self.N_freqs)
+        self.register_buffer("freq_bands", freq_bands)
+
+    def forward(self, v: Tensor) -> Tensor:
+        """Map v to positional encoding representation phi(v)
+
+        Arguments:
+            v (Tensor): input features (B, IFeatures)
+
+        Returns:
+            phi(v) (Tensor): Fourier features (B, 3 + (2 * N_freqs) * 3)
+        """
+        return map_positional_encoding(v, self.freq_bands)
+
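+# Example (illustrative, not part of the original module): the NeRF-style encoding maps
+# each 3-D point to 3 + (2 * N_freqs) * 3 features, i.e. 63 dimensions for the default
+# N_freqs=10 used by BaseTemperalPointModel below:
+#   pe  = PositionalEncoding(i_dim=3, N_freqs=10)
+#   v   = torch.randn(4, 3)
+#   phi = pe(v)   # -> (4, 63): [v, sin(f1*v), cos(f1*v), ..., sin(f10*v), cos(f10*v)]
+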
+class BaseTemperalPointModel(nn.Module):
+    """ A base class providing useful methods for point cloud processing. """
+
+    def __init__(
+        self,
+        *,
+        num_classes,
+        embed_dim,
+        extra_feature_channels,
+        dim: int = 768,
+        num_layers: int = 6
+    ):
+        super().__init__()
+
+        self.extra_feature_channels = extra_feature_channels
+        self.timestep_embed_dim = 256
+        self.output_dim = num_classes
+        self.dim = dim
+        self.num_layers = num_layers
+
+
+        self.time_mlp = nn.Sequential(
+            SinusoidalPosEmb(dim),
+            nn.Linear(dim, self.timestep_embed_dim ),
+            nn.SiLU(),
+            nn.Linear(self.timestep_embed_dim , self.timestep_embed_dim )
+        )
+
+        self.positional_encoding = PositionalEncoding(i_dim=3, N_freqs=10)
+        positional_encoding_d_out = 3 + (2 * 10) * 3
+
+        # Input projection (point coords, point coord encodings, other features, and timestep embeddings)
+
+        self.input_projection = nn.Linear(
+            in_features=(3 + positional_encoding_d_out),
+            out_features=self.dim
+        )#b f p c
+
+        # Transformer layers
+        self.layers = self.get_layers()
+
+        # Output projection
+        self.output_projection = nn.Linear(self.dim, self.output_dim)
+    def get_layers(self):
+        raise NotImplementedError('This method should be implemented by subclasses')
+
+    def forward(self, inputs: torch.Tensor, t: torch.Tensor):
+        raise NotImplementedError('This method should be implemented by subclasses')
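+
+# Minimal subclass sketch (illustrative, not part of the original module): a concrete
+# model only needs to provide `get_layers` and `forward`, e.g.
+#   class TinyPointModel(BaseTemperalPointModel):
+#       def get_layers(self):
+#           self.cond_proj = nn.Linear(self.timestep_embed_dim, self.dim)
+#           return nn.ModuleList([nn.Linear(self.dim, self.dim) for _ in range(self.num_layers)])
+#
+#       def forward(self, inputs, t):
+#           x = self.input_projection(inputs)                 # (B, N, dim)
+#           c = self.cond_proj(self.time_mlp(t))[:, None, :]  # (B, 1, dim)
+#           for layer in self.layers:
+#               x = torch.relu(layer(x + c))
+#           return self.output_projection(x)                  # (B, N, num_classes)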
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f6a696fb823660a120ff58c9980dc0f6acb4cfad
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,44 @@
+onnxruntime-gpu==1.18.0
+transformers==4.33.0
+pyyaml==6.0.1
+scipy==1.10.0
+imageio==2.34.2
+lmdb==1.4.1
+tqdm==4.64.1
+rich==13.7.1
+ffmpeg-python==0.2.0
+protobuf==3.20.2
+onnx==1.16.1
+scikit-image==0.24.0
+scikit-learn==1.3.2
+albumentations==1.4.10
+matplotlib==3.7.0
+imageio-ffmpeg==0.5.1
+tyro==0.8.5
+pykalman==0.9.7
+pillow>=10.2.0
+pytorch_fid
+cpbd
+
+wandb==0.17.5
+accelerate==0.23.0
+basicsr==1.4.2
+diffusers==0.10.2
+einops==0.6.0
+einops_exts==0.0.4
+hydra-core==1.3.2
+librosa==0.10.0.post2
+lws==1.2.7
+moviepy==1.0.3
+omegaconf==2.3.0
+opencv_python_headless>=4.9.0.80
+pydub==0.25.1
+PyYAML==6.0.1
+realesrgan==0.3.0
+rotary_embedding_torch==0.3.0
+timm==0.4.12
+torch_ema==0.3
+warmup_scheduler==0.3
+yacs==0.1.8
+numpy==1.24.4
+dlib==19.24.99
\ No newline at end of file
diff --git a/src/config/__init__.py b/src/config/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/config/argument_config.py b/src/config/argument_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..bacf15abe59b0899f3c4c25e4fcf9ae9b2a7f427
--- /dev/null
+++ b/src/config/argument_config.py
@@ -0,0 +1,48 @@
+# coding: utf-8
+
+"""
+All configs for user
+"""
+
+from dataclasses import dataclass
+import tyro
+from typing_extensions import Annotated
+from typing import Optional
+from .base_config import PrintableConfig, make_abs_path
+
+
+@dataclass(repr=False)  # use repr from PrintableConfig
+class ArgumentConfig(PrintableConfig):
+    ########## input arguments ##########
+    source_image: Annotated[str, tyro.conf.arg(aliases=["-s"])] = make_abs_path('../../assets/examples/source/s6.jpg')  # path to the source portrait
+    driving_info:  Annotated[str, tyro.conf.arg(aliases=["-d"])] = make_abs_path('../../assets/examples/driving/d12.mp4')  # path to driving video or template (.pkl format)
+    output_dir: Annotated[str, tyro.conf.arg(aliases=["-o"])] = 'animations/'  # directory to save output video
+
+    ########## inference arguments ##########
+    flag_use_half_precision: bool = False  # whether to use half precision (FP16). If black boxes appear, it might be due to GPU incompatibility; set to False.
+    flag_crop_driving_video: bool = False  # whether to crop the driving video, if the given driving info is a video
+    device_id: int = 0 # gpu device id
+    flag_force_cpu: bool = False # force cpu inference, WIP!
+    flag_lip_zero: bool = False # whether to set the lip to the closed state before animation; only takes effect when flag_eye_retargeting and flag_lip_retargeting are False
+    flag_eye_retargeting: bool = False # not recommended to be True; WIP
+    flag_lip_retargeting: bool = False # not recommended to be True; WIP
+    flag_stitching: bool = False  # recommended to be True if head movement is small, False if head movement is large
+    flag_relative_motion: bool = False  # whether to use relative motion
+    flag_pasteback: bool = False  # whether to paste-back/stitch the animated face cropping from the face-cropping space to the original image space
+    flag_do_crop: bool = False  # whether to crop the source portrait to the face-cropping space
+    flag_do_rot: bool = False  # whether to conduct the rotation when flag_do_crop is True
+
+    ########## crop arguments ##########
+    scale: float = 2.3 # the ratio of face area is smaller if scale is larger
+    vx_ratio: float = 0  # the ratio to move the face to left or right in cropping space
+    vy_ratio: float = -0.125  # the ratio to move the face to up or down in cropping space
+
+    scale_crop_video: float = 2.2 # scale factor for cropping video
+    vx_ratio_crop_video: float = 0. # adjust the x (horizontal) offset when cropping the video
+    vy_ratio_crop_video: float = -0.1  # adjust the y (vertical) offset when cropping the video
+
+    ########## gradio arguments ##########
+    server_port: Annotated[int, tyro.conf.arg(aliases=["-p"])]  = 8890 # port for gradio server
+    share: bool = False # whether to share the server to public
+    server_name: Optional[str] = "127.0.0.1"  # set the local server name, "0.0.0.0" to broadcast all
+    flag_do_torch_compile: bool = False  # whether to use torch.compile to accelerate generation
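+
+# Example (illustrative, not part of the original file): the tyro aliases above let this
+# config be built from the command line or constructed directly in Python:
+#   args = tyro.cli(ArgumentConfig)   # e.g. `-s img.png -d drive.mp4 -o animations/`
+#   args = ArgumentConfig(source_image='img.png', driving_info='drive.mp4')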
diff --git a/src/config/base_config.py b/src/config/base_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..216b8be50aecc8af4b9d1d2a9401e034dd7769e4
--- /dev/null
+++ b/src/config/base_config.py
@@ -0,0 +1,29 @@
+# coding: utf-8
+
+"""
+pretty printing class
+"""
+
+from __future__ import annotations
+import os.path as osp
+from typing import Tuple
+
+
+def make_abs_path(fn):
+    return osp.join(osp.dirname(osp.realpath(__file__)), fn)
+
+
+class PrintableConfig:  # pylint: disable=too-few-public-methods
+    """Printable Config defining str function"""
+
+    def __repr__(self):
+        lines = [self.__class__.__name__ + ":"]
+        for key, val in vars(self).items():
+            if isinstance(val, Tuple):
+                flattened_val = "["
+                for item in val:
+                    flattened_val += str(item) + "\n"
+                flattened_val = flattened_val.rstrip("\n")
+                val = flattened_val + "]"
+            lines += f"{key}: {str(val)}".split("\n")
+        return "\n    ".join(lines)
diff --git a/src/config/crop_config.py b/src/config/crop_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6d6370dd5572e5ebb5f607540baac08c26997bb
--- /dev/null
+++ b/src/config/crop_config.py
@@ -0,0 +1,29 @@
+# coding: utf-8
+
+"""
+parameters used for crop faces
+"""
+
+from dataclasses import dataclass
+
+from .base_config import PrintableConfig
+
+
+@dataclass(repr=False)  # use repr from PrintableConfig
+class CropConfig(PrintableConfig):
+    insightface_root: str = "../../pretrained_weights/insightface"
+    landmark_ckpt_path: str = "../../pretrained_weights/liveportrait/landmark.onnx"
+    device_id: int = 0  # gpu device id
+    flag_force_cpu: bool = False  # force cpu inference, WIP
+    ########## source image cropping option ##########
+    dsize: int = 512  # crop size
+    scale: float = 2.0  # scale factor
+    vx_ratio: float = 0  # vx ratio
+    vy_ratio: float = -0.125  # vy ratio +up, -down
+    max_face_num: int = 0  # maximum number of faces; 0 means no limit
+
+    ########## driving video auto cropping option ##########
+    scale_crop_video: float = 2.2  # 2.0 # scale factor for cropping video
+    vx_ratio_crop_video: float = 0.0  # adjust the x (horizontal) offset when cropping the video
+    vy_ratio_crop_video: float = -0.1  # adjust the y (vertical) offset when cropping the video
+    direction: str = "large-small"  # direction of cropping
diff --git a/src/config/inference_config.py b/src/config/inference_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..b14d7cba6205ba4a868d834adc21e9786a17689b
--- /dev/null
+++ b/src/config/inference_config.py
@@ -0,0 +1,52 @@
+# coding: utf-8
+
+"""
+config dataclass used for inference
+"""
+
+import os.path as osp
+import cv2
+from numpy import ndarray
+from dataclasses import dataclass
+from typing import Literal, Tuple
+from .base_config import PrintableConfig, make_abs_path
+
+
+@dataclass(repr=False)  # use repr from PrintableConfig
+class InferenceConfig(PrintableConfig):
+    # MODEL CONFIG, NOT EXPORTED PARAMS
+    models_config: str = make_abs_path('./models.yaml')  # portrait animation config
+    checkpoint_F: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/appearance_feature_extractor.pth')  # path to checkpoint of F
+    checkpoint_M: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/motion_extractor.pth')  # path to checkpoint of M
+    checkpoint_G: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/spade_generator.pth')  # path to checkpoint of G
+    checkpoint_W: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/warping_module.pth')  # path to checkpoint of W
+    checkpoint_S: str = make_abs_path('../../pretrained_weights/liveportrait/retargeting_models/stitching_retargeting_module.pth')  # path to checkpoint of S (and R_eyes, R_lip)
+
+    # EXPORTED PARAMS
+    flag_use_half_precision: bool = True
+    flag_crop_driving_video: bool = False
+    device_id: int = 0
+    flag_lip_zero: bool = False
+    flag_eye_retargeting: bool = False
+    flag_lip_retargeting: bool = False
+    flag_stitching: bool = False
+    flag_relative_motion: bool = False
+    flag_pasteback: bool = False
+    flag_do_crop: bool = False
+    flag_do_rot: bool = False
+    flag_force_cpu: bool = False 
+    flag_do_torch_compile: bool = False  
+
+    # NOT EXPORTED PARAMS
+    lip_zero_threshold: float = 0.03 # threshold for flag_lip_zero
+    anchor_frame: int = 0 # TO IMPLEMENT
+
+    input_shape: Tuple[int, int] = (256, 256)  # input shape
+    output_format: Literal['mp4', 'gif'] = 'mp4'  # output video format
+    crf: int = 15  # crf for output video
+    output_fps: int = 25 # default output fps
+
+    mask_crop: ndarray = cv2.imread(make_abs_path('../utils/resources/mask_template.png'), cv2.IMREAD_COLOR)
+    size_gif: int = 256 # default gif size, TO IMPLEMENT
+    source_max_dim: int = 1280 # the max dim of height and width of source image
+    source_division: int = 2 # make sure the height and width of source image can be divided by this number
diff --git a/src/config/models.yaml b/src/config/models.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..131d1c65025c31e37af9239e211ea14454128a2e
--- /dev/null
+++ b/src/config/models.yaml
@@ -0,0 +1,43 @@
+model_params:
+  appearance_feature_extractor_params: # the F in the paper
+    image_channel: 3
+    block_expansion: 64
+    num_down_blocks: 2
+    max_features: 512
+    reshape_channel: 32
+    reshape_depth: 16
+    num_resblocks: 6
+  motion_extractor_params: # the M in the paper
+    num_kp: 21
+    backbone: convnextv2_tiny
+  warping_module_params: # the W in the paper
+    num_kp: 21
+    block_expansion: 64
+    max_features: 512
+    num_down_blocks: 2
+    reshape_channel: 32
+    estimate_occlusion_map: True
+    dense_motion_params:
+      block_expansion: 32
+      max_features: 1024
+      num_blocks: 5
+      reshape_depth: 16
+      compress: 4
+  spade_generator_params: # the G in the paper
+    upscale: 2 # represents upsample factor 256x256 -> 512x512
+    block_expansion: 64
+    max_features: 512
+    num_down_blocks: 2
+  stitching_retargeting_module_params: # the S in the paper
+    stitching:
+      input_size: 126 # (21*3)*2
+      hidden_sizes: [128, 128, 64]
+      output_size: 65 # (21*3)+2(tx,ty)
+    lip:
+      input_size: 65 # (21*3)+2
+      hidden_sizes: [128, 128, 64]
+      output_size: 63 # (21*3)
+    eye:
+      input_size: 66 # (21*3)+3
+      hidden_sizes: [256, 256, 128, 128, 64]
+      output_size: 63 # (21*3)
diff --git a/src/gradio_pipeline.py b/src/gradio_pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7343f7df6b8a6c6815c5af3526ed6dc857a7c0c
--- /dev/null
+++ b/src/gradio_pipeline.py
@@ -0,0 +1,117 @@
+# coding: utf-8
+
+"""
+Pipeline for gradio
+"""
+import gradio as gr
+
+from .config.argument_config import ArgumentConfig
+from .live_portrait_pipeline import LivePortraitPipeline
+from .utils.io import load_img_online
+from .utils.rprint import rlog as log
+from .utils.crop import prepare_paste_back, paste_back
+from .utils.camera import get_rotation_matrix
+
+
+def update_args(args, user_args):
+    """update the args according to user inputs
+    """
+    for k, v in user_args.items():
+        if hasattr(args, k):
+            setattr(args, k, v)
+    return args
+
+
+class GradioPipeline(LivePortraitPipeline):
+
+    def __init__(self, inference_cfg, crop_cfg, args: ArgumentConfig):
+        super().__init__(inference_cfg, crop_cfg)
+        # self.live_portrait_wrapper = self.live_portrait_wrapper
+        self.args = args
+
+    def execute_video(
+        self,
+        input_image_path,
+        input_video_path,
+        flag_relative_input,
+        flag_do_crop_input,
+        flag_remap_input,
+        flag_crop_driving_video_input
+    ):
+        """ for video driven potrait animation
+        """
+        if input_image_path is not None and input_video_path is not None:
+            args_user = {
+                'source_image': input_image_path,
+                'driving_info': input_video_path,
+                'flag_relative': flag_relative_input,
+                'flag_do_crop': flag_do_crop_input,
+                'flag_pasteback': flag_remap_input,
+                'flag_crop_driving_video': flag_crop_driving_video_input
+            }
+            # update config from user input
+            self.args = update_args(self.args, args_user)
+            self.live_portrait_wrapper.update_config(self.args.__dict__)
+            self.cropper.update_config(self.args.__dict__)
+            # video driven animation
+            video_path, video_path_concat = self.execute(self.args)
+            gr.Info("Run successfully!", duration=2)
+            return video_path, video_path_concat,
+        else:
+            raise gr.Error("The input source portrait or driving video hasn't been prepared yet 💥!", duration=5)
+
+    def execute_image(self, input_eye_ratio: float, input_lip_ratio: float, input_image, flag_do_crop=True):
+        """ for single image retargeting
+        """
+        # disposable feature
+        f_s_user, x_s_user, source_lmk_user, crop_M_c2o, mask_ori, img_rgb = \
+            self.prepare_retargeting(input_image, flag_do_crop)
+
+        if input_eye_ratio is None or input_lip_ratio is None:
+            raise gr.Error("Invalid ratio input 💥!", duration=5)
+        else:
+            inference_cfg = self.live_portrait_wrapper.inference_cfg
+            x_s_user = x_s_user.to(self.live_portrait_wrapper.device)
+            f_s_user = f_s_user.to(self.live_portrait_wrapper.device)
+            # ∆_eyes,i = R_eyes(x_s; c_s,eyes, c_d,eyes,i)
+            combined_eye_ratio_tensor = self.live_portrait_wrapper.calc_combined_eye_ratio([[input_eye_ratio]], source_lmk_user)
+            eyes_delta = self.live_portrait_wrapper.retarget_eye(x_s_user, combined_eye_ratio_tensor)
+            # ∆_lip,i = R_lip(x_s; c_s,lip, c_d,lip,i)
+            combined_lip_ratio_tensor = self.live_portrait_wrapper.calc_combined_lip_ratio([[input_lip_ratio]], source_lmk_user)
+            lip_delta = self.live_portrait_wrapper.retarget_lip(x_s_user, combined_lip_ratio_tensor)
+            num_kp = x_s_user.shape[1]
+            # default: use x_s
+            x_d_new = x_s_user + eyes_delta.reshape(-1, num_kp, 3) + lip_delta.reshape(-1, num_kp, 3)
+            # D(W(f_s; x_s, x′_d))
+            out = self.live_portrait_wrapper.warp_decode(f_s_user, x_s_user, x_d_new)
+            out = self.live_portrait_wrapper.parse_output(out['out'])[0]
+            out_to_ori_blend = paste_back(out, crop_M_c2o, img_rgb, mask_ori)
+            gr.Info("Run successfully!", duration=2)
+            return out, out_to_ori_blend
+
+    def prepare_retargeting(self, input_image, flag_do_crop=True):
+        """ for single image retargeting
+        """
+        if input_image is not None:
+            # gr.Info("Upload successfully!", duration=2)
+            inference_cfg = self.live_portrait_wrapper.inference_cfg
+            ######## process source portrait ########
+            img_rgb = load_img_online(input_image, mode='rgb', max_dim=1280, n=16)
+            log(f"Load source image from {input_image}.")
+            crop_info = self.cropper.crop_source_image(img_rgb, self.cropper.crop_cfg)
+            if flag_do_crop:
+                I_s = self.live_portrait_wrapper.prepare_source(crop_info['img_crop_256x256'])
+            else:
+                I_s = self.live_portrait_wrapper.prepare_source(img_rgb)
+            x_s_info = self.live_portrait_wrapper.get_kp_info(I_s)
+            R_s = get_rotation_matrix(x_s_info['pitch'], x_s_info['yaw'], x_s_info['roll'])
+            ############################################
+            f_s_user = self.live_portrait_wrapper.extract_feature_3d(I_s)
+            x_s_user = self.live_portrait_wrapper.transform_keypoint(x_s_info)
+            source_lmk_user = crop_info['lmk_crop']
+            crop_M_c2o = crop_info['M_c2o']
+            mask_ori = prepare_paste_back(inference_cfg.mask_crop, crop_info['M_c2o'], dsize=(img_rgb.shape[1], img_rgb.shape[0]))
+            return f_s_user, x_s_user, source_lmk_user, crop_M_c2o, mask_ori, img_rgb
+        else:
+            # when press the clear button, go here
+            raise gr.Error("The retargeting input hasn't been prepared yet 💥!", duration=5)
diff --git a/src/live_portrait_pipeline.py b/src/live_portrait_pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..e20db99a98c8015b99167d9e8e36aef6c615999e
--- /dev/null
+++ b/src/live_portrait_pipeline.py
@@ -0,0 +1,285 @@
+# coding: utf-8
+
+"""
+Pipeline of LivePortrait
+"""
+
+import torch
+torch.backends.cudnn.benchmark = True # disable CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR warning
+
+import cv2; cv2.setNumThreads(0); cv2.ocl.setUseOpenCL(False)
+import numpy as np
+import os
+import os.path as osp
+from rich.progress import track
+
+from .config.argument_config import ArgumentConfig
+from .config.inference_config import InferenceConfig
+from .config.crop_config import CropConfig
+from .utils.cropper import Cropper
+from .utils.camera import get_rotation_matrix
+from .utils.video import images2video, concat_frames, get_fps, add_audio_to_video, has_audio_stream
+from .utils.crop import  prepare_paste_back, paste_back
+from .utils.io import load_image_rgb, load_driving_info, resize_to_limit, dump, load
+from .utils.helper import mkdir, basename, dct2device, is_video, is_template, remove_suffix
+from .utils.rprint import rlog as log
+# from .utils.viz import viz_lmk
+from .live_portrait_wrapper import LivePortraitWrapper
+
+
+def make_abs_path(fn):
+    return osp.join(osp.dirname(osp.realpath(__file__)), fn)
+
+
+class LivePortraitPipeline(object):
+
+    def __init__(self, inference_cfg: InferenceConfig, crop_cfg: CropConfig):
+        self.live_portrait_wrapper: LivePortraitWrapper = LivePortraitWrapper(inference_cfg=inference_cfg)
+        self.cropper: Cropper = Cropper(crop_cfg=crop_cfg)
+
+    def execute(self, args: ArgumentConfig):
+        # for convenience
+        inf_cfg = self.live_portrait_wrapper.inference_cfg
+        device = self.live_portrait_wrapper.device
+        crop_cfg = self.cropper.crop_cfg
+
+        ######## process source portrait ########
+        img_rgb = load_image_rgb(args.source_image)
+        img_rgb = resize_to_limit(img_rgb, inf_cfg.source_max_dim, inf_cfg.source_division)
+        log(f"Load source image from {args.source_image}")
+
+        crop_info = self.cropper.crop_source_image(img_rgb, crop_cfg)
+        if crop_info is None:
+            raise Exception("No face detected in the source image!")
+        source_lmk = crop_info['lmk_crop']
+        img_crop, img_crop_256x256 = crop_info['img_crop'], crop_info['img_crop_256x256']
+
+        if inf_cfg.flag_do_crop:
+            I_s = self.live_portrait_wrapper.prepare_source(img_crop_256x256)
+        else:
+            img_crop_256x256 = cv2.resize(img_rgb, (256, 256))  # force to resize to 256x256
+            I_s = self.live_portrait_wrapper.prepare_source(img_crop_256x256)
+        x_s_info = self.live_portrait_wrapper.get_kp_info(I_s)
+        x_c_s = x_s_info['kp']
+        R_s = get_rotation_matrix(x_s_info['pitch'], x_s_info['yaw'], x_s_info['roll'])
+        f_s = self.live_portrait_wrapper.extract_feature_3d(I_s)
+        x_s = self.live_portrait_wrapper.transform_keypoint(x_s_info)
+
+        flag_lip_zero = inf_cfg.flag_lip_zero  # not overwrite
+        if flag_lip_zero:
+            # let lip-open scalar to be 0 at first
+            c_d_lip_before_animation = [0.]
+            combined_lip_ratio_tensor_before_animation = self.live_portrait_wrapper.calc_combined_lip_ratio(c_d_lip_before_animation, source_lmk)
+            if combined_lip_ratio_tensor_before_animation[0][0] < inf_cfg.lip_zero_threshold:
+                flag_lip_zero = False
+            else:
+                lip_delta_before_animation = self.live_portrait_wrapper.retarget_lip(x_s, combined_lip_ratio_tensor_before_animation)
+        ############################################
+
+        ######## process driving info ########
+        flag_load_from_template = is_template(args.driving_info)
+        driving_rgb_crop_256x256_lst = None
+        wfp_template = None
+
+        if flag_load_from_template:
+            # NOTE: loading from a template is fast, but the cropped driving video is not available
+            log(f"Load from template: {args.driving_info}, NOT the video, so the cropping video and audio are both NULL.", style='bold green')
+            template_dct = load(args.driving_info)
+            n_frames = template_dct['n_frames']
+
+            # set output_fps
+            output_fps = template_dct.get('output_fps', inf_cfg.output_fps)
+            log(f'The FPS of template: {output_fps}')
+
+            if args.flag_crop_driving_video:
+                log("Warning: flag_crop_driving_video is True, but the driving info is a template, so it is ignored.")
+
+        elif osp.exists(args.driving_info) and is_video(args.driving_info):
+            # load from video file, AND make motion template
+            log(f"Load video: {args.driving_info}")
+            if osp.isdir(args.driving_info):
+                output_fps = inf_cfg.output_fps
+            else:
+                output_fps = int(get_fps(args.driving_info))
+                log(f'The FPS of {args.driving_info} is: {output_fps}')
+
+            log(f"Load video file (mp4 mov avi etc...): {args.driving_info}")
+            driving_rgb_lst = load_driving_info(args.driving_info)
+
+            ######## make motion template ########
+            log("Start making motion template...")
+            if inf_cfg.flag_crop_driving_video:
+                ret = self.cropper.crop_driving_video(driving_rgb_lst)
+                log(f'Driving video is cropped, {len(ret["frame_crop_lst"])} frames are processed.')
+                driving_rgb_crop_lst, driving_lmk_crop_lst = ret['frame_crop_lst'], ret['lmk_crop_lst']
+                driving_rgb_crop_256x256_lst = [cv2.resize(_, (256, 256)) for _ in driving_rgb_crop_lst]
+            else:
+                driving_lmk_crop_lst = self.cropper.calc_lmks_from_cropped_video(driving_rgb_lst)
+                driving_rgb_crop_256x256_lst = [cv2.resize(_, (256, 256)) for _ in driving_rgb_lst]  # force to resize to 256x256
+
+            c_d_eyes_lst, c_d_lip_lst = self.live_portrait_wrapper.calc_driving_ratio(driving_lmk_crop_lst)
+            # save the motion template
+            I_d_lst = self.live_portrait_wrapper.prepare_driving_videos(driving_rgb_crop_256x256_lst)
+            template_dct = self.make_motion_template(I_d_lst, c_d_eyes_lst, c_d_lip_lst, output_fps=output_fps)
+
+            wfp_template = remove_suffix(args.driving_info) + '.pkl'
+            dump(wfp_template, template_dct)
+            log(f"Dump motion template to {wfp_template}")
+
+            n_frames = I_d_lst.shape[0]
+        else:
+            raise Exception(f"{args.driving_info} not exists or unsupported driving info types!")
+        #########################################
+
+        ######## prepare for pasteback ########
+        I_p_pstbk_lst = None
+        if inf_cfg.flag_pasteback and inf_cfg.flag_do_crop and inf_cfg.flag_stitching:
+            mask_ori_float = prepare_paste_back(inf_cfg.mask_crop, crop_info['M_c2o'], dsize=(img_rgb.shape[1], img_rgb.shape[0]))
+            I_p_pstbk_lst = []
+            log("Prepared pasteback mask done.")
+        #########################################
+
+        I_p_lst = []
+        R_d_0, x_d_0_info = None, None
+
+        for i in track(range(n_frames), description='🚀Animating...', total=n_frames):
+            x_d_i_info = template_dct['motion'][i]
+            x_d_i_info = dct2device(x_d_i_info, device)
+            R_d_i = x_d_i_info['R_d']
+
+            if i == 0:
+                R_d_0 = R_d_i
+                x_d_0_info = x_d_i_info
+
+            if inf_cfg.flag_relative_motion:
+                R_new = (R_d_i @ R_d_0.permute(0, 2, 1)) @ R_s
+                delta_new = x_s_info['exp'] + (x_d_i_info['exp'] - x_d_0_info['exp'])
+                scale_new = x_s_info['scale'] * (x_d_i_info['scale'] / x_d_0_info['scale'])
+                t_new = x_s_info['t'] + (x_d_i_info['t'] - x_d_0_info['t'])
+            else:
+                R_new = R_d_i
+                delta_new = x_d_i_info['exp']
+                scale_new = x_s_info['scale']
+                t_new = x_d_i_info['t']
+
+            t_new[..., 2].fill_(0)  # zero tz
+            x_d_i_new = scale_new * (x_c_s @ R_new + delta_new) + t_new
+
+            # Algorithm 1:
+            if not inf_cfg.flag_stitching and not inf_cfg.flag_eye_retargeting and not inf_cfg.flag_lip_retargeting:
+                # without stitching or retargeting
+                if flag_lip_zero:
+                    x_d_i_new += lip_delta_before_animation.reshape(-1, x_s.shape[1], 3)
+                else:
+                    pass
+            elif inf_cfg.flag_stitching and not inf_cfg.flag_eye_retargeting and not inf_cfg.flag_lip_retargeting:
+                # with stitching and without retargeting
+                if flag_lip_zero:
+                    x_d_i_new = self.live_portrait_wrapper.stitching(x_s, x_d_i_new) + lip_delta_before_animation.reshape(-1, x_s.shape[1], 3)
+                else:
+                    x_d_i_new = self.live_portrait_wrapper.stitching(x_s, x_d_i_new)
+            else:
+                eyes_delta, lip_delta = None, None
+                if inf_cfg.flag_eye_retargeting:
+                    c_d_eyes_i = c_d_eyes_lst[i]
+                    combined_eye_ratio_tensor = self.live_portrait_wrapper.calc_combined_eye_ratio(c_d_eyes_i, source_lmk)
+                    # ∆_eyes,i = R_eyes(x_s; c_s,eyes, c_d,eyes,i)
+                    eyes_delta = self.live_portrait_wrapper.retarget_eye(x_s, combined_eye_ratio_tensor)
+                if inf_cfg.flag_lip_retargeting:
+                    c_d_lip_i = c_d_lip_lst[i]
+                    combined_lip_ratio_tensor = self.live_portrait_wrapper.calc_combined_lip_ratio(c_d_lip_i, source_lmk)
+                    # ∆_lip,i = R_lip(x_s; c_s,lip, c_d,lip,i)
+                    lip_delta = self.live_portrait_wrapper.retarget_lip(x_s, combined_lip_ratio_tensor)
+
+                if inf_cfg.flag_relative_motion:  # use x_s
+                    x_d_i_new = x_s + \
+                        (eyes_delta.reshape(-1, x_s.shape[1], 3) if eyes_delta is not None else 0) + \
+                        (lip_delta.reshape(-1, x_s.shape[1], 3) if lip_delta is not None else 0)
+                else:  # use x_d,i
+                    x_d_i_new = x_d_i_new + \
+                        (eyes_delta.reshape(-1, x_s.shape[1], 3) if eyes_delta is not None else 0) + \
+                        (lip_delta.reshape(-1, x_s.shape[1], 3) if lip_delta is not None else 0)
+
+                if inf_cfg.flag_stitching:
+                    x_d_i_new = self.live_portrait_wrapper.stitching(x_s, x_d_i_new)
+
+            out = self.live_portrait_wrapper.warp_decode(f_s, x_s, x_d_i_new)
+            I_p_i = self.live_portrait_wrapper.parse_output(out['out'])[0]
+            I_p_lst.append(I_p_i)
+
+            if inf_cfg.flag_pasteback and inf_cfg.flag_do_crop and inf_cfg.flag_stitching:
+                # TODO: pasteback is slow; consider optimizing it with multi-threading or the GPU
+                I_p_pstbk = paste_back(I_p_i, crop_info['M_c2o'], img_rgb, mask_ori_float)
+                I_p_pstbk_lst.append(I_p_pstbk)
+
+        mkdir(args.output_dir)
+        wfp_concat = None
+        flag_has_audio = (not flag_load_from_template) and has_audio_stream(args.driving_info)
+
+        ######### build final concat result #########
+        # driving frame | source image | generation, or source image | generation
+        frames_concatenated = concat_frames(driving_rgb_crop_256x256_lst, img_crop_256x256, I_p_lst)
+        wfp_concat = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}_concat.mp4')
+        images2video(frames_concatenated, wfp=wfp_concat, fps=output_fps)
+
+        if flag_has_audio:
+            # final result with concat
+            wfp_concat_with_audio = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}_concat_with_audio.mp4')
+            add_audio_to_video(wfp_concat, args.driving_info, wfp_concat_with_audio)
+            os.replace(wfp_concat_with_audio, wfp_concat)
+            log(f"Replace {wfp_concat} with {wfp_concat_with_audio}")
+
+        # save the driven (animated) result
+        wfp = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}.mp4')
+        if I_p_pstbk_lst is not None and len(I_p_pstbk_lst) > 0:
+            images2video(I_p_pstbk_lst, wfp=wfp, fps=output_fps)
+        else:
+            images2video(I_p_lst, wfp=wfp, fps=output_fps)
+
+        ######### build final result #########
+        if flag_has_audio:
+            wfp_with_audio = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}_with_audio.mp4')
+            add_audio_to_video(wfp, args.driving_info, wfp_with_audio)
+            os.replace(wfp_with_audio, wfp)
+            log(f"Replace {wfp} with {wfp_with_audio}")
+
+        # final log
+        if wfp_template not in (None, ''):
+            log(f'Animated template: {wfp_template}. You can pass this template path to the `-d` argument next time to skip video cropping and motion-template generation, and to protect privacy.', style='bold green')
+        log(f'Animated video: {wfp}')
+        log(f'Animated video with concat: {wfp_concat}')
+
+        return wfp, wfp_concat
+
+    def make_motion_template(self, I_d_lst, c_d_eyes_lst, c_d_lip_lst, **kwargs):
+        n_frames = I_d_lst.shape[0]
+        template_dct = {
+            'n_frames': n_frames,
+            'output_fps': kwargs.get('output_fps', 25),
+            'motion': [],
+            'c_d_eyes_lst': [],
+            'c_d_lip_lst': [],
+        }
+
+        for i in track(range(n_frames), description='Making motion templates...', total=n_frames):
+            # collect s_d, R_d, δ_d and t_d for inference
+            I_d_i = I_d_lst[i]
+            x_d_i_info = self.live_portrait_wrapper.get_kp_info(I_d_i)
+            R_d_i = get_rotation_matrix(x_d_i_info['pitch'], x_d_i_info['yaw'], x_d_i_info['roll'])
+
+            item_dct = {
+                'scale': x_d_i_info['scale'].cpu().numpy().astype(np.float32),
+                'R_d': R_d_i.cpu().numpy().astype(np.float32),
+                'exp': x_d_i_info['exp'].cpu().numpy().astype(np.float32),
+                't': x_d_i_info['t'].cpu().numpy().astype(np.float32),
+            }
+
+            template_dct['motion'].append(item_dct)
+
+            c_d_eyes = c_d_eyes_lst[i].astype(np.float32)
+            template_dct['c_d_eyes_lst'].append(c_d_eyes)
+
+            c_d_lip = c_d_lip_lst[i].astype(np.float32)
+            template_dct['c_d_lip_lst'].append(c_d_lip)
+
+        return template_dct
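+
+# Usage sketch (illustrative, not part of the original file): with the config dataclasses
+# from src/config, a video-driven run reduces to
+#   args = ArgumentConfig(source_image='s.jpg', driving_info='d.mp4', output_dir='animations/')
+#   pipeline = LivePortraitPipeline(inference_cfg=InferenceConfig(), crop_cfg=CropConfig())
+#   wfp, wfp_concat = pipeline.execute(args)   # paths to the animated video and the concat video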
diff --git a/src/live_portrait_wrapper.py b/src/live_portrait_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c5bd6eebe929d9aa535dc3000c48272e7cdf417
--- /dev/null
+++ b/src/live_portrait_wrapper.py
@@ -0,0 +1,318 @@
+# coding: utf-8
+
+"""
+Wrapper for LivePortrait core functions
+"""
+
+import os.path as osp
+import numpy as np
+import cv2
+import torch
+import yaml
+
+from .utils.timer import Timer
+from .utils.helper import load_model, concat_feat
+from .utils.camera import headpose_pred_to_degree, get_rotation_matrix
+from .utils.retargeting_utils import calc_eye_close_ratio, calc_lip_close_ratio
+from .config.inference_config import InferenceConfig
+from .utils.rprint import rlog as log
+
+
+class LivePortraitWrapper(object):
+
+    def __init__(self, inference_cfg: InferenceConfig):
+
+        self.inference_cfg = inference_cfg
+        self.device_id = inference_cfg.device_id
+        self.compile = inference_cfg.flag_do_torch_compile
+        if inference_cfg.flag_force_cpu:
+            self.device = 'cpu'
+        else:
+            self.device = 'cuda:' + str(self.device_id)
+
+        model_config = yaml.load(open(inference_cfg.models_config, 'r'), Loader=yaml.SafeLoader)
+        # init F
+        self.appearance_feature_extractor = load_model(inference_cfg.checkpoint_F, model_config, self.device, 'appearance_feature_extractor')
+        log(f'Load appearance_feature_extractor done.')
+        # init M
+        self.motion_extractor = load_model(inference_cfg.checkpoint_M, model_config, self.device, 'motion_extractor')
+        log(f'Load motion_extractor done.')
+        # init W
+        self.warping_module = load_model(inference_cfg.checkpoint_W, model_config, self.device, 'warping_module')
+        log(f'Load warping_module done.')
+        # init G
+        self.spade_generator = load_model(inference_cfg.checkpoint_G, model_config, self.device, 'spade_generator')
+        log(f'Load spade_generator done.')
+        # init S and R
+        if inference_cfg.checkpoint_S is not None and osp.exists(inference_cfg.checkpoint_S):
+            self.stitching_retargeting_module = load_model(inference_cfg.checkpoint_S, model_config, self.device, 'stitching_retargeting_module')
+            log(f'Load stitching_retargeting_module done.')
+        else:
+            self.stitching_retargeting_module = None
+        # Optimize for inference
+        if self.compile:
+            self.warping_module = torch.compile(self.warping_module, mode='max-autotune')  
+            self.spade_generator = torch.compile(self.spade_generator, mode='max-autotune')  
+        
+        self.timer = Timer()
+
+    def update_config(self, user_args):
+        for k, v in user_args.items():
+            if hasattr(self.inference_cfg, k):
+                setattr(self.inference_cfg, k, v)
+
+    def prepare_source(self, img: np.ndarray) -> torch.Tensor:
+        """ construct the input as standard
+        img: HxWx3, uint8, 256x256
+        """
+        h, w = img.shape[:2]
+        if h != self.inference_cfg.input_shape[0] or w != self.inference_cfg.input_shape[1]:
+            x = cv2.resize(img, (self.inference_cfg.input_shape[0], self.inference_cfg.input_shape[1]))
+        else:
+            x = img.copy()
+
+        if x.ndim == 3:
+            x = x[np.newaxis].astype(np.float32) / 255.  # HxWx3 -> 1xHxWx3, normalized to 0~1
+        elif x.ndim == 4:
+            x = x.astype(np.float32) / 255.  # BxHxWx3, normalized to 0~1
+        else:
+            raise ValueError(f'img ndim should be 3 or 4: {x.ndim}')
+        x = np.clip(x, 0, 1)  # clip to 0~1
+        x = torch.from_numpy(x).permute(0, 3, 1, 2)  # 1xHxWx3 -> 1x3xHxW
+        x = x.to(self.device)
+        return x
+
+    def prepare_driving_videos(self, imgs) -> torch.Tensor:
+        """ construct the input as standard
+        imgs: list of HxWx3 uint8 frames, or an ndarray of shape TxHxWx3x1
+        """
+        if isinstance(imgs, list):
+            _imgs = np.array(imgs)[..., np.newaxis]  # TxHxWx3x1
+        elif isinstance(imgs, np.ndarray):
+            _imgs = imgs
+        else:
+            raise ValueError(f'imgs type error: {type(imgs)}')
+
+        y = _imgs.astype(np.float32) / 255.
+        y = np.clip(y, 0, 1)  # clip to 0~1
+        y = torch.from_numpy(y).permute(0, 4, 3, 1, 2)  # TxHxWx3x1 -> Tx1x3xHxW
+        y = y.to(self.device)
+
+        return y
+
+    def extract_feature_3d(self, x: torch.Tensor) -> torch.Tensor:
+        """ get the appearance feature of the image by F
+        x: Bx3xHxW, normalized to 0~1
+        """
+        with torch.no_grad():
+            with torch.autocast(device_type=self.device[:4], dtype=torch.float16, enabled=self.inference_cfg.flag_use_half_precision):
+                feature_3d = self.appearance_feature_extractor(x)
+
+        return feature_3d.float()
+
+    def get_kp_info(self, x: torch.Tensor, **kwargs) -> dict:
+        """ get the implicit keypoint information
+        x: Bx3xHxW, normalized to 0~1
+        flag_refine_info: whether to transform the pose to degrees and reshape the keypoint/expression dimensions
+        return: A dict contains keys: 'pitch', 'yaw', 'roll', 't', 'exp', 'scale', 'kp'
+        """
+        with torch.no_grad():
+            with torch.autocast(device_type=self.device[:4], dtype=torch.float16, enabled=self.inference_cfg.flag_use_half_precision):
+                kp_info = self.motion_extractor(x)
+
+            if self.inference_cfg.flag_use_half_precision:
+                # float the dict
+                for k, v in kp_info.items():
+                    if isinstance(v, torch.Tensor):
+                        kp_info[k] = v.float()
+
+        flag_refine_info: bool = kwargs.get('flag_refine_info', True)
+        if flag_refine_info:
+            bs = kp_info['kp'].shape[0]
+            kp_info['pitch'] = headpose_pred_to_degree(kp_info['pitch'])[:, None]  # Bx1
+            kp_info['yaw'] = headpose_pred_to_degree(kp_info['yaw'])[:, None]  # Bx1
+            kp_info['roll'] = headpose_pred_to_degree(kp_info['roll'])[:, None]  # Bx1
+            kp_info['kp'] = kp_info['kp'].reshape(bs, -1)  # B,Nx3
+            kp_info['exp'] = kp_info['exp'].reshape(bs, -1)  # B,Nx3
+
+        return kp_info
+
+    def get_pose_dct(self, kp_info: dict) -> dict:
+        pose_dct = dict(
+            pitch=headpose_pred_to_degree(kp_info['pitch']).item(),
+            yaw=headpose_pred_to_degree(kp_info['yaw']).item(),
+            roll=headpose_pred_to_degree(kp_info['roll']).item(),
+        )
+        return pose_dct
+
+    def get_fs_and_kp_info(self, source_prepared, driving_first_frame):
+
+        # get the canonical keypoints of source image by M
+        source_kp_info = self.get_kp_info(source_prepared, flag_refine_info=True)
+        source_rotation = get_rotation_matrix(source_kp_info['pitch'], source_kp_info['yaw'], source_kp_info['roll'])
+
+        # get the canonical keypoints of first driving frame by M
+        driving_first_frame_kp_info = self.get_kp_info(driving_first_frame, flag_refine_info=True)
+        driving_first_frame_rotation = get_rotation_matrix(
+            driving_first_frame_kp_info['pitch'],
+            driving_first_frame_kp_info['yaw'],
+            driving_first_frame_kp_info['roll']
+        )
+
+        # get feature volume by F
+        source_feature_3d = self.extract_feature_3d(source_prepared)
+
+        return source_kp_info, source_rotation, source_feature_3d, driving_first_frame_kp_info, driving_first_frame_rotation
+
+    def transform_keypoint(self, kp_info: dict):
+        """
+        transform the implicit keypoints with the pose, shift, and expression deformation
+        kp: BxNx3
+        """
+        kp = kp_info['kp']    # (bs, k, 3)
+        pitch, yaw, roll = kp_info['pitch'], kp_info['yaw'], kp_info['roll']
+
+        t, exp = kp_info['t'], kp_info['exp']
+        scale = kp_info['scale']
+
+        pitch = headpose_pred_to_degree(pitch)
+        yaw = headpose_pred_to_degree(yaw)
+        roll = headpose_pred_to_degree(roll)
+
+        bs = kp.shape[0]
+        if kp.ndim == 2:
+            num_kp = kp.shape[1] // 3  # Bx(num_kpx3)
+        else:
+            num_kp = kp.shape[1]  # Bxnum_kpx3
+
+        rot_mat = get_rotation_matrix(pitch, yaw, roll)    # (bs, 3, 3)
+
+        # Eqn.2: s * (R * x_c,s + exp) + t
+        kp_transformed = kp.view(bs, num_kp, 3) @ rot_mat + exp.view(bs, num_kp, 3)
+        kp_transformed *= scale[..., None]  # (bs, k, 3) * (bs, 1, 1) = (bs, k, 3)
+        kp_transformed[:, :, 0:2] += t[:, None, 0:2]  # remove z, only apply tx ty
+        # kp_transformed[:, :, :] += t[:, None, :]
+
+        return kp_transformed
+
+    def retarget_eye(self, kp_source: torch.Tensor, eye_close_ratio: torch.Tensor) -> torch.Tensor:
+        """
+        kp_source: BxNx3
+        eye_close_ratio: Bx3
+        Return: Bx(3*num_kp+2)
+        """
+        feat_eye = concat_feat(kp_source, eye_close_ratio)
+
+        with torch.no_grad():
+            delta = self.stitching_retargeting_module['eye'](feat_eye)
+
+        return delta
+
+    def retarget_lip(self, kp_source: torch.Tensor, lip_close_ratio: torch.Tensor) -> torch.Tensor:
+        """
+        kp_source: BxNx3
+        lip_close_ratio: Bx2
+        """
+        feat_lip = concat_feat(kp_source, lip_close_ratio)
+
+        with torch.no_grad():
+            delta = self.stitching_retargeting_module['lip'](feat_lip)
+
+        return delta
+
+    def stitch(self, kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
+        """
+        kp_source: BxNx3
+        kp_driving: BxNx3
+        Return: Bx(3*num_kp+2)
+        """
+        feat_stiching = concat_feat(kp_source, kp_driving)
+
+        with torch.no_grad():
+            delta = self.stitching_retargeting_module['stitching'](feat_stiching)
+
+        return delta
+
+    def stitching(self, kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
+        """ conduct the stitching
+        kp_source: Bxnum_kpx3
+        kp_driving: Bxnum_kpx3
+        """
+
+        if self.stitching_retargeting_module is not None:
+
+            bs, num_kp = kp_source.shape[:2]
+
+            kp_driving_new = kp_driving.clone()
+            delta = self.stitch(kp_source, kp_driving_new)
+
+            delta_exp = delta[..., :3*num_kp].reshape(bs, num_kp, 3)  # (bs, num_kp, 3)
+            delta_tx_ty = delta[..., 3*num_kp:3*num_kp+2].reshape(bs, 1, 2)  # (bs, 1, 2)
+
+            kp_driving_new += delta_exp
+            kp_driving_new[..., :2] += delta_tx_ty
+
+            return kp_driving_new
+
+        return kp_driving
+
+    def warp_decode(self, feature_3d: torch.Tensor, kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
+        """ get the image after the warping of the implicit keypoints
+        feature_3d: Bx32x16x64x64, feature volume
+        kp_source: BxNx3
+        kp_driving: BxNx3
+        """
+        # The line 18 in Algorithm 1: D(W(f_s; x_s, x′_d,i))
+        with torch.no_grad():
+            with torch.autocast(device_type=self.device[:4], dtype=torch.float16, enabled=self.inference_cfg.flag_use_half_precision):
+                if self.compile:
+                    # Mark the beginning of a new CUDA Graph step
+                    torch.compiler.cudagraph_mark_step_begin()
+                # get decoder input
+                ret_dct = self.warping_module(feature_3d, kp_source=kp_source, kp_driving=kp_driving)
+                # decode
+                ret_dct['out'] = self.spade_generator(feature=ret_dct['out'])
+
+            # float the dict
+            if self.inference_cfg.flag_use_half_precision:
+                for k, v in ret_dct.items():
+                    if isinstance(v, torch.Tensor):
+                        ret_dct[k] = v.float()
+
+        return ret_dct
+
+    def parse_output(self, out: torch.Tensor) -> np.ndarray:
+        """ construct the output as standard
+        return: 1xHxWx3, uint8
+        """
+        out = np.transpose(out.data.cpu().numpy(), [0, 2, 3, 1])  # 1x3xHxW -> 1xHxWx3
+        out = np.clip(out, 0, 1)  # clip to 0~1
+        out = np.clip(out * 255, 0, 255).astype(np.uint8)  # 0~1 -> 0~255
+
+        return out
+
+    def calc_driving_ratio(self, driving_lmk_lst):
+        input_eye_ratio_lst = []
+        input_lip_ratio_lst = []
+        for lmk in driving_lmk_lst:
+            # for eyes retargeting
+            input_eye_ratio_lst.append(calc_eye_close_ratio(lmk[None]))
+            # for lip retargeting
+            input_lip_ratio_lst.append(calc_lip_close_ratio(lmk[None]))
+        return input_eye_ratio_lst, input_lip_ratio_lst
+
+    def calc_combined_eye_ratio(self, c_d_eyes_i, source_lmk):
+        c_s_eyes = calc_eye_close_ratio(source_lmk[None])
+        c_s_eyes_tensor = torch.from_numpy(c_s_eyes).float().to(self.device)
+        c_d_eyes_i_tensor = torch.Tensor([c_d_eyes_i[0][0]]).reshape(1, 1).to(self.device)
+        # [c_s,eyes, c_d,eyes,i]
+        combined_eye_ratio_tensor = torch.cat([c_s_eyes_tensor, c_d_eyes_i_tensor], dim=1)
+        return combined_eye_ratio_tensor
+
+    def calc_combined_lip_ratio(self, c_d_lip_i, source_lmk):
+        c_s_lip = calc_lip_close_ratio(source_lmk[None])
+        c_s_lip_tensor = torch.from_numpy(c_s_lip).float().to(self.device)
+        c_d_lip_i_tensor = torch.Tensor([c_d_lip_i[0]]).to(self.device).reshape(1, 1) # 1x1
+        # [c_s,lip, c_d,lip,i]
+        combined_lip_ratio_tensor = torch.cat([c_s_lip_tensor, c_d_lip_i_tensor], dim=1) # 1x2
+        return combined_lip_ratio_tensor
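+
+# Call-order sketch (illustrative, not part of the original file): for a prepared source
+# image and one set of driving keypoints x_d, the wrapper is typically used as
+#   wrapper = LivePortraitWrapper(inference_cfg=InferenceConfig())
+#   I_s     = wrapper.prepare_source(img_rgb_256)   # 1x3x256x256, values in [0, 1]
+#   f_s     = wrapper.extract_feature_3d(I_s)       # Bx32x16x64x64 appearance feature volume
+#   x_info  = wrapper.get_kp_info(I_s)
+#   x_s     = wrapper.transform_keypoint(x_info)    # Eqn.2: s * (x_c @ R + exp) + t (tx, ty only)
+#   out     = wrapper.warp_decode(f_s, x_s, x_d)
+#   frame   = wrapper.parse_output(out['out'])[0]   # HxWx3 uint8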
diff --git a/src/modules/__init__.py b/src/modules/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/modules/appearance_feature_extractor.py b/src/modules/appearance_feature_extractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d89e4f18a2fbe58447f52ab4c5e3f2011a4ec80
--- /dev/null
+++ b/src/modules/appearance_feature_extractor.py
@@ -0,0 +1,48 @@
+# coding: utf-8
+
+"""
+Appearance feature extractor (F) defined in the paper, which maps the source image s to a 3D appearance feature volume.
+"""
+
+import torch
+from torch import nn
+from .util import SameBlock2d, DownBlock2d, ResBlock3d
+
+
+class AppearanceFeatureExtractor(nn.Module):
+
+    def __init__(self, image_channel, block_expansion, num_down_blocks, max_features, reshape_channel, reshape_depth, num_resblocks):
+        super(AppearanceFeatureExtractor, self).__init__()
+        self.image_channel = image_channel
+        self.block_expansion = block_expansion
+        self.num_down_blocks = num_down_blocks
+        self.max_features = max_features
+        self.reshape_channel = reshape_channel
+        self.reshape_depth = reshape_depth
+
+        self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(3, 3), padding=(1, 1))
+
+        down_blocks = []
+        for i in range(num_down_blocks):
+            in_features = min(max_features, block_expansion * (2 ** i))
+            out_features = min(max_features, block_expansion * (2 ** (i + 1)))
+            down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
+        self.down_blocks = nn.ModuleList(down_blocks)
+
+        self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1)
+
+        self.resblocks_3d = torch.nn.Sequential()
+        for i in range(num_resblocks):
+            self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1))
+
+    def forward(self, source_image):
+        out = self.first(source_image)  # Bx3x256x256 -> Bx64x256x256
+
+        for i in range(len(self.down_blocks)):
+            out = self.down_blocks[i](out)
+        out = self.second(out)
+        bs, c, h, w = out.shape  # ->Bx512x64x64
+
+        f_s = out.view(bs, self.reshape_channel, self.reshape_depth, h, w)  # ->Bx32x16x64x64
+        f_s = self.resblocks_3d(f_s)  # ->Bx32x16x64x64
+        return f_s
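+
+# Shape sketch (illustrative, not part of the original file), using the default
+# appearance_feature_extractor_params from src/config/models.yaml:
+#   F = AppearanceFeatureExtractor(image_channel=3, block_expansion=64, num_down_blocks=2,
+#                                  max_features=512, reshape_channel=32, reshape_depth=16,
+#                                  num_resblocks=6)
+#   f_s = F(torch.rand(1, 3, 256, 256))   # -> (1, 32, 16, 64, 64) appearance feature volume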
diff --git a/src/modules/convnextv2.py b/src/modules/convnextv2.py
new file mode 100644
index 0000000000000000000000000000000000000000..83ea12662b607854915df8c7abb160b588d330b1
--- /dev/null
+++ b/src/modules/convnextv2.py
@@ -0,0 +1,149 @@
+# coding: utf-8
+
+"""
+This module adapts ConvNeXtV2 for the extraction of implicit keypoints, poses, and expression deformations.
+"""
+
+import torch
+import torch.nn as nn
+# from timm.models.layers import trunc_normal_, DropPath
+from .util import LayerNorm, DropPath, trunc_normal_, GRN
+
+__all__ = ['convnextv2_tiny']
+
+
+class Block(nn.Module):
+    """ ConvNeXtV2 Block.
+
+    Args:
+        dim (int): Number of input channels.
+        drop_path (float): Stochastic depth rate. Default: 0.0
+    """
+
+    def __init__(self, dim, drop_path=0.):
+        super().__init__()
+        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)  # depthwise conv
+        self.norm = LayerNorm(dim, eps=1e-6)
+        self.pwconv1 = nn.Linear(dim, 4 * dim)  # pointwise/1x1 convs, implemented with linear layers
+        self.act = nn.GELU()
+        self.grn = GRN(4 * dim)
+        self.pwconv2 = nn.Linear(4 * dim, dim)
+        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+    def forward(self, x):
+        input = x
+        x = self.dwconv(x)
+        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
+        x = self.norm(x)
+        x = self.pwconv1(x)
+        x = self.act(x)
+        x = self.grn(x)
+        x = self.pwconv2(x)
+        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)
+
+        x = input + self.drop_path(x)
+        return x
+
+
+class ConvNeXtV2(nn.Module):
+    """ ConvNeXt V2
+
+    Args:
+        in_chans (int): Number of input image channels. Default: 3
+        num_classes (int): Number of classes for classification head. Default: 1000
+        depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
+        dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
+        drop_path_rate (float): Stochastic depth rate. Default: 0.
+        head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
+    """
+
+    def __init__(
+        self,
+        in_chans=3,
+        depths=[3, 3, 9, 3],
+        dims=[96, 192, 384, 768],
+        drop_path_rate=0.,
+        **kwargs
+    ):
+        super().__init__()
+        self.depths = depths
+        self.downsample_layers = nn.ModuleList()  # stem and 3 intermediate downsampling conv layers
+        stem = nn.Sequential(
+            nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
+            LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
+        )
+        self.downsample_layers.append(stem)
+        for i in range(3):
+            downsample_layer = nn.Sequential(
+                LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
+                nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
+            )
+            self.downsample_layers.append(downsample_layer)
+
+        self.stages = nn.ModuleList()  # 4 feature resolution stages, each consisting of multiple residual blocks
+        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
+        cur = 0
+        for i in range(4):
+            stage = nn.Sequential(
+                *[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])]
+            )
+            self.stages.append(stage)
+            cur += depths[i]
+
+        self.norm = nn.LayerNorm(dims[-1], eps=1e-6)  # final norm layer
+
+        # NOTE: the output semantic items
+        num_bins = kwargs.get('num_bins', 66)
+        num_kp = kwargs.get('num_kp', 24)  # the number of implicit keypoints
+        self.fc_kp = nn.Linear(dims[-1], 3 * num_kp)  # implicit keypoints
+
+        # print('dims[-1]: ', dims[-1])
+        self.fc_scale = nn.Linear(dims[-1], 1)  # scale
+        self.fc_pitch = nn.Linear(dims[-1], num_bins)  # pitch bins
+        self.fc_yaw = nn.Linear(dims[-1], num_bins)  # yaw bins
+        self.fc_roll = nn.Linear(dims[-1], num_bins)  # roll bins
+        self.fc_t = nn.Linear(dims[-1], 3)  # translation
+        self.fc_exp = nn.Linear(dims[-1], 3 * num_kp)  # expression / delta
+
+    def _init_weights(self, m):
+        if isinstance(m, (nn.Conv2d, nn.Linear)):
+            trunc_normal_(m.weight, std=.02)
+            nn.init.constant_(m.bias, 0)
+
+    def forward_features(self, x):
+        for i in range(4):
+            x = self.downsample_layers[i](x)
+            x = self.stages[i](x)
+        return self.norm(x.mean([-2, -1]))  # global average pooling, (N, C, H, W) -> (N, C)
+
+    def forward(self, x):
+        x = self.forward_features(x)
+
+        # implicit keypoints
+        kp = self.fc_kp(x)
+
+        # pose and expression deformation
+        pitch = self.fc_pitch(x)
+        yaw = self.fc_yaw(x)
+        roll = self.fc_roll(x)
+        t = self.fc_t(x)
+        exp = self.fc_exp(x)
+        scale = self.fc_scale(x)
+
+        ret_dct = {
+            'pitch': pitch,
+            'yaw': yaw,
+            'roll': roll,
+            't': t,
+            'exp': exp,
+            'scale': scale,
+
+            'kp': kp,  # canonical keypoint
+        }
+
+        return ret_dct
+
+
+def convnextv2_tiny(**kwargs):
+    model = ConvNeXtV2(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
+    return model
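+
+
+# A brief note on the outputs (derived from the head layers above; a reading aid, not an official spec):
+# for a batch of N images, ConvNeXtV2.forward returns a dict with
+#   'kp':    (N, 3 * num_kp)   canonical implicit keypoints (flattened x/y/z)
+#   'pitch': (N, num_bins)     head-pose logits over num_bins angle bins (likewise 'yaw' and 'roll')
+#   't':     (N, 3)            translation
+#   'exp':   (N, 3 * num_kp)   expression deltas on the keypoints
+#   'scale': (N, 1)            global scale
+# With the defaults above (num_kp=24, num_bins=66) this gives 72 / 66 / 3 / 72 / 1 values respectively.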
diff --git a/src/modules/dense_motion.py b/src/modules/dense_motion.py
new file mode 100644
index 0000000000000000000000000000000000000000..0eec0c46345f8854b125a51eaee730bd4ee77f7d
--- /dev/null
+++ b/src/modules/dense_motion.py
@@ -0,0 +1,104 @@
+# coding: utf-8
+
+"""
+The module that predicts a dense motion field from the sparse motion representation given by kp_source and kp_driving
+"""
+
+from torch import nn
+import torch.nn.functional as F
+import torch
+from .util import Hourglass, make_coordinate_grid, kp2gaussian
+
+
+class DenseMotionNetwork(nn.Module):
+    def __init__(self, block_expansion, num_blocks, max_features, num_kp, feature_channel, reshape_depth, compress, estimate_occlusion_map=True):
+        super(DenseMotionNetwork, self).__init__()
+        self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(compress+1), max_features=max_features, num_blocks=num_blocks)  # ~60+G
+
+        self.mask = nn.Conv3d(self.hourglass.out_filters, num_kp + 1, kernel_size=7, padding=3)  # 65G! NOTE: computation cost is large
+        self.compress = nn.Conv3d(feature_channel, compress, kernel_size=1)  # 0.8G
+        self.norm = nn.BatchNorm3d(compress, affine=True)
+        self.num_kp = num_kp
+        self.flag_estimate_occlusion_map = estimate_occlusion_map
+
+        if self.flag_estimate_occlusion_map:
+            self.occlusion = nn.Conv2d(self.hourglass.out_filters*reshape_depth, 1, kernel_size=7, padding=3)
+        else:
+            self.occlusion = None
+
+    def create_sparse_motions(self, feature, kp_driving, kp_source):
+        bs, _, d, h, w = feature.shape  # (bs, 4, 16, 64, 64)
+        identity_grid = make_coordinate_grid((d, h, w), ref=kp_source)  # (16, 64, 64, 3)
+        identity_grid = identity_grid.view(1, 1, d, h, w, 3)  # (1, 1, d=16, h=64, w=64, 3)
+        coordinate_grid = identity_grid - kp_driving.view(bs, self.num_kp, 1, 1, 1, 3)
+
+        k = coordinate_grid.shape[1]
+
+        # NOTE: a first-order flow term is not included here
+        driving_to_source = coordinate_grid + kp_source.view(bs, self.num_kp, 1, 1, 1, 3)    # (bs, num_kp, d, h, w, 3)
+
+        # adding background feature
+        identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1, 1)
+        sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1)  # (bs, 1+num_kp, d, h, w, 3)
+        return sparse_motions
+
+    def create_deformed_feature(self, feature, sparse_motions):
+        bs, _, d, h, w = feature.shape
+        feature_repeat = feature.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp+1, 1, 1, 1, 1, 1)      # (bs, num_kp+1, 1, c, d, h, w)
+        feature_repeat = feature_repeat.view(bs * (self.num_kp+1), -1, d, h, w)                         # (bs*(num_kp+1), c, d, h, w)
+        sparse_motions = sparse_motions.view((bs * (self.num_kp+1), d, h, w, -1))                       # (bs*(num_kp+1), d, h, w, 3)
+        sparse_deformed = F.grid_sample(feature_repeat, sparse_motions, align_corners=False)
+        sparse_deformed = sparse_deformed.view((bs, self.num_kp+1, -1, d, h, w))                        # (bs, num_kp+1, c, d, h, w)
+
+        return sparse_deformed
+
+    def create_heatmap_representations(self, feature, kp_driving, kp_source):
+        spatial_size = feature.shape[3:]  # (d=16, h=64, w=64)
+        gaussian_driving = kp2gaussian(kp_driving, spatial_size=spatial_size, kp_variance=0.01)  # (bs, num_kp, d, h, w)
+        gaussian_source = kp2gaussian(kp_source, spatial_size=spatial_size, kp_variance=0.01)  # (bs, num_kp, d, h, w)
+        heatmap = gaussian_driving - gaussian_source  # (bs, num_kp, d, h, w)
+
+        # adding background feature
+        zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1], spatial_size[2]).type(heatmap.type()).to(heatmap.device)
+        heatmap = torch.cat([zeros, heatmap], dim=1)
+        heatmap = heatmap.unsqueeze(2)         # (bs, 1+num_kp, 1, d, h, w)
+        return heatmap
+
+    def forward(self, feature, kp_driving, kp_source):
+        bs, _, d, h, w = feature.shape  # (bs, 32, 16, 64, 64)
+
+        feature = self.compress(feature)  # (bs, 4, 16, 64, 64)
+        feature = self.norm(feature)  # (bs, 4, 16, 64, 64)
+        feature = F.relu(feature)  # (bs, 4, 16, 64, 64)
+
+        out_dict = dict()
+
+        # 1. deform 3d feature
+        sparse_motion = self.create_sparse_motions(feature, kp_driving, kp_source)  # (bs, 1+num_kp, d, h, w, 3)
+        deformed_feature = self.create_deformed_feature(feature, sparse_motion)  # (bs, 1+num_kp, c=4, d=16, h=64, w=64)
+
+        # 2. (bs, 1+num_kp, d, h, w)
+        heatmap = self.create_heatmap_representations(deformed_feature, kp_driving, kp_source)  # (bs, 1+num_kp, 1, d, h, w)
+
+        input = torch.cat([heatmap, deformed_feature], dim=2)  # (bs, 1+num_kp, c=5, d=16, h=64, w=64)
+        input = input.view(bs, -1, d, h, w)  # (bs, (1+num_kp)*c=105, d=16, h=64, w=64)
+
+        prediction = self.hourglass(input)
+
+        mask = self.mask(prediction)
+        mask = F.softmax(mask, dim=1)  # (bs, 1+num_kp, d=16, h=64, w=64)
+        out_dict['mask'] = mask
+        mask = mask.unsqueeze(2)                                   # (bs, num_kp+1, 1, d, h, w)
+        sparse_motion = sparse_motion.permute(0, 1, 5, 2, 3, 4)    # (bs, num_kp+1, 3, d, h, w)
+        deformation = (sparse_motion * mask).sum(dim=1)            # (bs, 3, d, h, w)  mask take effect in this place
+        deformation = deformation.permute(0, 2, 3, 4, 1)           # (bs, d, h, w, 3)
+
+        out_dict['deformation'] = deformation
+
+        if self.flag_estimate_occlusion_map:
+            bs, _, d, h, w = prediction.shape
+            prediction_reshape = prediction.view(bs, -1, h, w)
+            occlusion_map = torch.sigmoid(self.occlusion(prediction_reshape))  # Bx1x64x64
+            out_dict['occlusion_map'] = occlusion_map
+
+        return out_dict
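+
+
+# Sketch of the data flow implemented above (a reading aid, not additional functionality):
+#   1. compress + BatchNorm + ReLU reduce the 3D feature volume to `compress` channels
+#   2. create_sparse_motions builds num_kp+1 candidate flows (identity/background + one per keypoint)
+#   3. create_deformed_feature warps the compressed volume with each candidate flow via grid_sample
+#   4. create_heatmap_representations encodes kp_driving - kp_source as Gaussian difference heatmaps
+#   5. the hourglass predicts per-voxel softmax weights over the num_kp+1 flows ('mask')
+#   6. 'deformation' is the mask-weighted sum of the candidate flows, optionally with an occlusion map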
diff --git a/src/modules/motion_extractor.py b/src/modules/motion_extractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2982e53c52d9ec1e0bec0453cc05edb51a15d23
--- /dev/null
+++ b/src/modules/motion_extractor.py
@@ -0,0 +1,35 @@
+# coding: utf-8
+
+"""
+Motion extractor (M), which directly predicts the canonical keypoints, head pose and expression deformation of the input image
+"""
+
+from torch import nn
+import torch
+
+from .convnextv2 import convnextv2_tiny
+from .util import filter_state_dict
+
+model_dict = {
+    'convnextv2_tiny': convnextv2_tiny,
+}
+
+
+class MotionExtractor(nn.Module):
+    def __init__(self, **kwargs):
+        super(MotionExtractor, self).__init__()
+
+        # default backbone is convnextv2_tiny (the only entry registered in model_dict above)
+        backbone = kwargs.get('backbone', 'convnextv2_tiny')
+        self.detector = model_dict.get(backbone)(**kwargs)
+
+    def load_pretrained(self, init_path: str):
+        if init_path not in (None, ''):
+            state_dict = torch.load(init_path, map_location=lambda storage, loc: storage)['model']
+            state_dict = filter_state_dict(state_dict, remove_name='head')
+            ret = self.detector.load_state_dict(state_dict, strict=False)
+            print(f'Load pretrained model from {init_path}, ret: {ret}')
+
+    def forward(self, x):
+        out = self.detector(x)
+        return out
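+
+
+# Minimal usage sketch (illustrative only; the real pipeline builds this from a config):
+#   extractor = MotionExtractor(backbone='convnextv2_tiny')
+#   extractor.load_pretrained('./pretrained_weights/liveportrait/base_models/motion_extractor.pth')  # path as in the README layout
+#   out = extractor(torch.randn(1, 3, 256, 256))  # dict with 'kp', 'pitch', 'yaw', 'roll', 't', 'exp', 'scale'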
diff --git a/src/modules/spade_generator.py b/src/modules/spade_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..147a9aed0c7707fe6ae3d59ce1a30154ef75afcc
--- /dev/null
+++ b/src/modules/spade_generator.py
@@ -0,0 +1,59 @@
+# coding: utf-8
+
+"""
+SPADE decoder (G) defined in the paper, which takes the warped feature as input and generates the animated image.
+"""
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+from .util import SPADEResnetBlock
+
+
+class SPADEDecoder(nn.Module):
+    def __init__(self, upscale=1, max_features=256, block_expansion=64, out_channels=64, num_down_blocks=2):
+        super().__init__()
+        self.upscale = upscale
+        # channel count after the final downsampling block (256 with the default arguments)
+        input_channels = min(max_features, block_expansion * (2 ** num_down_blocks))
+        norm_G = 'spadespectralinstance'
+        label_num_channels = input_channels  # 256
+
+        self.fc = nn.Conv2d(input_channels, 2 * input_channels, 3, padding=1)
+        self.G_middle_0 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
+        self.G_middle_1 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
+        self.G_middle_2 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
+        self.G_middle_3 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
+        self.G_middle_4 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
+        self.G_middle_5 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
+        self.up_0 = SPADEResnetBlock(2 * input_channels, input_channels, norm_G, label_num_channels)
+        self.up_1 = SPADEResnetBlock(input_channels, out_channels, norm_G, label_num_channels)
+        self.up = nn.Upsample(scale_factor=2)
+
+        if self.upscale is None or self.upscale <= 1:
+            self.conv_img = nn.Conv2d(out_channels, 3, 3, padding=1)
+        else:
+            self.conv_img = nn.Sequential(
+                nn.Conv2d(out_channels, 3 * (2 * 2), kernel_size=3, padding=1),
+                nn.PixelShuffle(upscale_factor=2)
+            )
+
+    def forward(self, feature):
+        seg = feature  # Bx256x64x64
+        x = self.fc(feature)  # Bx512x64x64
+        x = self.G_middle_0(x, seg)
+        x = self.G_middle_1(x, seg)
+        x = self.G_middle_2(x, seg)
+        x = self.G_middle_3(x, seg)
+        x = self.G_middle_4(x, seg)
+        x = self.G_middle_5(x, seg)
+
+        x = self.up(x)  # Bx512x64x64 -> Bx512x128x128
+        x = self.up_0(x, seg)  # Bx512x128x128 -> Bx256x128x128
+        x = self.up(x)  # Bx256x128x128 -> Bx256x256x256
+        x = self.up_1(x, seg)  # Bx256x256x256 -> Bx64x256x256
+
+        x = self.conv_img(F.leaky_relu(x, 2e-1))  # Bx64x256x256 -> Bx3xHxW
+        x = torch.sigmoid(x)  # Bx3xHxW
+
+        return x
\ No newline at end of file
diff --git a/src/modules/stitching_retargeting_network.py b/src/modules/stitching_retargeting_network.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f50b7cf5a21cd71c70a7bbaaa4b6b68b4762ea3
--- /dev/null
+++ b/src/modules/stitching_retargeting_network.py
@@ -0,0 +1,38 @@
+# coding: utf-8
+
+"""
+Stitching module (S) and two retargeting modules (R) defined in the paper.
+
+- The stitching module pastes the animated portrait back into the original image space without pixel misalignment, e.g. in
+the stitching region.
+
+- The eyes retargeting module is designed to address the issue of incomplete eye closure during cross-id reenactment, especially
+when a person with small eyes drives a person with larger eyes.
+
+- The lip retargeting module is designed similarly to the eye retargeting module, and can also normalize the input by ensuring that
+the lips are in a closed state, which facilitates better animation driving.
+"""
+from torch import nn
+
+
+class StitchingRetargetingNetwork(nn.Module):
+    def __init__(self, input_size, hidden_sizes, output_size):
+        super(StitchingRetargetingNetwork, self).__init__()
+        layers = []
+        for i in range(len(hidden_sizes)):
+            if i == 0:
+                layers.append(nn.Linear(input_size, hidden_sizes[i]))
+            else:
+                layers.append(nn.Linear(hidden_sizes[i - 1], hidden_sizes[i]))
+            layers.append(nn.ReLU(inplace=True))
+        layers.append(nn.Linear(hidden_sizes[-1], output_size))
+        self.mlp = nn.Sequential(*layers)
+
+    def initialize_weights_to_zero(self):
+        for m in self.modules():
+            if isinstance(m, nn.Linear):
+                nn.init.zeros_(m.weight)
+                nn.init.zeros_(m.bias)
+
+    def forward(self, x):
+        return self.mlp(x)
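+
+
+# Shape sketch (hypothetical sizes, chosen only to illustrate the layer layout built above):
+#   StitchingRetargetingNetwork(input_size=126, hidden_sizes=[128, 128, 64], output_size=65)
+#   builds Linear(126->128) -> ReLU -> Linear(128->128) -> ReLU -> Linear(128->64) -> ReLU -> Linear(64->65)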
diff --git a/src/modules/util.py b/src/modules/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..f83980b24372bee38779ceeb3349fca91735e56e
--- /dev/null
+++ b/src/modules/util.py
@@ -0,0 +1,441 @@
+# coding: utf-8
+
+"""
+This file defines various neural network modules and utility functions, including convolutional and residual blocks,
+normalizations, and functions for spatial transformation and tensor manipulation.
+"""
+
+from torch import nn
+import torch.nn.functional as F
+import torch
+import torch.nn.utils.spectral_norm as spectral_norm
+import math
+import warnings
+
+
+def kp2gaussian(kp, spatial_size, kp_variance):
+    """
+    Transform keypoints into a Gaussian-like heatmap representation
+    """
+    mean = kp
+
+    coordinate_grid = make_coordinate_grid(spatial_size, mean)
+    number_of_leading_dimensions = len(mean.shape) - 1
+    shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape
+    coordinate_grid = coordinate_grid.view(*shape)
+    repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1, 1)
+    coordinate_grid = coordinate_grid.repeat(*repeats)
+
+    # Preprocess kp shape
+    shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 1, 3)
+    mean = mean.view(*shape)
+
+    mean_sub = (coordinate_grid - mean)
+
+    out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance)
+
+    return out
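+
+# In other words, for every keypoint kp_i and every location p of the normalized [-1, 1]^3 grid,
+# out[b, i, d, h, w] = exp(-0.5 * ||p - kp_i||^2 / kp_variance), i.e. an isotropic Gaussian centred on the keypoint.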
+
+
+def make_coordinate_grid(spatial_size, ref, **kwargs):
+    d, h, w = spatial_size
+    x = torch.arange(w).type(ref.dtype).to(ref.device)
+    y = torch.arange(h).type(ref.dtype).to(ref.device)
+    z = torch.arange(d).type(ref.dtype).to(ref.device)
+
+    # NOTE: must be right-down-in
+    x = (2 * (x / (w - 1)) - 1)  # the x axis faces to the right
+    y = (2 * (y / (h - 1)) - 1)  # the y axis faces to the bottom
+    z = (2 * (z / (d - 1)) - 1)  # the z axis faces to the inner
+
+    yy = y.view(1, -1, 1).repeat(d, 1, w)
+    xx = x.view(1, 1, -1).repeat(d, h, 1)
+    zz = z.view(-1, 1, 1).repeat(1, h, w)
+
+    meshed = torch.cat([xx.unsqueeze_(3), yy.unsqueeze_(3), zz.unsqueeze_(3)], 3)
+
+    return meshed
+
+
+class ConvT2d(nn.Module):
+    """
+    Upsampling block for use in decoder.
+    """
+
+    def __init__(self, in_features, out_features, kernel_size=3, stride=2, padding=1, output_padding=1):
+        super(ConvT2d, self).__init__()
+
+        self.convT = nn.ConvTranspose2d(in_features, out_features, kernel_size=kernel_size, stride=stride,
+                                        padding=padding, output_padding=output_padding)
+        self.norm = nn.InstanceNorm2d(out_features)
+
+    def forward(self, x):
+        out = self.convT(x)
+        out = self.norm(out)
+        out = F.leaky_relu(out)
+        return out
+
+
+class ResBlock3d(nn.Module):
+    """
+    Res block, preserve spatial resolution.
+    """
+
+    def __init__(self, in_features, kernel_size, padding):
+        super(ResBlock3d, self).__init__()
+        self.conv1 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size, padding=padding)
+        self.conv2 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size, padding=padding)
+        self.norm1 = nn.BatchNorm3d(in_features, affine=True)
+        self.norm2 = nn.BatchNorm3d(in_features, affine=True)
+
+    def forward(self, x):
+        out = self.norm1(x)
+        out = F.relu(out)
+        out = self.conv1(out)
+        out = self.norm2(out)
+        out = F.relu(out)
+        out = self.conv2(out)
+        out += x
+        return out
+
+
+class UpBlock3d(nn.Module):
+    """
+    Upsampling block for use in decoder.
+    """
+
+    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
+        super(UpBlock3d, self).__init__()
+
+        self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
+                              padding=padding, groups=groups)
+        self.norm = nn.BatchNorm3d(out_features, affine=True)
+
+    def forward(self, x):
+        out = F.interpolate(x, scale_factor=(1, 2, 2))
+        out = self.conv(out)
+        out = self.norm(out)
+        out = F.relu(out)
+        return out
+
+
+class DownBlock2d(nn.Module):
+    """
+    Downsampling block for use in encoder.
+    """
+
+    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
+        super(DownBlock2d, self).__init__()
+        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, padding=padding, groups=groups)
+        self.norm = nn.BatchNorm2d(out_features, affine=True)
+        self.pool = nn.AvgPool2d(kernel_size=(2, 2))
+
+    def forward(self, x):
+        out = self.conv(x)
+        out = self.norm(out)
+        out = F.relu(out)
+        out = self.pool(out)
+        return out
+
+
+class DownBlock3d(nn.Module):
+    """
+    Downsampling block for use in encoder.
+    """
+
+    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
+        super(DownBlock3d, self).__init__()
+        '''
+        self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
+                                padding=padding, groups=groups, stride=(1, 2, 2))
+        '''
+        self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
+                              padding=padding, groups=groups)
+        self.norm = nn.BatchNorm3d(out_features, affine=True)
+        self.pool = nn.AvgPool3d(kernel_size=(1, 2, 2))
+
+    def forward(self, x):
+        out = self.conv(x)
+        out = self.norm(out)
+        out = F.relu(out)
+        out = self.pool(out)
+        return out
+
+
+class SameBlock2d(nn.Module):
+    """
+    Simple block, preserve spatial resolution.
+    """
+
+    def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1, lrelu=False):
+        super(SameBlock2d, self).__init__()
+        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, padding=padding, groups=groups)
+        self.norm = nn.BatchNorm2d(out_features, affine=True)
+        if lrelu:
+            self.ac = nn.LeakyReLU()
+        else:
+            self.ac = nn.ReLU()
+
+    def forward(self, x):
+        out = self.conv(x)
+        out = self.norm(out)
+        out = self.ac(out)
+        return out
+
+
+class Encoder(nn.Module):
+    """
+    Hourglass Encoder
+    """
+
+    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
+        super(Encoder, self).__init__()
+
+        down_blocks = []
+        for i in range(num_blocks):
+            down_blocks.append(DownBlock3d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)), min(max_features, block_expansion * (2 ** (i + 1))), kernel_size=3, padding=1))
+        self.down_blocks = nn.ModuleList(down_blocks)
+
+    def forward(self, x):
+        outs = [x]
+        for down_block in self.down_blocks:
+            outs.append(down_block(outs[-1]))
+        return outs
+
+
+class Decoder(nn.Module):
+    """
+    Hourglass Decoder
+    """
+
+    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
+        super(Decoder, self).__init__()
+
+        up_blocks = []
+
+        for i in range(num_blocks)[::-1]:
+            in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1)))
+            out_filters = min(max_features, block_expansion * (2 ** i))
+            up_blocks.append(UpBlock3d(in_filters, out_filters, kernel_size=3, padding=1))
+
+        self.up_blocks = nn.ModuleList(up_blocks)
+        self.out_filters = block_expansion + in_features
+
+        self.conv = nn.Conv3d(in_channels=self.out_filters, out_channels=self.out_filters, kernel_size=3, padding=1)
+        self.norm = nn.BatchNorm3d(self.out_filters, affine=True)
+
+    def forward(self, x):
+        out = x.pop()
+        for up_block in self.up_blocks:
+            out = up_block(out)
+            skip = x.pop()
+            out = torch.cat([out, skip], dim=1)
+        out = self.conv(out)
+        out = self.norm(out)
+        out = F.relu(out)
+        return out
+
+
+class Hourglass(nn.Module):
+    """
+    Hourglass architecture.
+    """
+
+    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
+        super(Hourglass, self).__init__()
+        self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)
+        self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)
+        self.out_filters = self.decoder.out_filters
+
+    def forward(self, x):
+        return self.decoder(self.encoder(x))
+
+
+class SPADE(nn.Module):
+    def __init__(self, norm_nc, label_nc):
+        super().__init__()
+
+        self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
+        nhidden = 128
+
+        self.mlp_shared = nn.Sequential(
+            nn.Conv2d(label_nc, nhidden, kernel_size=3, padding=1),
+            nn.ReLU())
+        self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1)
+        self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1)
+
+    def forward(self, x, segmap):
+        normalized = self.param_free_norm(x)
+        segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
+        actv = self.mlp_shared(segmap)
+        gamma = self.mlp_gamma(actv)
+        beta = self.mlp_beta(actv)
+        out = normalized * (1 + gamma) + beta
+        return out
+
+
+class SPADEResnetBlock(nn.Module):
+    def __init__(self, fin, fout, norm_G, label_nc, use_se=False, dilation=1):
+        super().__init__()
+        # Attributes
+        self.learned_shortcut = (fin != fout)
+        fmiddle = min(fin, fout)
+        self.use_se = use_se
+        # create conv layers
+        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=dilation, dilation=dilation)
+        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=dilation, dilation=dilation)
+        if self.learned_shortcut:
+            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
+        # apply spectral norm if specified
+        if 'spectral' in norm_G:
+            self.conv_0 = spectral_norm(self.conv_0)
+            self.conv_1 = spectral_norm(self.conv_1)
+            if self.learned_shortcut:
+                self.conv_s = spectral_norm(self.conv_s)
+        # define normalization layers
+        self.norm_0 = SPADE(fin, label_nc)
+        self.norm_1 = SPADE(fmiddle, label_nc)
+        if self.learned_shortcut:
+            self.norm_s = SPADE(fin, label_nc)
+
+    def forward(self, x, seg1):
+        x_s = self.shortcut(x, seg1)
+        dx = self.conv_0(self.actvn(self.norm_0(x, seg1)))
+        dx = self.conv_1(self.actvn(self.norm_1(dx, seg1)))
+        out = x_s + dx
+        return out
+
+    def shortcut(self, x, seg1):
+        if self.learned_shortcut:
+            x_s = self.conv_s(self.norm_s(x, seg1))
+        else:
+            x_s = x
+        return x_s
+
+    def actvn(self, x):
+        return F.leaky_relu(x, 2e-1)
+
+
+def filter_state_dict(state_dict, remove_name='fc'):
+    new_state_dict = {}
+    for key in state_dict:
+        if remove_name in key:
+            continue
+        new_state_dict[key] = state_dict[key]
+    return new_state_dict
+
+
+class GRN(nn.Module):
+    """ GRN (Global Response Normalization) layer
+    """
+
+    def __init__(self, dim):
+        super().__init__()
+        self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim))
+        self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim))
+
+    def forward(self, x):
+        Gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True)
+        Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-6)
+        return self.gamma * (x * Nx) + self.beta + x
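+
+    # Per channel c (input layout N, H, W, C): Gx_c = ||x[..., c]||_2 over the spatial dims (H, W),
+    # Nx_c = Gx_c / (mean over channels of Gx + 1e-6), and the output is gamma * (x * Nx) + beta + x.
+    # gamma and beta start at zero, so GRN is initially the identity map.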
+
+
+class LayerNorm(nn.Module):
+    r""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
+    The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
+    shape (batch_size, height, width, channels) while channels_first corresponds to inputs
+    with shape (batch_size, channels, height, width).
+    """
+
+    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
+        super().__init__()
+        self.weight = nn.Parameter(torch.ones(normalized_shape))
+        self.bias = nn.Parameter(torch.zeros(normalized_shape))
+        self.eps = eps
+        self.data_format = data_format
+        if self.data_format not in ["channels_last", "channels_first"]:
+            raise NotImplementedError
+        self.normalized_shape = (normalized_shape, )
+
+    def forward(self, x):
+        if self.data_format == "channels_last":
+            return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
+        elif self.data_format == "channels_first":
+            u = x.mean(1, keepdim=True)
+            s = (x - u).pow(2).mean(1, keepdim=True)
+            x = (x - u) / torch.sqrt(s + self.eps)
+            x = self.weight[:, None, None] * x + self.bias[:, None, None]
+            return x
+
+
+def _no_grad_trunc_normal_(tensor, mean, std, a, b):
+    # Cut & paste from PyTorch official master until it's in a few official releases - RW
+    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
+    def norm_cdf(x):
+        # Computes standard normal cumulative distribution function
+        return (1. + math.erf(x / math.sqrt(2.))) / 2.
+
+    if (mean < a - 2 * std) or (mean > b + 2 * std):
+        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
+                      "The distribution of values may be incorrect.",
+                      stacklevel=2)
+
+    with torch.no_grad():
+        # Values are generated by using a truncated uniform distribution and
+        # then using the inverse CDF for the normal distribution.
+        # Get upper and lower cdf values
+        l = norm_cdf((a - mean) / std)
+        u = norm_cdf((b - mean) / std)
+
+        # Uniformly fill tensor with values from [l, u], then translate to
+        # [2l-1, 2u-1].
+        tensor.uniform_(2 * l - 1, 2 * u - 1)
+
+        # Use inverse cdf transform for normal distribution to get truncated
+        # standard normal
+        tensor.erfinv_()
+
+        # Transform to proper mean, std
+        tensor.mul_(std * math.sqrt(2.))
+        tensor.add_(mean)
+
+        # Clamp to ensure it's in the proper range
+        tensor.clamp_(min=a, max=b)
+        return tensor
+
+
+def drop_path(x, drop_prob=0., training=False, scale_by_keep=True):
+    """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
+    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
+    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
+    'survival rate' as the argument.
+
+    """
+    if drop_prob == 0. or not training:
+        return x
+    keep_prob = 1 - drop_prob
+    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
+    random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
+    if keep_prob > 0.0 and scale_by_keep:
+        random_tensor.div_(keep_prob)
+    return x * random_tensor
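+
+# Example of the behaviour: with drop_prob=0.1 and scale_by_keep=True, each sample in the batch has its
+# residual branch zeroed with probability 0.1 and scaled by 1/0.9 otherwise, so the expected value is unchanged.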
+
+
+class DropPath(nn.Module):
+    """ Drop paths (Stochastic Depth) per sample  (when applied in main path of residual blocks).
+    """
+
+    def __init__(self, drop_prob=None, scale_by_keep=True):
+        super(DropPath, self).__init__()
+        self.drop_prob = drop_prob
+        self.scale_by_keep = scale_by_keep
+
+    def forward(self, x):
+        return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
+
+
+def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
+    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
diff --git a/src/modules/warping_network.py b/src/modules/warping_network.py
new file mode 100644
index 0000000000000000000000000000000000000000..9191a197055a954272ee8ed86c5e34f3f33f9ad5
--- /dev/null
+++ b/src/modules/warping_network.py
@@ -0,0 +1,77 @@
+# coding: utf-8
+
+"""
+Warping field estimator (W) defined in the paper, which generates a warping field using the implicit
+keypoint representations x_s and x_d, and employs this flow field to warp the source feature volume f_s.
+"""
+
+from torch import nn
+import torch.nn.functional as F
+from .util import SameBlock2d
+from .dense_motion import DenseMotionNetwork
+
+
+class WarpingNetwork(nn.Module):
+    def __init__(
+        self,
+        num_kp,
+        block_expansion,
+        max_features,
+        num_down_blocks,
+        reshape_channel,
+        estimate_occlusion_map=False,
+        dense_motion_params=None,
+        **kwargs
+    ):
+        super(WarpingNetwork, self).__init__()
+
+        self.upscale = kwargs.get('upscale', 1)
+        self.flag_use_occlusion_map = kwargs.get('flag_use_occlusion_map', True)
+
+        if dense_motion_params is not None:
+            self.dense_motion_network = DenseMotionNetwork(
+                num_kp=num_kp,
+                feature_channel=reshape_channel,
+                estimate_occlusion_map=estimate_occlusion_map,
+                **dense_motion_params
+            )
+        else:
+            self.dense_motion_network = None
+
+        self.third = SameBlock2d(max_features, block_expansion * (2 ** num_down_blocks), kernel_size=(3, 3), padding=(1, 1), lrelu=True)
+        self.fourth = nn.Conv2d(in_channels=block_expansion * (2 ** num_down_blocks), out_channels=block_expansion * (2 ** num_down_blocks), kernel_size=1, stride=1)
+
+        self.estimate_occlusion_map = estimate_occlusion_map
+
+    def deform_input(self, inp, deformation):
+        return F.grid_sample(inp, deformation, align_corners=False)
+
+    def forward(self, feature_3d, kp_driving, kp_source):
+        # NOTE: dense_motion_params is expected to be configured; otherwise occlusion_map, deformation
+        # and out below would be undefined when building ret_dct.
+        if self.dense_motion_network is not None:
+            # Feature warper, Transforming feature representation according to deformation and occlusion
+            dense_motion = self.dense_motion_network(
+                feature=feature_3d, kp_driving=kp_driving, kp_source=kp_source
+            )
+            if 'occlusion_map' in dense_motion:
+                occlusion_map = dense_motion['occlusion_map']  # Bx1x64x64
+            else:
+                occlusion_map = None
+
+            deformation = dense_motion['deformation']  # Bx16x64x64x3
+            out = self.deform_input(feature_3d, deformation)  # Bx32x16x64x64
+
+            bs, c, d, h, w = out.shape  # Bx32x16x64x64
+            out = out.view(bs, c * d, h, w)  # -> Bx512x64x64
+            out = self.third(out)  # -> Bx256x64x64
+            out = self.fourth(out)  # -> Bx256x64x64
+
+            if self.flag_use_occlusion_map and (occlusion_map is not None):
+                out = out * occlusion_map
+
+        ret_dct = {
+            'occlusion_map': occlusion_map,
+            'deformation': deformation,
+            'out': out,
+        }
+
+        return ret_dct
diff --git a/src/utils/__init__.py b/src/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/utils/camera.py b/src/utils/camera.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3dd942697e1f00a96dc3efc75b883d98b52e525
--- /dev/null
+++ b/src/utils/camera.py
@@ -0,0 +1,73 @@
+# coding: utf-8
+
+"""
+functions for processing and transforming 3D facial keypoints
+"""
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+
+PI = np.pi
+
+
+def headpose_pred_to_degree(pred):
+    """
+    pred: (bs, 66) or (bs, 1) or others
+    """
+    if pred.ndim > 1 and pred.shape[1] == 66:
+        # NOTE: the offset has been modified to 97.5
+        device = pred.device
+        idx_tensor = [idx for idx in range(0, 66)]
+        idx_tensor = torch.FloatTensor(idx_tensor).to(device)
+        pred = F.softmax(pred, dim=1)
+        degree = torch.sum(pred*idx_tensor, axis=1) * 3 - 97.5
+
+        return degree
+
+    return pred
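+
+# i.e. the 66 bins cover 3 degrees each; the softmax-weighted bin index is mapped to roughly [-97.5, 97.5] degrees.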
+
+
+def get_rotation_matrix(pitch_, yaw_, roll_):
+    """ the input is in degree
+    """
+    # transform to radian
+    pitch = pitch_ / 180 * PI
+    yaw = yaw_ / 180 * PI
+    roll = roll_ / 180 * PI
+
+    device = pitch.device
+
+    if pitch.ndim == 1:
+        pitch = pitch.unsqueeze(1)
+    if yaw.ndim == 1:
+        yaw = yaw.unsqueeze(1)
+    if roll.ndim == 1:
+        roll = roll.unsqueeze(1)
+
+    # calculate the euler matrix
+    bs = pitch.shape[0]
+    ones = torch.ones([bs, 1]).to(device)
+    zeros = torch.zeros([bs, 1]).to(device)
+    x, y, z = pitch, yaw, roll
+
+    rot_x = torch.cat([
+        ones, zeros, zeros,
+        zeros, torch.cos(x), -torch.sin(x),
+        zeros, torch.sin(x), torch.cos(x)
+    ], dim=1).reshape([bs, 3, 3])
+
+    rot_y = torch.cat([
+        torch.cos(y), zeros, torch.sin(y),
+        zeros, ones, zeros,
+        -torch.sin(y), zeros, torch.cos(y)
+    ], dim=1).reshape([bs, 3, 3])
+
+    rot_z = torch.cat([
+        torch.cos(z), -torch.sin(z), zeros,
+        torch.sin(z), torch.cos(z), zeros,
+        zeros, zeros, ones
+    ], dim=1).reshape([bs, 3, 3])
+
+    rot = rot_z @ rot_y @ rot_x
+    return rot.permute(0, 2, 1)  # transpose
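+
+
+if __name__ == '__main__':
+    # Minimal sanity check (an illustrative sketch, not part of the released pipeline):
+    # a zero pitch/yaw/roll should produce the identity rotation matrix.
+    zero = torch.zeros(1)
+    print(get_rotation_matrix(zero, zero, zero))  # expected: a 1x3x3 identity matrix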
diff --git a/src/utils/crop.py b/src/utils/crop.py
new file mode 100644
index 0000000000000000000000000000000000000000..065b9f0f9f25be8444b7c9bfca45652f80f5685b
--- /dev/null
+++ b/src/utils/crop.py
@@ -0,0 +1,398 @@
+# coding: utf-8
+
+"""
+cropping function and the related preprocessing functions
+"""
+
+import numpy as np
+import os.path as osp
+from math import sin, cos, acos, degrees
+import cv2; cv2.setNumThreads(0); cv2.ocl.setUseOpenCL(False) # NOTE: enforce single thread
+from .rprint import rprint as print
+
+DTYPE = np.float32
+CV2_INTERP = cv2.INTER_LINEAR
+
+def make_abs_path(fn):
+    return osp.join(osp.dirname(osp.realpath(__file__)), fn)
+
+def _transform_img(img, M, dsize, flags=CV2_INTERP, borderMode=None):
+    """ conduct similarity or affine transformation to the image, do not do border operation!
+    img:
+    M: 2x3 matrix or 3x3 matrix
+    dsize: target shape (width, height)
+    """
+    if isinstance(dsize, tuple) or isinstance(dsize, list):
+        _dsize = tuple(dsize)
+    else:
+        _dsize = (dsize, dsize)
+
+    if borderMode is not None:
+        return cv2.warpAffine(img, M[:2, :], dsize=_dsize, flags=flags, borderMode=borderMode, borderValue=(0, 0, 0))
+    else:
+        return cv2.warpAffine(img, M[:2, :], dsize=_dsize, flags=flags)
+
+
+def _transform_pts(pts, M):
+    """ conduct similarity or affine transformation to the pts
+    pts: Nx2 ndarray
+    M: 2x3 matrix or 3x3 matrix
+    return: Nx2
+    """
+    return pts @ M[:2, :2].T + M[:2, 2]
+
+
+def parse_pt2_from_pt101(pt101, use_lip=True):
+    """
+    parsing the 2 points according to the 101 points, which cancels the roll
+    """
+    # the former version used the eye centers directly, but that is not robust, so interpolation is used now
+    pt_left_eye = np.mean(pt101[[39, 42, 45, 48]], axis=0)  # left eye center
+    pt_right_eye = np.mean(pt101[[51, 54, 57, 60]], axis=0)  # right eye center
+
+    if use_lip:
+        # use lip
+        pt_center_eye = (pt_left_eye + pt_right_eye) / 2
+        pt_center_lip = (pt101[75] + pt101[81]) / 2
+        pt2 = np.stack([pt_center_eye, pt_center_lip], axis=0)
+    else:
+        pt2 = np.stack([pt_left_eye, pt_right_eye], axis=0)
+    return pt2
+
+
+def parse_pt2_from_pt106(pt106, use_lip=True):
+    """
+    parsing the 2 points according to the 106 points, which cancels the roll
+    """
+    pt_left_eye = np.mean(pt106[[33, 35, 40, 39]], axis=0)  # left eye center
+    pt_right_eye = np.mean(pt106[[87, 89, 94, 93]], axis=0)  # right eye center
+
+    if use_lip:
+        # use lip
+        pt_center_eye = (pt_left_eye + pt_right_eye) / 2
+        pt_center_lip = (pt106[52] + pt106[61]) / 2
+        pt2 = np.stack([pt_center_eye, pt_center_lip], axis=0)
+    else:
+        pt2 = np.stack([pt_left_eye, pt_right_eye], axis=0)
+    return pt2
+
+
+def parse_pt2_from_pt203(pt203, use_lip=True):
+    """
+    parsing the 2 points according to the 203 points, which cancels the roll
+    """
+    pt_left_eye = np.mean(pt203[[0, 6, 12, 18]], axis=0)  # left eye center
+    pt_right_eye = np.mean(pt203[[24, 30, 36, 42]], axis=0)  # right eye center
+    if use_lip:
+        # use lip
+        pt_center_eye = (pt_left_eye + pt_right_eye) / 2
+        pt_center_lip = (pt203[48] + pt203[66]) / 2
+        pt2 = np.stack([pt_center_eye, pt_center_lip], axis=0)
+    else:
+        pt2 = np.stack([pt_left_eye, pt_right_eye], axis=0)
+    return pt2
+
+
+def parse_pt2_from_pt68(pt68, use_lip=True):
+    """
+    parsing the 2 points according to the 68 points, which cancels the roll
+    """
+    lm_idx = np.array([31, 37, 40, 43, 46, 49, 55], dtype=np.int32) - 1
+    if use_lip:
+        pt5 = np.stack([
+            np.mean(pt68[lm_idx[[1, 2]], :], 0),  # left eye
+            np.mean(pt68[lm_idx[[3, 4]], :], 0),  # right eye
+            pt68[lm_idx[0], :],  # nose
+            pt68[lm_idx[5], :],  # lip
+            pt68[lm_idx[6], :]   # lip
+        ], axis=0)
+
+        pt2 = np.stack([
+            (pt5[0] + pt5[1]) / 2,
+            (pt5[3] + pt5[4]) / 2
+        ], axis=0)
+    else:
+        pt2 = np.stack([
+            np.mean(pt68[lm_idx[[1, 2]], :], 0),  # left eye
+            np.mean(pt68[lm_idx[[3, 4]], :], 0),  # right eye
+        ], axis=0)
+
+    return pt2
+
+
+def parse_pt2_from_pt5(pt5, use_lip=True):
+    """
+    parsing the 2 points according to the 5 points, which cancels the roll
+    """
+    if use_lip:
+        pt2 = np.stack([
+            (pt5[0] + pt5[1]) / 2,
+            (pt5[3] + pt5[4]) / 2
+        ], axis=0)
+    else:
+        pt2 = np.stack([
+            pt5[0],
+            pt5[1]
+        ], axis=0)
+    return pt2
+
+
+def parse_pt2_from_pt_x(pts, use_lip=True):
+    if pts.shape[0] == 101:
+        pt2 = parse_pt2_from_pt101(pts, use_lip=use_lip)
+    elif pts.shape[0] == 106:
+        pt2 = parse_pt2_from_pt106(pts, use_lip=use_lip)
+    elif pts.shape[0] == 68:
+        pt2 = parse_pt2_from_pt68(pts, use_lip=use_lip)
+    elif pts.shape[0] == 5:
+        pt2 = parse_pt2_from_pt5(pts, use_lip=use_lip)
+    elif pts.shape[0] == 203:
+        pt2 = parse_pt2_from_pt203(pts, use_lip=use_lip)
+    elif pts.shape[0] > 101:
+        # take the first 101 points
+        pt2 = parse_pt2_from_pt101(pts[:101], use_lip=use_lip)
+    else:
+        raise Exception(f'Unknown shape: {pts.shape}')
+
+    if not use_lip:
+        # NOTE: to be compatible with the later code, pt2 needs to be rotated 90 degrees clockwise manually
+        v = pt2[1] - pt2[0]
+        pt2[1, 0] = pt2[0, 0] - v[1]
+        pt2[1, 1] = pt2[0, 1] + v[0]
+
+    return pt2
+
+
+def parse_rect_from_landmark(
+    pts,
+    scale=1.5,
+    need_square=True,
+    vx_ratio=0,
+    vy_ratio=0,
+    use_deg_flag=False,
+    **kwargs
+):
+    """parsing center, size, angle from 101/68/5/x landmarks
+    vx_ratio: the offset ratio along the pupil axis x-axis, multiplied by size
+    vy_ratio: the offset ratio along the pupil axis y-axis, multiplied by size, which is used to contain more forehead area
+
+    judge with pts.shape
+    """
+    pt2 = parse_pt2_from_pt_x(pts, use_lip=kwargs.get('use_lip', True))
+
+    uy = pt2[1] - pt2[0]
+    l = np.linalg.norm(uy)
+    if l <= 1e-3:
+        uy = np.array([0, 1], dtype=DTYPE)
+    else:
+        uy /= l
+    ux = np.array((uy[1], -uy[0]), dtype=DTYPE)
+
+    # the rotation angle of the x-axis: clockwise is positive, counterclockwise is negative (image coordinate system)
+    # print(uy)
+    # print(ux)
+    angle = acos(ux[0])
+    if ux[1] < 0:
+        angle = -angle
+
+    # rotation matrix
+    M = np.array([ux, uy])
+
+    # calculate the size which contains the angle degree of the bbox, and the center
+    center0 = np.mean(pts, axis=0)
+    rpts = (pts - center0) @ M.T  # (M @ P.T).T = P @ M.T
+    lt_pt = np.min(rpts, axis=0)
+    rb_pt = np.max(rpts, axis=0)
+    center1 = (lt_pt + rb_pt) / 2
+
+    size = rb_pt - lt_pt
+    if need_square:
+        m = max(size[0], size[1])
+        size[0] = m
+        size[1] = m
+
+    size *= scale  # scale size
+    center = center0 + ux * center1[0] + uy * center1[1]  # counterclockwise rotation, equivalent to M.T @ center1.T
+    center = center + ux * (vx_ratio * size) + uy * \
+        (vy_ratio * size)  # considering the offset in vx and vy direction
+
+    if use_deg_flag:
+        angle = degrees(angle)
+
+    return center, size, angle
+
+
+def parse_bbox_from_landmark(pts, **kwargs):
+    center, size, angle = parse_rect_from_landmark(pts, **kwargs)
+    cx, cy = center
+    w, h = size
+
+    # calculate the vertex positions before rotation
+    bbox = np.array([
+        [cx-w/2, cy-h/2],  # left, top
+        [cx+w/2, cy-h/2],
+        [cx+w/2, cy+h/2],  # right, bottom
+        [cx-w/2, cy+h/2]
+    ], dtype=DTYPE)
+
+    # construct rotation matrix
+    bbox_rot = bbox.copy()
+    R = np.array([
+        [np.cos(angle), -np.sin(angle)],
+        [np.sin(angle),  np.cos(angle)]
+    ], dtype=DTYPE)
+
+    # calculate the relative position of each vertex from the rotation center, then rotate these positions, and finally add the coordinates of the rotation center
+    bbox_rot = (bbox_rot - center) @ R.T + center
+
+    return {
+        'center': center,  # 2x1
+        'size': size,  # scalar
+        'angle': angle,  # rad, counterclockwise
+        'bbox': bbox,  # 4x2
+        'bbox_rot': bbox_rot,  # 4x2
+    }
+
+
+def crop_image_by_bbox(img, bbox, lmk=None, dsize=512, angle=None, flag_rot=False, **kwargs):
+    left, top, right, bot = bbox
+    if int(right - left) != int(bot - top):
+        print(f'right-left {right-left} != bot-top {bot-top}')
+    size = right - left
+
+    src_center = np.array([(left + right) / 2, (top + bot) / 2], dtype=DTYPE)
+    tgt_center = np.array([dsize / 2, dsize / 2], dtype=DTYPE)
+
+    s = dsize / size  # scale
+    if flag_rot and angle is not None:
+        costheta, sintheta = cos(angle), sin(angle)
+        cx, cy = src_center[0], src_center[1]  # ori center
+        tcx, tcy = tgt_center[0], tgt_center[1]  # target center
+        # need to infer
+        M_o2c = np.array(
+            [[s * costheta, s * sintheta, tcx - s * (costheta * cx + sintheta * cy)],
+             [-s * sintheta, s * costheta, tcy - s * (-sintheta * cx + costheta * cy)]],
+            dtype=DTYPE
+        )
+    else:
+        M_o2c = np.array(
+            [[s, 0, tgt_center[0] - s * src_center[0]],
+             [0, s, tgt_center[1] - s * src_center[1]]],
+            dtype=DTYPE
+        )
+
+    # if flag_rot and angle is None:
+        # print('angle is None, but flag_rotate is True', style="bold yellow")
+
+    img_crop = _transform_img(img, M_o2c, dsize=dsize, borderMode=kwargs.get('borderMode', None))
+    lmk_crop = _transform_pts(lmk, M_o2c) if lmk is not None else None
+
+    M_o2c = np.vstack([M_o2c, np.array([0, 0, 1], dtype=DTYPE)])
+    M_c2o = np.linalg.inv(M_o2c)
+
+    # cv2.imwrite('crop.jpg', img_crop)
+
+    return {
+        'img_crop': img_crop,
+        'lmk_crop': lmk_crop,
+        'M_o2c': M_o2c,
+        'M_c2o': M_c2o,
+    }
+
+
+def _estimate_similar_transform_from_pts(
+    pts,
+    dsize,
+    scale=1.5,
+    vx_ratio=0,
+    vy_ratio=-0.1,
+    flag_do_rot=True,
+    **kwargs
+):
+    """ calculate the affine matrix of the cropped image from sparse points, the original image to the cropped image, the inverse is the cropped image to the original image
+    pts: landmark, 101 or 68 points or other points, Nx2
+    scale: the larger scale factor, the smaller face ratio
+    vx_ratio: x shift
+    vy_ratio: y shift, the smaller the y shift, the lower the face region
+    rot_flag: if it is true, conduct correction
+    """
+    center, size, angle = parse_rect_from_landmark(
+        pts, scale=scale, vx_ratio=vx_ratio, vy_ratio=vy_ratio,
+        use_lip=kwargs.get('use_lip', True)
+    )
+
+    s = dsize / size[0]  # scale
+    tgt_center = np.array([dsize / 2, dsize / 2], dtype=DTYPE)  # center of dsize
+
+    if flag_do_rot:
+        costheta, sintheta = cos(angle), sin(angle)
+        cx, cy = center[0], center[1]  # ori center
+        tcx, tcy = tgt_center[0], tgt_center[1]  # target center
+        # need to infer
+        M_INV = np.array(
+            [[s * costheta, s * sintheta, tcx - s * (costheta * cx + sintheta * cy)],
+             [-s * sintheta, s * costheta, tcy - s * (-sintheta * cx + costheta * cy)]],
+            dtype=DTYPE
+        )
+    else:
+        M_INV = np.array(
+            [[s, 0, tgt_center[0] - s * center[0]],
+             [0, s, tgt_center[1] - s * center[1]]],
+            dtype=DTYPE
+        )
+
+    M_INV_H = np.vstack([M_INV, np.array([0, 0, 1])])
+    M = np.linalg.inv(M_INV_H)
+
+    # M_INV is from the original image to the cropped image, M is from the cropped image to the original image
+    return M_INV, M[:2, ...]
+
+
+def crop_image(img, pts: np.ndarray, **kwargs):
+    dsize = kwargs.get('dsize', 224)
+    scale = kwargs.get('scale', 1.5)  # 1.5 | 1.6
+    vy_ratio = kwargs.get('vy_ratio', -0.1)  # -0.0625 | -0.1
+
+    M_INV, _ = _estimate_similar_transform_from_pts(
+        pts,
+        dsize=dsize,
+        scale=scale,
+        vy_ratio=vy_ratio,
+        flag_do_rot=kwargs.get('flag_do_rot', True),
+    )
+
+    img_crop = _transform_img(img, M_INV, dsize)  # origin to crop
+    pt_crop = _transform_pts(pts, M_INV)
+
+    M_o2c = np.vstack([M_INV, np.array([0, 0, 1], dtype=DTYPE)])
+    M_c2o = np.linalg.inv(M_o2c)
+
+    ret_dct = {
+        'M_o2c': M_o2c,  # from the original image to the cropped image 3x3
+        'M_c2o': M_c2o,  # from the cropped image to the original image 3x3
+        'img_crop': img_crop,  # the cropped image
+        'pt_crop': pt_crop,  # the landmarks of the cropped image
+    }
+
+    return ret_dct
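+
+# Usage sketch (illustrative only; in this repo the Cropper in src/utils/cropper.py is the actual caller):
+#   ret = crop_image(img_rgb, lmk_106x2, dsize=224, scale=1.5, vy_ratio=-0.1)
+#   ret['img_crop'] is the dsize x dsize crop, and ret['M_c2o'] maps crop coordinates back to the original image.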
+
+def average_bbox_lst(bbox_lst):
+    if len(bbox_lst) == 0:
+        return None
+    bbox_arr = np.array(bbox_lst)
+    return np.mean(bbox_arr, axis=0).tolist()
+
+def prepare_paste_back(mask_crop, crop_M_c2o, dsize):
+    """prepare mask for later image paste back
+    """
+    mask_ori = _transform_img(mask_crop, crop_M_c2o, dsize)
+    mask_ori = mask_ori.astype(np.float32) / 255.
+    return mask_ori
+
+def paste_back(img_crop, M_c2o, img_ori, mask_ori):
+    """paste back the image
+    """
+    dsize = (img_ori.shape[1], img_ori.shape[0])
+    result = _transform_img(img_crop, M_c2o, dsize=dsize)
+    result = np.clip(mask_ori * result + (1 - mask_ori) * img_ori, 0, 255).astype(np.uint8)
+    return result
diff --git a/src/utils/cropper.py b/src/utils/cropper.py
new file mode 100644
index 0000000000000000000000000000000000000000..916d33b42106b26d3d47691d8c3484f8f295db85
--- /dev/null
+++ b/src/utils/cropper.py
@@ -0,0 +1,196 @@
+# coding: utf-8
+
+import os.path as osp
+from dataclasses import dataclass, field
+from typing import List, Tuple, Union
+
+import cv2; cv2.setNumThreads(0); cv2.ocl.setUseOpenCL(False)
+import numpy as np
+
+from ..config.crop_config import CropConfig
+from .crop import (
+    average_bbox_lst,
+    crop_image,
+    crop_image_by_bbox,
+    parse_bbox_from_landmark,
+)
+from .io import contiguous
+from .rprint import rlog as log
+from .face_analysis_diy import FaceAnalysisDIY
+from .landmark_runner import LandmarkRunner
+
+
+def make_abs_path(fn):
+    return osp.join(osp.dirname(osp.realpath(__file__)), fn)
+
+
+@dataclass
+class Trajectory:
+    start: int = -1  # start frame
+    end: int = -1  # end frame
+    lmk_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list)  # lmk list
+    bbox_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list)  # bbox list
+
+    frame_rgb_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list)  # frame list
+    lmk_crop_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list)  # lmk list
+    frame_rgb_crop_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list)  # frame crop list
+
+
+class Cropper(object):
+    def __init__(self, **kwargs) -> None:
+        self.crop_cfg: CropConfig = kwargs.get("crop_cfg", None)
+        device_id = kwargs.get("device_id", 0)
+        flag_force_cpu = kwargs.get("flag_force_cpu", False)
+        if flag_force_cpu:
+            device = "cpu"
+            face_analysis_wrapper_provicer = ["CPUExecutionProvider"]
+        else:
+            device = "cuda"
+            face_analysis_wrapper_provicer = ["CUDAExecutionProvider"]
+        self.landmark_runner = LandmarkRunner(
+            ckpt_path=make_abs_path(self.crop_cfg.landmark_ckpt_path),
+            onnx_provider=device,
+            device_id=device_id,
+        )
+        self.landmark_runner.warmup()
+
+        self.face_analysis_wrapper = FaceAnalysisDIY(
+            name="buffalo_l",
+            root=make_abs_path(self.crop_cfg.insightface_root),
+            providers=face_analysis_wrapper_provider,
+        )
+        self.face_analysis_wrapper.prepare(ctx_id=device_id, det_size=(512, 512))
+        self.face_analysis_wrapper.warmup()
+
+    def update_config(self, user_args):
+        for k, v in user_args.items():
+            if hasattr(self.crop_cfg, k):
+                setattr(self.crop_cfg, k, v)
+
+    def crop_source_image(self, img_rgb_: np.ndarray, crop_cfg: CropConfig):
+        # crop a source image and get the necessary information
+        img_rgb = img_rgb_.copy()  # copy it
+
+        img_bgr = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
+        src_face = self.face_analysis_wrapper.get(
+            img_bgr,
+            flag_do_landmark_2d_106=True,
+            direction=crop_cfg.direction,
+            max_face_num=crop_cfg.max_face_num,
+        )
+
+        if len(src_face) == 0:
+            log("No face detected in the source image.")
+            return None
+        elif len(src_face) > 1:
+            log(f"More than one face detected in the image, only pick one face by rule {crop_cfg.direction}.")
+
+        # NOTE: temporarily only pick the first face; multiple faces may be supported in the future
+        src_face = src_face[0]
+        lmk = src_face.landmark_2d_106  # this is the 106 landmarks from insightface
+
+        # crop the face
+        ret_dct = crop_image(
+            img_rgb,  # ndarray
+            lmk,  # 106x2 or Nx2
+            dsize=crop_cfg.dsize,
+            scale=crop_cfg.scale,
+            vx_ratio=crop_cfg.vx_ratio,
+            vy_ratio=crop_cfg.vy_ratio,
+        )
+
+        lmk = self.landmark_runner.run(img_rgb, lmk)
+        ret_dct["lmk_crop"] = lmk
+
+        # update a 256x256 version for network input
+        ret_dct["img_crop_256x256"] = cv2.resize(ret_dct["img_crop"], (256, 256), interpolation=cv2.INTER_AREA)
+        ret_dct["lmk_crop_256x256"] = ret_dct["lmk_crop"] * 256 / crop_cfg.dsize
+
+        return ret_dct
+
+    def crop_driving_video(self, driving_rgb_lst, **kwargs):
+        """Tracking based landmarks/alignment and cropping"""
+        trajectory = Trajectory()
+        direction = kwargs.get("direction", "large-small")
+        for idx, frame_rgb in enumerate(driving_rgb_lst):
+            if idx == 0 or trajectory.start == -1:
+                src_face = self.face_analysis_wrapper.get(
+                    contiguous(frame_rgb[..., ::-1]),
+                    flag_do_landmark_2d_106=True,
+                    direction=direction,
+                )
+                if len(src_face) == 0:
+                    log(f"No face detected in the frame #{idx}")
+                    continue
+                elif len(src_face) > 1:
+                    log(f"More than one face detected in the driving frame_{idx}, only pick one face by rule {direction}.")
+                src_face = src_face[0]
+                lmk = src_face.landmark_2d_106
+                lmk = self.landmark_runner.run(frame_rgb, lmk)
+                trajectory.start, trajectory.end = idx, idx
+            else:
+                lmk = self.landmark_runner.run(frame_rgb, trajectory.lmk_lst[-1])
+                trajectory.end = idx
+
+            trajectory.lmk_lst.append(lmk)
+            ret_bbox = parse_bbox_from_landmark(
+                lmk,
+                scale=self.crop_cfg.scale_crop_video,
+                vx_ratio_crop_video=self.crop_cfg.vx_ratio_crop_video,
+                vy_ratio=self.crop_cfg.vy_ratio_crop_video,
+            )["bbox"]
+            bbox = [
+                ret_bbox[0, 0],
+                ret_bbox[0, 1],
+                ret_bbox[2, 0],
+                ret_bbox[2, 1],
+            ]  # 4,
+            trajectory.bbox_lst.append(bbox)  # bbox
+            trajectory.frame_rgb_lst.append(frame_rgb)
+
+        global_bbox = average_bbox_lst(trajectory.bbox_lst)
+
+        for idx, (frame_rgb, lmk) in enumerate(zip(trajectory.frame_rgb_lst, trajectory.lmk_lst)):
+            ret_dct = crop_image_by_bbox(
+                frame_rgb,
+                global_bbox,
+                lmk=lmk,
+                dsize=kwargs.get("dsize", 512),
+                flag_rot=False,
+                borderValue=(0, 0, 0),
+            )
+            trajectory.frame_rgb_crop_lst.append(ret_dct["img_crop"])
+            trajectory.lmk_crop_lst.append(ret_dct["lmk_crop"])
+
+        return {
+            "frame_crop_lst": trajectory.frame_rgb_crop_lst,
+            "lmk_crop_lst": trajectory.lmk_crop_lst,
+        }
+
+    def calc_lmks_from_cropped_video(self, driving_rgb_crop_lst, **kwargs):
+        """Tracking based landmarks/alignment"""
+        trajectory = Trajectory()
+        direction = kwargs.get("direction", "large-small")
+
+        for idx, frame_rgb_crop in enumerate(driving_rgb_crop_lst):
+            if idx == 0 or trajectory.start == -1:
+                src_face = self.face_analysis_wrapper.get(
+                    contiguous(frame_rgb_crop[..., ::-1]),  # convert to BGR
+                    flag_do_landmark_2d_106=True,
+                    direction=direction,
+                )
+                if len(src_face) == 0:
+                    log(f"No face detected in the frame #{idx}")
+                    raise Exception(f"No face detected in the frame #{idx}")
+                elif len(src_face) > 1:
+                    log(f"More than one face detected in the driving frame_{idx}, only pick one face by rule {direction}.")
+                src_face = src_face[0]
+                lmk = src_face.landmark_2d_106
+                lmk = self.landmark_runner.run(frame_rgb_crop, lmk)
+                trajectory.start, trajectory.end = idx, idx
+            else:
+                lmk = self.landmark_runner.run(frame_rgb_crop, trajectory.lmk_lst[-1])
+                trajectory.end = idx
+
+            trajectory.lmk_lst.append(lmk)
+        return trajectory.lmk_lst
diff --git a/src/utils/dependencies/insightface/__init__.py b/src/utils/dependencies/insightface/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1680083da47850b31da10803c7d255e67dda619a
--- /dev/null
+++ b/src/utils/dependencies/insightface/__init__.py
@@ -0,0 +1,20 @@
+# coding: utf-8
+# pylint: disable=wrong-import-position
+"""InsightFace: A Face Analysis Toolkit."""
+from __future__ import absolute_import
+
+try:
+    #import mxnet as mx
+    import onnxruntime
+except ImportError:
+    raise ImportError(
+        "Unable to import dependency onnxruntime. "
+    )
+
+__version__ = '0.7.3'
+
+from . import model_zoo
+from . import utils
+from . import app
+from . import data
+
diff --git a/src/utils/dependencies/insightface/app/__init__.py b/src/utils/dependencies/insightface/app/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc574616885290489798bac5c682e7aaa65a5dad
--- /dev/null
+++ b/src/utils/dependencies/insightface/app/__init__.py
@@ -0,0 +1 @@
+from .face_analysis import *
diff --git a/src/utils/dependencies/insightface/app/common.py b/src/utils/dependencies/insightface/app/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..82ca987aeede35510b3aef72b4edf2390ad84e65
--- /dev/null
+++ b/src/utils/dependencies/insightface/app/common.py
@@ -0,0 +1,49 @@
+import numpy as np
+from numpy.linalg import norm as l2norm
+#from easydict import EasyDict
+
+class Face(dict):
+
+    def __init__(self, d=None, **kwargs):
+        if d is None:
+            d = {}
+        if kwargs:
+            d.update(**kwargs)
+        for k, v in d.items():
+            setattr(self, k, v)
+        # Class attributes
+        #for k in self.__class__.__dict__.keys():
+        #    if not (k.startswith('__') and k.endswith('__')) and not k in ('update', 'pop'):
+        #        setattr(self, k, getattr(self, k))
+
+    def __setattr__(self, name, value):
+        if isinstance(value, (list, tuple)):
+            value = [self.__class__(x)
+                    if isinstance(x, dict) else x for x in value]
+        elif isinstance(value, dict) and not isinstance(value, self.__class__):
+            value = self.__class__(value)
+        super(Face, self).__setattr__(name, value)
+        super(Face, self).__setitem__(name, value)
+
+    __setitem__ = __setattr__
+
+    def __getattr__(self, name):
+        return None
+
+    @property
+    def embedding_norm(self):
+        if self.embedding is None:
+            return None
+        return l2norm(self.embedding)
+
+    @property 
+    def normed_embedding(self):
+        if self.embedding is None:
+            return None
+        return self.embedding / self.embedding_norm
+
+    @property 
+    def sex(self):
+        if self.gender is None:
+            return None
+        return 'M' if self.gender==1 else 'F'
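
`Face` is a plain `dict` subclass whose keys double as attributes and whose missing attributes resolve to `None`, which is what lets the rest of the pipeline write `face.kps`, `face['age']`, or test `face.embedding is None` interchangeably (note that a typo'd attribute also silently returns `None`). A small standalone illustration, only assuming numpy and that the vendored package is importable as `insightface`:

```python
import numpy as np
from insightface.app.common import Face   # vendored copy; assumes src/utils/dependencies is on sys.path

face = Face(bbox=np.array([10, 20, 110, 140]), det_score=0.97)
face['age'] = 31                        # item and attribute access are interchangeable
print(face.age, face['det_score'])      # 31 0.97
print(face.embedding)                   # None -> unknown attributes resolve to None

face.embedding = np.ones(512, dtype=np.float32)
print(round(float(face.embedding_norm), 2))                       # ~22.63 (sqrt(512))
print(np.allclose(np.linalg.norm(face.normed_embedding), 1.0))    # True
```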
diff --git a/src/utils/dependencies/insightface/app/face_analysis.py b/src/utils/dependencies/insightface/app/face_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa5128b3f5e02c2c19e7df195cc1c1e7fcf36c4d
--- /dev/null
+++ b/src/utils/dependencies/insightface/app/face_analysis.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+# @Organization  : insightface.ai
+# @Author        : Jia Guo
+# @Time          : 2021-05-04
+# @Function      :
+
+
+from __future__ import division
+
+import glob
+import os.path as osp
+
+import numpy as np
+import onnxruntime
+from numpy.linalg import norm
+
+from ..model_zoo import model_zoo
+from ..utils import ensure_available
+from .common import Face
+
+
+DEFAULT_MP_NAME = 'buffalo_l'
+__all__ = ['FaceAnalysis']
+
+class FaceAnalysis:
+    def __init__(self, name=DEFAULT_MP_NAME, root='~/.insightface', allowed_modules=None, **kwargs):
+        onnxruntime.set_default_logger_severity(3)
+        self.models = {}
+        self.model_dir = ensure_available('models', name, root=root)
+        onnx_files = glob.glob(osp.join(self.model_dir, '*.onnx'))
+        onnx_files = sorted(onnx_files)
+        for onnx_file in onnx_files:
+            model = model_zoo.get_model(onnx_file, **kwargs)
+            if model is None:
+                print('model not recognized:', onnx_file)
+            elif allowed_modules is not None and model.taskname not in allowed_modules:
+                print('model ignore:', onnx_file, model.taskname)
+                del model
+            elif model.taskname not in self.models and (allowed_modules is None or model.taskname in allowed_modules):
+                # print('find model:', onnx_file, model.taskname, model.input_shape, model.input_mean, model.input_std)
+                self.models[model.taskname] = model
+            else:
+                print('duplicated model task type, ignore:', onnx_file, model.taskname)
+                del model
+        assert 'detection' in self.models
+        self.det_model = self.models['detection']
+
+
+    def prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640)):
+        self.det_thresh = det_thresh
+        assert det_size is not None
+        # print('set det-size:', det_size)
+        self.det_size = det_size
+        for taskname, model in self.models.items():
+            if taskname=='detection':
+                model.prepare(ctx_id, input_size=det_size, det_thresh=det_thresh)
+            else:
+                model.prepare(ctx_id)
+
+    def get(self, img, max_num=0):
+        bboxes, kpss = self.det_model.detect(img,
+                                             max_num=max_num,
+                                             metric='default')
+        if bboxes.shape[0] == 0:
+            return []
+        ret = []
+        for i in range(bboxes.shape[0]):
+            bbox = bboxes[i, 0:4]
+            det_score = bboxes[i, 4]
+            kps = None
+            if kpss is not None:
+                kps = kpss[i]
+            face = Face(bbox=bbox, kps=kps, det_score=det_score)
+            for taskname, model in self.models.items():
+                if taskname=='detection':
+                    continue
+                model.get(img, face)
+            ret.append(face)
+        return ret
+
+    def draw_on(self, img, faces):
+        import cv2
+        dimg = img.copy()
+        for i in range(len(faces)):
+            face = faces[i]
+            box = face.bbox.astype(int)
+            color = (0, 0, 255)
+            cv2.rectangle(dimg, (box[0], box[1]), (box[2], box[3]), color, 2)
+            if face.kps is not None:
+                kps = face.kps.astype(int)
+                #print(landmark.shape)
+                for l in range(kps.shape[0]):
+                    color = (0, 0, 255)
+                    if l == 0 or l == 3:
+                        color = (0, 255, 0)
+                    cv2.circle(dimg, (kps[l][0], kps[l][1]), 1, color,
+                               2)
+            if face.gender is not None and face.age is not None:
+                cv2.putText(dimg,'%s,%d'%(face.sex,face.age), (box[0]-1, box[1]-4),cv2.FONT_HERSHEY_COMPLEX,0.7,(0,255,0),1)
+
+            #for key, value in face.items():
+            #    if key.startswith('landmark_3d'):
+            #        print(key, value.shape)
+            #        print(value[0:10,:])
+            #        lmk = np.round(value).astype(np.int)
+            #        for l in range(lmk.shape[0]):
+            #            color = (255, 0, 0)
+            #            cv2.circle(dimg, (lmk[l][0], lmk[l][1]), 1, color,
+            #                       2)
+        return dimg
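
`FaceAnalysis` strings the loaded ONNX models together: `prepare` configures the detector once, `get` runs detection and then every non-detection model on each found face, and `draw_on` renders boxes and 5-point keypoints. A minimal sketch, assuming `root` points at a folder containing `models/buffalo_l/*.onnx` (for example this repo's `./pretrained_weights/insightface`, whose subset holds only the detector and the 106-point landmark model):

```python
import cv2
# Vendored package: assumes src/utils/dependencies is on sys.path so `insightface` resolves to this copy.
from insightface.app import FaceAnalysis
from insightface.data import get_image

app = FaceAnalysis(name='buffalo_l', root='./pretrained_weights/insightface')  # assumed model root
app.prepare(ctx_id=0, det_thresh=0.5, det_size=(640, 640))   # ctx_id < 0 forces CPUExecutionProvider

img = get_image('t1')                 # bundled BGR test image (data/images/t1.jpg)
faces = app.get(img)                  # Face objects with bbox, kps, det_score (+ extras per loaded model)
for f in faces:
    print(f.bbox.astype(int), round(float(f.det_score), 3))

cv2.imwrite('./t1_detected.jpg', app.draw_on(img, faces))
```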
diff --git a/src/utils/dependencies/insightface/data/__init__.py b/src/utils/dependencies/insightface/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..665c59ec99b6ebf12822015e0350969c7903e243
--- /dev/null
+++ b/src/utils/dependencies/insightface/data/__init__.py
@@ -0,0 +1,2 @@
+from .image import get_image
+from .pickle_object import get_object
diff --git a/src/utils/dependencies/insightface/data/image.py b/src/utils/dependencies/insightface/data/image.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d32c4bcb1b13d33bcb0d840cf7b8c08d183b3ea
--- /dev/null
+++ b/src/utils/dependencies/insightface/data/image.py
@@ -0,0 +1,27 @@
+import cv2
+import os
+import os.path as osp
+from pathlib import Path
+
+class ImageCache:
+    data = {}
+
+def get_image(name, to_rgb=False):
+    key = (name, to_rgb)
+    if key in ImageCache.data:
+        return ImageCache.data[key]
+    images_dir = osp.join(Path(__file__).parent.absolute(), 'images')
+    ext_names = ['.jpg', '.png', '.jpeg']
+    image_file = None
+    for ext_name in ext_names:
+        _image_file = osp.join(images_dir, "%s%s"%(name, ext_name))
+        if osp.exists(_image_file):
+            image_file = _image_file
+            break
+    assert image_file is not None, '%s not found'%name
+    img = cv2.imread(image_file)
+    if to_rgb:
+        img = img[:,:,::-1]
+    ImageCache.data[key] = img
+    return img
+
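
`get_image` is a small helper for the bundled sample images: it resolves `<name>.jpg/.png/.jpeg` next to `data/image.py`, caches the decoded array per `(name, to_rgb)` key, and returns BGR unless `to_rgb=True`. For example:

```python
from insightface.data import get_image   # vendored package

bgr = get_image('t1')                     # BGR, read once from data/images/t1.jpg
rgb = get_image('t1', to_rgb=True)        # channel-reversed variant, cached under its own key
print(bgr.shape, bgr is get_image('t1'))  # repeated calls return the cached array object
```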
diff --git a/src/utils/dependencies/insightface/data/images/Tom_Hanks_54745.png b/src/utils/dependencies/insightface/data/images/Tom_Hanks_54745.png
new file mode 100644
index 0000000000000000000000000000000000000000..906315d13fa29bb3a5ded3e162592f2c7f041b23
Binary files /dev/null and b/src/utils/dependencies/insightface/data/images/Tom_Hanks_54745.png differ
diff --git a/src/utils/dependencies/insightface/data/images/mask_black.jpg b/src/utils/dependencies/insightface/data/images/mask_black.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0eab0df555c23f1e033537fe39f3c0c8303dd369
Binary files /dev/null and b/src/utils/dependencies/insightface/data/images/mask_black.jpg differ
diff --git a/src/utils/dependencies/insightface/data/images/mask_blue.jpg b/src/utils/dependencies/insightface/data/images/mask_blue.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f71336b9a0d3038ebd84e6995ebfbe54946fcbb4
Binary files /dev/null and b/src/utils/dependencies/insightface/data/images/mask_blue.jpg differ
diff --git a/src/utils/dependencies/insightface/data/images/mask_green.jpg b/src/utils/dependencies/insightface/data/images/mask_green.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ac2ad55f4fc580c915dfa4c157ca3bfc84e453f4
Binary files /dev/null and b/src/utils/dependencies/insightface/data/images/mask_green.jpg differ
diff --git a/src/utils/dependencies/insightface/data/images/mask_white.jpg b/src/utils/dependencies/insightface/data/images/mask_white.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2148ab2d09fdee6e3f59315470e98ecfc54339e4
Binary files /dev/null and b/src/utils/dependencies/insightface/data/images/mask_white.jpg differ
diff --git a/src/utils/dependencies/insightface/data/images/t1.jpg b/src/utils/dependencies/insightface/data/images/t1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8fd6427a177bd01650c0150e9d02457c3a5dcddd
--- /dev/null
+++ b/src/utils/dependencies/insightface/data/images/t1.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47f682e945b659f93a9e490b9c9c4a2a864abe64dace9e1a2893845ddfd69489
+size 128824
diff --git a/src/utils/dependencies/insightface/data/objects/meanshape_68.pkl b/src/utils/dependencies/insightface/data/objects/meanshape_68.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..d5297e9e8ea5574298ddd287b058252e03aa18c1
--- /dev/null
+++ b/src/utils/dependencies/insightface/data/objects/meanshape_68.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39ffecf84ba73f0d0d7e49380833ba88713c9fcdec51df4f7ac45a48b8f4cc51
+size 974
diff --git a/src/utils/dependencies/insightface/data/pickle_object.py b/src/utils/dependencies/insightface/data/pickle_object.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbd87030ea15e1d01af1cd4cff1be2bc54cc82dd
--- /dev/null
+++ b/src/utils/dependencies/insightface/data/pickle_object.py
@@ -0,0 +1,17 @@
+import cv2
+import os
+import os.path as osp
+from pathlib import Path
+import pickle
+
+def get_object(name):
+    objects_dir = osp.join(Path(__file__).parent.absolute(), 'objects')
+    if not name.endswith('.pkl'):
+        name = name+".pkl"
+    filepath = osp.join(objects_dir, name)
+    if not osp.exists(filepath):
+        return None
+    with open(filepath, 'rb') as f:
+        obj = pickle.load(f)
+    return obj
+
diff --git a/src/utils/dependencies/insightface/data/rec_builder.py b/src/utils/dependencies/insightface/data/rec_builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..e02abc969da2f882639326f5bad3c7e8d08c1fde
--- /dev/null
+++ b/src/utils/dependencies/insightface/data/rec_builder.py
@@ -0,0 +1,71 @@
+import pickle
+import numpy as np
+import os
+import os.path as osp
+import sys
+import mxnet as mx
+
+
+class RecBuilder():
+    def __init__(self, path, image_size=(112, 112)):
+        self.path = path
+        self.image_size = image_size
+        self.widx = 0
+        self.wlabel = 0
+        self.max_label = -1
+        assert not osp.exists(path), '%s exists' % path
+        os.makedirs(path)
+        self.writer = mx.recordio.MXIndexedRecordIO(os.path.join(path, 'train.idx'), 
+                                                    os.path.join(path, 'train.rec'),
+                                                    'w')
+        self.meta = []
+
+    def add(self, imgs):
+        #!!! img should be BGR!!!!
+        #assert label >= 0
+        #assert label > self.last_label
+        assert len(imgs) > 0
+        label = self.wlabel
+        for img in imgs:
+            idx = self.widx
+            image_meta = {'image_index': idx, 'image_classes': [label]}
+            header = mx.recordio.IRHeader(0, label, idx, 0)
+            if isinstance(img, np.ndarray):
+                s = mx.recordio.pack_img(header,img,quality=95,img_fmt='.jpg')
+            else:
+                s = mx.recordio.pack(header, img)
+            self.writer.write_idx(idx, s)
+            self.meta.append(image_meta)
+            self.widx += 1
+        self.max_label = label
+        self.wlabel += 1
+
+
+    def add_image(self, img, label):
+        #!!! img should be BGR!!!!
+        #assert label >= 0
+        #assert label > self.last_label
+        idx = self.widx
+        header = mx.recordio.IRHeader(0, label, idx, 0)
+        if isinstance(label, list):
+            idlabel = label[0]
+        else:
+            idlabel = label
+        image_meta = {'image_index': idx, 'image_classes': [idlabel]}
+        if isinstance(img, np.ndarray):
+            s = mx.recordio.pack_img(header,img,quality=95,img_fmt='.jpg')
+        else:
+            s = mx.recordio.pack(header, img)
+        self.writer.write_idx(idx, s)
+        self.meta.append(image_meta)
+        self.widx += 1
+        self.max_label = max(self.max_label, idlabel)
+
+    def close(self):
+        with open(osp.join(self.path, 'train.meta'), 'wb') as pfile:
+            pickle.dump(self.meta, pfile, protocol=pickle.HIGHEST_PROTOCOL)
+        print('stat:', self.widx, self.wlabel)
+        with open(os.path.join(self.path, 'property'), 'w') as f:
+            f.write("%d,%d,%d\n" % (self.max_label+1, self.image_size[0], self.image_size[1]))
+            f.write("%d\n" % (self.widx))
+
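
`RecBuilder` emits an MXNet RecordIO training set (`train.idx`, `train.rec`, `train.meta`, and a `property` file holding `num_classes,height,width`); each `add` call stores all images of one identity under the next sequential label. A sketch, assuming `mxnet` is installed (it is not imported by the vendored `__init__`) and a hypothetical `imgs_by_identity` list of lists of BGR arrays:

```python
from insightface.data.rec_builder import RecBuilder

# imgs_by_identity: hypothetical; imgs_by_identity[i] is a list of BGR crops of identity i
builder = RecBuilder(path='./faces_rec', image_size=(112, 112))   # the target folder must not exist yet
for person_imgs in imgs_by_identity:
    builder.add(person_imgs)        # labels 0, 1, 2, ... are assigned in call order
builder.close()                     # flushes train.meta and the property file
```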
diff --git a/src/utils/dependencies/insightface/model_zoo/__init__.py b/src/utils/dependencies/insightface/model_zoo/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..225623d6142c968b4040f391039bfab88bdd1b2a
--- /dev/null
+++ b/src/utils/dependencies/insightface/model_zoo/__init__.py
@@ -0,0 +1,6 @@
+from .model_zoo import get_model
+from .arcface_onnx import ArcFaceONNX
+from .retinaface import RetinaFace
+from .scrfd import SCRFD
+from .landmark import Landmark
+from .attribute import Attribute
diff --git a/src/utils/dependencies/insightface/model_zoo/arcface_onnx.py b/src/utils/dependencies/insightface/model_zoo/arcface_onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..b537ce2ee15d4a1834d54e185f34e336aab30a77
--- /dev/null
+++ b/src/utils/dependencies/insightface/model_zoo/arcface_onnx.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+# @Organization  : insightface.ai
+# @Author        : Jia Guo
+# @Time          : 2021-05-04
+# @Function      : 
+
+from __future__ import division
+import numpy as np
+import cv2
+import onnx
+import onnxruntime
+from ..utils import face_align
+
+__all__ = [
+    'ArcFaceONNX',
+]
+
+
+class ArcFaceONNX:
+    def __init__(self, model_file=None, session=None):
+        assert model_file is not None
+        self.model_file = model_file
+        self.session = session
+        self.taskname = 'recognition'
+        find_sub = False
+        find_mul = False
+        model = onnx.load(self.model_file)
+        graph = model.graph
+        for nid, node in enumerate(graph.node[:8]):
+            #print(nid, node.name)
+            if node.name.startswith('Sub') or node.name.startswith('_minus'):
+                find_sub = True
+            if node.name.startswith('Mul') or node.name.startswith('_mul'):
+                find_mul = True
+        if find_sub and find_mul:
+            #mxnet arcface model
+            input_mean = 0.0
+            input_std = 1.0
+        else:
+            input_mean = 127.5
+            input_std = 127.5
+        self.input_mean = input_mean
+        self.input_std = input_std
+        #print('input mean and std:', self.input_mean, self.input_std)
+        if self.session is None:
+            self.session = onnxruntime.InferenceSession(self.model_file, None)
+        input_cfg = self.session.get_inputs()[0]
+        input_shape = input_cfg.shape
+        input_name = input_cfg.name
+        self.input_size = tuple(input_shape[2:4][::-1])
+        self.input_shape = input_shape
+        outputs = self.session.get_outputs()
+        output_names = []
+        for out in outputs:
+            output_names.append(out.name)
+        self.input_name = input_name
+        self.output_names = output_names
+        assert len(self.output_names)==1
+        self.output_shape = outputs[0].shape
+
+    def prepare(self, ctx_id, **kwargs):
+        if ctx_id<0:
+            self.session.set_providers(['CPUExecutionProvider'])
+
+    def get(self, img, face):
+        aimg = face_align.norm_crop(img, landmark=face.kps, image_size=self.input_size[0])
+        face.embedding = self.get_feat(aimg).flatten()
+        return face.embedding
+
+    def compute_sim(self, feat1, feat2):
+        from numpy.linalg import norm
+        feat1 = feat1.ravel()
+        feat2 = feat2.ravel()
+        sim = np.dot(feat1, feat2) / (norm(feat1) * norm(feat2))
+        return sim
+
+    def get_feat(self, imgs):
+        if not isinstance(imgs, list):
+            imgs = [imgs]
+        input_size = self.input_size
+        
+        blob = cv2.dnn.blobFromImages(imgs, 1.0 / self.input_std, input_size,
+                                      (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
+        net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
+        return net_out
+
+    def forward(self, batch_data):
+        blob = (batch_data - self.input_mean) / self.input_std
+        net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
+        return net_out
+
+
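
`ArcFaceONNX.get` aligns the face from its 5-point `kps`, runs the recognition network, stores the 512-D embedding on the `Face`, and `compute_sim` returns the cosine similarity of two embeddings. A sketch, assuming a recognition ONNX file such as `w600k_r50.onnx` is available locally (it is not part of the detection/landmark subset used by this repo) and reusing the detector for the keypoints:

```python
import cv2
from insightface.app import FaceAnalysis
from insightface.model_zoo.arcface_onnx import ArcFaceONNX

app = FaceAnalysis(name='buffalo_l', root='./pretrained_weights/insightface')  # assumed model root
app.prepare(ctx_id=0, det_size=(640, 640))

rec = ArcFaceONNX(model_file='./models/w600k_r50.onnx')   # assumed path to a recognition model
rec.prepare(ctx_id=0)

img_a = cv2.imread('person_a.jpg')                        # assumed input images
img_b = cv2.imread('person_b.jpg')
face_a, face_b = app.get(img_a)[0], app.get(img_b)[0]

emb_a = rec.get(img_a, face_a)        # 512-D embedding, also stored as face_a.embedding
emb_b = rec.get(img_b, face_b)
print(rec.compute_sim(emb_a, emb_b))  # cosine similarity; higher means more likely the same identity
```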
diff --git a/src/utils/dependencies/insightface/model_zoo/attribute.py b/src/utils/dependencies/insightface/model_zoo/attribute.py
new file mode 100644
index 0000000000000000000000000000000000000000..40c34de3f0995499448cf5779004cc1e5f3564fb
--- /dev/null
+++ b/src/utils/dependencies/insightface/model_zoo/attribute.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+# @Organization  : insightface.ai
+# @Author        : Jia Guo
+# @Time          : 2021-06-19
+# @Function      : 
+
+from __future__ import division
+import numpy as np
+import cv2
+import onnx
+import onnxruntime
+from ..utils import face_align
+
+__all__ = [
+    'Attribute',
+]
+
+
+class Attribute:
+    def __init__(self, model_file=None, session=None):
+        assert model_file is not None
+        self.model_file = model_file
+        self.session = session
+        find_sub = False
+        find_mul = False
+        model = onnx.load(self.model_file)
+        graph = model.graph
+        for nid, node in enumerate(graph.node[:8]):
+            #print(nid, node.name)
+            if node.name.startswith('Sub') or node.name.startswith('_minus'):
+                find_sub = True
+            if node.name.startswith('Mul') or node.name.startswith('_mul'):
+                find_mul = True
+            if nid<3 and node.name=='bn_data':
+                find_sub = True
+                find_mul = True
+        if find_sub and find_mul:
+            #mxnet arcface model
+            input_mean = 0.0
+            input_std = 1.0
+        else:
+            input_mean = 127.5
+            input_std = 128.0
+        self.input_mean = input_mean
+        self.input_std = input_std
+        #print('input mean and std:', model_file, self.input_mean, self.input_std)
+        if self.session is None:
+            self.session = onnxruntime.InferenceSession(self.model_file, None)
+        input_cfg = self.session.get_inputs()[0]
+        input_shape = input_cfg.shape
+        input_name = input_cfg.name
+        self.input_size = tuple(input_shape[2:4][::-1])
+        self.input_shape = input_shape
+        outputs = self.session.get_outputs()
+        output_names = []
+        for out in outputs:
+            output_names.append(out.name)
+        self.input_name = input_name
+        self.output_names = output_names
+        assert len(self.output_names)==1
+        output_shape = outputs[0].shape
+        #print('init output_shape:', output_shape)
+        if output_shape[1]==3:
+            self.taskname = 'genderage'
+        else:
+            self.taskname = 'attribute_%d'%output_shape[1]
+
+    def prepare(self, ctx_id, **kwargs):
+        if ctx_id<0:
+            self.session.set_providers(['CPUExecutionProvider'])
+
+    def get(self, img, face):
+        bbox = face.bbox
+        w, h = (bbox[2] - bbox[0]), (bbox[3] - bbox[1])
+        center = (bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2
+        rotate = 0
+        _scale = self.input_size[0]  / (max(w, h)*1.5)
+        #print('param:', img.shape, bbox, center, self.input_size, _scale, rotate)
+        aimg, M = face_align.transform(img, center, self.input_size[0], _scale, rotate)
+        input_size = tuple(aimg.shape[0:2][::-1])
+        #assert input_size==self.input_size
+        blob = cv2.dnn.blobFromImage(aimg, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
+        pred = self.session.run(self.output_names, {self.input_name : blob})[0][0]
+        if self.taskname=='genderage':
+            assert len(pred)==3
+            gender = np.argmax(pred[:2])
+            age = int(np.round(pred[2]*100))
+            face['gender'] = gender
+            face['age'] = age
+            return gender, age
+        else:
+            return pred
+
+
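
`Attribute` is routed by output width: a 3-dimensional output becomes the `genderage` task (two gender logits plus age/100), anything else is exposed as a raw `attribute_N` vector. A sketch, assuming a `genderage.onnx` file is available locally (the full upstream buffalo_l pack ships one; the subset used by this repo does not):

```python
import cv2
from insightface.app import FaceAnalysis
from insightface.model_zoo.attribute import Attribute

app = FaceAnalysis(name='buffalo_l', root='./pretrained_weights/insightface')  # assumed model root
app.prepare(ctx_id=0, det_size=(640, 640))

attr = Attribute(model_file='./models/genderage.onnx')    # assumed path to a genderage model
attr.prepare(ctx_id=0)

img = cv2.imread('portrait.jpg')                          # assumed input image
face = app.get(img)[0]
gender, age = attr.get(img, face)     # also sets face['gender'] (1 = male, 0 = female) and face['age']
print(attr.taskname, face.sex, age)   # Face.sex maps gender to 'M' / 'F'
```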
diff --git a/src/utils/dependencies/insightface/model_zoo/inswapper.py b/src/utils/dependencies/insightface/model_zoo/inswapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..f321c627ee66cceddcab98b561b997441dd4f768
--- /dev/null
+++ b/src/utils/dependencies/insightface/model_zoo/inswapper.py
@@ -0,0 +1,114 @@
+import time
+import numpy as np
+import onnxruntime
+import cv2
+import onnx
+from onnx import numpy_helper
+from ..utils import face_align
+
+
+
+
+class INSwapper():
+    def __init__(self, model_file=None, session=None):
+        self.model_file = model_file
+        self.session = session
+        model = onnx.load(self.model_file)
+        graph = model.graph
+        self.emap = numpy_helper.to_array(graph.initializer[-1])
+        self.input_mean = 0.0
+        self.input_std = 255.0
+        #print('input mean and std:', model_file, self.input_mean, self.input_std)
+        if self.session is None:
+            self.session = onnxruntime.InferenceSession(self.model_file, None)
+        inputs = self.session.get_inputs()
+        self.input_names = []
+        for inp in inputs:
+            self.input_names.append(inp.name)
+        outputs = self.session.get_outputs()
+        output_names = []
+        for out in outputs:
+            output_names.append(out.name)
+        self.output_names = output_names
+        assert len(self.output_names)==1
+        output_shape = outputs[0].shape
+        input_cfg = inputs[0]
+        input_shape = input_cfg.shape
+        self.input_shape = input_shape
+        # print('inswapper-shape:', self.input_shape)
+        self.input_size = tuple(input_shape[2:4][::-1])
+
+    def forward(self, img, latent):
+        img = (img - self.input_mean) / self.input_std
+        pred = self.session.run(self.output_names, {self.input_names[0]: img, self.input_names[1]: latent})[0]
+        return pred
+
+    def get(self, img, target_face, source_face, paste_back=True):
+        face_mask = np.zeros((img.shape[0], img.shape[1]), np.uint8)
+        cv2.fillPoly(face_mask, np.array([target_face.landmark_2d_106[[1,9,10,11,12,13,14,15,16,2,3,4,5,6,7,8,0,24,23,22,21,20,19,18,32,31,30,29,28,27,26,25,17,101,105,104,103,51,49,48,43]].astype('int64')]), 1)
+        aimg, M = face_align.norm_crop2(img, target_face.kps, self.input_size[0])
+        blob = cv2.dnn.blobFromImage(aimg, 1.0 / self.input_std, self.input_size,
+                                      (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
+        latent = source_face.normed_embedding.reshape((1,-1))
+        latent = np.dot(latent, self.emap)
+        latent /= np.linalg.norm(latent)
+        pred = self.session.run(self.output_names, {self.input_names[0]: blob, self.input_names[1]: latent})[0]
+        #print(latent.shape, latent.dtype, pred.shape)
+        img_fake = pred.transpose((0,2,3,1))[0]
+        bgr_fake = np.clip(255 * img_fake, 0, 255).astype(np.uint8)[:,:,::-1]
+        if not paste_back:
+            return bgr_fake, M
+        else:
+            target_img = img
+            fake_diff = bgr_fake.astype(np.float32) - aimg.astype(np.float32)
+            fake_diff = np.abs(fake_diff).mean(axis=2)
+            fake_diff[:2,:] = 0
+            fake_diff[-2:,:] = 0
+            fake_diff[:,:2] = 0
+            fake_diff[:,-2:] = 0
+            IM = cv2.invertAffineTransform(M)
+            img_white = np.full((aimg.shape[0],aimg.shape[1]), 255, dtype=np.float32)
+            bgr_fake = cv2.warpAffine(bgr_fake, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
+            img_white = cv2.warpAffine(img_white, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
+            fake_diff = cv2.warpAffine(fake_diff, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
+            img_white[img_white>20] = 255
+            fthresh = 10
+            fake_diff[fake_diff<fthresh] = 0
+            fake_diff[fake_diff>=fthresh] = 255
+            img_mask = img_white
+            mask_h_inds, mask_w_inds = np.where(img_mask==255)
+            mask_h = np.max(mask_h_inds) - np.min(mask_h_inds)
+            mask_w = np.max(mask_w_inds) - np.min(mask_w_inds)
+            mask_size = int(np.sqrt(mask_h*mask_w))
+            k = max(mask_size//10, 10)
+            #k = max(mask_size//20, 6)
+            #k = 6
+            kernel = np.ones((k,k),np.uint8)
+            img_mask = cv2.erode(img_mask,kernel,iterations = 1)
+            kernel = np.ones((2,2),np.uint8)
+            fake_diff = cv2.dilate(fake_diff,kernel,iterations = 1)
+
+            face_mask = cv2.erode(face_mask,np.ones((11,11),np.uint8),iterations = 1)
+            fake_diff[face_mask==1] = 255
+
+            k = max(mask_size//20, 5)
+            #k = 3
+            #k = 3
+            kernel_size = (k, k)
+            blur_size = tuple(2*i+1 for i in kernel_size)
+            img_mask = cv2.GaussianBlur(img_mask, blur_size, 0)
+            k = 5
+            kernel_size = (k, k)
+            blur_size = tuple(2*i+1 for i in kernel_size)
+            fake_diff = cv2.blur(fake_diff, (11,11), 0)
+            ##fake_diff = cv2.GaussianBlur(fake_diff, blur_size, 0)
+            # print('blur_size: ', blur_size)
+            # fake_diff = cv2.blur(fake_diff, (21, 21), 0) # blur_size
+            img_mask /= 255
+            fake_diff /= 255
+            # img_mask = fake_diff
+            img_mask = img_mask*fake_diff
+            img_mask = np.reshape(img_mask, [img_mask.shape[0],img_mask.shape[1],1])
+            fake_merged = img_mask * bgr_fake + (1-img_mask) * target_img.astype(np.float32)
+            fake_merged = fake_merged.astype(np.uint8)
+            return fake_merged
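
`INSwapper.get` replaces the identity of `target_face` in `img` with `source_face`'s embedding; with `paste_back=True` the generated crop is warped back and blended using the eroded/blurred masks built above. A sketch, assuming an `inswapper_128.onnx` model and a recognition model are available locally (neither ships with this repo); the recognition step is needed because the swap consumes `source_face.normed_embedding`:

```python
import cv2
from insightface.app import FaceAnalysis
from insightface.model_zoo.arcface_onnx import ArcFaceONNX
from insightface.model_zoo.inswapper import INSwapper

app = FaceAnalysis(name='buffalo_l', root='./pretrained_weights/insightface')  # assumed model root
app.prepare(ctx_id=0, det_size=(640, 640))
rec = ArcFaceONNX(model_file='./models/w600k_r50.onnx')        # assumed recognition model
rec.prepare(ctx_id=0)
swapper = INSwapper(model_file='./models/inswapper_128.onnx')  # assumed swapping model

target_img = cv2.imread('target.jpg')                          # assumed input images
source_img = cv2.imread('source.jpg')
target_face = app.get(target_img)[0]   # needs kps and landmark_2d_106 (both provided by buffalo_l)
source_face = app.get(source_img)[0]
rec.get(source_img, source_face)       # fills source_face.embedding -> normed_embedding

swapped = swapper.get(target_img, target_face, source_face, paste_back=True)
cv2.imwrite('swapped.jpg', swapped)
```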
diff --git a/src/utils/dependencies/insightface/model_zoo/landmark.py b/src/utils/dependencies/insightface/model_zoo/landmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..598b4b29a2d0674d8bb25b681f921c61460d101c
--- /dev/null
+++ b/src/utils/dependencies/insightface/model_zoo/landmark.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+# @Organization  : insightface.ai
+# @Author        : Jia Guo
+# @Time          : 2021-05-04
+# @Function      : 
+
+from __future__ import division
+import numpy as np
+import cv2
+import onnx
+import onnxruntime
+from ..utils import face_align
+from ..utils import transform
+from ..data import get_object
+
+__all__ = [
+    'Landmark',
+]
+
+
+class Landmark:
+    def __init__(self, model_file=None, session=None):
+        assert model_file is not None
+        self.model_file = model_file
+        self.session = session
+        find_sub = False
+        find_mul = False
+        model = onnx.load(self.model_file)
+        graph = model.graph
+        for nid, node in enumerate(graph.node[:8]):
+            #print(nid, node.name)
+            if node.name.startswith('Sub') or node.name.startswith('_minus'):
+                find_sub = True
+            if node.name.startswith('Mul') or node.name.startswith('_mul'):
+                find_mul = True
+            if nid<3 and node.name=='bn_data':
+                find_sub = True
+                find_mul = True
+        if find_sub and find_mul:
+            #mxnet arcface model
+            input_mean = 0.0
+            input_std = 1.0
+        else:
+            input_mean = 127.5
+            input_std = 128.0
+        self.input_mean = input_mean
+        self.input_std = input_std
+        #print('input mean and std:', model_file, self.input_mean, self.input_std)
+        if self.session is None:
+            self.session = onnxruntime.InferenceSession(self.model_file, None)
+        input_cfg = self.session.get_inputs()[0]
+        input_shape = input_cfg.shape
+        input_name = input_cfg.name
+        self.input_size = tuple(input_shape[2:4][::-1])
+        self.input_shape = input_shape
+        outputs = self.session.get_outputs()
+        output_names = []
+        for out in outputs:
+            output_names.append(out.name)
+        self.input_name = input_name
+        self.output_names = output_names
+        assert len(self.output_names)==1
+        output_shape = outputs[0].shape
+        self.require_pose = False
+        #print('init output_shape:', output_shape)
+        if output_shape[1]==3309:
+            self.lmk_dim = 3
+            self.lmk_num = 68
+            self.mean_lmk = get_object('meanshape_68.pkl')
+            self.require_pose = True
+        else:
+            self.lmk_dim = 2
+            self.lmk_num = output_shape[1]//self.lmk_dim
+        self.taskname = 'landmark_%dd_%d'%(self.lmk_dim, self.lmk_num)
+
+    def prepare(self, ctx_id, **kwargs):
+        if ctx_id<0:
+            self.session.set_providers(['CPUExecutionProvider'])
+
+    def get(self, img, face):
+        bbox = face.bbox
+        w, h = (bbox[2] - bbox[0]), (bbox[3] - bbox[1])
+        center = (bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2
+        rotate = 0
+        _scale = self.input_size[0]  / (max(w, h)*1.5)
+        #print('param:', img.shape, bbox, center, self.input_size, _scale, rotate)
+        aimg, M = face_align.transform(img, center, self.input_size[0], _scale, rotate)
+        input_size = tuple(aimg.shape[0:2][::-1])
+        #assert input_size==self.input_size
+        blob = cv2.dnn.blobFromImage(aimg, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
+        pred = self.session.run(self.output_names, {self.input_name : blob})[0][0]
+        if pred.shape[0] >= 3000:
+            pred = pred.reshape((-1, 3))
+        else:
+            pred = pred.reshape((-1, 2))
+        if self.lmk_num < pred.shape[0]:
+            pred = pred[self.lmk_num*-1:,:]
+        pred[:, 0:2] += 1
+        pred[:, 0:2] *= (self.input_size[0] // 2)
+        if pred.shape[1] == 3:
+            pred[:, 2] *= (self.input_size[0] // 2)
+
+        IM = cv2.invertAffineTransform(M)
+        pred = face_align.trans_points(pred, IM)
+        face[self.taskname] = pred
+        if self.require_pose:
+            P = transform.estimate_affine_matrix_3d23d(self.mean_lmk, pred)
+            s, R, t = transform.P2sRt(P)
+            rx, ry, rz = transform.matrix2angle(R)
+            pose = np.array( [rx, ry, rz], dtype=np.float32 )
+            face['pose'] = pose #pitch, yaw, roll
+        return pred
+
+
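
Depending on the ONNX output width, `Landmark` yields either 106 2-D points (`landmark_2d_106`) or 68 3-D points plus a head-pose estimate (`landmark_3d_68` and `face['pose']`); the prediction is attached to the `Face` under the model's `taskname`. `FaceAnalysis` already runs such models automatically, but they can also be called directly, e.g. with the `2d106det.onnx` file assumed to sit under the repo's buffalo_l weights:

```python
import cv2
from insightface.app import FaceAnalysis
from insightface.model_zoo.landmark import Landmark

app = FaceAnalysis(name='buffalo_l', root='./pretrained_weights/insightface')  # assumed model root
app.prepare(ctx_id=0, det_size=(640, 640))

lmk = Landmark(model_file='./pretrained_weights/insightface/models/buffalo_l/2d106det.onnx')
lmk.prepare(ctx_id=0)

img = cv2.imread('portrait.jpg')      # assumed input image
face = app.get(img)[0]
pts = lmk.get(img, face)              # (106, 2) array, also stored as face['landmark_2d_106']
print(lmk.taskname, pts.shape)
```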
diff --git a/src/utils/dependencies/insightface/model_zoo/model_store.py b/src/utils/dependencies/insightface/model_zoo/model_store.py
new file mode 100644
index 0000000000000000000000000000000000000000..50bb85d314f5b7a0ea8211d2cd21186e32791592
--- /dev/null
+++ b/src/utils/dependencies/insightface/model_zoo/model_store.py
@@ -0,0 +1,103 @@
+"""
+This code file mainly comes from https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/model_store.py
+"""
+from __future__ import print_function
+
+__all__ = ['get_model_file']
+import os
+import zipfile
+import glob
+
+from ..utils import download, check_sha1
+
+_model_sha1 = {
+    name: checksum
+    for checksum, name in [
+        ('95be21b58e29e9c1237f229dae534bd854009ce0', 'arcface_r100_v1'),
+        ('', 'arcface_mfn_v1'),
+        ('39fd1e087a2a2ed70a154ac01fecaa86c315d01b', 'retinaface_r50_v1'),
+        ('2c9de8116d1f448fd1d4661f90308faae34c990a', 'retinaface_mnet025_v1'),
+        ('0db1d07921d005e6c9a5b38e059452fc5645e5a4', 'retinaface_mnet025_v2'),
+        ('7dd8111652b7aac2490c5dcddeb268e53ac643e6', 'genderage_v1'),
+    ]
+}
+
+base_repo_url = 'https://insightface.ai/files/'
+_url_format = '{repo_url}models/{file_name}.zip'
+
+
+def short_hash(name):
+    if name not in _model_sha1:
+        raise ValueError(
+            'Pretrained model for {name} is not available.'.format(name=name))
+    return _model_sha1[name][:8]
+
+
+def find_params_file(dir_path):
+    if not os.path.exists(dir_path):
+        return None
+    paths = glob.glob("%s/*.params" % dir_path)
+    if len(paths) == 0:
+        return None
+    paths = sorted(paths)
+    return paths[-1]
+
+
+def get_model_file(name, root=os.path.join('~', '.insightface', 'models')):
+    r"""Return location for the pretrained on local file system.
+
+    This function will download from the online model zoo when the model cannot be found locally or its checksum does not match.
+    The root directory will be created if it doesn't exist.
+
+    Parameters
+    ----------
+    name : str
+        Name of the model.
+    root : str, default '~/.insightface/models'
+        Location for keeping the model parameters.
+
+    Returns
+    -------
+    file_path
+        Path to the requested pretrained model file.
+    """
+
+    file_name = name
+    root = os.path.expanduser(root)
+    dir_path = os.path.join(root, name)
+    file_path = find_params_file(dir_path)
+    #file_path = os.path.join(root, file_name + '.params')
+    sha1_hash = _model_sha1[name]
+    if file_path is not None:
+        if check_sha1(file_path, sha1_hash):
+            return file_path
+        else:
+            print(
+                'Mismatch in the content of model file detected. Downloading again.'
+            )
+    else:
+        print('Model file is not found. Downloading.')
+
+    if not os.path.exists(root):
+        os.makedirs(root)
+    if not os.path.exists(dir_path):
+        os.makedirs(dir_path)
+
+    zip_file_path = os.path.join(root, file_name + '.zip')
+    repo_url = base_repo_url
+    if repo_url[-1] != '/':
+        repo_url = repo_url + '/'
+    download(_url_format.format(repo_url=repo_url, file_name=file_name),
+             path=zip_file_path,
+             overwrite=True)
+    with zipfile.ZipFile(zip_file_path) as zf:
+        zf.extractall(dir_path)
+    os.remove(zip_file_path)
+    file_path = find_params_file(dir_path)
+
+    if check_sha1(file_path, sha1_hash):
+        return file_path
+    else:
+        raise ValueError(
+            'Downloaded file has different hash. Please try again.')
+
diff --git a/src/utils/dependencies/insightface/model_zoo/model_zoo.py b/src/utils/dependencies/insightface/model_zoo/model_zoo.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8366e2a5461d5d6688f23e102a40944330084a4
--- /dev/null
+++ b/src/utils/dependencies/insightface/model_zoo/model_zoo.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+# @Organization  : insightface.ai
+# @Author        : Jia Guo
+# @Time          : 2021-05-04
+# @Function      :
+
+import os
+import os.path as osp
+import glob
+import onnxruntime
+from .arcface_onnx import *
+from .retinaface import *
+#from .scrfd import *
+from .landmark import *
+from .attribute import Attribute
+from .inswapper import INSwapper
+from ..utils import download_onnx
+
+__all__ = ['get_model']
+
+
+class PickableInferenceSession(onnxruntime.InferenceSession):
+    # This is a wrapper to make the current InferenceSession class pickable.
+    def __init__(self, model_path, **kwargs):
+        super().__init__(model_path, **kwargs)
+        self.model_path = model_path
+
+    def __getstate__(self):
+        return {'model_path': self.model_path}
+
+    def __setstate__(self, values):
+        model_path = values['model_path']
+        self.__init__(model_path)
+
+class ModelRouter:
+    def __init__(self, onnx_file):
+        self.onnx_file = onnx_file
+
+    def get_model(self, **kwargs):
+        session = PickableInferenceSession(self.onnx_file, **kwargs)
+        # print(f'Applied providers: {session._providers}, with options: {session._provider_options}')
+        inputs = session.get_inputs()
+        input_cfg = inputs[0]
+        input_shape = input_cfg.shape
+        outputs = session.get_outputs()
+
+        if len(outputs)>=5:
+            return RetinaFace(model_file=self.onnx_file, session=session)
+        elif input_shape[2]==192 and input_shape[3]==192:
+            return Landmark(model_file=self.onnx_file, session=session)
+        elif input_shape[2]==96 and input_shape[3]==96:
+            return Attribute(model_file=self.onnx_file, session=session)
+        elif len(inputs)==2 and input_shape[2]==128 and input_shape[3]==128:
+            return INSwapper(model_file=self.onnx_file, session=session)
+        elif input_shape[2]==input_shape[3] and input_shape[2]>=112 and input_shape[2]%16==0:
+            return ArcFaceONNX(model_file=self.onnx_file, session=session)
+        else:
+            #raise RuntimeError('error on model routing')
+            return None
+
+def find_onnx_file(dir_path):
+    if not os.path.exists(dir_path):
+        return None
+    paths = glob.glob("%s/*.onnx" % dir_path)
+    if len(paths) == 0:
+        return None
+    paths = sorted(paths)
+    return paths[-1]
+
+def get_default_providers():
+    return ['CUDAExecutionProvider', 'CPUExecutionProvider']
+
+def get_default_provider_options():
+    return None
+
+def get_model(name, **kwargs):
+    root = kwargs.get('root', '~/.insightface')
+    root = os.path.expanduser(root)
+    model_root = osp.join(root, 'models')
+    allow_download = kwargs.get('download', False)
+    download_zip = kwargs.get('download_zip', False)
+    if not name.endswith('.onnx'):
+        model_dir = os.path.join(model_root, name)
+        model_file = find_onnx_file(model_dir)
+        if model_file is None:
+            return None
+    else:
+        model_file = name
+    if not osp.exists(model_file) and allow_download:
+        model_file = download_onnx('models', model_file, root=root, download_zip=download_zip)
+    assert osp.exists(model_file), 'model_file %s should exist'%model_file
+    assert osp.isfile(model_file), 'model_file %s should be a file'%model_file
+    router = ModelRouter(model_file)
+    providers = kwargs.get('providers', get_default_providers())
+    provider_options = kwargs.get('provider_options', get_default_provider_options())
+    model = router.get_model(providers=providers, provider_options=provider_options)
+    return model
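
`get_model` accepts either a model-pack name resolved under `root/models/<name>` or a direct path to an `.onnx` file, and `ModelRouter` picks the wrapper class purely from the graph's input/output shapes (five or more outputs: detector; 192x192 input: landmark; 96x96: attribute; two inputs at 128x128: swapper; square input of at least 112 divisible by 16: recognition). A CPU-only sketch with the repo's assumed detector path:

```python
from insightface.model_zoo import get_model

det = get_model(
    './pretrained_weights/insightface/models/buffalo_l/det_10g.onnx',   # assumed local path
    providers=['CPUExecutionProvider'],   # defaults to CUDA + CPU when omitted
)
print(type(det).__name__, det.taskname)   # routed to the RetinaFace wrapper, taskname 'detection'
det.prepare(ctx_id=-1, input_size=(640, 640), det_thresh=0.5)
```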
diff --git a/src/utils/dependencies/insightface/model_zoo/retinaface.py b/src/utils/dependencies/insightface/model_zoo/retinaface.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc4ad91ed70688b38503127137e928dc7e5433e1
--- /dev/null
+++ b/src/utils/dependencies/insightface/model_zoo/retinaface.py
@@ -0,0 +1,301 @@
+# -*- coding: utf-8 -*-
+# @Organization  : insightface.ai
+# @Author        : Jia Guo
+# @Time          : 2021-09-18
+# @Function      : 
+
+from __future__ import division
+import datetime
+import numpy as np
+import onnx
+import onnxruntime
+import os
+import os.path as osp
+import cv2
+import sys
+
+def softmax(z):
+    assert len(z.shape) == 2
+    s = np.max(z, axis=1)
+    s = s[:, np.newaxis] # necessary step to do broadcasting
+    e_x = np.exp(z - s)
+    div = np.sum(e_x, axis=1)
+    div = div[:, np.newaxis] # ditto
+    return e_x / div
+
+def distance2bbox(points, distance, max_shape=None):
+    """Decode distance prediction to bounding box.
+
+    Args:
+        points (Tensor): Shape (n, 2), [x, y].
+        distance (Tensor): Distance from the given point to 4
+            boundaries (left, top, right, bottom).
+        max_shape (tuple): Shape of the image.
+
+    Returns:
+        Tensor: Decoded bboxes.
+    """
+    x1 = points[:, 0] - distance[:, 0]
+    y1 = points[:, 1] - distance[:, 1]
+    x2 = points[:, 0] + distance[:, 2]
+    y2 = points[:, 1] + distance[:, 3]
+    if max_shape is not None:
+        x1 = x1.clamp(min=0, max=max_shape[1])
+        y1 = y1.clamp(min=0, max=max_shape[0])
+        x2 = x2.clamp(min=0, max=max_shape[1])
+        y2 = y2.clamp(min=0, max=max_shape[0])
+    return np.stack([x1, y1, x2, y2], axis=-1)
+
+def distance2kps(points, distance, max_shape=None):
+    """Decode distance prediction to bounding box.
+
+    Args:
+        points (Tensor): Shape (n, 2), [x, y].
+        distance (Tensor): Distance from the given point to 4
+            boundaries (left, top, right, bottom).
+        max_shape (tuple): Shape of the image.
+
+    Returns:
+        Tensor: Decoded bboxes.
+    """
+    preds = []
+    for i in range(0, distance.shape[1], 2):
+        px = points[:, i%2] + distance[:, i]
+        py = points[:, i%2+1] + distance[:, i+1]
+        if max_shape is not None:
+            px = px.clamp(min=0, max=max_shape[1])
+            py = py.clamp(min=0, max=max_shape[0])
+        preds.append(px)
+        preds.append(py)
+    return np.stack(preds, axis=-1)
+
+class RetinaFace:
+    def __init__(self, model_file=None, session=None):
+        import onnxruntime
+        self.model_file = model_file
+        self.session = session
+        self.taskname = 'detection'
+        if self.session is None:
+            assert self.model_file is not None
+            assert osp.exists(self.model_file)
+            self.session = onnxruntime.InferenceSession(self.model_file, None)
+        self.center_cache = {}
+        self.nms_thresh = 0.4
+        self.det_thresh = 0.5
+        self._init_vars()
+
+    def _init_vars(self):
+        input_cfg = self.session.get_inputs()[0]
+        input_shape = input_cfg.shape
+        #print(input_shape)
+        if isinstance(input_shape[2], str):
+            self.input_size = None
+        else:
+            self.input_size = tuple(input_shape[2:4][::-1])
+        #print('image_size:', self.image_size)
+        input_name = input_cfg.name
+        self.input_shape = input_shape
+        outputs = self.session.get_outputs()
+        output_names = []
+        for o in outputs:
+            output_names.append(o.name)
+        self.input_name = input_name
+        self.output_names = output_names
+        self.input_mean = 127.5
+        self.input_std = 128.0
+        #print(self.output_names)
+        #assert len(outputs)==10 or len(outputs)==15
+        self.use_kps = False
+        self._anchor_ratio = 1.0
+        self._num_anchors = 1
+        if len(outputs)==6:
+            self.fmc = 3
+            self._feat_stride_fpn = [8, 16, 32]
+            self._num_anchors = 2
+        elif len(outputs)==9:
+            self.fmc = 3
+            self._feat_stride_fpn = [8, 16, 32]
+            self._num_anchors = 2
+            self.use_kps = True
+        elif len(outputs)==10:
+            self.fmc = 5
+            self._feat_stride_fpn = [8, 16, 32, 64, 128]
+            self._num_anchors = 1
+        elif len(outputs)==15:
+            self.fmc = 5
+            self._feat_stride_fpn = [8, 16, 32, 64, 128]
+            self._num_anchors = 1
+            self.use_kps = True
+
+    def prepare(self, ctx_id, **kwargs):
+        if ctx_id<0:
+            self.session.set_providers(['CPUExecutionProvider'])
+        nms_thresh = kwargs.get('nms_thresh', None)
+        if nms_thresh is not None:
+            self.nms_thresh = nms_thresh
+        det_thresh = kwargs.get('det_thresh', None)
+        if det_thresh is not None:
+            self.det_thresh = det_thresh
+        input_size = kwargs.get('input_size', None)
+        if input_size is not None:
+            if self.input_size is not None:
+                print('warning: det_size is already set in detection model, ignore')
+            else:
+                self.input_size = input_size
+
+    def forward(self, img, threshold):
+        scores_list = []
+        bboxes_list = []
+        kpss_list = []
+        input_size = tuple(img.shape[0:2][::-1])
+        blob = cv2.dnn.blobFromImage(img, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
+        net_outs = self.session.run(self.output_names, {self.input_name : blob})
+
+        input_height = blob.shape[2]
+        input_width = blob.shape[3]
+        fmc = self.fmc
+        for idx, stride in enumerate(self._feat_stride_fpn):
+            scores = net_outs[idx]
+            bbox_preds = net_outs[idx+fmc]
+            bbox_preds = bbox_preds * stride
+            if self.use_kps:
+                kps_preds = net_outs[idx+fmc*2] * stride
+            height = input_height // stride
+            width = input_width // stride
+            K = height * width
+            key = (height, width, stride)
+            if key in self.center_cache:
+                anchor_centers = self.center_cache[key]
+            else:
+                #solution-1, c style:
+                #anchor_centers = np.zeros( (height, width, 2), dtype=np.float32 )
+                #for i in range(height):
+                #    anchor_centers[i, :, 1] = i
+                #for i in range(width):
+                #    anchor_centers[:, i, 0] = i
+
+                #solution-2:
+                #ax = np.arange(width, dtype=np.float32)
+                #ay = np.arange(height, dtype=np.float32)
+                #xv, yv = np.meshgrid(np.arange(width), np.arange(height))
+                #anchor_centers = np.stack([xv, yv], axis=-1).astype(np.float32)
+
+                #solution-3:
+                anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
+                #print(anchor_centers.shape)
+
+                anchor_centers = (anchor_centers * stride).reshape( (-1, 2) )
+                if self._num_anchors>1:
+                    anchor_centers = np.stack([anchor_centers]*self._num_anchors, axis=1).reshape( (-1,2) )
+                if len(self.center_cache)<100:
+                    self.center_cache[key] = anchor_centers
+
+            pos_inds = np.where(scores>=threshold)[0]
+            bboxes = distance2bbox(anchor_centers, bbox_preds)
+            pos_scores = scores[pos_inds]
+            pos_bboxes = bboxes[pos_inds]
+            scores_list.append(pos_scores)
+            bboxes_list.append(pos_bboxes)
+            if self.use_kps:
+                kpss = distance2kps(anchor_centers, kps_preds)
+                #kpss = kps_preds
+                kpss = kpss.reshape( (kpss.shape[0], -1, 2) )
+                pos_kpss = kpss[pos_inds]
+                kpss_list.append(pos_kpss)
+        return scores_list, bboxes_list, kpss_list
+
+    def detect(self, img, input_size = None, max_num=0, metric='default'):
+        assert input_size is not None or self.input_size is not None
+        input_size = self.input_size if input_size is None else input_size
+            
+        im_ratio = float(img.shape[0]) / img.shape[1]
+        model_ratio = float(input_size[1]) / input_size[0]
+        if im_ratio>model_ratio:
+            new_height = input_size[1]
+            new_width = int(new_height / im_ratio)
+        else:
+            new_width = input_size[0]
+            new_height = int(new_width * im_ratio)
+        det_scale = float(new_height) / img.shape[0]
+        resized_img = cv2.resize(img, (new_width, new_height))
+        det_img = np.zeros( (input_size[1], input_size[0], 3), dtype=np.uint8 )
+        det_img[:new_height, :new_width, :] = resized_img
+
+        scores_list, bboxes_list, kpss_list = self.forward(det_img, self.det_thresh)
+
+        scores = np.vstack(scores_list)
+        scores_ravel = scores.ravel()
+        order = scores_ravel.argsort()[::-1]
+        bboxes = np.vstack(bboxes_list) / det_scale
+        if self.use_kps:
+            kpss = np.vstack(kpss_list) / det_scale
+        pre_det = np.hstack((bboxes, scores)).astype(np.float32, copy=False)
+        pre_det = pre_det[order, :]
+        keep = self.nms(pre_det)
+        det = pre_det[keep, :]
+        if self.use_kps:
+            kpss = kpss[order,:,:]
+            kpss = kpss[keep,:,:]
+        else:
+            kpss = None
+        if max_num > 0 and det.shape[0] > max_num:
+            area = (det[:, 2] - det[:, 0]) * (det[:, 3] -
+                                                    det[:, 1])
+            img_center = img.shape[0] // 2, img.shape[1] // 2
+            offsets = np.vstack([
+                (det[:, 0] + det[:, 2]) / 2 - img_center[1],
+                (det[:, 1] + det[:, 3]) / 2 - img_center[0]
+            ])
+            offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
+            if metric=='max':
+                values = area
+            else:
+                values = area - offset_dist_squared * 2.0  # some extra weight on the centering
+            bindex = np.argsort(
+                values)[::-1]  # some extra weight on the centering
+            bindex = bindex[0:max_num]
+            det = det[bindex, :]
+            if kpss is not None:
+                kpss = kpss[bindex, :]
+        return det, kpss
+
+    def nms(self, dets):
+        thresh = self.nms_thresh
+        x1 = dets[:, 0]
+        y1 = dets[:, 1]
+        x2 = dets[:, 2]
+        y2 = dets[:, 3]
+        scores = dets[:, 4]
+
+        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+        order = scores.argsort()[::-1]
+
+        keep = []
+        while order.size > 0:
+            i = order[0]
+            keep.append(i)
+            xx1 = np.maximum(x1[i], x1[order[1:]])
+            yy1 = np.maximum(y1[i], y1[order[1:]])
+            xx2 = np.minimum(x2[i], x2[order[1:]])
+            yy2 = np.minimum(y2[i], y2[order[1:]])
+
+            w = np.maximum(0.0, xx2 - xx1 + 1)
+            h = np.maximum(0.0, yy2 - yy1 + 1)
+            inter = w * h
+            ovr = inter / (areas[i] + areas[order[1:]] - inter)
+
+            inds = np.where(ovr <= thresh)[0]
+            order = order[inds + 1]
+
+        return keep
+
+def get_retinaface(name, download=False, root='~/.insightface/models', **kwargs):
+    if not download:
+        assert os.path.exists(name)
+        return RetinaFace(name)
+    else:
+        from .model_store import get_model_file
+        _file = get_model_file("retinaface_%s" % name, root=root)
+        return RetinaFace(_file)
+
+
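
`RetinaFace.detect` letterboxes the input to `input_size`, decodes the per-stride distance maps into boxes (and 5-point keypoints when the model outputs them), applies NMS, and returns an `(N, 5)` array of `x1, y1, x2, y2, score` together with an `(N, 5, 2)` keypoint array (or `None`). A direct-use sketch with the assumed `det_10g.onnx` path, which exposes the keypoint branch:

```python
import cv2
from insightface.model_zoo.retinaface import RetinaFace

det = RetinaFace(model_file='./pretrained_weights/insightface/models/buffalo_l/det_10g.onnx')
det.prepare(ctx_id=0, input_size=(640, 640), det_thresh=0.5, nms_thresh=0.4)

img = cv2.imread('group_photo.jpg')           # assumed input image
bboxes, kpss = det.detect(img, max_num=0)     # max_num=0 keeps every detection above det_thresh
for (x1, y1, x2, y2, score), kps in zip(bboxes, kpss):
    print(int(x1), int(y1), int(x2), int(y2), round(float(score), 3), kps.shape)   # kps: (5, 2)
```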
diff --git a/src/utils/dependencies/insightface/model_zoo/scrfd.py b/src/utils/dependencies/insightface/model_zoo/scrfd.py
new file mode 100644
index 0000000000000000000000000000000000000000..674db4bba761157592dfb95c5d1638da1099f89c
--- /dev/null
+++ b/src/utils/dependencies/insightface/model_zoo/scrfd.py
@@ -0,0 +1,348 @@
+# -*- coding: utf-8 -*-
+# @Organization  : insightface.ai
+# @Author        : Jia Guo
+# @Time          : 2021-05-04
+# @Function      : 
+
+from __future__ import division
+import datetime
+import numpy as np
+import onnx
+import onnxruntime
+import os
+import os.path as osp
+import cv2
+import sys
+
+def softmax(z):
+    assert len(z.shape) == 2
+    s = np.max(z, axis=1)
+    s = s[:, np.newaxis] # necessary step to do broadcasting
+    e_x = np.exp(z - s)
+    div = np.sum(e_x, axis=1)
+    div = div[:, np.newaxis] # ditto
+    return e_x / div
+
+def distance2bbox(points, distance, max_shape=None):
+    """Decode distance prediction to bounding box.
+
+    Args:
+        points (Tensor): Shape (n, 2), [x, y].
+        distance (Tensor): Distance from the given point to 4
+            boundaries (left, top, right, bottom).
+        max_shape (tuple): Shape of the image.
+
+    Returns:
+        Tensor: Decoded bboxes.
+    """
+    x1 = points[:, 0] - distance[:, 0]
+    y1 = points[:, 1] - distance[:, 1]
+    x2 = points[:, 0] + distance[:, 2]
+    y2 = points[:, 1] + distance[:, 3]
+    if max_shape is not None:
+        x1 = x1.clamp(min=0, max=max_shape[1])
+        y1 = y1.clamp(min=0, max=max_shape[0])
+        x2 = x2.clamp(min=0, max=max_shape[1])
+        y2 = y2.clamp(min=0, max=max_shape[0])
+    return np.stack([x1, y1, x2, y2], axis=-1)
+
+def distance2kps(points, distance, max_shape=None):
+    """Decode distance prediction to bounding box.
+
+    Args:
+        points (Tensor): Shape (n, 2), [x, y].
+        distance (Tensor): Distance from the given point to 4
+            boundaries (left, top, right, bottom).
+        max_shape (tuple): Shape of the image.
+
+    Returns:
+        Tensor: Decoded bboxes.
+    """
+    preds = []
+    for i in range(0, distance.shape[1], 2):
+        px = points[:, i%2] + distance[:, i]
+        py = points[:, i%2+1] + distance[:, i+1]
+        if max_shape is not None:
+            px = px.clamp(min=0, max=max_shape[1])
+            py = py.clamp(min=0, max=max_shape[0])
+        preds.append(px)
+        preds.append(py)
+    return np.stack(preds, axis=-1)
+
+class SCRFD:
+    def __init__(self, model_file=None, session=None):
+        import onnxruntime
+        self.model_file = model_file
+        self.session = session
+        self.taskname = 'detection'
+        self.batched = False
+        if self.session is None:
+            assert self.model_file is not None
+            assert osp.exists(self.model_file)
+            self.session = onnxruntime.InferenceSession(self.model_file, None)
+        self.center_cache = {}
+        self.nms_thresh = 0.4
+        self.det_thresh = 0.5
+        self._init_vars()
+
+    def _init_vars(self):
+        input_cfg = self.session.get_inputs()[0]
+        input_shape = input_cfg.shape
+        #print(input_shape)
+        if isinstance(input_shape[2], str):
+            self.input_size = None
+        else:
+            self.input_size = tuple(input_shape[2:4][::-1])
+        #print('image_size:', self.image_size)
+        input_name = input_cfg.name
+        self.input_shape = input_shape
+        outputs = self.session.get_outputs()
+        if len(outputs[0].shape) == 3:
+            self.batched = True
+        output_names = []
+        for o in outputs:
+            output_names.append(o.name)
+        self.input_name = input_name
+        self.output_names = output_names
+        self.input_mean = 127.5
+        self.input_std = 128.0
+        #print(self.output_names)
+        #assert len(outputs)==10 or len(outputs)==15
+        self.use_kps = False
+        self._anchor_ratio = 1.0
+        self._num_anchors = 1
+        if len(outputs)==6:
+            self.fmc = 3
+            self._feat_stride_fpn = [8, 16, 32]
+            self._num_anchors = 2
+        elif len(outputs)==9:
+            self.fmc = 3
+            self._feat_stride_fpn = [8, 16, 32]
+            self._num_anchors = 2
+            self.use_kps = True
+        elif len(outputs)==10:
+            self.fmc = 5
+            self._feat_stride_fpn = [8, 16, 32, 64, 128]
+            self._num_anchors = 1
+        elif len(outputs)==15:
+            self.fmc = 5
+            self._feat_stride_fpn = [8, 16, 32, 64, 128]
+            self._num_anchors = 1
+            self.use_kps = True
+
+    def prepare(self, ctx_id, **kwargs):
+        if ctx_id<0:
+            self.session.set_providers(['CPUExecutionProvider'])
+        nms_thresh = kwargs.get('nms_thresh', None)
+        if nms_thresh is not None:
+            self.nms_thresh = nms_thresh
+        det_thresh = kwargs.get('det_thresh', None)
+        if det_thresh is not None:
+            self.det_thresh = det_thresh
+        input_size = kwargs.get('input_size', None)
+        if input_size is not None:
+            if self.input_size is not None:
+                print('warning: det_size is already set in scrfd model, ignore')
+            else:
+                self.input_size = input_size
+
+    def forward(self, img, threshold):
+        scores_list = []
+        bboxes_list = []
+        kpss_list = []
+        input_size = tuple(img.shape[0:2][::-1])
+        blob = cv2.dnn.blobFromImage(img, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
+        net_outs = self.session.run(self.output_names, {self.input_name : blob})
+
+        input_height = blob.shape[2]
+        input_width = blob.shape[3]
+        fmc = self.fmc
+        for idx, stride in enumerate(self._feat_stride_fpn):
+            # If the model supports a batch dim, take the first output
+            if self.batched:
+                scores = net_outs[idx][0]
+                bbox_preds = net_outs[idx + fmc][0]
+                bbox_preds = bbox_preds * stride
+                if self.use_kps:
+                    kps_preds = net_outs[idx + fmc * 2][0] * stride
+            # If the model doesn't support batching, take the output as is
+            else:
+                scores = net_outs[idx]
+                bbox_preds = net_outs[idx + fmc]
+                bbox_preds = bbox_preds * stride
+                if self.use_kps:
+                    kps_preds = net_outs[idx + fmc * 2] * stride
+
+            height = input_height // stride
+            width = input_width // stride
+            K = height * width
+            key = (height, width, stride)
+            if key in self.center_cache:
+                anchor_centers = self.center_cache[key]
+            else:
+                #solution-1, c style:
+                #anchor_centers = np.zeros( (height, width, 2), dtype=np.float32 )
+                #for i in range(height):
+                #    anchor_centers[i, :, 1] = i
+                #for i in range(width):
+                #    anchor_centers[:, i, 0] = i
+
+                #solution-2:
+                #ax = np.arange(width, dtype=np.float32)
+                #ay = np.arange(height, dtype=np.float32)
+                #xv, yv = np.meshgrid(np.arange(width), np.arange(height))
+                #anchor_centers = np.stack([xv, yv], axis=-1).astype(np.float32)
+
+                #solution-3:
+                anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
+                #print(anchor_centers.shape)
+
+                anchor_centers = (anchor_centers * stride).reshape( (-1, 2) )
+                if self._num_anchors>1:
+                    anchor_centers = np.stack([anchor_centers]*self._num_anchors, axis=1).reshape( (-1,2) )
+                if len(self.center_cache)<100:
+                    self.center_cache[key] = anchor_centers
+
+            pos_inds = np.where(scores>=threshold)[0]
+            bboxes = distance2bbox(anchor_centers, bbox_preds)
+            pos_scores = scores[pos_inds]
+            pos_bboxes = bboxes[pos_inds]
+            scores_list.append(pos_scores)
+            bboxes_list.append(pos_bboxes)
+            if self.use_kps:
+                kpss = distance2kps(anchor_centers, kps_preds)
+                #kpss = kps_preds
+                kpss = kpss.reshape( (kpss.shape[0], -1, 2) )
+                pos_kpss = kpss[pos_inds]
+                kpss_list.append(pos_kpss)
+        return scores_list, bboxes_list, kpss_list
+
+    def detect(self, img, input_size = None, max_num=0, metric='default'):
+        assert input_size is not None or self.input_size is not None
+        input_size = self.input_size if input_size is None else input_size
+
+        im_ratio = float(img.shape[0]) / img.shape[1]
+        model_ratio = float(input_size[1]) / input_size[0]
+        if im_ratio>model_ratio:
+            new_height = input_size[1]
+            new_width = int(new_height / im_ratio)
+        else:
+            new_width = input_size[0]
+            new_height = int(new_width * im_ratio)
+        det_scale = float(new_height) / img.shape[0]
+        resized_img = cv2.resize(img, (new_width, new_height))
+        det_img = np.zeros( (input_size[1], input_size[0], 3), dtype=np.uint8 )
+        det_img[:new_height, :new_width, :] = resized_img
+
+        scores_list, bboxes_list, kpss_list = self.forward(det_img, self.det_thresh)
+
+        scores = np.vstack(scores_list)
+        scores_ravel = scores.ravel()
+        order = scores_ravel.argsort()[::-1]
+        bboxes = np.vstack(bboxes_list) / det_scale
+        if self.use_kps:
+            kpss = np.vstack(kpss_list) / det_scale
+        pre_det = np.hstack((bboxes, scores)).astype(np.float32, copy=False)
+        pre_det = pre_det[order, :]
+        keep = self.nms(pre_det)
+        det = pre_det[keep, :]
+        if self.use_kps:
+            kpss = kpss[order,:,:]
+            kpss = kpss[keep,:,:]
+        else:
+            kpss = None
+        if max_num > 0 and det.shape[0] > max_num:
+            area = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
+            img_center = img.shape[0] // 2, img.shape[1] // 2
+            offsets = np.vstack([
+                (det[:, 0] + det[:, 2]) / 2 - img_center[1],
+                (det[:, 1] + det[:, 3]) / 2 - img_center[0]
+            ])
+            offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
+            if metric=='max':
+                values = area
+            else:
+                values = area - offset_dist_squared * 2.0  # some extra weight on the centering
+            bindex = np.argsort(values)[::-1]
+            bindex = bindex[0:max_num]
+            det = det[bindex, :]
+            if kpss is not None:
+                kpss = kpss[bindex, :]
+        return det, kpss
+
+    def nms(self, dets):
+        thresh = self.nms_thresh
+        x1 = dets[:, 0]
+        y1 = dets[:, 1]
+        x2 = dets[:, 2]
+        y2 = dets[:, 3]
+        scores = dets[:, 4]
+
+        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+        order = scores.argsort()[::-1]
+
+        keep = []
+        while order.size > 0:
+            i = order[0]
+            keep.append(i)
+            xx1 = np.maximum(x1[i], x1[order[1:]])
+            yy1 = np.maximum(y1[i], y1[order[1:]])
+            xx2 = np.minimum(x2[i], x2[order[1:]])
+            yy2 = np.minimum(y2[i], y2[order[1:]])
+
+            w = np.maximum(0.0, xx2 - xx1 + 1)
+            h = np.maximum(0.0, yy2 - yy1 + 1)
+            inter = w * h
+            ovr = inter / (areas[i] + areas[order[1:]] - inter)
+
+            inds = np.where(ovr <= thresh)[0]
+            order = order[inds + 1]
+
+        return keep
+
+def get_scrfd(name, download=False, root='~/.insightface/models', **kwargs):
+    if not download:
+        assert os.path.exists(name)
+        return SCRFD(name)
+    else:
+        from .model_store import get_model_file
+        _file = get_model_file("scrfd_%s" % name, root=root)
+        return SCRFD(_file)
+
+
+def scrfd_2p5gkps(**kwargs):
+    return get_scrfd("2p5gkps", download=True, **kwargs)
+
+
+if __name__ == '__main__':
+    import glob
+    detector = SCRFD(model_file='./det.onnx')
+    detector.prepare(-1)
+    img_paths = ['tests/data/t1.jpg']
+    for img_path in img_paths:
+        img = cv2.imread(img_path)
+
+        for _ in range(1):
+            ta = datetime.datetime.now()
+            # note: this vendored detect() takes input_size, not a score threshold
+            bboxes, kpss = detector.detect(img, input_size=(640, 640))
+            tb = datetime.datetime.now()
+            print('all cost:', (tb-ta).total_seconds()*1000)
+        print(img_path, bboxes.shape)
+        if kpss is not None:
+            print(kpss.shape)
+        for i in range(bboxes.shape[0]):
+            bbox = bboxes[i]
+            x1, y1, x2, y2, score = bbox.astype(int)  # np.int was removed in recent NumPy
+            cv2.rectangle(img, (x1,y1)  , (x2,y2) , (255,0,0) , 2)
+            if kpss is not None:
+                kps = kpss[i]
+                for kp in kps:
+                    kp = kp.astype(int)
+                    cv2.circle(img, tuple(kp) , 1, (0,0,255) , 2)
+        filename = img_path.split('/')[-1]
+        print('output:', filename)
+        cv2.imwrite('./outputs/%s'%filename, img)
+
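For orientation, here is a minimal, hedged usage sketch of the vendored SCRFD detector added above. The import path and the ONNX/image paths are assumptions based on the repo layout described in the README; adjust them to your checkout.

```python
# Hedged sketch: paths and the import location of the vendored scrfd module are assumptions.
import cv2

from src.utils.dependencies.insightface.model_zoo.scrfd import SCRFD  # assumed module path

detector = SCRFD(model_file='./pretrained_weights/insightface/models/buffalo_l/det_10g.onnx')
detector.prepare(ctx_id=0, det_thresh=0.5, input_size=(640, 640))  # ctx_id < 0 forces CPU

img = cv2.imread('./example/source_image/WDA_BenCardin1_000.png')  # placeholder image
bboxes, kpss = detector.detect(img, max_num=1, metric='default')   # (n, 5) boxes, (n, 5, 2) keypoints
for (x1, y1, x2, y2, score), kps in zip(bboxes, kpss if kpss is not None else []):
    print(f'score={score:.2f} bbox=({x1:.0f},{y1:.0f},{x2:.0f},{y2:.0f}) kps={kps.shape}')
```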
diff --git a/src/utils/dependencies/insightface/utils/__init__.py b/src/utils/dependencies/insightface/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6960431b1bd6db38890e391c4c94dd2182f2e1fd
--- /dev/null
+++ b/src/utils/dependencies/insightface/utils/__init__.py
@@ -0,0 +1,6 @@
+from __future__ import absolute_import
+
+from .storage import download, ensure_available, download_onnx
+from .filesystem import get_model_dir
+from .filesystem import makedirs, try_import_dali
+from .constant import *
diff --git a/src/utils/dependencies/insightface/utils/constant.py b/src/utils/dependencies/insightface/utils/constant.py
new file mode 100644
index 0000000000000000000000000000000000000000..8860ff077ae7227235591edfc84c0cdc227a6432
--- /dev/null
+++ b/src/utils/dependencies/insightface/utils/constant.py
@@ -0,0 +1,3 @@
+
+DEFAULT_MP_NAME = 'buffalo_l'
+
diff --git a/src/utils/dependencies/insightface/utils/download.py b/src/utils/dependencies/insightface/utils/download.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cda84dede45b81dcd99161d87792b6c409fa279
--- /dev/null
+++ b/src/utils/dependencies/insightface/utils/download.py
@@ -0,0 +1,95 @@
+"""
+This code file mainly comes from https://github.com/dmlc/gluon-cv/blob/master/gluoncv/utils/download.py
+"""
+import os
+import hashlib
+import requests
+from tqdm import tqdm
+
+
+def check_sha1(filename, sha1_hash):
+    """Check whether the sha1 hash of the file content matches the expected hash.
+    Parameters
+    ----------
+    filename : str
+        Path to the file.
+    sha1_hash : str
+        Expected sha1 hash in hexadecimal digits.
+    Returns
+    -------
+    bool
+        Whether the file content matches the expected hash.
+    """
+    sha1 = hashlib.sha1()
+    with open(filename, 'rb') as f:
+        while True:
+            data = f.read(1048576)
+            if not data:
+                break
+            sha1.update(data)
+
+    sha1_file = sha1.hexdigest()
+    l = min(len(sha1_file), len(sha1_hash))
+    return sha1.hexdigest()[0:l] == sha1_hash[0:l]
+
+
+def download_file(url, path=None, overwrite=False, sha1_hash=None):
+    """Download a given URL.
+    Parameters
+    ----------
+    url : str
+        URL to download
+    path : str, optional
+        Destination path to store downloaded file. By default stores to the
+        current directory with same name as in url.
+    overwrite : bool, optional
+        Whether to overwrite destination file if already exists.
+    sha1_hash : str, optional
+        Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
+        but doesn't match.
+    Returns
+    -------
+    str
+        The file path of the downloaded file.
+    """
+    if path is None:
+        fname = url.split('/')[-1]
+    else:
+        path = os.path.expanduser(path)
+        if os.path.isdir(path):
+            fname = os.path.join(path, url.split('/')[-1])
+        else:
+            fname = path
+
+    if overwrite or not os.path.exists(fname) or (
+            sha1_hash and not check_sha1(fname, sha1_hash)):
+        dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
+        if not os.path.exists(dirname):
+            os.makedirs(dirname)
+
+        print('Downloading %s from %s...' % (fname, url))
+        r = requests.get(url, stream=True)
+        if r.status_code != 200:
+            raise RuntimeError("Failed downloading url %s" % url)
+        total_length = r.headers.get('content-length')
+        with open(fname, 'wb') as f:
+            if total_length is None:  # no content length header
+                for chunk in r.iter_content(chunk_size=1024):
+                    if chunk:  # filter out keep-alive new chunks
+                        f.write(chunk)
+            else:
+                total_length = int(total_length)
+                for chunk in tqdm(r.iter_content(chunk_size=1024),
+                                  total=int(total_length / 1024. + 0.5),
+                                  unit='KB',
+                                  unit_scale=False,
+                                  dynamic_ncols=True):
+                    f.write(chunk)
+
+        if sha1_hash and not check_sha1(fname, sha1_hash):
+            raise UserWarning('File {} is downloaded but the content hash does not match. ' \
+                              'The repo may be outdated or download may be incomplete. ' \
+                              'If the "repo_url" is overridden, consider switching to ' \
+                              'the default repo.'.format(fname))
+
+    return fname
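A brief sketch of calling `download_file` directly; the URL and destination below are placeholders, not real artifacts.

```python
# Illustrative only: the URL is a placeholder; pass sha1_hash to verify the download.
from src.utils.dependencies.insightface.utils.download import download_file

saved_path = download_file(
    'https://example.com/models/demo.onnx',      # placeholder URL
    path='~/.insightface/models/demo.onnx',      # expanded with os.path.expanduser internally
    overwrite=False,
    sha1_hash=None,
)
print('saved to', saved_path)
```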
diff --git a/src/utils/dependencies/insightface/utils/face_align.py b/src/utils/dependencies/insightface/utils/face_align.py
new file mode 100644
index 0000000000000000000000000000000000000000..226628b39cf743947df230feffbb97bf5c585e1d
--- /dev/null
+++ b/src/utils/dependencies/insightface/utils/face_align.py
@@ -0,0 +1,103 @@
+import cv2
+import numpy as np
+from skimage import transform as trans
+
+
+arcface_dst = np.array(
+    [[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366],
+     [41.5493, 92.3655], [70.7299, 92.2041]],
+    dtype=np.float32)
+
+def estimate_norm(lmk, image_size=112,mode='arcface'):
+    assert lmk.shape == (5, 2)
+    assert image_size%112==0 or image_size%128==0
+    if image_size%112==0:
+        ratio = float(image_size)/112.0
+        diff_x = 0
+    else:
+        ratio = float(image_size)/128.0
+        diff_x = 8.0*ratio
+    dst = arcface_dst * ratio
+    dst[:,0] += diff_x
+    tform = trans.SimilarityTransform()
+    tform.estimate(lmk, dst)
+    M = tform.params[0:2, :]
+    return M
+
+def norm_crop(img, landmark, image_size=112, mode='arcface'):
+    M = estimate_norm(landmark, image_size, mode)
+    warped = cv2.warpAffine(img, M, (image_size, image_size), borderValue=0.0)
+    return warped
+
+def norm_crop2(img, landmark, image_size=112, mode='arcface'):
+    M = estimate_norm(landmark, image_size, mode)
+    warped = cv2.warpAffine(img, M, (image_size, image_size), borderValue=0.0)
+    return warped, M
+
+def square_crop(im, S):
+    if im.shape[0] > im.shape[1]:
+        height = S
+        width = int(float(im.shape[1]) / im.shape[0] * S)
+        scale = float(S) / im.shape[0]
+    else:
+        width = S
+        height = int(float(im.shape[0]) / im.shape[1] * S)
+        scale = float(S) / im.shape[1]
+    resized_im = cv2.resize(im, (width, height))
+    det_im = np.zeros((S, S, 3), dtype=np.uint8)
+    det_im[:resized_im.shape[0], :resized_im.shape[1], :] = resized_im
+    return det_im, scale
+
+
+def transform(data, center, output_size, scale, rotation):
+    scale_ratio = scale
+    rot = float(rotation) * np.pi / 180.0
+    #translation = (output_size/2-center[0]*scale_ratio, output_size/2-center[1]*scale_ratio)
+    t1 = trans.SimilarityTransform(scale=scale_ratio)
+    cx = center[0] * scale_ratio
+    cy = center[1] * scale_ratio
+    t2 = trans.SimilarityTransform(translation=(-1 * cx, -1 * cy))
+    t3 = trans.SimilarityTransform(rotation=rot)
+    t4 = trans.SimilarityTransform(translation=(output_size / 2,
+                                                output_size / 2))
+    t = t1 + t2 + t3 + t4
+    M = t.params[0:2]
+    cropped = cv2.warpAffine(data,
+                             M, (output_size, output_size),
+                             borderValue=0.0)
+    return cropped, M
+
+
+def trans_points2d(pts, M):
+    new_pts = np.zeros(shape=pts.shape, dtype=np.float32)
+    for i in range(pts.shape[0]):
+        pt = pts[i]
+        new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32)
+        new_pt = np.dot(M, new_pt)
+        #print('new_pt', new_pt.shape, new_pt)
+        new_pts[i] = new_pt[0:2]
+
+    return new_pts
+
+
+def trans_points3d(pts, M):
+    scale = np.sqrt(M[0][0] * M[0][0] + M[0][1] * M[0][1])
+    #print(scale)
+    new_pts = np.zeros(shape=pts.shape, dtype=np.float32)
+    for i in range(pts.shape[0]):
+        pt = pts[i]
+        new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32)
+        new_pt = np.dot(M, new_pt)
+        #print('new_pt', new_pt.shape, new_pt)
+        new_pts[i][0:2] = new_pt[0:2]
+        new_pts[i][2] = pts[i][2] * scale
+
+    return new_pts
+
+
+def trans_points(pts, M):
+    if pts.shape[1] == 2:
+        return trans_points2d(pts, M)
+    else:
+        return trans_points3d(pts, M)
+
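A small sketch of `norm_crop`, which warps a face to the 112x112 ArcFace template from five detected keypoints; the image path and landmark values are illustrative.

```python
import cv2
import numpy as np

from src.utils.dependencies.insightface.utils.face_align import norm_crop

img = cv2.imread('face.jpg')  # placeholder image
# Illustrative 5-point landmarks (eyes, nose, mouth corners) as returned by SCRFD.
lmk = np.array([[188., 227.], [272., 226.], [230., 278.],
                [198., 322.], [264., 321.]], dtype=np.float32)
aligned = norm_crop(img, lmk, image_size=112)  # similarity-warped 112x112 crop
cv2.imwrite('aligned.jpg', aligned)
```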
diff --git a/src/utils/dependencies/insightface/utils/filesystem.py b/src/utils/dependencies/insightface/utils/filesystem.py
new file mode 100644
index 0000000000000000000000000000000000000000..01e3851975bdcbbf7f5eeb7e68e70a36dc040535
--- /dev/null
+++ b/src/utils/dependencies/insightface/utils/filesystem.py
@@ -0,0 +1,157 @@
+"""
+This code file mainly comes from https://github.com/dmlc/gluon-cv/blob/master/gluoncv/utils/filesystem.py
+"""
+import os
+import os.path as osp
+import errno
+
+
+def get_model_dir(name, root='~/.insightface'):
+    root = os.path.expanduser(root)
+    model_dir = osp.join(root, 'models', name)
+    return model_dir
+
+def makedirs(path):
+    """Create directory recursively if not exists.
+    Similar to `mkdir -p`, you can skip checking existence before this function.
+
+    Parameters
+    ----------
+    path : str
+        Path of the desired dir
+    """
+    try:
+        os.makedirs(path)
+    except OSError as exc:
+        if exc.errno != errno.EEXIST:
+            raise
+
+
+def try_import(package, message=None):
+    """Try to import the specified package, with custom error message support.
+
+    Parameters
+    ----------
+    package : str
+        The name of the targeting package.
+    message : str, default is None
+        If not None, this function will raise customized error message when import error is found.
+
+
+    Returns
+    -------
+    module if found, raise ImportError otherwise
+
+    """
+    try:
+        return __import__(package)
+    except ImportError as e:
+        if not message:
+            raise e
+        raise ImportError(message)
+
+
+def try_import_cv2():
+    """Try import cv2 at runtime.
+
+    Returns
+    -------
+    cv2 module if found. Raise ImportError otherwise
+
+    """
+    msg = "cv2 is required, you can install by package manager, e.g. 'apt-get', \
+        or `pip install opencv-python --user` (note that this is unofficial PYPI package)."
+
+    return try_import('cv2', msg)
+
+
+def try_import_mmcv():
+    """Try import mmcv at runtime.
+
+    Returns
+    -------
+    mmcv module if found. Raise ImportError otherwise
+
+    """
+    msg = "mmcv is required, you can install by first `pip install Cython --user` \
+        and then `pip install mmcv --user` (note that this is unofficial PYPI package)."
+
+    return try_import('mmcv', msg)
+
+
+def try_import_rarfile():
+    """Try import rarfile at runtime.
+
+    Returns
+    -------
+    rarfile module if found. Raise ImportError otherwise
+
+    """
+    msg = "rarfile is required, you can install by first `sudo apt-get install unrar` \
+        and then `pip install rarfile --user` (note that this is unofficial PYPI package)."
+
+    return try_import('rarfile', msg)
+
+
+def import_try_install(package, extern_url=None):
+    """Try to import the specified package.
+    If the package is not installed, try to install it with pip and import it on success.
+
+    Parameters
+    ----------
+    package : str
+        The name of the package trying to import.
+    extern_url : str or None, optional
+        The external url if package is not hosted on PyPI.
+        For example, you can install a package using:
+         "pip install git+http://github.com/user/repo/tarball/master/egginfo=xxx".
+        In this case, you can pass the url to the extern_url.
+
+    Returns
+    -------
+    module
+        The imported python module.
+
+    """
+    try:
+        return __import__(package)
+    except ImportError:
+        try:
+            from pip import main as pipmain
+        except ImportError:
+            from pip._internal import main as pipmain
+
+        # trying to install package
+        url = package if extern_url is None else extern_url
+        pipmain(['install', '--user',
+                 url])  # will raise SystemExit Error if fails
+
+        # trying to load again
+        try:
+            return __import__(package)
+        except ImportError:
+            import sys
+            import site
+            user_site = site.getusersitepackages()
+            if user_site not in sys.path:
+                sys.path.append(user_site)
+            return __import__(package)
+    return __import__(package)
+
+
+def try_import_dali():
+    """Try import NVIDIA DALI at runtime.
+    """
+    try:
+        dali = __import__('nvidia.dali', fromlist=['pipeline', 'ops', 'types'])
+        dali.Pipeline = dali.pipeline.Pipeline
+    except ImportError:
+
+        class dali:
+            class Pipeline:
+                def __init__(self):
+                    raise NotImplementedError(
+                        "DALI not found, please check if you installed it correctly."
+                    )
+
+    return dali
diff --git a/src/utils/dependencies/insightface/utils/storage.py b/src/utils/dependencies/insightface/utils/storage.py
new file mode 100644
index 0000000000000000000000000000000000000000..5bf37e2d17b28dee2a8839484778815f87fc4a9c
--- /dev/null
+++ b/src/utils/dependencies/insightface/utils/storage.py
@@ -0,0 +1,52 @@
+
+import os
+import os.path as osp
+import zipfile
+from .download import download_file
+
+BASE_REPO_URL = 'https://github.com/deepinsight/insightface/releases/download/v0.7'
+
+def download(sub_dir, name, force=False, root='~/.insightface'):
+    _root = os.path.expanduser(root)
+    dir_path = os.path.join(_root, sub_dir, name)
+    if osp.exists(dir_path) and not force:
+        return dir_path
+    print('download_path:', dir_path)
+    zip_file_path = os.path.join(_root, sub_dir, name + '.zip')
+    model_url = "%s/%s.zip"%(BASE_REPO_URL, name)
+    download_file(model_url,
+             path=zip_file_path,
+             overwrite=True)
+    if not os.path.exists(dir_path):
+        os.makedirs(dir_path)
+    with zipfile.ZipFile(zip_file_path) as zf:
+        zf.extractall(dir_path)
+    #os.remove(zip_file_path)
+    return dir_path
+
+def ensure_available(sub_dir, name, root='~/.insightface'):
+    return download(sub_dir, name, force=False, root=root)
+
+def download_onnx(sub_dir, model_file, force=False, root='~/.insightface', download_zip=False):
+    _root = os.path.expanduser(root)
+    model_root = osp.join(_root, sub_dir)
+    new_model_file = osp.join(model_root, model_file)
+    if osp.exists(new_model_file) and not force:
+        return new_model_file
+    if not osp.exists(model_root):
+        os.makedirs(model_root)
+    print('download_path:', new_model_file)
+    if not download_zip:
+        model_url = "%s/%s"%(BASE_REPO_URL, model_file)
+        download_file(model_url,
+                 path=new_model_file,
+                 overwrite=True)
+    else:
+        model_url = "%s/%s.zip"%(BASE_REPO_URL, model_file)
+        zip_file_path = new_model_file+".zip"
+        download_file(model_url,
+                 path=zip_file_path,
+                 overwrite=True)
+        with zipfile.ZipFile(zip_file_path) as zf:
+            zf.extractall(model_root)
+        return new_model_file
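For reference, a one-call sketch of how `ensure_available` resolves a model pack; it downloads and unzips `buffalo_l` from the release URL above only if the directory is missing.

```python
# Requires network access the first time; afterwards it just returns the cached directory.
from src.utils.dependencies.insightface.utils.storage import ensure_available

model_dir = ensure_available('models', 'buffalo_l', root='~/.insightface')
print('model pack at:', model_dir)
```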
diff --git a/src/utils/dependencies/insightface/utils/transform.py b/src/utils/dependencies/insightface/utils/transform.py
new file mode 100644
index 0000000000000000000000000000000000000000..06531d257b694211a0b9a09c9d741b9b2ff53bfe
--- /dev/null
+++ b/src/utils/dependencies/insightface/utils/transform.py
@@ -0,0 +1,116 @@
+import cv2
+import math
+import numpy as np
+from skimage import transform as trans
+
+
+def transform(data, center, output_size, scale, rotation):
+    scale_ratio = scale
+    rot = float(rotation) * np.pi / 180.0
+    #translation = (output_size/2-center[0]*scale_ratio, output_size/2-center[1]*scale_ratio)
+    t1 = trans.SimilarityTransform(scale=scale_ratio)
+    cx = center[0] * scale_ratio
+    cy = center[1] * scale_ratio
+    t2 = trans.SimilarityTransform(translation=(-1 * cx, -1 * cy))
+    t3 = trans.SimilarityTransform(rotation=rot)
+    t4 = trans.SimilarityTransform(translation=(output_size / 2,
+                                                output_size / 2))
+    t = t1 + t2 + t3 + t4
+    M = t.params[0:2]
+    cropped = cv2.warpAffine(data,
+                             M, (output_size, output_size),
+                             borderValue=0.0)
+    return cropped, M
+
+
+def trans_points2d(pts, M):
+    new_pts = np.zeros(shape=pts.shape, dtype=np.float32)
+    for i in range(pts.shape[0]):
+        pt = pts[i]
+        new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32)
+        new_pt = np.dot(M, new_pt)
+        #print('new_pt', new_pt.shape, new_pt)
+        new_pts[i] = new_pt[0:2]
+
+    return new_pts
+
+
+def trans_points3d(pts, M):
+    scale = np.sqrt(M[0][0] * M[0][0] + M[0][1] * M[0][1])
+    #print(scale)
+    new_pts = np.zeros(shape=pts.shape, dtype=np.float32)
+    for i in range(pts.shape[0]):
+        pt = pts[i]
+        new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32)
+        new_pt = np.dot(M, new_pt)
+        #print('new_pt', new_pt.shape, new_pt)
+        new_pts[i][0:2] = new_pt[0:2]
+        new_pts[i][2] = pts[i][2] * scale
+
+    return new_pts
+
+
+def trans_points(pts, M):
+    if pts.shape[1] == 2:
+        return trans_points2d(pts, M)
+    else:
+        return trans_points3d(pts, M)
+
+def estimate_affine_matrix_3d23d(X, Y):
+    ''' Estimate an affine transform between two 3D point sets via least squares.
+    Args:
+        X: [n, 3]. 3d points (fixed)
+        Y: [n, 3]. corresponding 3d points (moving). Y = PX
+    Returns:
+        P_Affine: (3, 4). Affine camera matrix (the homogeneous form appends the row [0, 0, 0, 1]).
+    '''
+    X_homo = np.hstack((X, np.ones([X.shape[0], 1])))  # n x 4
+    P = np.linalg.lstsq(X_homo, Y, rcond=None)[0].T  # Affine matrix, 3 x 4
+    return P
+
+def P2sRt(P):
+    ''' Decompose the affine camera matrix P into scale, rotation and translation.
+    Args: 
+        P: (3, 4). Affine Camera Matrix.
+    Returns:
+        s: scale factor.
+        R: (3, 3). rotation matrix.
+        t: (3,). translation. 
+    '''
+    t = P[:, 3]
+    R1 = P[0:1, :3]
+    R2 = P[1:2, :3]
+    s = (np.linalg.norm(R1) + np.linalg.norm(R2))/2.0
+    r1 = R1/np.linalg.norm(R1)
+    r2 = R2/np.linalg.norm(R2)
+    r3 = np.cross(r1, r2)
+
+    R = np.concatenate((r1, r2, r3), 0)
+    return s, R, t
+
+def matrix2angle(R):
+    ''' get three Euler angles from Rotation Matrix
+    Args:
+        R: (3,3). rotation matrix
+    Returns:
+        x: pitch
+        y: yaw
+        z: roll
+    '''
+    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
+
+    singular = sy < 1e-6
+
+    if not singular:
+        x = math.atan2(R[2, 1], R[2, 2])
+        y = math.atan2(-R[2, 0], sy)
+        z = math.atan2(R[1, 0], R[0, 0])
+    else:
+        x = math.atan2(-R[1, 2], R[1, 1])
+        y = math.atan2(-R[2, 0], sy)
+        z = 0
+
+    # rx, ry, rz = np.rad2deg(x), np.rad2deg(y), np.rad2deg(z)
+    rx, ry, rz = x*180/np.pi, y*180/np.pi, z*180/np.pi
+    return rx, ry, rz
+
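To make the pose decomposition concrete, a small round-trip sketch: build a known scaled rotation plus translation, pack it into a (3, 4) affine camera matrix, and recover scale, translation and Euler angles with `P2sRt` and `matrix2angle`. The rotation construction is illustrative and not part of the pipeline.

```python
import numpy as np

from src.utils.dependencies.insightface.utils.transform import P2sRt, matrix2angle


def _rot(axis, deg):
    # Helper for this example only: elementary rotation matrix about one axis.
    c, s = np.cos(np.deg2rad(deg)), np.sin(np.deg2rad(deg))
    if axis == 'x':
        return np.array([[1., 0., 0.], [0., c, -s], [0., s, c]])
    if axis == 'y':
        return np.array([[c, 0., s], [0., 1., 0.], [-s, 0., c]])
    return np.array([[c, -s, 0.], [s, c, 0.], [0., 0., 1.]])


pitch, yaw, roll = 10.0, -20.0, 5.0
R_true = _rot('z', roll) @ _rot('y', yaw) @ _rot('x', pitch)   # R = Rz * Ry * Rx
s_true, t_true = 1.5, np.array([3.0, -2.0, 0.5])
P = np.hstack([s_true * R_true, t_true[:, None]])              # (3, 4) affine camera matrix

s, R, t = P2sRt(P)
rx, ry, rz = matrix2angle(R)
print(round(s, 3), np.round(t, 3))                 # ~1.5 [ 3. -2.  0.5]
print(round(rx, 1), round(ry, 1), round(rz, 1))    # ~10.0 -20.0 5.0
```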
diff --git a/src/utils/face_analysis_diy.py b/src/utils/face_analysis_diy.py
new file mode 100644
index 0000000000000000000000000000000000000000..f13a659134216958da3c7273aabf3b0f96fb320d
--- /dev/null
+++ b/src/utils/face_analysis_diy.py
@@ -0,0 +1,79 @@
+# coding: utf-8
+
+"""
+face detection and alignment using InsightFace
+"""
+
+import numpy as np
+from .rprint import rlog as log
+from .dependencies.insightface.app import FaceAnalysis
+from .dependencies.insightface.app.common import Face
+from .timer import Timer
+
+
+def sort_by_direction(faces, direction: str = 'large-small', face_center=None):
+    if len(faces) <= 0:
+        return faces
+
+    if direction == 'left-right':
+        return sorted(faces, key=lambda face: face['bbox'][0])
+    if direction == 'right-left':
+        return sorted(faces, key=lambda face: face['bbox'][0], reverse=True)
+    if direction == 'top-bottom':
+        return sorted(faces, key=lambda face: face['bbox'][1])
+    if direction == 'bottom-top':
+        return sorted(faces, key=lambda face: face['bbox'][1], reverse=True)
+    if direction == 'small-large':
+        return sorted(faces, key=lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1]))
+    if direction == 'large-small':
+        return sorted(faces, key=lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1]), reverse=True)
+    if direction == 'distance-from-retarget-face':
+        return sorted(faces, key=lambda face: (((face['bbox'][2]+face['bbox'][0])/2-face_center[0])**2+((face['bbox'][3]+face['bbox'][1])/2-face_center[1])**2)**0.5)
+    return faces
+
+
+class FaceAnalysisDIY(FaceAnalysis):
+    def __init__(self, name='buffalo_l', root='~/.insightface', allowed_modules=None, **kwargs):
+        super().__init__(name=name, root=root, allowed_modules=allowed_modules, **kwargs)
+
+        self.timer = Timer()
+
+    def get(self, img_bgr, **kwargs):
+        max_num = kwargs.get('max_face_num', 0)  # the number of the detected faces, 0 means no limit
+        flag_do_landmark_2d_106 = kwargs.get('flag_do_landmark_2d_106', True)  # whether to do 106-point detection
+        direction = kwargs.get('direction', 'large-small')  # sorting direction
+        face_center = None
+
+        bboxes, kpss = self.det_model.detect(img_bgr, max_num=max_num, metric='default')
+        if bboxes.shape[0] == 0:
+            return []
+        ret = []
+        for i in range(bboxes.shape[0]):
+            bbox = bboxes[i, 0:4]
+            det_score = bboxes[i, 4]
+            kps = None
+            if kpss is not None:
+                kps = kpss[i]
+            face = Face(bbox=bbox, kps=kps, det_score=det_score)
+            for taskname, model in self.models.items():
+                if taskname == 'detection':
+                    continue
+
+                if (not flag_do_landmark_2d_106) and taskname == 'landmark_2d_106':
+                    continue
+
+                # print(f'taskname: {taskname}')
+                model.get(img_bgr, face)
+            ret.append(face)
+
+        ret = sort_by_direction(ret, direction, face_center)
+        return ret
+
+    def warmup(self):
+        self.timer.tic()
+
+        img_bgr = np.zeros((512, 512, 3), dtype=np.uint8)
+        self.get(img_bgr)
+
+        elapse = self.timer.toc()
+        log(f'FaceAnalysisDIY warmup time: {elapse:.3f}s')
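A hedged sketch of constructing and querying `FaceAnalysisDIY`; the root directory mirrors the weight layout from the README, and the providers list follows the usual insightface convention.

```python
import cv2

from src.utils.face_analysis_diy import FaceAnalysisDIY

app = FaceAnalysisDIY(
    name='buffalo_l',
    root='./pretrained_weights/insightface',   # assumed to follow the README layout
    providers=['CUDAExecutionProvider', 'CPUExecutionProvider'],
)
app.prepare(ctx_id=0, det_size=(512, 512))
app.warmup()

img_bgr = cv2.imread('./example/source_image/WDA_BenCardin1_000.png')  # placeholder
faces = app.get(img_bgr, flag_do_landmark_2d_106=True, direction='large-small')
print('detected faces:', len(faces))
```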
diff --git a/src/utils/helper.py b/src/utils/helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e2af94e137b6447c88ec4df3c7c2c1b1bd94b8a
--- /dev/null
+++ b/src/utils/helper.py
@@ -0,0 +1,145 @@
+# coding: utf-8
+
+"""
+utility functions and classes to handle feature extraction and model loading
+"""
+
+import os
+import os.path as osp
+import torch
+from collections import OrderedDict
+
+from ..modules.spade_generator import SPADEDecoder
+from ..modules.warping_network import WarpingNetwork
+from ..modules.motion_extractor import MotionExtractor
+from ..modules.appearance_feature_extractor import AppearanceFeatureExtractor
+from ..modules.stitching_retargeting_network import StitchingRetargetingNetwork
+
+
+def suffix(filename):
+    """a.jpg -> jpg"""
+    pos = filename.rfind(".")
+    if pos == -1:
+        return ""
+    return filename[pos + 1:]
+
+
+def prefix(filename):
+    """a.jpg -> a"""
+    pos = filename.rfind(".")
+    if pos == -1:
+        return filename
+    return filename[:pos]
+
+
+def basename(filename):
+    """a/b/c.jpg -> c"""
+    return prefix(osp.basename(filename))
+
+
+def remove_suffix(filepath):
+    """a/b/c.jpg -> a/b/c"""
+    return osp.join(osp.dirname(filepath), basename(filepath))
+
+
+def is_video(file_path):
+    if file_path.lower().endswith((".mp4", ".mov", ".avi", ".webm")) or osp.isdir(file_path):
+        return True
+    return False
+
+
+def is_template(file_path):
+    if file_path.endswith(".pkl"):
+        return True
+    return False
+
+
+def mkdir(d, log=False):
+    # return the input `d` so calls can be chained on one line
+    if not osp.exists(d):
+        os.makedirs(d, exist_ok=True)
+        if log:
+            print(f"Make dir: {d}")
+    return d
+
+
+def squeeze_tensor_to_numpy(tensor):
+    out = tensor.data.squeeze(0).cpu().numpy()
+    return out
+
+
+def dct2device(dct: dict, device):
+    for key in dct:
+        dct[key] = torch.tensor(dct[key]).to(device)
+    return dct
+
+
+def concat_feat(kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
+    """
+    kp_source: (bs, k, 3)
+    kp_driving: (bs, k, 3)
+    Return: (bs, 2k*3)
+    """
+    bs_src = kp_source.shape[0]
+    bs_dri = kp_driving.shape[0]
+    assert bs_src == bs_dri, 'batch size must be equal'
+
+    feat = torch.cat([kp_source.view(bs_src, -1), kp_driving.view(bs_dri, -1)], dim=1)
+    return feat
+
+
+def remove_ddp_dumplicate_key(state_dict):
+    state_dict_new = OrderedDict()
+    for key in state_dict.keys():
+        state_dict_new[key.replace('module.', '')] = state_dict[key]
+    return state_dict_new
+
+
+def load_model(ckpt_path, model_config, device, model_type):
+    model_params = model_config['model_params'][f'{model_type}_params']
+
+    if model_type == 'appearance_feature_extractor':
+        model = AppearanceFeatureExtractor(**model_params).to(device)
+    elif model_type == 'motion_extractor':
+        model = MotionExtractor(**model_params).to(device)
+    elif model_type == 'warping_module':
+        model = WarpingNetwork(**model_params).to(device)
+    elif model_type == 'spade_generator':
+        model = SPADEDecoder(**model_params).to(device)
+    elif model_type == 'stitching_retargeting_module':
+        # Special handling for stitching and retargeting module
+        config = model_config['model_params']['stitching_retargeting_module_params']
+        checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
+
+        stitcher = StitchingRetargetingNetwork(**config.get('stitching'))
+        stitcher.load_state_dict(remove_ddp_dumplicate_key(checkpoint['retarget_shoulder']))
+        stitcher = stitcher.to(device)
+        stitcher.eval()
+
+        retargetor_lip = StitchingRetargetingNetwork(**config.get('lip'))
+        retargetor_lip.load_state_dict(remove_ddp_dumplicate_key(checkpoint['retarget_mouth']))
+        retargetor_lip = retargetor_lip.to(device)
+        retargetor_lip.eval()
+
+        retargetor_eye = StitchingRetargetingNetwork(**config.get('eye'))
+        retargetor_eye.load_state_dict(remove_ddp_dumplicate_key(checkpoint['retarget_eye']))
+        retargetor_eye = retargetor_eye.to(device)
+        retargetor_eye.eval()
+
+        return {
+            'stitching': stitcher,
+            'lip': retargetor_lip,
+            'eye': retargetor_eye
+        }
+    else:
+        raise ValueError(f"Unknown model type: {model_type}")
+
+    model.load_state_dict(torch.load(ckpt_path, map_location=lambda storage, loc: storage))
+    model.eval()
+    return model
+
+
+def load_description(fp):
+    with open(fp, 'r', encoding='utf-8') as f:
+        content = f.read()
+    return content
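A minimal sketch of `load_model`; the YAML path and its `model_params` keys are assumptions about the LivePortrait-style config shipped with the repo, so treat them as placeholders.

```python
# Hedged example: the config path and keys are assumptions, not confirmed by this diff.
import torch
import yaml

from src.utils.helper import load_model

device = 'cuda' if torch.cuda.is_available() else 'cpu'
with open('./src/config/models.yaml') as f:   # assumed config location
    model_config = yaml.safe_load(f)

motion_extractor = load_model(
    './pretrained_weights/liveportrait/base_models/motion_extractor.pth',
    model_config, device, 'motion_extractor',
)
print(type(motion_extractor).__name__)        # returned in eval mode
```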
diff --git a/src/utils/hparams.py b/src/utils/hparams.py
new file mode 100644
index 0000000000000000000000000000000000000000..743c5c7d5a5a9e686f1ccd6fb3c2fb5cb382d62b
--- /dev/null
+++ b/src/utils/hparams.py
@@ -0,0 +1,160 @@
+from glob import glob
+import os
+
+class HParams:
+	def __init__(self, **kwargs):
+		self.data = {}
+
+		for key, value in kwargs.items():
+			self.data[key] = value
+
+	def __getattr__(self, key):
+		if key not in self.data:
+			raise AttributeError("'HParams' object has no attribute %s" % key)
+		return self.data[key]
+
+	def set_hparam(self, key, value):
+		self.data[key] = value
+
+
+# Default hyperparameters
+hparams = HParams(
+	num_mels=80,  # Number of mel-spectrogram channels and local conditioning dimensionality
+	#  network
+	rescale=True,  # Whether to rescale audio prior to preprocessing
+	rescaling_max=0.9,  # Rescaling value
+	
+	# Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction
+	# It's preferred to set True to use with https://github.com/r9y9/wavenet_vocoder
+	# Does not work if n_fft is not a multiple of hop_size!!
+	use_lws=False,
+	
+	n_fft=800,  # Extra window size is filled with 0 paddings to match this parameter
+	hop_size=200,  # For 16000Hz, 200 = 12.5 ms (0.0125 * sample_rate)
+	win_size=800,  # For 16000Hz, 800 = 50 ms (If None, win_size = n_fft) (0.05 * sample_rate)
+	sample_rate=16000,  # 16000Hz (corresponding to librispeech) (sox --i )
+	
+	frame_shift_ms=None,  # Can replace hop_size parameter. (Recommended: 12.5)
+	
+	# Mel and Linear spectrograms normalization/scaling and clipping
+	signal_normalization=True,
+	# Whether to normalize mel spectrograms to some predefined range (following below parameters)
+	allow_clipping_in_normalization=True,  # Only relevant if mel_normalization = True
+	symmetric_mels=True,
+	# Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2, 
+	# faster and cleaner convergence)
+	max_abs_value=4.,
+	# max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not 
+	# be too big to avoid gradient explosion, 
+	# not too small for fast convergence)
+	# Contribution by @begeekmyfriend
+	# Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude 
+	# levels. Also allows for better G&L phase reconstruction)
+	preemphasize=True,  # whether to apply filter
+	preemphasis=0.97,  # filter coefficient.
+	
+	# Limits
+	min_level_db=-100,
+	ref_level_db=20,
+	fmin=55,
+	# Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To 
+	# test depending on dataset. Pitch info: male~[65, 260], female~[100, 525])
+	fmax=7600,  # To be increased/reduced depending on data.
+
+	###################### Our training parameters #################################
+	img_size=96,
+	fps=25,
+	
+	batch_size=16,
+	initial_learning_rate=1e-4,
+	nepochs=300000,  ### ctrl + c, stop whenever eval loss is consistently greater than train loss for ~10 epochs
+	num_workers=20,
+	checkpoint_interval=3000,
+	eval_interval=3000,
+	writer_interval=300,
+    save_optimizer_state=True,
+
+    syncnet_wt=0.0, # is initially zero, will be set automatically to 0.03 later. Leads to faster convergence. 
+	syncnet_batch_size=64,
+	syncnet_lr=1e-4,
+	syncnet_eval_interval=1000,
+	syncnet_checkpoint_interval=10000,
+
+	disc_wt=0.07,
+	disc_initial_learning_rate=1e-4,
+)
+
+
+
+# Default hyperparameters
+hparamsdebug = HParams(
+	num_mels=80,  # Number of mel-spectrogram channels and local conditioning dimensionality
+	#  network
+	rescale=True,  # Whether to rescale audio prior to preprocessing
+	rescaling_max=0.9,  # Rescaling value
+	
+	# Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction
+	# It's preferred to set True to use with https://github.com/r9y9/wavenet_vocoder
+	# Does not work if n_fft is not a multiple of hop_size!!
+	use_lws=False,
+	
+	n_fft=800,  # Extra window size is filled with 0 paddings to match this parameter
+	hop_size=200,  # For 16000Hz, 200 = 12.5 ms (0.0125 * sample_rate)
+	win_size=800,  # For 16000Hz, 800 = 50 ms (If None, win_size = n_fft) (0.05 * sample_rate)
+	sample_rate=16000,  # 16000Hz (corresponding to librispeech) (sox --i )
+	
+	frame_shift_ms=None,  # Can replace hop_size parameter. (Recommended: 12.5)
+	
+	# Mel and Linear spectrograms normalization/scaling and clipping
+	signal_normalization=True,
+	# Whether to normalize mel spectrograms to some predefined range (following below parameters)
+	allow_clipping_in_normalization=True,  # Only relevant if mel_normalization = True
+	symmetric_mels=True,
+	# Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2, 
+	# faster and cleaner convergence)
+	max_abs_value=4.,
+	# max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not 
+	# be too big to avoid gradient explosion, 
+	# not too small for fast convergence)
+	# Contribution by @begeekmyfriend
+	# Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude 
+	# levels. Also allows for better G&L phase reconstruction)
+	preemphasize=True,  # whether to apply filter
+	preemphasis=0.97,  # filter coefficient.
+	
+	# Limits
+	min_level_db=-100,
+	ref_level_db=20,
+	fmin=55,
+	# Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To 
+	# test depending on dataset. Pitch info: male~[65, 260], female~[100, 525])
+	fmax=7600,  # To be increased/reduced depending on data.
+
+	###################### Our training parameters #################################
+	img_size=96,
+	fps=25,
+	
+	batch_size=2,
+	initial_learning_rate=1e-3,
+	nepochs=100000,  ### ctrl + c, stop whenever eval loss is consistently greater than train loss for ~10 epochs
+	num_workers=0,
+	checkpoint_interval=10000,
+	eval_interval=10,
+	writer_interval=5,
+    save_optimizer_state=True,
+
+    syncnet_wt=0.0, # is initially zero, will be set automatically to 0.03 later. Leads to faster convergence. 
+	syncnet_batch_size=64,
+	syncnet_lr=1e-4,
+	syncnet_eval_interval=10000,
+	syncnet_checkpoint_interval=10000,
+
+	disc_wt=0.07,
+	disc_initial_learning_rate=1e-4,
+)
+
+
+def hparams_debug_string():
+	values = hparams.data  # HParams stores everything in .data; it has no values() method
+	hp = ["  %s: %s" % (name, values[name]) for name in sorted(values) if name != "sentences"]
+	return "Hyperparameters:\n" + "\n".join(hp)
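A short sketch of how the `hparams` container above is consumed: attributes resolve through `__getattr__`, and `set_hparam` overrides a default in place.

```python
from src.utils.hparams import hparams, hparams_debug_string

print(hparams.sample_rate, hparams.num_mels, hparams.fps)  # 16000 80 25
hparams.set_hparam('batch_size', 32)                       # override a default in place
print(hparams_debug_string())
```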
diff --git a/src/utils/io.py b/src/utils/io.py
new file mode 100644
index 0000000000000000000000000000000000000000..28c2d99f09421fc9eb1f6475419cb1c6e6dcd028
--- /dev/null
+++ b/src/utils/io.py
@@ -0,0 +1,125 @@
+# coding: utf-8
+
+import os
+from glob import glob
+import os.path as osp
+import imageio
+import numpy as np
+import pickle
+import cv2; cv2.setNumThreads(0); cv2.ocl.setUseOpenCL(False)
+
+from .helper import mkdir, suffix
+
+
+def load_image_rgb(image_path: str):
+    if not osp.exists(image_path):
+        raise FileNotFoundError(f"Image not found: {image_path}")
+    img = cv2.imread(image_path, cv2.IMREAD_COLOR)
+    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+
+def load_driving_info(driving_info):
+    driving_video_ori = []
+
+    def load_images_from_directory(directory):
+        image_paths = sorted(glob(osp.join(directory, '*.png')) + glob(osp.join(directory, '*.jpg')))
+        return [load_image_rgb(im_path) for im_path in image_paths]
+
+    def load_images_from_video(file_path):
+        reader = imageio.get_reader(file_path, "ffmpeg")
+        return [image for _, image in enumerate(reader)]
+
+    if osp.isdir(driving_info):
+        driving_video_ori = load_images_from_directory(driving_info)
+    elif osp.isfile(driving_info):
+        driving_video_ori = load_images_from_video(driving_info)
+
+    return driving_video_ori
+
+
+def contiguous(obj):
+    if not obj.flags.c_contiguous:
+        obj = obj.copy(order="C")
+    return obj
+
+
+def resize_to_limit(img: np.ndarray, max_dim=1920, division=2):
+    """
+    Adjust the size of the image so that the maximum dimension does not exceed max_dim
+    and both width and height are multiples of `division`.
+    :param img: the image to be processed.
+    :param max_dim: the maximum dimension constraint.
+    :param division: the value the width and height must be divisible by.
+    :return: the adjusted image.
+    """
+    h, w = img.shape[:2]
+
+    # adjust the size of the image according to the maximum dimension
+    if max_dim > 0 and max(h, w) > max_dim:
+        if h > w:
+            new_h = max_dim
+            new_w = int(w * (max_dim / h))
+        else:
+            new_w = max_dim
+            new_h = int(h * (max_dim / w))
+        img = cv2.resize(img, (new_w, new_h))
+
+    # ensure that the image dimensions are multiples of `division`
+    division = max(division, 1)
+    new_h = img.shape[0] - (img.shape[0] % division)
+    new_w = img.shape[1] - (img.shape[1] % division)
+
+    if new_h == 0 or new_w == 0:
+        # when the width or height is smaller than `division`, leave the image unchanged
+        return img
+
+    if new_h != img.shape[0] or new_w != img.shape[1]:
+        img = img[:new_h, :new_w]
+
+    return img
+
+
+def load_img_online(obj, mode="bgr", **kwargs):
+    max_dim = kwargs.get("max_dim", 1920)
+    n = kwargs.get("n", 2)
+    if isinstance(obj, str):
+        if mode.lower() == "gray":
+            img = cv2.imread(obj, cv2.IMREAD_GRAYSCALE)
+        else:
+            img = cv2.imread(obj, cv2.IMREAD_COLOR)
+    else:
+        img = obj
+
+    # Resize image to satisfy constraints
+    img = resize_to_limit(img, max_dim=max_dim, division=n)
+
+    if mode.lower() == "bgr":
+        return contiguous(img)
+    elif mode.lower() == "rgb":
+        return contiguous(img[..., ::-1])
+    else:
+        raise Exception(f"Unknown mode {mode}")
+
+
+def load(fp):
+    suffix_ = suffix(fp)
+
+    if suffix_ == "npy":
+        return np.load(fp)
+    elif suffix_ == "pkl":
+        return pickle.load(open(fp, "rb"))
+    else:
+        raise Exception(f"Unknown type: {suffix_}")
+
+
+def dump(wfp, obj):
+    wd = osp.split(wfp)[0]
+    if wd != "" and not osp.exists(wd):
+        mkdir(wd)
+
+    _suffix = suffix(wfp)
+    if _suffix == "npy":
+        np.save(wfp, obj)
+    elif _suffix == "pkl":
+        pickle.dump(obj, open(wfp, "wb"))
+    else:
+        raise Exception("Unknown type: {}".format(_suffix))
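A quick round-trip sketch of `dump` and `load`, which the pipeline uses for `.npy` arrays and `.pkl` templates; the paths below are placeholders.

```python
import numpy as np

from src.utils.io import dump, load

arr = np.random.rand(4, 21, 3).astype(np.float32)   # e.g. a short keypoint sequence
dump('./results/tmp_kp.npy', arr)                    # parent directory is created if missing
assert np.allclose(arr, load('./results/tmp_kp.npy'))

dump('./results/tmp_template.pkl', {'n_frames': arr.shape[0], 'kp': arr})
print(load('./results/tmp_template.pkl')['n_frames'])
```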
diff --git a/src/utils/landmark_runner.py b/src/utils/landmark_runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..7680a2c4a65ebe7f4dadbafc4a35603ab9f90be6
--- /dev/null
+++ b/src/utils/landmark_runner.py
@@ -0,0 +1,89 @@
+# coding: utf-8
+
+import os.path as osp
+import cv2; cv2.setNumThreads(0); cv2.ocl.setUseOpenCL(False)
+import torch
+import numpy as np
+import onnxruntime
+from .timer import Timer
+from .rprint import rlog
+from .crop import crop_image, _transform_pts
+
+
+def make_abs_path(fn):
+    return osp.join(osp.dirname(osp.realpath(__file__)), fn)
+
+
+def to_ndarray(obj):
+    if isinstance(obj, torch.Tensor):
+        return obj.cpu().numpy()
+    elif isinstance(obj, np.ndarray):
+        return obj
+    else:
+        return np.array(obj)
+
+
+class LandmarkRunner(object):
+    """landmark runner"""
+
+    def __init__(self, **kwargs):
+        ckpt_path = kwargs.get('ckpt_path')
+        onnx_provider = kwargs.get('onnx_provider', 'cuda')  # defaults to CUDA
+        device_id = kwargs.get('device_id', 0)
+        self.dsize = kwargs.get('dsize', 224)
+        self.timer = Timer()
+
+        if onnx_provider.lower() == 'cuda':
+            self.session = onnxruntime.InferenceSession(
+                ckpt_path, providers=[
+                    ('CUDAExecutionProvider', {'device_id': device_id})
+                ]
+            )
+        else:
+            opts = onnxruntime.SessionOptions()
+            opts.intra_op_num_threads = 4  # default to 4 intra-op threads
+            self.session = onnxruntime.InferenceSession(
+                ckpt_path, providers=['CPUExecutionProvider'],
+                sess_options=opts
+            )
+
+    def _run(self, inp):
+        out = self.session.run(None, {'input': inp})
+        return out
+
+    def run(self, img_rgb: np.ndarray, lmk=None):
+        if lmk is not None:
+            crop_dct = crop_image(img_rgb, lmk, dsize=self.dsize, scale=1.5, vy_ratio=-0.1)
+            img_crop_rgb = crop_dct['img_crop']
+        else:
+            # NOTE: force resize to 224x224, NOT RECOMMEND!
+            img_crop_rgb = cv2.resize(img_rgb, (self.dsize, self.dsize))
+            scale = max(img_rgb.shape[:2]) / self.dsize
+            crop_dct = {
+                'M_c2o': np.array([
+                    [scale, 0., 0.],
+                    [0., scale, 0.],
+                    [0., 0., 1.],
+                ], dtype=np.float32),
+            }
+
+        inp = (img_crop_rgb.astype(np.float32) / 255.).transpose(2, 0, 1)[None, ...]  # HxWx3 RGB -> 1x3xHxW, scaled to [0, 1]
+
+        out_lst = self._run(inp)
+        out_pts = out_lst[2]
+
+        # 2d landmarks 203 points
+        lmk = to_ndarray(out_pts[0]).reshape(-1, 2) * self.dsize  # scale to 0-224
+        lmk = _transform_pts(lmk, M=crop_dct['M_c2o'])
+
+        return lmk
+
+    def warmup(self):
+        self.timer.tic()
+
+        dummy_image = np.zeros((1, 3, self.dsize, self.dsize), dtype=np.float32)
+
+        _ = self._run(dummy_image)
+
+        elapse = self.timer.toc()
+        rlog(f'LandmarkRunner warmup time: {elapse:.3f}s')
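A hedged sketch of running the landmark model on a single RGB frame; the ONNX path follows the pretrained-weight layout from the README but should be treated as an assumption.

```python
import cv2

from src.utils.landmark_runner import LandmarkRunner

runner = LandmarkRunner(
    ckpt_path='./pretrained_weights/liveportrait/landmark.onnx',  # assumed path
    onnx_provider='cuda',   # anything other than 'cuda' selects CPUExecutionProvider
    device_id=0,
)
runner.warmup()

img_bgr = cv2.imread('./example/source_image/WDA_BenCardin1_000.png')  # placeholder
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
lmk = runner.run(img_rgb)   # (203, 2) landmarks mapped back to the original image
print(lmk.shape)
```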
diff --git a/src/utils/resources/mask_template.png b/src/utils/resources/mask_template.png
new file mode 100644
index 0000000000000000000000000000000000000000..bca6ca5977ba820d0d2c05b3793c6231cc82e715
Binary files /dev/null and b/src/utils/resources/mask_template.png differ
diff --git a/src/utils/retargeting_utils.py b/src/utils/retargeting_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae2e5f52effe8107503586c9f5a24f39dfdbbbcf
--- /dev/null
+++ b/src/utils/retargeting_utils.py
@@ -0,0 +1,24 @@
+
+"""
+Functions to compute distance ratios between specific pairs of facial landmarks
+"""
+
+import numpy as np
+
+
+def calculate_distance_ratio(lmk: np.ndarray, idx1: int, idx2: int, idx3: int, idx4: int, eps: float = 1e-6) -> np.ndarray:
+    return (np.linalg.norm(lmk[:, idx1] - lmk[:, idx2], axis=1, keepdims=True) /
+            (np.linalg.norm(lmk[:, idx3] - lmk[:, idx4], axis=1, keepdims=True) + eps))
+
+
+def calc_eye_close_ratio(lmk: np.ndarray, target_eye_ratio: np.ndarray = None) -> np.ndarray:
+    lefteye_close_ratio = calculate_distance_ratio(lmk, 6, 18, 0, 12)
+    righteye_close_ratio = calculate_distance_ratio(lmk, 30, 42, 24, 36)
+    if target_eye_ratio is not None:
+        return np.concatenate([lefteye_close_ratio, righteye_close_ratio, target_eye_ratio], axis=1)
+    else:
+        return np.concatenate([lefteye_close_ratio, righteye_close_ratio], axis=1)
+
+
+def calc_lip_close_ratio(lmk: np.ndarray) -> np.ndarray:
+    return calculate_distance_ratio(lmk, 90, 102, 48, 66)
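To illustrate the expected layout, a tiny sketch computing eye and lip closing ratios from a batch of landmarks; the landmark array is random and purely illustrative.

```python
import numpy as np

from src.utils.retargeting_utils import calc_eye_close_ratio, calc_lip_close_ratio

lmk = np.random.rand(1, 203, 2).astype(np.float32)  # (batch, n_points, 2), random for illustration

eye_ratio = calc_eye_close_ratio(lmk)   # (1, 2): left/right eye opening ratios
lip_ratio = calc_lip_close_ratio(lmk)   # (1, 1): mouth opening ratio
print(eye_ratio.shape, lip_ratio.shape)
```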
diff --git a/src/utils/rprint.py b/src/utils/rprint.py
new file mode 100644
index 0000000000000000000000000000000000000000..c43a42f9855bbb019725e6c2b6c6c50e6fa4d0c5
--- /dev/null
+++ b/src/utils/rprint.py
@@ -0,0 +1,16 @@
+# coding: utf-8
+
+"""
+custom print and log functions 
+"""
+
+__all__ = ['rprint', 'rlog']
+
+try:
+    from rich.console import Console
+    console = Console()
+    rprint = console.print
+    rlog = console.log
+except ImportError:
+    rprint = print
+    rlog = print
diff --git a/src/utils/timer.py b/src/utils/timer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3570fa45d3ff36376471b82a5b3c02efe46eed98
--- /dev/null
+++ b/src/utils/timer.py
@@ -0,0 +1,29 @@
+# coding: utf-8
+
+"""
+tools to measure elapsed time
+"""
+
+import time
+
+class Timer(object):
+    """A simple timer."""
+
+    def __init__(self):
+        self.total_time = 0.
+        self.calls = 0
+        self.start_time = 0.
+        self.diff = 0.
+
+    def tic(self):
+        # use time.time instead of time.clock because time.clock
+        # does not normalize for multithreading
+        self.start_time = time.time()
+
+    def toc(self, average=True):
+        self.diff = time.time() - self.start_time
+        return self.diff
+
+    def clear(self):
+        self.start_time = 0.
+        self.diff = 0.
diff --git a/src/utils/video.py b/src/utils/video.py
new file mode 100644
index 0000000000000000000000000000000000000000..c62729049e0349c02fb92bdd3149ac21dade294b
--- /dev/null
+++ b/src/utils/video.py
@@ -0,0 +1,211 @@
+# coding: utf-8
+
+"""
+Functions for processing video
+
+ATTENTION: you need to install ffmpeg and ffprobe in your env!
+"""
+
+import os.path as osp
+import numpy as np
+import subprocess
+import imageio
+import cv2
+from rich.progress import track
+
+from .rprint import rlog as log
+from .rprint import rprint as print
+from .helper import prefix
+
+
+def exec_cmd(cmd):
+    return subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+
+def images2video(images, wfp, **kwargs):
+    fps = kwargs.get('fps', 25)
+    video_format = kwargs.get('format', 'mp4')  # default is mp4 format
+    codec = kwargs.get('codec', 'libx264')  # default is libx264 encoding
+    quality = kwargs.get('quality')  # video quality
+    pixelformat = kwargs.get('pixelformat', 'yuv420p')  # video pixel format
+    image_mode = kwargs.get('image_mode', 'rgb')
+    macro_block_size = kwargs.get('macro_block_size', 2)
+    ffmpeg_params = ['-crf', str(kwargs.get('crf', 18))]
+
+    writer = imageio.get_writer(
+        wfp, fps=fps, format=video_format,
+        codec=codec, quality=quality, ffmpeg_params=ffmpeg_params, pixelformat=pixelformat, macro_block_size=macro_block_size
+    )
+
+    n = len(images)
+    for i in track(range(n), description='Writing', transient=True):
+        if image_mode.lower() == 'bgr':
+            writer.append_data(images[i][..., ::-1])
+        else:
+            writer.append_data(images[i])
+
+    writer.close()
+
+
+def video2gif(video_fp, fps=30, size=256):
+    if osp.exists(video_fp):
+        d = osp.split(video_fp)[0]
+        fn = prefix(osp.basename(video_fp))
+        palette_wfp = osp.join(d, 'palette.png')
+        gif_wfp = osp.join(d, f'{fn}.gif')
+        # generate the palette
+        cmd = f'ffmpeg -i "{video_fp}" -vf "fps={fps},scale={size}:-1:flags=lanczos,palettegen" "{palette_wfp}" -y'
+        exec_cmd(cmd)
+        # use the palette to generate the gif
+        cmd = f'ffmpeg -i "{video_fp}" -i "{palette_wfp}" -filter_complex "fps={fps},scale={size}:-1:flags=lanczos[x];[x][1:v]paletteuse" "{gif_wfp}" -y'
+        exec_cmd(cmd)
+    else:
+        print(f'video_fp: {video_fp} does not exist!')
+
+
+def merge_audio_video(video_fp, audio_fp, wfp):
+    if osp.exists(video_fp) and osp.exists(audio_fp):
+        cmd = f'ffmpeg -i "{video_fp}" -i "{audio_fp}" -c:v copy -c:a aac "{wfp}" -y'
+        exec_cmd(cmd)
+        print(f'merge {video_fp} and {audio_fp} to {wfp}')
+    else:
+        print(f'video_fp: {video_fp} or audio_fp: {audio_fp} does not exist!')
+
+
+def blend(img: np.ndarray, mask: np.ndarray, background_color=(255, 255, 255)):
+    mask_float = mask.astype(np.float32) / 255.
+    background_color = np.array(background_color).reshape([1, 1, 3])
+    bg = np.ones_like(img) * background_color
+    img = np.clip(mask_float * img + (1 - mask_float) * bg, 0, 255).astype(np.uint8)
+    return img
+
+
+def concat_frames(driving_image_lst, source_image, I_p_lst):
+    # TODO: add more concat style, e.g., left-down corner driving
+    out_lst = []
+    h, w, _ = I_p_lst[0].shape
+
+    for idx, _ in track(enumerate(I_p_lst), total=len(I_p_lst), description='Concatenating result...'):
+        I_p = I_p_lst[idx]
+        source_image_resized = cv2.resize(source_image, (w, h))
+
+        if driving_image_lst is None:
+            out = np.hstack((source_image_resized, I_p))
+        else:
+            driving_image = driving_image_lst[idx]
+            driving_image_resized = cv2.resize(driving_image, (w, h))
+            out = np.hstack((driving_image_resized, source_image_resized, I_p))
+
+        out_lst.append(out)
+    return out_lst
+
+
+class VideoWriter:
+    def __init__(self, **kwargs):
+        self.fps = kwargs.get('fps', 30)
+        self.wfp = kwargs.get('wfp', 'video.mp4')
+        self.video_format = kwargs.get('format', 'mp4')
+        self.codec = kwargs.get('codec', 'libx264')
+        self.quality = kwargs.get('quality')
+        self.pixelformat = kwargs.get('pixelformat', 'yuv420p')
+        self.image_mode = kwargs.get('image_mode', 'rgb')
+        self.ffmpeg_params = kwargs.get('ffmpeg_params')
+
+        self.writer = imageio.get_writer(
+            self.wfp, fps=self.fps, format=self.video_format,
+            codec=self.codec, quality=self.quality,
+            ffmpeg_params=self.ffmpeg_params, pixelformat=self.pixelformat
+        )
+
+    def write(self, image):
+        if self.image_mode.lower() == 'bgr':
+            self.writer.append_data(image[..., ::-1])
+        else:
+            self.writer.append_data(image)
+
+    def close(self):
+        if self.writer is not None:
+            self.writer.close()
+
+
+def change_video_fps(input_file, output_file, fps=20, codec='libx264', crf=12):
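+    """Re-encode `input_file` at the target `fps` with the given codec and CRF."""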
+    cmd = f'ffmpeg -i "{input_file}" -c:v {codec} -crf {crf} -r {fps} "{output_file}" -y'
+    exec_cmd(cmd)
+
+
+def get_fps(filepath, default_fps=25):
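+    """Read a video's FPS with OpenCV, falling back to `default_fps` on failure."""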
+    try:
+        cap = cv2.VideoCapture(filepath)
+        fps = cap.get(cv2.CAP_PROP_FPS)
+        cap.release()  # release the capture to avoid leaking the file handle
+
+        if not fps:  # OpenCV returns 0.0 when the FPS cannot be determined
+            fps = default_fps
+    except Exception as e:
+        log(e)
+        fps = default_fps
+
+    return fps
+
+
+def has_audio_stream(video_path: str) -> bool:
+    """
+    Check if the video file contains an audio stream.
+
+    :param video_path: Path to the video file
+    :return: True if the video contains an audio stream, False otherwise
+    """
+    if osp.isdir(video_path):
+        return False
+
+    cmd = [
+        'ffprobe',
+        '-v', 'error',
+        '-select_streams', 'a',
+        '-show_entries', 'stream=codec_type',
+        '-of', 'default=noprint_wrappers=1:nokey=1',
+        f'"{video_path}"'
+    ]
+
+    try:
+        # exec_cmd runs with check=True, so a failing ffprobe raises instead of
+        # returning a non-zero exit code; stderr is merged into stdout.
+        result = exec_cmd(' '.join(cmd))
+
+        # ffprobe prints one line per audio stream; empty output means no audio
+        return bool(result.stdout.strip())
+    except Exception:
+        log(f"Error occurred while probing video: {video_path}. You may need to install ffprobe. Assuming no audio stream.", style="bold red")
+        return False
+
+
+def add_audio_to_video(silent_video_path: str, audio_video_path: str, output_video_path: str):
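+    """Copy the video stream from `silent_video_path` and the audio stream from `audio_video_path` into one file."""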
+    cmd = [
+        'ffmpeg',
+        '-y',
+        '-i', f'"{silent_video_path}"',
+        '-i', f'"{audio_video_path}"',
+        '-map', '0:v',
+        '-map', '1:a',
+        '-c:v', 'copy',
+        '-shortest',
+        f'"{output_video_path}"'
+    ]
+
+    try:
+        exec_cmd(' '.join(cmd))
+        log(f"Video with audio generated successfully: {output_video_path}")
+    except subprocess.CalledProcessError as e:
+        log(f"Error occurred: {e}")
+
+
+def bb_intersection_over_union(boxA, boxB):
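+    """Compute IoU of two (x1, y1, x2, y2) boxes, treating coordinates as inclusive pixel indices (hence the +1)."""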
+    xA = max(boxA[0], boxB[0])
+    yA = max(boxA[1], boxB[1])
+    xB = min(boxA[2], boxB[2])
+    yB = min(boxA[3], boxB[3])
+    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
+    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
+    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
+    iou = interArea / float(boxAArea + boxBArea - interArea)
+    return iou
diff --git a/src/utils/viz.py b/src/utils/viz.py
new file mode 100644
index 0000000000000000000000000000000000000000..59443cbf207f3395bee241f63c7acb95b9402530
--- /dev/null
+++ b/src/utils/viz.py
@@ -0,0 +1,19 @@
+# coding: utf-8
+
+import cv2; cv2.setNumThreads(0); cv2.ocl.setUseOpenCL(False)
+
+
+def viz_lmk(img_, vps, **kwargs):
+    """可视化点"""
+    lineType = kwargs.get("lineType", cv2.LINE_8)  # cv2.LINE_AA
+    img_for_viz = img_.copy()
+    for pt in vps:
+        cv2.circle(
+            img_for_viz,
+            (int(pt[0]), int(pt[1])),
+            radius=kwargs.get("radius", 1),
+            color=(0, 255, 0),
+            thickness=kwargs.get("thickness", 1),
+            lineType=lineType,
+        )
+    return img_for_viz