ChaolongYang committed on
Commit 475d332 · verified · 1 parent: 11e698b

Upload 242 files

This view is limited to 50 files because it contains too many changes; see the raw diff for the full changeset.
Files changed (50)
  1. .gitattributes +1 -0
  2. app.py +124 -0
  3. difpoint/.DS_Store +0 -0
  4. difpoint/configs/onnx_infer.yaml +105 -0
  5. difpoint/configs/onnx_mp_infer.yaml +99 -0
  6. difpoint/configs/trt_infer.yaml +105 -0
  7. difpoint/configs/trt_mp_infer.yaml +99 -0
  8. difpoint/croper.py +269 -0
  9. difpoint/dataset_process/.DS_Store +0 -0
  10. difpoint/dataset_process/__pycache__/audio.cpython-310.pyc +0 -0
  11. difpoint/dataset_process/__pycache__/audio.cpython-38.pyc +0 -0
  12. difpoint/dataset_process/audio.py +156 -0
  13. difpoint/dataset_process/wav2lip.py +75 -0
  14. difpoint/datasets/norm_info_d6.5_c8.5_vox1_train.npz +3 -0
  15. difpoint/inference.py +474 -0
  16. difpoint/model/__init__.py +6 -0
  17. difpoint/model/__pycache__/__init__.cpython-310.pyc +0 -0
  18. difpoint/model/__pycache__/__init__.cpython-38.pyc +0 -0
  19. difpoint/model/__pycache__/model.cpython-310.pyc +0 -0
  20. difpoint/model/__pycache__/model.cpython-38.pyc +0 -0
  21. difpoint/model/__pycache__/model_utils.cpython-310.pyc +0 -0
  22. difpoint/model/__pycache__/model_utils.cpython-38.pyc +0 -0
  23. difpoint/model/__pycache__/point_model.cpython-310.pyc +0 -0
  24. difpoint/model/__pycache__/point_model.cpython-38.pyc +0 -0
  25. difpoint/model/model.py +409 -0
  26. difpoint/model/model_utils.py +35 -0
  27. difpoint/model/point_model.py +38 -0
  28. difpoint/model/temporaltrans/__pycache__/temptrans.cpython-310.pyc +0 -0
  29. difpoint/model/temporaltrans/__pycache__/temptrans.cpython-38.pyc +0 -0
  30. difpoint/model/temporaltrans/__pycache__/transformer_utils.cpython-310.pyc +0 -0
  31. difpoint/model/temporaltrans/__pycache__/transformer_utils.cpython-38.pyc +0 -0
  32. difpoint/model/temporaltrans/pointnet_util.py +311 -0
  33. difpoint/model/temporaltrans/pointtransformerv2.py +250 -0
  34. difpoint/model/temporaltrans/temptrans.py +347 -0
  35. difpoint/model/temporaltrans/transformer_utils.py +146 -0
  36. difpoint/src/__init__.py +5 -0
  37. difpoint/src/__pycache__/__init__.cpython-310.pyc +0 -0
  38. difpoint/src/__pycache__/__init__.cpython-38.pyc +0 -0
  39. difpoint/src/__pycache__/live_portrait_pipeline.cpython-310.pyc +0 -0
  40. difpoint/src/__pycache__/live_portrait_wrapper.cpython-310.pyc +0 -0
  41. difpoint/src/config/__init__.py +0 -0
  42. difpoint/src/config/__pycache__/__init__.cpython-310.pyc +0 -0
  43. difpoint/src/config/__pycache__/argument_config.cpython-310.pyc +0 -0
  44. difpoint/src/config/__pycache__/base_config.cpython-310.pyc +0 -0
  45. difpoint/src/config/__pycache__/crop_config.cpython-310.pyc +0 -0
  46. difpoint/src/config/__pycache__/inference_config.cpython-310.pyc +0 -0
  47. difpoint/src/config/argument_config.py +48 -0
  48. difpoint/src/config/base_config.py +29 -0
  49. difpoint/src/config/crop_config.py +29 -0
  50. difpoint/src/config/inference_config.py +52 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ difpoint/src/utils/dependencies/insightface/data/images/t1.jpg filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,124 @@
+ import os, sys
+ import gradio as gr
+ from difpoint.inference import Inferencer
+ from TTS.api import TTS
+ import torch
+ import time
+ from flask import send_from_directory
+ from huggingface_hub import snapshot_download
+ import spaces
+ import tensorrt
+ import multiprocessing as mp
+ import pickle
+ mp.set_start_method('spawn', force=True)
+
+
+ repo_id = "ChaolongYang/KDTalker"
+ local_dir = "./downloaded_repo"
+ snapshot_download(repo_id=repo_id, local_dir=local_dir)
+ print("\nFiles downloaded:")
+ for root, dirs, files in os.walk(local_dir):
+     for file in files:
+         file_path = os.path.join(root, file)
+         print(file_path)
+
+ result_dir = "results"
+ def set_upload():
+     return "upload"
+ def set_microphone():
+     return "microphone"
+ def set_tts():
+     return "tts"
+ def create_kd_talker():
+     return Inferencer()
+
+ @spaces.GPU
+ def predict(prompt, upload_reference_audio, microphone_reference_audio, reference_audio_type):
+     global result_dir
+     output_file_path = os.path.join('./downloaded_repo/', 'output.wav')
+     if reference_audio_type == 'upload':
+         audio_file_pth = upload_reference_audio
+     elif reference_audio_type == 'microphone':
+         audio_file_pth = microphone_reference_audio
+     tts = TTS('tts_models/multilingual/multi-dataset/your_tts')
+     tts.tts_to_file(
+         text=prompt,
+         file_path=output_file_path,
+         speaker_wav=audio_file_pth,
+         language="en",
+     )
+     return gr.Audio(value=output_file_path, type='filepath')
+
+ @spaces.GPU
+ def generate(upload_driven_audio, tts_driven_audio, driven_audio_type, source_image, smoothed_pitch, smoothed_yaw, smoothed_roll, smoothed_t):
+     return Inferencer().generate_with_audio_img(upload_driven_audio, tts_driven_audio, driven_audio_type, source_image,
+                                                 smoothed_pitch, smoothed_yaw, smoothed_roll, smoothed_t)
+
+
+ def main():
+     if torch.cuda.is_available():
+         device = "cuda"
+     else:
+         device = "cpu"
+
+     with gr.Blocks(analytics_enabled=False) as interface:
+         gr.Markdown(
+             """
+             <div align='center'>
+                 <h2> Unlock Pose Diversity: Accurate and Efficient Implicit Keypoint-based Spatiotemporal Diffusion for Audio-driven Talking Portrait </h2>
+                 <div style="display: flex; justify-content: center; align-items: center; gap: 20px;">
+                     <img src='https://newstatic.dukekunshan.edu.cn/mainsite/2021/08/07161629/large_dku-Logo-e1649298929570.png' alt='Logo' width='150'/>
+                     <img src='https://www.xjtlu.edu.cn/wp-content/uploads/2023/12/7c52fd62e9cf26cb493faa7f91c2782.png' width='250'/>
+                 </div>
+             </div>
+             """
+         )
+         driven_audio_type = gr.Textbox(value="upload", visible=False)
+         reference_audio_type = gr.Textbox(value="upload", visible=False)
+
+         with gr.Row():
+             with gr.Column(variant="panel"):
+                 with gr.Tabs(elem_id="kdtalker_source_image"):
+                     with gr.TabItem("Upload image"):
+                         source_image = gr.Image(label="Source image", sources="upload", type="filepath", scale=256)
+
+                 with gr.Tabs(elem_id="kdtalker_driven_audio"):
+                     with gr.TabItem("Upload"):
+                         upload_driven_audio = gr.Audio(label="Upload audio", sources="upload", type="filepath")
+                         upload_driven_audio.change(set_upload, outputs=driven_audio_type)
+                     with gr.TabItem("TTS"):
+                         upload_reference_audio = gr.Audio(label="Upload Reference Audio", sources="upload", type="filepath")
+                         upload_reference_audio.change(set_upload, outputs=reference_audio_type)
+                         microphone_reference_audio = gr.Audio(label="Recorded Reference Audio", sources="microphone", type="filepath")
+                         microphone_reference_audio.change(set_microphone, outputs=reference_audio_type)
+                         input_text = gr.Textbox(
+                             label="Generating audio from text",
+                             lines=5,
+                             placeholder="please enter some text here, we generate the audio from text using @Coqui.ai TTS."
+                         )
+                         tts_button = gr.Button("Generate audio", elem_id="kdtalker_audio_generate", variant="primary")
+                         tts_driven_audio = gr.Audio(label="Synthesised Audio", type="filepath")
+                         tts_button.click(fn=predict, inputs=[input_text, upload_reference_audio, microphone_reference_audio, reference_audio_type], outputs=[tts_driven_audio])
+                         tts_button.click(set_tts, outputs=driven_audio_type)
+             with gr.Column(variant="panel"):
+                 gen_video = gr.Video(label="Generated video", format="mp4", width=256)
+                 with gr.Tabs(elem_id="talker_checkbox"):
+                     with gr.TabItem("KDTalker"):
+                         smoothed_pitch = gr.Slider(minimum=0, maximum=1, step=0.1, label="Pitch", value=0.8)
+                         smoothed_yaw = gr.Slider(minimum=0, maximum=1, step=0.1, label="Yaw", value=0.8)
+                         smoothed_roll = gr.Slider(minimum=0, maximum=1, step=0.1, label="Roll", value=0.8)
+                         smoothed_t = gr.Slider(minimum=0, maximum=1, step=0.1, label="T", value=0.8)
+                         kd_submit = gr.Button("Generate", elem_id="kdtalker_generate", variant="primary")
+                         kd_submit.click(
+                             fn=generate,
+                             inputs=[
+                                 upload_driven_audio, tts_driven_audio, driven_audio_type, source_image,
+                                 smoothed_pitch, smoothed_yaw, smoothed_roll, smoothed_t
+                             ],
+                             outputs=[gen_video]
+                         )
+     return interface
+
+
+ demo = main()
+ demo.queue().launch()
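
For reference, the Generate button above simply forwards the UI values to Inferencer.generate_with_audio_img. A minimal headless sketch of the same call; the input file names are hypothetical, and a CUDA device plus the downloaded checkpoints are assumed:

```python
from difpoint.inference import Inferencer

inferencer = Inferencer()
# audio_type='upload' selects the first audio argument as the driving audio; the TTS slot is unused here.
inferencer.generate_with_audio_img(
    "driving_audio.wav", None, "upload", "source_portrait.png",
    smoothed_pitch=0.8, smoothed_yaw=0.8, smoothed_roll=0.8, smoothed_t=0.8,
)
# The result is written as output.mp4 under the default save_path ('./downloaded_repo/').
```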
difpoint/.DS_Store ADDED
Binary file (6.15 kB).
 
difpoint/configs/onnx_infer.yaml ADDED
@@ -0,0 +1,105 @@
1
+ models:
2
+ warping_spade:
3
+ name: "WarpingSpadeModel"
4
+ predict_type: "ort"
5
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/warping_spade.onnx"
6
+ motion_extractor:
7
+ name: "MotionExtractorModel"
8
+ predict_type: "ort"
9
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/motion_extractor.onnx"
10
+ landmark:
11
+ name: "LandmarkModel"
12
+ predict_type: "ort"
13
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/landmark.onnx"
14
+ face_analysis:
15
+ name: "FaceAnalysisModel"
16
+ predict_type: "ort"
17
+ model_path:
18
+ - "./downloaded_repo/pretrained_weights/liveportrait_onnx/retinaface_det_static.onnx"
19
+ - "./downloaded_repo/pretrained_weights/liveportrait_onnx/face_2dpose_106_static.onnx"
20
+ app_feat_extractor:
21
+ name: "AppearanceFeatureExtractorModel"
22
+ predict_type: "ort"
23
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/appearance_feature_extractor.onnx"
24
+ stitching:
25
+ name: "StitchingModel"
26
+ predict_type: "ort"
27
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/stitching.onnx"
28
+ stitching_eye_retarget:
29
+ name: "StitchingModel"
30
+ predict_type: "ort"
31
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/stitching_eye.onnx"
32
+ stitching_lip_retarget:
33
+ name: "StitchingModel"
34
+ predict_type: "ort"
35
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/stitching_lip.onnx"
36
+
37
+ animal_models:
38
+ warping_spade:
39
+ name: "WarpingSpadeModel"
40
+ predict_type: "ort"
41
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/warping_spade.onnx"
42
+ motion_extractor:
43
+ name: "MotionExtractorModel"
44
+ predict_type: "ort"
45
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/motion_extractor.onnx"
46
+ app_feat_extractor:
47
+ name: "AppearanceFeatureExtractorModel"
48
+ predict_type: "ort"
49
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/appearance_feature_extractor.onnx"
50
+ stitching:
51
+ name: "StitchingModel"
52
+ predict_type: "ort"
53
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/stitching.onnx"
54
+ stitching_eye_retarget:
55
+ name: "StitchingModel"
56
+ predict_type: "ort"
57
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/stitching_eye.onnx"
58
+ stitching_lip_retarget:
59
+ name: "StitchingModel"
60
+ predict_type: "ort"
61
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/stitching_lip.onnx"
62
+ landmark:
63
+ name: "LandmarkModel"
64
+ predict_type: "ort"
65
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/landmark.onnx"
66
+ face_analysis:
67
+ name: "FaceAnalysisModel"
68
+ predict_type: "ort"
69
+ model_path:
70
+ - "./downloaded_repo/pretrained_weights/liveportrait_onnx/retinaface_det_static.onnx"
71
+ - "./downloaded_repo/pretrained_weights/liveportrait_onnx/face_2dpose_106_static.onnx"
72
+
73
+ crop_params:
74
+ src_dsize: 512
75
+ src_scale: 2.3
76
+ src_vx_ratio: 0.0
77
+ src_vy_ratio: -0.125
78
+ dri_scale: 2.2
79
+ dri_vx_ratio: 0.0
80
+ dri_vy_ratio: -0.1
81
+
82
+
83
+ infer_params:
84
+ flag_crop_driving_video: False
85
+ flag_normalize_lip: True
86
+ flag_source_video_eye_retargeting: False
87
+ flag_video_editing_head_rotation: False
88
+ flag_eye_retargeting: False
89
+ flag_lip_retargeting: False
90
+ flag_stitching: True
91
+ flag_relative_motion: True
92
+ flag_pasteback: True
93
+ flag_do_crop: True
94
+ flag_do_rot: True
95
+
96
+ # NOT EXPOERTED PARAMS
97
+ lip_normalize_threshold: 0.03 # threshold for flag_normalize_lip
98
+ source_video_eye_retargeting_threshold: 0.18 # threshold for eyes retargeting if the input is a source video
99
+ driving_smooth_observation_variance: 1e-7 # smooth strength scalar for the animated video when the input is a source video, the larger the number, the smoother the animated video; too much smoothness would result in loss of motion accuracy
100
+ anchor_frame: 0 # TO IMPLEMENT
101
+ mask_crop_path: "./assets/mask_template.png"
102
+ driving_multiplier: 1.0
103
+
104
+ source_max_dim: 1280 # the max dim of height and width of source image
105
+ source_division: 2 # make sure the height and width of source image can be divided by this number
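
These inference configs are plain YAML. A minimal sketch of loading one; OmegaConf is already imported in difpoint/inference.py, so it is assumed here to be the intended reader for these files:

```python
from omegaconf import OmegaConf

cfg = OmegaConf.load("difpoint/configs/onnx_infer.yaml")
print(cfg.models.warping_spade.predict_type)   # "ort"
print(cfg.infer_params.source_max_dim)         # 1280
```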
difpoint/configs/onnx_mp_infer.yaml ADDED
@@ -0,0 +1,99 @@
1
+ models:
2
+ warping_spade:
3
+ name: "WarpingSpadeModel"
4
+ predict_type: "ort"
5
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/warping_spade.onnx"
6
+ motion_extractor:
7
+ name: "MotionExtractorModel"
8
+ predict_type: "ort"
9
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/motion_extractor.onnx"
10
+ landmark:
11
+ name: "LandmarkModel"
12
+ predict_type: "ort"
13
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/landmark.onnx"
14
+ face_analysis:
15
+ name: "MediaPipeFaceModel"
16
+ predict_type: "mp"
17
+ app_feat_extractor:
18
+ name: "AppearanceFeatureExtractorModel"
19
+ predict_type: "ort"
20
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/appearance_feature_extractor.onnx"
21
+ stitching:
22
+ name: "StitchingModel"
23
+ predict_type: "ort"
24
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/stitching.onnx"
25
+ stitching_eye_retarget:
26
+ name: "StitchingModel"
27
+ predict_type: "ort"
28
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/stitching_eye.onnx"
29
+ stitching_lip_retarget:
30
+ name: "StitchingModel"
31
+ predict_type: "ort"
32
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/stitching_lip.onnx"
33
+
34
+ animal_models:
35
+ warping_spade:
36
+ name: "WarpingSpadeModel"
37
+ predict_type: "ort"
38
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/warping_spade.onnx"
39
+ motion_extractor:
40
+ name: "MotionExtractorModel"
41
+ predict_type: "ort"
42
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/motion_extractor.onnx"
43
+ app_feat_extractor:
44
+ name: "AppearanceFeatureExtractorModel"
45
+ predict_type: "ort"
46
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/appearance_feature_extractor.onnx"
47
+ stitching:
48
+ name: "StitchingModel"
49
+ predict_type: "ort"
50
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/stitching.onnx"
51
+ stitching_eye_retarget:
52
+ name: "StitchingModel"
53
+ predict_type: "ort"
54
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/stitching_eye.onnx"
55
+ stitching_lip_retarget:
56
+ name: "StitchingModel"
57
+ predict_type: "ort"
58
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/stitching_lip.onnx"
59
+ landmark:
60
+ name: "LandmarkModel"
61
+ predict_type: "ort"
62
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/landmark.onnx"
63
+ face_analysis:
64
+ name: "MediaPipeFaceModel"
65
+ predict_type: "mp"
66
+
67
+ crop_params:
68
+ src_dsize: 512
69
+ src_scale: 2.3
70
+ src_vx_ratio: 0.0
71
+ src_vy_ratio: -0.125
72
+ dri_scale: 2.2
73
+ dri_vx_ratio: 0.0
74
+ dri_vy_ratio: -0.1
75
+
76
+
77
+ infer_params:
78
+ flag_crop_driving_video: False
79
+ flag_normalize_lip: True
80
+ flag_source_video_eye_retargeting: False
81
+ flag_video_editing_head_rotation: False
82
+ flag_eye_retargeting: False
83
+ flag_lip_retargeting: False
84
+ flag_stitching: True
85
+ flag_relative_motion: True
86
+ flag_pasteback: True
87
+ flag_do_crop: True
88
+ flag_do_rot: True
89
+
90
+ # NOT EXPORTED PARAMS
91
+ lip_normalize_threshold: 0.03 # threshold for flag_normalize_lip
92
+ source_video_eye_retargeting_threshold: 0.18 # threshold for eyes retargeting if the input is a source video
93
+ driving_smooth_observation_variance: 1e-7 # smooth strength scalar for the animated video when the input is a source video, the larger the number, the smoother the animated video; too much smoothness would result in loss of motion accuracy
94
+ anchor_frame: 0 # TO IMPLEMENT
95
+ mask_crop_path: "./assets/mask_template.png"
96
+ driving_multiplier: 1.0
97
+
98
+ source_max_dim: 1280 # the max dim of height and width of source image
99
+ source_division: 2 # make sure the height and width of source image can be divided by this number
difpoint/configs/trt_infer.yaml ADDED
@@ -0,0 +1,105 @@
1
+ models:
2
+ warping_spade:
3
+ name: "WarpingSpadeModel"
4
+ predict_type: "trt"
5
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/warping_spade-fix.trt"
6
+ motion_extractor:
7
+ name: "MotionExtractorModel"
8
+ predict_type: "trt"
9
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/motion_extractor.trt"
10
+ landmark:
11
+ name: "LandmarkModel"
12
+ predict_type: "trt"
13
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/landmark.trt"
14
+ face_analysis:
15
+ name: "FaceAnalysisModel"
16
+ predict_type: "trt"
17
+ model_path:
18
+ - "./downloaded_repo/pretrained_weights/liveportrait_onnx/retinaface_det_static.trt"
19
+ - "./downloaded_repo/pretrained_weights/liveportrait_onnx/face_2dpose_106_static.trt"
20
+ app_feat_extractor:
21
+ name: "AppearanceFeatureExtractorModel"
22
+ predict_type: "trt"
23
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/appearance_feature_extractor.trt"
24
+ stitching:
25
+ name: "StitchingModel"
26
+ predict_type: "trt"
27
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/stitching.trt"
28
+ stitching_eye_retarget:
29
+ name: "StitchingModel"
30
+ predict_type: "trt"
31
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/stitching_eye.trt"
32
+ stitching_lip_retarget:
33
+ name: "StitchingModel"
34
+ predict_type: "trt"
35
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/stitching_lip.trt"
36
+
37
+ animal_models:
38
+ warping_spade:
39
+ name: "WarpingSpadeModel"
40
+ predict_type: "trt"
41
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/warping_spade-fix.trt"
42
+ motion_extractor:
43
+ name: "MotionExtractorModel"
44
+ predict_type: "trt"
45
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/motion_extractor.trt"
46
+ app_feat_extractor:
47
+ name: "AppearanceFeatureExtractorModel"
48
+ predict_type: "trt"
49
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/appearance_feature_extractor.trt"
50
+ stitching:
51
+ name: "StitchingModel"
52
+ predict_type: "trt"
53
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/stitching.trt"
54
+ stitching_eye_retarget:
55
+ name: "StitchingModel"
56
+ predict_type: "trt"
57
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/stitching_eye.trt"
58
+ stitching_lip_retarget:
59
+ name: "StitchingModel"
60
+ predict_type: "trt"
61
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/stitching_lip.trt"
62
+ landmark:
63
+ name: "LandmarkModel"
64
+ predict_type: "trt"
65
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/landmark.trt"
66
+ face_analysis:
67
+ name: "FaceAnalysisModel"
68
+ predict_type: "trt"
69
+ model_path:
70
+ - "./downloaded_repo/pretrained_weights/liveportrait_onnx/retinaface_det_static.trt"
71
+ - "./downloaded_repo/pretrained_weights/liveportrait_onnx/face_2dpose_106_static.trt"
72
+
73
+ crop_params:
74
+ src_dsize: 512
75
+ src_scale: 2.3
76
+ src_vx_ratio: 0.0
77
+ src_vy_ratio: -0.125
78
+ dri_scale: 2.2
79
+ dri_vx_ratio: 0.0
80
+ dri_vy_ratio: -0.1
81
+
82
+
83
+ infer_params:
84
+ flag_crop_driving_video: False
85
+ flag_normalize_lip: False
86
+ flag_source_video_eye_retargeting: False
87
+ flag_video_editing_head_rotation: False
88
+ flag_eye_retargeting: False
89
+ flag_lip_retargeting: False
90
+ flag_stitching: True
91
+ flag_relative_motion: True
92
+ flag_pasteback: True
93
+ flag_do_crop: True
94
+ flag_do_rot: True
95
+
96
+ # NOT EXPORTED PARAMS
97
+ lip_normalize_threshold: 0.03 # threshold for flag_normalize_lip
98
+ source_video_eye_retargeting_threshold: 0.18 # threshold for eyes retargeting if the input is a source video
99
+ driving_smooth_observation_variance: 1e-7 # smooth strength scalar for the animated video when the input is a source video, the larger the number, the smoother the animated video; too much smoothness would result in loss of motion accuracy
100
+ anchor_frame: 0 # TO IMPLEMENT
101
+ mask_crop_path: "./assets/mask_template.png"
102
+ driving_multiplier: 1.0
103
+
104
+ source_max_dim: 1280 # the max dim of height and width of source image
105
+ source_division: 2 # make sure the height and width of source image can be divided by this number
difpoint/configs/trt_mp_infer.yaml ADDED
@@ -0,0 +1,99 @@
1
+ models:
2
+ warping_spade:
3
+ name: "WarpingSpadeModel"
4
+ predict_type: "trt"
5
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/warping_spade-fix.trt"
6
+ motion_extractor:
7
+ name: "MotionExtractorModel"
8
+ predict_type: "trt"
9
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/motion_extractor.trt"
10
+ landmark:
11
+ name: "LandmarkModel"
12
+ predict_type: "trt"
13
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/landmark.trt"
14
+ face_analysis:
15
+ name: "MediaPipeFaceModel"
16
+ predict_type: "mp"
17
+ app_feat_extractor:
18
+ name: "AppearanceFeatureExtractorModel"
19
+ predict_type: "trt"
20
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/appearance_feature_extractor.trt"
21
+ stitching:
22
+ name: "StitchingModel"
23
+ predict_type: "trt"
24
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/stitching.trt"
25
+ stitching_eye_retarget:
26
+ name: "StitchingModel"
27
+ predict_type: "trt"
28
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/stitching_eye.trt"
29
+ stitching_lip_retarget:
30
+ name: "StitchingModel"
31
+ predict_type: "trt"
32
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/stitching_lip.trt"
33
+
34
+ animal_models:
35
+ warping_spade:
36
+ name: "WarpingSpadeModel"
37
+ predict_type: "trt"
38
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/warping_spade-fix.trt"
39
+ motion_extractor:
40
+ name: "MotionExtractorModel"
41
+ predict_type: "trt"
42
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/motion_extractor.trt"
43
+ app_feat_extractor:
44
+ name: "AppearanceFeatureExtractorModel"
45
+ predict_type: "trt"
46
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/appearance_feature_extractor.trt"
47
+ stitching:
48
+ name: "StitchingModel"
49
+ predict_type: "trt"
50
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/stitching.trt"
51
+ stitching_eye_retarget:
52
+ name: "StitchingModel"
53
+ predict_type: "trt"
54
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/stitching_eye.trt"
55
+ stitching_lip_retarget:
56
+ name: "StitchingModel"
57
+ predict_type: "trt"
58
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_animal_onnx/stitching_lip.trt"
59
+ landmark:
60
+ name: "LandmarkModel"
61
+ predict_type: "trt"
62
+ model_path: "./downloaded_repo/pretrained_weights/liveportrait_onnx/landmark.trt"
63
+ face_analysis:
64
+ name: "MediaPipeFaceModel"
65
+ predict_type: "mp"
66
+
67
+ crop_params:
68
+ src_dsize: 512
69
+ src_scale: 2.0
70
+ src_vx_ratio: 0.0
71
+ src_vy_ratio: -0.125
72
+ dri_scale: 2.2
73
+ dri_vx_ratio: 0.0
74
+ dri_vy_ratio: -0.1
75
+
76
+
77
+ infer_params:
78
+ flag_crop_driving_video: False
79
+ flag_normalize_lip: False
80
+ flag_source_video_eye_retargeting: False
81
+ flag_video_editing_head_rotation: False
82
+ flag_eye_retargeting: False
83
+ flag_lip_retargeting: False
84
+ flag_stitching: False
85
+ flag_relative_motion: False
86
+ flag_pasteback: False
87
+ flag_do_crop: False
88
+ flag_do_rot: False
89
+
90
+ # NOT EXPORTED PARAMS
91
+ lip_normalize_threshold: 0.03 # threshold for flag_normalize_lip
92
+ source_video_eye_retargeting_threshold: 0.18 # threshold for eyes retargeting if the input is a source video
93
+ driving_smooth_observation_variance: 1e-7 # smooth strength scalar for the animated video when the input is a source video, the larger the number, the smoother the animated video; too much smoothness would result in loss of motion accuracy
94
+ anchor_frame: 0 # TO IMPLEMENT
95
+ mask_crop_path: "./assets/mask_template.png"
96
+ driving_multiplier: 1.0
97
+
98
+ source_max_dim: 1280 # the max dim of height and width of source image
99
+ source_division: 2 # make sure the height and width of source image can be divided by this number
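
The four config variants differ mainly in the inference backend (predict_type "ort" for ONNX Runtime vs "trt" for TensorRT) and in the face-analysis model (FaceAnalysisModel vs MediaPipeFaceModel). A hypothetical helper, not part of this upload, for choosing a config path based on whether TensorRT is importable:

```python
import importlib.util

def pick_config(use_mediapipe: bool = False) -> str:
    # Hypothetical convenience helper: prefer the TensorRT configs when tensorrt is installed.
    has_trt = importlib.util.find_spec("tensorrt") is not None
    backend = "trt" if has_trt else "onnx"
    suffix = "_mp_infer.yaml" if use_mediapipe else "_infer.yaml"
    return f"difpoint/configs/{backend}{suffix}"

print(pick_config())  # e.g. "difpoint/configs/trt_infer.yaml" when TensorRT is available
```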
difpoint/croper.py ADDED
@@ -0,0 +1,269 @@
1
+ import os
2
+ import cv2
3
+ import time
4
+ import glob
5
+ import argparse
6
+ import scipy
7
+ import numpy as np
8
+ from PIL import Image
9
+ from tqdm import tqdm
10
+ from itertools import cycle
11
+
12
+
13
+ """
14
+ brief: face alignment with FFHQ method (https://github.com/NVlabs/ffhq-dataset)
15
+ author: lzhbrian (https://lzhbrian.me)
16
+ date: 2020.1.5
17
+ note: code is heavily borrowed from
18
+ https://github.com/NVlabs/ffhq-dataset
19
+ http://dlib.net/face_landmark_detection.py.html
20
+ requirements:
21
+ apt install cmake
22
+ conda install Pillow numpy scipy
23
+ pip install dlib
24
+ # download face landmark model from:
25
+ # http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
26
+ """
27
+
28
+ import numpy as np
29
+ from PIL import Image
30
+ import dlib
31
+
32
+
33
+ class Croper:
34
+
35
+ def __init__(self, path_of_lm):
36
+ # download model from: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
37
+ self.predictor = dlib.shape_predictor(path_of_lm)
38
+
39
+ def get_landmark(self, img_np):
40
+ """get landmark with dlib
41
+ :return: np.array shape=(68, 2)
42
+ """
43
+ detector = dlib.get_frontal_face_detector()
44
+ dets = detector(img_np, 1)
45
+ # print("Number of faces detected: {}".format(len(dets)))
46
+ # for k, d in enumerate(dets):
47
+ if len(dets) == 0:
48
+ return None
49
+ d = dets[0]
50
+ # Get the landmarks/parts for the face in box d.
51
+ shape = self.predictor(img_np, d)
52
+ # print("Part 0: {}, Part 1: {} ...".format(shape.part(0), shape.part(1)))
53
+ t = list(shape.parts())
54
+ a = []
55
+ for tt in t:
56
+ a.append([tt.x, tt.y])
57
+ lm = np.array(a)
58
+ # lm is a shape=(68,2) np.array
59
+ return lm
60
+
61
+ def align_face(self, img, lm, output_size=1024):
62
+ """
63
+ :param filepath: str
64
+ :return: PIL Image
65
+ """
66
+ lm_chin = lm[0: 17] # left-right
67
+ lm_eyebrow_left = lm[17: 22] # left-right
68
+ lm_eyebrow_right = lm[22: 27] # left-right
69
+ lm_nose = lm[27: 31] # top-down
70
+ lm_nostrils = lm[31: 36] # top-down
71
+ lm_eye_left = lm[36: 42] # left-clockwise
72
+ lm_eye_right = lm[42: 48] # left-clockwise
73
+ lm_mouth_outer = lm[48: 60] # left-clockwise
74
+ lm_mouth_inner = lm[60: 68] # left-clockwise
75
+
76
+ # Calculate auxiliary vectors.
77
+ eye_left = np.mean(lm_eye_left, axis=0)
78
+ eye_right = np.mean(lm_eye_right, axis=0)
79
+ eye_avg = (eye_left + eye_right) * 0.5
80
+ eye_to_eye = eye_right - eye_left
81
+ mouth_left = lm_mouth_outer[0]
82
+ mouth_right = lm_mouth_outer[6]
83
+ mouth_avg = (mouth_left + mouth_right) * 0.5
84
+ eye_to_mouth = mouth_avg - eye_avg
85
+
86
+ # Choose oriented crop rectangle.
87
+ x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1] # Addition of binocular difference and double mouth difference
88
+ x /= np.hypot(*x) # np.hypot gives the hypotenuse of the right triangle formed by the two components; divide by it to normalize x to unit length
89
+ x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8) # take the larger of the eye-to-eye and eye-to-mouth spans as the reference scale
90
+ y = np.flipud(x) * [-1, 1]
91
+ c = eye_avg + eye_to_mouth * 0.1
92
+ quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) # crop quadrilateral: four corners obtained by shifting around the face reference center c
93
+ qsize = np.hypot(*x) * 2 # quadrilateral size (side length), twice the reference scale
94
+
95
+ # Shrink.
96
+ # If the computed quadrilateral is too large, shrink it proportionally.
97
+ shrink = int(np.floor(qsize / output_size * 0.5))
98
+ if shrink > 1:
99
+ rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
100
+ img = img.resize(rsize, Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is its replacement
101
+ quad /= shrink
102
+ qsize /= shrink
103
+ else:
104
+ rsize = (int(np.rint(float(img.size[0]))), int(np.rint(float(img.size[1]))))
105
+
106
+ # Crop.
107
+ border = max(int(np.rint(qsize * 0.1)), 3)
108
+ crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
109
+ int(np.ceil(max(quad[:, 1]))))
110
+ crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
111
+ min(crop[3] + border, img.size[1]))
112
+ if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
113
+ # img = img.crop(crop)
114
+ quad -= crop[0:2]
115
+
116
+ # Pad.
117
+ pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
118
+ int(np.ceil(max(quad[:, 1]))))
119
+ pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
120
+ max(pad[3] - img.size[1] + border, 0))
121
+ # if enable_padding and max(pad) > border - 4:
122
+ # pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
123
+ # img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
124
+ # h, w, _ = img.shape
125
+ # y, x, _ = np.ogrid[:h, :w, :1]
126
+ # mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
127
+ # 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
128
+ # blur = qsize * 0.02
129
+ # img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
130
+ # img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
131
+ # img = Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
132
+ # quad += pad[:2]
133
+
134
+ # Transform.
135
+ quad = (quad + 0.5).flatten()
136
+ lx = max(min(quad[0], quad[2]), 0)
137
+ ly = max(min(quad[1], quad[7]), 0)
138
+ rx = min(max(quad[4], quad[6]), img.size[0])
139
+ ry = min(max(quad[3], quad[5]), img.size[0])
140
+ # img = img.transform((transform_size, transform_size), Image.QUAD, (quad + 0.5).flatten(),
141
+ # Image.BILINEAR)
142
+ # if output_size < transform_size:
143
+ # img = img.resize((output_size, output_size), Image.ANTIALIAS)
144
+
145
+ # Save aligned image.
146
+ return rsize, crop, [lx, ly, rx, ry]
147
+
148
+ # def crop(self, img_np_list):
149
+ # for _i in range(len(img_np_list)):
150
+ # img_np = img_np_list[_i]
151
+ # lm = self.get_landmark(img_np)
152
+ # if lm is None:
153
+ # return None
154
+ # crop, quad = self.align_face(img=Image.fromarray(img_np), lm=lm, output_size=512)
155
+ # clx, cly, crx, cry = crop
156
+ # lx, ly, rx, ry = quad
157
+ # lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)
158
+
159
+ # _inp = img_np_list[_i]
160
+ # _inp = _inp[cly:cry, clx:crx]
161
+ # _inp = _inp[ly:ry, lx:rx]
162
+ # img_np_list[_i] = _inp
163
+ # return img_np_list
164
+
165
+ def crop(self, img_np_list, still=False, xsize=512): # first frame for all video
166
+ img_np = img_np_list[0]
167
+ lm = self.get_landmark(img_np)
168
+ if lm is None:
169
+ raise ValueError('cannot detect a facial landmark in the source image')
170
+ rsize, crop, quad = self.align_face(img=Image.fromarray(img_np), lm=lm, output_size=xsize)
171
+ clx, cly, crx, cry = crop
172
+ lx, ly, rx, ry = quad
173
+ lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)
174
+ for _i in range(len(img_np_list)):
175
+ _inp = img_np_list[_i]
176
+ _inp = cv2.resize(_inp, (rsize[0], rsize[1]))
177
+ _inp = _inp[cly:cry, clx:crx]
178
+ # cv2.imwrite('test1.jpg', _inp)
179
+ if not still:
180
+ _inp = _inp[ly:ry, lx:rx]
181
+ # cv2.imwrite('test2.jpg', _inp)
182
+ img_np_list[_i] = _inp
183
+ return img_np_list, crop, quad
184
+
185
+
186
+ def read_video(filename, uplimit=100):
187
+ frames = []
188
+ cap = cv2.VideoCapture(filename)
189
+ cnt = 0
190
+ while cap.isOpened():
191
+ ret, frame = cap.read()
192
+ if ret:
193
+ frame = cv2.resize(frame, (512, 512))
194
+ frames.append(frame)
195
+ else:
196
+ break
197
+ cnt += 1
198
+ if cnt >= uplimit:
199
+ break
200
+ cap.release()
201
+ assert len(frames) > 0, f'{filename}: video with no frames!'
202
+ return frames
203
+
204
+
205
+ def create_video(video_name, frames, fps=25, video_format='.mp4', resize_ratio=1):
206
+ # video_name = os.path.dirname(image_folder) + video_format
207
+ # img_list = glob.glob1(image_folder, 'frame*')
208
+ # img_list.sort()
209
+ # frame = cv2.imread(os.path.join(image_folder, img_list[0]))
210
+ # frame = cv2.resize(frame, (0, 0), fx=resize_ratio, fy=resize_ratio)
211
+ # height, width, layers = frames[0].shape
212
+ height, width, layers = 512, 512, 3
213
+ if video_format == '.mp4':
214
+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')
215
+ elif video_format == '.avi':
216
+ fourcc = cv2.VideoWriter_fourcc(*'XVID')
217
+ video = cv2.VideoWriter(video_name, fourcc, fps, (width, height))
218
+ for _frame in frames:
219
+ _frame = cv2.resize(_frame, (height, width), interpolation=cv2.INTER_LINEAR)
220
+ video.write(_frame)
221
+
222
+ def create_images(video_name, frames):
223
+ height, width, layers = 512, 512, 3
224
+ images_dir = video_name.split('.')[0]
225
+ os.makedirs(images_dir, exist_ok=True)
226
+ for i, _frame in enumerate(frames):
227
+ _frame = cv2.resize(_frame, (height, width), interpolation=cv2.INTER_LINEAR)
228
+ _frame_path = os.path.join(images_dir, str(i)+'.jpg')
229
+ cv2.imwrite(_frame_path, _frame)
230
+
231
+ def run(data):
232
+ filename, opt, device = data
233
+ os.environ['CUDA_VISIBLE_DEVICES'] = device
234
+ croper = Croper()
235
+
236
+ frames = read_video(filename, uplimit=opt.uplimit)
237
+ name = filename.split('/')[-1] # .split('.')[0]
238
+ name = os.path.join(opt.output_dir, name)
239
+
240
+ frames = croper.crop(frames)
241
+ if frames is None:
242
+ print(f'{name}: detect no face. should removed')
243
+ return
244
+ # create_video(name, frames)
245
+ create_images(name, frames)
246
+
247
+
248
+ def get_data_path(video_dir):
249
+ eg_video_files = ['/apdcephfs/share_1290939/quincheng/datasets/HDTF/backup_fps25/WDA_KatieHill_000.mp4']
250
+ # filenames = list()
251
+ # VIDEO_EXTENSIONS_LOWERCASE = {'mp4'}
252
+ # VIDEO_EXTENSIONS = VIDEO_EXTENSIONS_LOWERCASE.union({f.upper() for f in VIDEO_EXTENSIONS_LOWERCASE})
253
+ # extensions = VIDEO_EXTENSIONS
254
+ # for ext in extensions:
255
+ # filenames = sorted(glob.glob(f'{opt.input_dir}/**/*.{ext}'))
256
+ # print('Total number of videos:', len(filenames))
257
+ return eg_video_files
258
+
259
+
260
+ def get_wra_data_path(video_dir):
261
+ if opt.option == 'video':
262
+ videos_path = sorted(glob.glob(f'{video_dir}/*.mp4'))
263
+ elif opt.option == 'image':
264
+ videos_path = sorted(glob.glob(f'{video_dir}/*/'))
265
+ else:
266
+ raise NotImplementedError
267
+ print('Example videos: ', videos_path[:2])
268
+ return videos_path
269
+
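
A minimal usage sketch for the Croper class above; the landmark-model path matches the one used in difpoint/inference.py, while the input image name is hypothetical:

```python
import numpy as np
from PIL import Image
from difpoint.croper import Croper

croper = Croper('./downloaded_repo/ckpts/shape_predictor_68_face_landmarks.dat')
img = np.array(Image.open('portrait.jpg').convert('RGB'))        # hypothetical source image
cropped, crop_box, quad = croper.crop([img], still=False, xsize=512)
print(cropped[0].shape, crop_box, quad)
```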
difpoint/dataset_process/.DS_Store ADDED
Binary file (6.15 kB).
 
difpoint/dataset_process/__pycache__/audio.cpython-310.pyc ADDED
Binary file (4.61 kB).
 
difpoint/dataset_process/__pycache__/audio.cpython-38.pyc ADDED
Binary file (4.65 kB).
 
difpoint/dataset_process/audio.py ADDED
@@ -0,0 +1,156 @@
1
+ import librosa
2
+ import librosa.filters
3
+ import numpy as np
4
+ # import tensorflow as tf
5
+ from scipy import signal
6
+ from scipy.io import wavfile
7
+ from difpoint.src.utils.hparams import hparams as hp
8
+
9
+
10
+ def load_wav(path, sr):
11
+ return librosa.core.load(path, sr=sr)[0]
12
+
13
+
14
+ def save_wav(wav, path, sr):
15
+ wav *= 32767 / max(0.01, np.max(np.abs(wav)))
16
+ # proposed by @dsmiller
17
+ wavfile.write(path, sr, wav.astype(np.int16))
18
+
19
+
20
+ def save_wavenet_wav(wav, path, sr):
21
+ librosa.output.write_wav(path, wav, sr=sr)
22
+
23
+
24
+ def preemphasis(wav, k, preemphasize=True):
25
+ if preemphasize:
26
+ return signal.lfilter([1, -k], [1], wav)
27
+ return wav
28
+
29
+
30
+ def inv_preemphasis(wav, k, inv_preemphasize=True):
31
+ if inv_preemphasize:
32
+ return signal.lfilter([1], [1, -k], wav)
33
+ return wav
34
+
35
+
36
+ def get_hop_size():
37
+ hop_size = hp.hop_size
38
+ if hop_size is None:
39
+ assert hp.frame_shift_ms is not None
40
+ hop_size = int(hp.frame_shift_ms / 1000 * hp.sample_rate)
41
+ return hop_size
42
+
43
+
44
+ def linearspectrogram(wav):
45
+ D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
46
+ S = _amp_to_db(np.abs(D)) - hp.ref_level_db
47
+
48
+ if hp.signal_normalization:
49
+ return _normalize(S)
50
+ return S
51
+
52
+
53
+ def melspectrogram(wav):
54
+ D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
55
+ S = _amp_to_db(_linear_to_mel(np.abs(D))) - hp.ref_level_db
56
+
57
+ if hp.signal_normalization:
58
+ return _normalize(S)
59
+ return S
60
+
61
+
62
+ def _lws_processor():
63
+ import lws
64
+ return lws.lws(hp.n_fft, get_hop_size(), fftsize=hp.win_size, mode="speech")
65
+
66
+
67
+ def _stft(y):
68
+ if hp.use_lws:
69
+ return _lws_processor().stft(y).T
70
+ else:
71
+ return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=get_hop_size(), win_length=hp.win_size)
72
+
73
+
74
+ ##########################################################
75
+ # Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)
76
+ def num_frames(length, fsize, fshift):
77
+ """Compute number of time frames of spectrogram
78
+ """
79
+ pad = (fsize - fshift)
80
+ if length % fshift == 0:
81
+ M = (length + pad * 2 - fsize) // fshift + 1
82
+ else:
83
+ M = (length + pad * 2 - fsize) // fshift + 2
84
+ return M
85
+
86
+
87
+ def pad_lr(x, fsize, fshift):
88
+ """Compute left and right padding
89
+ """
90
+ M = num_frames(len(x), fsize, fshift)
91
+ pad = (fsize - fshift)
92
+ T = len(x) + 2 * pad
93
+ r = (M - 1) * fshift + fsize - T
94
+ return pad, pad + r
95
+
96
+
97
+ ##########################################################
98
+ # Librosa correct padding
99
+ def librosa_pad_lr(x, fsize, fshift):
100
+ return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]
101
+
102
+
103
+ # Conversions
104
+ _mel_basis = None
105
+
106
+
107
+ def _linear_to_mel(spectogram):
108
+ global _mel_basis
109
+ if _mel_basis is None:
110
+ _mel_basis = _build_mel_basis()
111
+ return np.dot(_mel_basis, spectogram)
112
+
113
+
114
+ def _build_mel_basis():
115
+ assert hp.fmax <= hp.sample_rate // 2
116
+ return librosa.filters.mel(sr=hp.sample_rate, n_fft=hp.n_fft, n_mels=hp.num_mels,
117
+ fmin=hp.fmin, fmax=hp.fmax)
118
+
119
+
120
+ def _amp_to_db(x):
121
+ min_level = np.exp(hp.min_level_db / 20 * np.log(10))
122
+ return 20 * np.log10(np.maximum(min_level, x))
123
+
124
+
125
+ def _db_to_amp(x):
126
+ return np.power(10.0, (x) * 0.05)
127
+
128
+
129
+ def _normalize(S):
130
+ if hp.allow_clipping_in_normalization:
131
+ if hp.symmetric_mels:
132
+ return np.clip((2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value,
133
+ -hp.max_abs_value, hp.max_abs_value)
134
+ else:
135
+ return np.clip(hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db)), 0, hp.max_abs_value)
136
+
137
+ assert S.max() <= 0 and S.min() - hp.min_level_db >= 0
138
+ if hp.symmetric_mels:
139
+ return (2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value
140
+ else:
141
+ return hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db))
142
+
143
+
144
+ def _denormalize(D):
145
+ if hp.allow_clipping_in_normalization:
146
+ if hp.symmetric_mels:
147
+ return (((np.clip(D, -hp.max_abs_value,
148
+ hp.max_abs_value) + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value))
149
+ + hp.min_level_db)
150
+ else:
151
+ return ((np.clip(D, 0, hp.max_abs_value) * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
152
+
153
+ if hp.symmetric_mels:
154
+ return (((D + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value)) + hp.min_level_db)
155
+ else:
156
+ return ((D * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
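
A minimal sketch of the typical call into this module, mirroring how difpoint/inference.py uses it; 'speech.wav' is a hypothetical 16 kHz input:

```python
from difpoint.dataset_process import audio

wav = audio.load_wav('speech.wav', 16000)   # hypothetical input file, loaded at 16 kHz
mel = audio.melspectrogram(wav)             # (num_mels, T) normalized log-mel spectrogram
print(mel.shape)
```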
difpoint/dataset_process/wav2lip.py ADDED
@@ -0,0 +1,75 @@
+ import torch
+ import torch.nn.functional as F
+ from torch import nn
+
+
+ class Conv2d(nn.Module):
+     def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, use_act=True, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.conv_block = nn.Sequential(
+             nn.Conv2d(cin, cout, kernel_size, stride, padding),
+             nn.BatchNorm2d(cout)
+         )
+         self.act = nn.ReLU()
+         self.residual = residual
+         self.use_act = use_act
+
+     def forward(self, x):
+         out = self.conv_block(x)
+         if self.residual:
+             out += x
+
+         if self.use_act:
+             return self.act(out)
+         else:
+             return out
+
+ class AudioEncoder(nn.Module):
+     def __init__(self, wav2lip_checkpoint, device):
+         super(AudioEncoder, self).__init__()
+
+         self.audio_encoder = nn.Sequential(
+             Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
+             Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),
+             Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),
+
+             Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1),
+             Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),
+             Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),
+
+             Conv2d(64, 128, kernel_size=3, stride=3, padding=1),
+             Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),
+             Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),
+
+             Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1),
+             Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True),
+
+             Conv2d(256, 512, kernel_size=3, stride=1, padding=0),
+             Conv2d(512, 512, kernel_size=1, stride=1, padding=0),)
+
+         #### load the pre-trained audio_encoder
+         wav2lip_state_dict = torch.load(wav2lip_checkpoint, map_location=torch.device(device))['state_dict']
+         state_dict = self.audio_encoder.state_dict()
+
+         for k, v in wav2lip_state_dict.items():
+             if 'audio_encoder' in k:
+                 state_dict[k.replace('module.audio_encoder.', '')] = v
+         self.audio_encoder.load_state_dict(state_dict)
+
+
+     def forward(self, audio_sequences):
+         # audio_sequences = (B, T, 1, 80, 16)
+         B = audio_sequences.size(0)
+
+         audio_sequences = torch.cat([audio_sequences[:, i] for i in range(audio_sequences.size(1))], dim=0)
+
+         audio_embedding = self.audio_encoder(audio_sequences)  # B, 512, 1, 1
+         dim = audio_embedding.shape[1]
+         audio_embedding = audio_embedding.reshape((B, -1, dim, 1, 1))
+
+         return audio_embedding.squeeze(-1).squeeze(-1)  # B seq_len+1 512
+
+ wav2lip_checkpoint = 'ckpts/wav2lip.pth'
+ wav2lip_model = AudioEncoder(wav2lip_checkpoint, 'cuda')
+ wav2lip_model.cuda()
+ wav2lip_model.eval()
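
A shape-check sketch for the encoder above; it assumes ckpts/wav2lip.pth is present and a CUDA device is available, since the module instantiates wav2lip_model at import time:

```python
import torch
from difpoint.dataset_process.wav2lip import wav2lip_model

mels = torch.randn(1, 64, 1, 80, 16).cuda()     # (B, T, 1, 80, 16) per-frame mel chunks
with torch.no_grad():
    emb = wav2lip_model(mels)
print(emb.shape)                                # torch.Size([1, 64, 512])
```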
difpoint/datasets/norm_info_d6.5_c8.5_vox1_train.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9422e503e75df9d1bd455d8e0f9f5e2826b12956cdedbb5566097c0151bddafb
+ size 5580
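
This LFS pointer tracks the normalization statistics consumed by Inferencer._norm and _denorm in difpoint/inference.py; a sketch of inspecting the archive, with key names inferred from that code:

```python
import numpy as np

norm_info = dict(np.load('difpoint/datasets/norm_info_d6.5_c8.5_vox1_train.npz'))
# Per-key mean/std pairs, e.g. 'yaw_mean'/'yaw_std', 'exp_mean'/'exp_std', ...
print(sorted(norm_info.keys()))
```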
difpoint/inference.py ADDED
@@ -0,0 +1,474 @@
1
+ # -*- coding: UTF-8 -*-
2
+ '''
3
+ @File :inference.py
4
+ @Author :Chaolong Yang
5
+ @Date :2024/5/29 19:26
6
+ '''
7
+ import glob
8
+
9
+ import os
10
+ os.environ['HYDRA_FULL_ERROR']='1'
11
+
12
+
13
+ import os
14
+ import time
15
+ import shutil
16
+ import uuid
17
+ import os
18
+ import cv2
19
+ import tyro
20
+
21
+ import numpy as np
22
+ from tqdm import tqdm
23
+ import cv2
24
+ from rich.progress import track
25
+
26
+ from difpoint.croper import Croper
27
+ from PIL import Image
28
+ import time
29
+
30
+
31
+ import torch
32
+ import torch.nn.functional as F
33
+ from torch import nn
34
+ import imageio
35
+ from pydub import AudioSegment
36
+ from pykalman import KalmanFilter
37
+ import scipy
38
+ import matplotlib.pyplot as plt
39
+ import matplotlib
40
+ matplotlib.use('Agg')
41
+
42
+ from difpoint.dataset_process import audio
43
+ import os
44
+ import argparse
45
+ import pdb
46
+ import ffmpeg
47
+ import cv2
48
+ import time
49
+ import numpy as np
50
+ import os
51
+ import datetime
52
+ import platform
53
+ from omegaconf import OmegaConf
54
+ #from difpoint.src.pipelines.faster_live_portrait_pipeline import FasterLivePortraitPipeline
55
+ from difpoint.src.live_portrait_pipeline import LivePortraitPipeline
56
+ from difpoint.src.config.argument_config import ArgumentConfig
57
+ from difpoint.src.config.inference_config import InferenceConfig
58
+ from difpoint.src.config.crop_config import CropConfig
59
+ from difpoint.src.live_portrait_pipeline import LivePortraitPipeline
60
+ from difpoint.src.utils.retargeting_utils import calc_eye_close_ratio, calc_lip_close_ratio
61
+ from difpoint.src.utils.camera import get_rotation_matrix
62
+ from difpoint.src.utils.video import images2video, concat_frames, get_fps, add_audio_to_video, has_audio_stream
63
+
64
+
65
+ FFMPEG = "ffmpeg"
66
+
67
+ def parse_audio_length(audio_length, sr, fps):
68
+ bit_per_frames = sr / fps
69
+ num_frames = int(audio_length / bit_per_frames)
70
+ audio_length = int(num_frames * bit_per_frames)
71
+ return audio_length, num_frames
72
+
73
+ def crop_pad_audio(wav, audio_length):
74
+ if len(wav) > audio_length:
75
+ wav = wav[:audio_length]
76
+ elif len(wav) < audio_length:
77
+ wav = np.pad(wav, [0, audio_length - len(wav)], mode='constant', constant_values=0)
78
+ return wav
79
+
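
A quick worked example of the two audio helpers above, assuming they are in scope; at sr=16000 and fps=25 each video frame spans 640 audio samples:

```python
import numpy as np

# 51,360 samples -> int(51360 / 640) = 80 frames, trimmed to 80 * 640 = 51,200 samples.
audio_length, num_frames = parse_audio_length(51360, sr=16000, fps=25)
assert (audio_length, num_frames) == (51200, 80)
wav = crop_pad_audio(np.zeros(51360), audio_length)   # len(wav) == 51200
```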
80
+ class Conv2d(nn.Module):
81
+ def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, use_act=True, *args, **kwargs):
82
+ super().__init__(*args, **kwargs)
83
+ self.conv_block = nn.Sequential(
84
+ nn.Conv2d(cin, cout, kernel_size, stride, padding),
85
+ nn.BatchNorm2d(cout)
86
+ )
87
+ self.act = nn.ReLU()
88
+ self.residual = residual
89
+ self.use_act = use_act
90
+
91
+ def forward(self, x):
92
+ out = self.conv_block(x)
93
+ if self.residual:
94
+ out += x
95
+
96
+ if self.use_act:
97
+ return self.act(out)
98
+ else:
99
+ return out
100
+
101
+ class AudioEncoder(nn.Module):
102
+ def __init__(self, wav2lip_checkpoint, device):
103
+ super(AudioEncoder, self).__init__()
104
+
105
+ self.audio_encoder = nn.Sequential(
106
+ Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
107
+ Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),
108
+ Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),
109
+
110
+ Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1),
111
+ Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),
112
+ Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),
113
+
114
+ Conv2d(64, 128, kernel_size=3, stride=3, padding=1),
115
+ Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),
116
+ Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),
117
+
118
+ Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1),
119
+ Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True),
120
+
121
+ Conv2d(256, 512, kernel_size=3, stride=1, padding=0),
122
+ Conv2d(512, 512, kernel_size=1, stride=1, padding=0),)
123
+
124
+ #### load the pre-trained audio_encoder
125
+ wav2lip_state_dict = torch.load(wav2lip_checkpoint, map_location=torch.device(device))['state_dict']
126
+ state_dict = self.audio_encoder.state_dict()
127
+
128
+ for k,v in wav2lip_state_dict.items():
129
+ if 'audio_encoder' in k:
130
+ state_dict[k.replace('module.audio_encoder.', '')] = v
131
+ self.audio_encoder.load_state_dict(state_dict)
132
+
133
+ def forward(self, audio_sequences):
134
+ # audio_sequences = (B, T, 1, 80, 16)
135
+ B = audio_sequences.size(0)
136
+
137
+ audio_sequences = torch.cat([audio_sequences[:, i] for i in range(audio_sequences.size(1))], dim=0)
138
+
139
+ audio_embedding = self.audio_encoder(audio_sequences) # B, 512, 1, 1
140
+ dim = audio_embedding.shape[1]
141
+ audio_embedding = audio_embedding.reshape((B, -1, dim, 1, 1))
142
+
143
+ return audio_embedding.squeeze(-1).squeeze(-1) #B seq_len+1 512
144
+
145
+ def partial_fields(target_class, kwargs):
146
+ return target_class(**{k: v for k, v in kwargs.items() if hasattr(target_class, k)})
147
+
148
+ def dct2device(dct: dict, device):
149
+ for key in dct:
150
+ dct[key] = torch.tensor(dct[key]).to(device)
151
+ return dct
152
+
153
+ def save_video_with_watermark(video, audio, save_path, watermark=False):
154
+ temp_file = str(uuid.uuid4())+'.mp4'
155
+ cmd = r'ffmpeg -y -i "%s" -i "%s" -vcodec copy "%s"' % (video, audio, temp_file)
156
+ os.system(cmd)
157
+ shutil.move(temp_file, save_path)
158
+
159
+
160
+
161
+ class Inferencer(object):
162
+ def __init__(self):
163
+
164
+ st=time.time()
165
+ print('#'*25+'Start initialization'+'#'*25)
166
+ self.device = 'cuda'
167
+ from difpoint.model import get_model
168
+ self.point_diffusion = get_model()
169
+ ckpt = torch.load('./downloaded_repo/ckpts/KDTalker.pth', weights_only=False)
170
+
171
+ self.point_diffusion.load_state_dict(ckpt['model'])
172
+ print('model', self.point_diffusion.children())
173
+ self.point_diffusion.eval()
174
+ self.point_diffusion.to(self.device)
175
+
176
+ lm_croper_checkpoint = './downloaded_repo/ckpts/shape_predictor_68_face_landmarks.dat'
177
+ self.croper = Croper(lm_croper_checkpoint)
178
+
179
+ self.norm_info = dict(np.load(r'difpoint/datasets/norm_info_d6.5_c8.5_vox1_train.npz'))
180
+
181
+ wav2lip_checkpoint = './downloaded_repo/ckpts/wav2lip.pth'
182
+ self.wav2lip_model = AudioEncoder(wav2lip_checkpoint, 'cuda')
183
+ self.wav2lip_model.cuda()
184
+ self.wav2lip_model.eval()
185
+
186
+ args = tyro.cli(ArgumentConfig)
187
+
188
+ self.inf_cfg = partial_fields(InferenceConfig, args.__dict__) # use attribute of args to initial InferenceConfig
189
+ self.crop_cfg = partial_fields(CropConfig, args.__dict__) # use attribute of args to initial CropConfig
190
+
191
+ self.live_portrait_pipeline = LivePortraitPipeline(inference_cfg=self.inf_cfg, crop_cfg=self.crop_cfg)
192
+ print('#'*25+f'End initialization, cost time {time.time()-st}'+'#'*25)
193
+
194
+ def _norm(self, data_dict):
195
+ for k in data_dict.keys():
196
+ if k in ['yaw', 'pitch', 'roll', 't', 'exp', 'scale', 'kp', 'c_lip', 'c_eye']:
197
+ v=data_dict[k]
198
+ data_dict[k] = (v - self.norm_info[k+'_mean'])/self.norm_info[k+'_std']
199
+ return data_dict
200
+
201
+ def _denorm(self, data_dict):
202
+ for k in data_dict.keys():
203
+ if k in ['yaw', 'pitch', 'roll', 't', 'exp', 'scale', 'kp', 'c_lip', 'c_eye']:
204
+ v=data_dict[k]
205
+ data_dict[k] = v * self.norm_info[k+'_std'] + self.norm_info[k+'_mean']
206
+ return data_dict
207
+
208
+
209
+ def output_to_dict(self, data):
210
+ output = {}
211
+
212
+ output['scale'] = data[:, 0]
213
+ output['yaw'] = data[:, 1, None]
214
+ output['pitch'] = data[:, 2, None]
215
+ output['roll'] = data[:, 3, None]
216
+ output['t'] = data[:, 4:7]
217
+ output['exp'] = data[:, 7:]
218
+
219
+ return output
220
+
221
+ def extract_mel_from_audio(self, audio_file_path):
222
+ syncnet_mel_step_size = 16
223
+ fps = 25
224
+ wav = audio.load_wav(audio_file_path, 16000)
225
+ wav_length, num_frames = parse_audio_length(len(wav), 16000, 25)
226
+ wav = crop_pad_audio(wav, wav_length)
227
+ orig_mel = audio.melspectrogram(wav).T
228
+ spec = orig_mel.copy()
229
+ indiv_mels = []
230
+
231
+ for i in tqdm(range(num_frames), 'mel:'):
232
+ start_frame_num = i - 2
233
+ start_idx = int(80. * (start_frame_num / float(fps)))
234
+ end_idx = start_idx + syncnet_mel_step_size
235
+ seq = list(range(start_idx, end_idx))
236
+ seq = [min(max(item, 0), orig_mel.shape[0] - 1) for item in seq]
237
+ m = spec[seq, :]
238
+ indiv_mels.append(m.T)
239
+ indiv_mels = np.asarray(indiv_mels) # T 80 16
240
+ return indiv_mels
241
+
242
+ def extract_wav2lip_from_audio(self, audio_file_path):
243
+ asd_mel = self.extract_mel_from_audio(audio_file_path)
244
+ asd_mel = torch.FloatTensor(asd_mel).cuda().unsqueeze(0).unsqueeze(2)
245
+ with torch.no_grad():
246
+ hidden = self.wav2lip_model(asd_mel)
247
+ return hidden[0].cpu().detach().numpy()
248
+
249
+ def headpose_pred_to_degree(self, pred):
250
+ device = pred.device
251
+ idx_tensor = [idx for idx in range(66)]
252
+ idx_tensor = torch.FloatTensor(idx_tensor).to(device)
253
+ pred = F.softmax(pred, dim=1)
254
+ degree = torch.sum(pred * idx_tensor, 1) * 3 - 99
255
+ return degree
256
+
257
+ def calc_combined_eye_ratio(self, c_d_eyes_i, c_s_eyes):
258
+ c_s_eyes_tensor = torch.from_numpy(c_s_eyes).float().to(self.device)
259
+ c_d_eyes_i_tensor = c_d_eyes_i[0].reshape(1, 1).to(self.device)
260
+ # [c_s,eyes, c_d,eyes,i]
261
+ combined_eye_ratio_tensor = torch.cat([c_s_eyes_tensor, c_d_eyes_i_tensor], dim=1)
262
+ return combined_eye_ratio_tensor
263
+
264
+ def calc_combined_lip_ratio(self, c_d_lip_i, c_s_lip):
265
+ c_s_lip_tensor = torch.from_numpy(c_s_lip).float().to(self.device)
266
+ c_d_lip_i_tensor = c_d_lip_i[0].to(self.device).reshape(1, 1) # 1x1
267
+ # [c_s,lip, c_d,lip,i]
268
+ combined_lip_ratio_tensor = torch.cat([c_s_lip_tensor, c_d_lip_i_tensor], dim=1) # 1x2
269
+ return combined_lip_ratio_tensor
270
+
271
+ # 2024.06.26
272
+ @torch.no_grad()
273
+ def generate_with_audio_img(self, upload_audio_path, tts_audio_path, audio_type, image_path, smoothed_pitch, smoothed_yaw, smoothed_roll, smoothed_t, save_path='./downloaded_repo/'):
274
+ print(audio_type)
275
+ if audio_type == 'upload':
276
+ audio_path = upload_audio_path
277
+ elif audio_type == 'tts':
278
+ audio_path = tts_audio_path
279
+ save_path = os.path.join(save_path, "output.mp4")
280
+ image = [np.array(Image.open(image_path).convert('RGB'))]
281
+ if image[0].shape[0] != 256 or image[0].shape[1] != 256:
282
+ cropped_image, crop, quad = self.croper.crop(image, still=False, xsize=512)
283
+ input_image = cv2.resize(cropped_image[0], (256, 256))
284
+ else:
285
+ input_image = image[0]
286
+
287
+ I_s = torch.FloatTensor(input_image.transpose((2, 0, 1))).unsqueeze(0).cuda() / 255
288
+
289
+ x_s_info = self.live_portrait_pipeline.live_portrait_wrapper.get_kp_info(I_s)
290
+ x_c_s = x_s_info['kp'].reshape(1, 21, -1)
291
+ R_s = get_rotation_matrix(x_s_info['pitch'], x_s_info['yaw'], x_s_info['roll'])
292
+ f_s = self.live_portrait_pipeline.live_portrait_wrapper.extract_feature_3d(I_s)
293
+ x_s = self.live_portrait_pipeline.live_portrait_wrapper.transform_keypoint(x_s_info)
294
+
295
+ flag_lip_zero = self.inf_cfg.flag_lip_zero # not overwrite
296
+
297
+
298
+ ######## process driving info ########
299
+ kp_info = {}
300
+ for k in x_s_info.keys():
301
+ kp_info[k] = x_s_info[k].cpu().numpy()
302
+ # kp_info['c_lip'] = c_s_lip
303
+ # kp_info['c_eye'] = c_s_eye
304
+
305
+ kp_info = self._norm(kp_info)
306
+
307
+ ori_kp = torch.cat([torch.zeros([1, 7]), torch.Tensor(kp_info['kp'])], -1).cuda()
308
+
309
+ input_x = np.concatenate([kp_info[k] for k in ['scale', 'yaw', 'pitch', 'roll', 't']], 1)
310
+ input_x = np.concatenate((input_x, kp_info['exp'].reshape(1, 63)), axis=1)
311
+ input_x = np.expand_dims(input_x, -1)
312
+ input_x = np.expand_dims(input_x, 0)
313
+ input_x = np.concatenate([input_x, input_x, input_x], -1)
314
+
315
+ aud_feat = self.extract_wav2lip_from_audio(audio_path)
316
+
317
+ outputs = [input_x]
318
+
319
+ st = time.time()
320
+ print('#' * 25 + 'Start Inference' + '#' * 25)
321
+ sample_frame = 64 # frames per sampling window (other options: 32, aud_feat.shape[0])
322
+
323
+ for i in range(0, aud_feat.shape[0] - 1, sample_frame):
324
+ input_mel = torch.Tensor(aud_feat[i: i + sample_frame]).unsqueeze(0).cuda()
325
+ kp0 = torch.Tensor(outputs[-1])[:, -1].cuda()
326
+ pred_kp = self.point_diffusion.forward_sample(70, ref_kps=kp0, ori_kps=ori_kp, aud_feat=input_mel,
327
+ scheduler='ddim', num_inference_steps=50)
328
+ outputs.append(pred_kp.cpu().numpy())
329
+
330
+
331
+ outputs = np.mean(np.concatenate(outputs, 1)[0], -1)[1:, ]
332
+ output_dict = self.output_to_dict(outputs)
333
+ output_dict = self._denorm(output_dict)
334
+
335
+ num_frame = output_dict['yaw'].shape[0]
336
+ x_d_info = {}
337
+ for key in output_dict:
338
+ x_d_info[key] = torch.tensor(output_dict[key]).cuda()
339
+
340
+ # smooth
341
+ def smooth(sequence, n_dim_state=1):
342
+ kf = KalmanFilter(initial_state_mean=sequence[0],
343
+ transition_covariance=0.05 * np.eye(n_dim_state), # small process noise
344
+ observation_covariance=0.001 * np.eye(n_dim_state)) # observation noise can be increased to reduce sensitivity
345
+ state_means, _ = kf.smooth(sequence)
346
+ return state_means
347
+
348
+ # scale_data = x_d_info['scale'].cpu().numpy()
349
+ yaw_data = x_d_info['yaw'].cpu().numpy()
350
+ pitch_data = x_d_info['pitch'].cpu().numpy()
351
+ roll_data = x_d_info['roll'].cpu().numpy()
352
+ t_data = x_d_info['t'].cpu().numpy()
353
+ exp_data = x_d_info['exp'].cpu().numpy()
354
+
355
+ smoothed_pitch = smooth(pitch_data, n_dim_state=1) * smoothed_pitch
356
+ smoothed_yaw = smooth(yaw_data, n_dim_state=1) * smoothed_yaw
357
+ smoothed_roll = smooth(roll_data, n_dim_state=1) * smoothed_roll
358
+ # smoothed_scale = smooth(scale_data, n_dim_state=1)
359
+ smoothed_t = smooth(t_data, n_dim_state=3) * smoothed_t
360
+ smoothed_exp = smooth(exp_data, n_dim_state=63)
361
+
362
+ # x_d_info['scale'] = torch.Tensor(smoothed_scale).cuda()
363
+ x_d_info['pitch'] = torch.Tensor(smoothed_pitch).cuda()
364
+ x_d_info['yaw'] = torch.Tensor(smoothed_yaw).cuda()
365
+ x_d_info['roll'] = torch.Tensor(smoothed_roll).cuda()
366
+ x_d_info['t'] = torch.Tensor(smoothed_t).cuda()
367
+ x_d_info['exp'] = torch.Tensor(smoothed_exp).cuda()
368
+
369
+
370
+
371
+ template_dct = {'motion': [], 'c_d_eyes_lst': [], 'c_d_lip_lst': []}
372
+ for i in track(range(num_frame), description='Making motion templates...', total=num_frame):
373
+ # collect s_d, R_d, δ_d and t_d for inference
374
+ x_d_i_info = x_d_info
375
+ R_d_i = get_rotation_matrix(x_d_i_info['pitch'][i], x_d_i_info['yaw'][i], x_d_i_info['roll'][i])
376
+
377
+ item_dct = {
378
+ 'scale': x_d_i_info['scale'][i].cpu().numpy().astype(np.float32),
379
+ 'R_d': R_d_i.cpu().numpy().astype(np.float32),
380
+ 'exp': x_d_i_info['exp'][i].reshape(1, 21, -1).cpu().numpy().astype(np.float32),
381
+ 't': x_d_i_info['t'][i].cpu().numpy().astype(np.float32),
382
+ }
383
+
384
+ template_dct['motion'].append(item_dct)
385
+ # template_dct['c_d_eyes_lst'].append(x_d_i_info['c_eye'][i])
386
+ # template_dct['c_d_lip_lst'].append(x_d_i_info['c_lip'][i])
387
+
388
+ I_p_lst = []
389
+ R_d_0, x_d_0_info = None, None
390
+
391
+ for i in track(range(num_frame), description='Animating...', total=num_frame):
392
+ x_d_i_info = template_dct['motion'][i]
393
+
394
+ for key in x_d_i_info:
395
+ x_d_i_info[key] = torch.tensor(x_d_i_info[key]).cuda()
396
+
397
+ R_d_i = x_d_i_info['R_d']
398
+
399
+ if i == 0:
400
+ R_d_0 = R_d_i
401
+ x_d_0_info = x_d_i_info
402
+
403
+
404
+ if self.inf_cfg.flag_relative_motion:
405
+ R_new = (R_d_i @ R_d_0.permute(0, 2, 1)) @ R_s
406
+ delta_new = x_s_info['exp'].reshape(1, 21, -1) + (x_d_i_info['exp'] - x_d_0_info['exp'])
407
+ scale_new = x_s_info['scale'] * (x_d_i_info['scale'] / x_d_0_info['scale'])
408
+ t_new = x_s_info['t'] + (x_d_i_info['t'] - x_d_0_info['t'])
409
+ else:
410
+ R_new = R_d_i
411
+ delta_new = x_d_i_info['exp']
412
+ scale_new = x_s_info['scale']
413
+ t_new = x_d_i_info['t']
414
+ t_new[..., 2] = 0 # zero tz
415
+
416
+ x_d_i_new = scale_new * (x_c_s @ R_new + delta_new) + t_new
417
+
418
+
419
+ # Algorithm 1:
420
+ if not self.inf_cfg.flag_stitching and not self.inf_cfg.flag_eye_retargeting and not self.inf_cfg.flag_lip_retargeting:
421
+ # without stitching or retargeting
422
+ if flag_lip_zero:
423
+ x_d_i_new += lip_delta_before_animation.reshape(-1, x_s.shape[1], 3)
424
+ else:
425
+ pass
426
+ elif self.inf_cfg.flag_stitching and not self.inf_cfg.flag_eye_retargeting and not self.inf_cfg.flag_lip_retargeting:
427
+ # with stitching and without retargeting
428
+ if flag_lip_zero:
429
+ x_d_i_new = self.live_portrait_pipeline.live_portrait_wrapper.stitching(x_s, x_d_i_new) + lip_delta_before_animation.reshape(-1, x_s.shape[1], 3)
430
+ else:
431
+ x_d_i_new = self.live_portrait_pipeline.live_portrait_wrapper.stitching(x_s, x_d_i_new)
432
+ else:
433
+ eyes_delta, lip_delta = None, None
434
+
435
+ if self.inf_cfg.flag_relative_motion: # use x_s
436
+ x_d_i_new = x_s + \
437
+ (eyes_delta.reshape(-1, x_s.shape[1], 3) if eyes_delta is not None else 0) + \
438
+ (lip_delta.reshape(-1, x_s.shape[1], 3) if lip_delta is not None else 0)
439
+ else: # use x_d,i
440
+ x_d_i_new = x_d_i_new + \
441
+ (eyes_delta.reshape(-1, x_s.shape[1], 3) if eyes_delta is not None else 0) + \
442
+ (lip_delta.reshape(-1, x_s.shape[1], 3) if lip_delta is not None else 0)
443
+
444
+ if self.inf_cfg.flag_stitching:
445
+ x_d_i_new = self.live_portrait_pipeline.live_portrait_wrapper.stitching(x_s, x_d_i_new)
446
+
447
+
448
+ out = self.live_portrait_pipeline.live_portrait_wrapper.warp_decode(f_s, x_s, x_d_i_new)
449
+ I_p_i = self.live_portrait_pipeline.live_portrait_wrapper.parse_output(out['out'])[0]
450
+ I_p_lst.append(I_p_i)
451
+
452
+ video_name = os.path.basename(save_path)
453
+ video_save_dir = os.path.dirname(save_path)
454
+ path = os.path.join(video_save_dir, video_name)
455
+
456
+ imageio.mimsave(path, I_p_lst, fps=float(25))
457
+
458
+ audio_name = audio_path.split('/')[-1]
459
+ new_audio_path = os.path.join(video_save_dir, audio_name)
460
+ start_time = 0
461
+ # cog will not keep the .mp3 filename
462
+ sound = AudioSegment.from_file(audio_path)
463
+ end_time = start_time + num_frame * 1 / 25 * 1000
464
+ word1 = sound.set_frame_rate(16000)
465
+ word = word1[start_time:end_time]
466
+ word.export(new_audio_path, format="wav")
467
+
468
+ save_video_with_watermark(path, new_audio_path, save_path, watermark=False)
469
+ print(f'The generated video is named {video_save_dir}/{video_name}')
470
+
471
+ print('#' * 25 + f'End Inference, cost time {time.time() - st}' + '#' * 25)
472
+ return save_path
473
+
474
+
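
The per-frame mel windowing in `extract_mel_from_audio` above is the easiest place to lose track of the indexing: at 16 kHz the mel spectrogram runs at 80 frames per second, so each 25 fps video frame maps to 3.2 mel frames, and a 16-frame window is taken starting two video frames back, clamped to the valid range. The sketch below reproduces only that index arithmetic with a random stand-in spectrogram, so no audio libraries are needed; the constants are the ones visible in the method, everything else is illustrative.

```python
# Standalone sketch of the mel-window indexing used by extract_mel_from_audio above.
# The mel spectrogram is replaced by random data, so only the arithmetic is shown.
import numpy as np

syncnet_mel_step_size = 16   # mel frames per window
fps = 25                     # video frame rate
num_frames = 10              # pretend the clip has 10 video frames
orig_mel = np.random.randn(int(80 * num_frames / fps), 80)  # (T_mel, 80) stand-in

indiv_mels = []
for i in range(num_frames):
    start_frame_num = i - 2                                 # window starts ~2 video frames back
    start_idx = int(80.0 * (start_frame_num / float(fps)))  # 3.2 mel frames per video frame
    end_idx = start_idx + syncnet_mel_step_size
    seq = [min(max(idx, 0), orig_mel.shape[0] - 1) for idx in range(start_idx, end_idx)]
    indiv_mels.append(orig_mel[seq, :].T)                   # (80, 16) per video frame

indiv_mels = np.asarray(indiv_mels)
print(indiv_mels.shape)  # (10, 80, 16): T x n_mels x window
```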
difpoint/model/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ from .model import ConditionalPointCloudDiffusionModel
2
+
3
+ def get_model():
4
+ model = ConditionalPointCloudDiffusionModel()
5
+ return model
6
+
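
A minimal usage sketch of the factory above, assuming the `difpoint` package is importable and the repo's dependencies (diffusers, rotary-embedding-torch, einops-exts) are installed; the parameter-count print is purely illustrative.

```python
# Hypothetical usage of get_model(); everything beyond the factory call is illustrative.
from difpoint.model import get_model

model = get_model().eval()  # ConditionalPointCloudDiffusionModel with default settings
print(sum(p.numel() for p in model.parameters()), 'parameters')
```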
difpoint/model/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (325 Bytes). View file
 
difpoint/model/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (328 Bytes). View file
 
difpoint/model/__pycache__/model.cpython-310.pyc ADDED
Binary file (5.33 kB). View file
 
difpoint/model/__pycache__/model.cpython-38.pyc ADDED
Binary file (5.25 kB). View file
 
difpoint/model/__pycache__/model_utils.cpython-310.pyc ADDED
Binary file (1.69 kB). View file
 
difpoint/model/__pycache__/model_utils.cpython-38.pyc ADDED
Binary file (1.7 kB). View file
 
difpoint/model/__pycache__/point_model.cpython-310.pyc ADDED
Binary file (1.78 kB). View file
 
difpoint/model/__pycache__/point_model.cpython-38.pyc ADDED
Binary file (1.73 kB). View file
 
difpoint/model/model.py ADDED
@@ -0,0 +1,409 @@
1
+ import inspect
2
+ from typing import Optional
3
+ from einops import rearrange
4
+ import torch
5
+ import torch.nn.functional as F
6
+ from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
7
+ from diffusers.schedulers.scheduling_ddim import DDIMScheduler
8
+ from diffusers.schedulers.scheduling_pndm import PNDMScheduler
9
+
10
+ from torch import Tensor
11
+ from tqdm import tqdm
12
+ from diffusers import ModelMixin
13
+ from .model_utils import get_custom_betas
14
+ from .point_model import PointModel
15
+
16
+ import copy
17
+ class ConditionalPointCloudDiffusionModel(ModelMixin):
18
+ def __init__(
19
+ self,
20
+ beta_start: float = 1e-5,
21
+ beta_end: float = 8e-3,
22
+ beta_schedule: str = 'linear',
23
+ point_cloud_model: str = 'simple',
24
+ point_cloud_model_embed_dim: int = 64,
25
+ ):
26
+ super().__init__()
27
+ self.in_channels = 70 # 3 for 3D point positions
28
+ self.out_channels = 70
29
+
30
+ # Checks
31
+ # Create diffusion model schedulers which define the sampling timesteps
32
+ scheduler_kwargs = {}
33
+ if beta_schedule == 'custom':
34
+ scheduler_kwargs.update(dict(trained_betas=get_custom_betas(beta_start=beta_start, beta_end=beta_end)))
35
+ else:
36
+ scheduler_kwargs.update(dict(beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule))
37
+ self.schedulers_map = {
38
+ 'ddpm': DDPMScheduler(**scheduler_kwargs, clip_sample=False),
39
+ 'ddim': DDIMScheduler(**scheduler_kwargs, clip_sample=False),
40
+ 'pndm': PNDMScheduler(**scheduler_kwargs),
41
+ }
42
+ self.scheduler = self.schedulers_map['ddim'] # this can be changed for inference
43
+
44
+ # Create point cloud model for processing point cloud at each diffusion step
45
+ self.point_model = PointModel(
46
+ model_type=point_cloud_model,
47
+ embed_dim=point_cloud_model_embed_dim,
48
+ in_channels=self.in_channels,
49
+ out_channels=self.out_channels,
50
+ )
51
+
52
+ def forward_train(
53
+ self,
54
+ pc: Optional[Tensor],
55
+ ref_kps: Optional[Tensor],
56
+ ori_kps: Optional[Tensor],
57
+ aud_feat: Optional[Tensor],
58
+ mode: str = 'train',
59
+ return_intermediate_steps: bool = False
60
+ ):
61
+
62
+ # Normalize colors and convert to tensor
63
+ x_0 = pc
64
+ B, Nf, Np, D = x_0.shape # batch, number of frames, number of points, 3
65
+
66
+
67
+ x_0 = x_0[:, :, :, 0] # batch, number of frames, 70
68
+
69
+ # Sample random noise
70
+ noise = torch.randn_like(x_0)
71
+
72
+ # Sample random timesteps for each point_cloud
73
+ timestep = torch.randint(0, self.scheduler.num_train_timesteps, (B,),
74
+ device=self.device, dtype=torch.long)
75
+
76
+ # Add noise to points
77
+ x_t = self.scheduler.add_noise(x_0, noise, timestep)
78
+
79
+ # Conditioning
80
+ ref_kps = ref_kps[:, :, 0]
81
+
82
+ x_t_input = torch.cat([ori_kps.unsqueeze(1), ref_kps.unsqueeze(1), x_t], dim=1)
83
+
84
+ # x_t_input = torch.cat([ref_kps.unsqueeze(1), x_t], dim=1)
85
+
86
+ # ori_kps_repeat = torch.repeat_interleave(ori_kps.unsqueeze(1), repeats=Nf+1, dim=1)
87
+
88
+ # x_t_input = torch.cat([x_t_input, ori_kps_repeat], dim=-1) #B, 32+1, 51+45
89
+
90
+
91
+ aud_feat = torch.cat([torch.zeros(B, 2, 512).cuda(), aud_feat], 1)
92
+
93
+ # Augmentation for audio feature
94
+ if mode in 'train':
95
+ if torch.rand(1) > 0.3:
96
+ mean = torch.mean(aud_feat)
97
+ std = torch.std(aud_feat)
98
+ sample = torch.normal(mean=torch.full(aud_feat.shape, mean), std=torch.full(aud_feat.shape, std)).cuda()
99
+ aud_feat = sample + aud_feat
100
+ else:
101
+ pass
102
+ else:
103
+ pass
104
+
105
+ # Forward
106
+ noise_pred = self.point_model(x_t_input, timestep, context=aud_feat) #torch.cat([mel_feat,style_embed],-1))
107
+ noise_pred = noise_pred[:, 2:]
108
+ #
109
+ # Check
110
+ if not noise_pred.shape == noise.shape:
111
+ raise ValueError(f'{noise_pred.shape=} and {noise.shape=}')
112
+
113
+ # Loss
114
+ loss = F.mse_loss(noise_pred, noise)
115
+
116
+ loss_pose = F.mse_loss(noise_pred[:, :, :6], noise[:, :, :6])
117
+ loss_exp = F.mse_loss(noise_pred[:, :, 6:], noise[:, :, 6:])
118
+
119
+
120
+ # Whether to return intermediate steps
121
+ if return_intermediate_steps:
122
+ return loss, (x_0, x_t, noise, noise_pred)
123
+
124
+ return loss, loss_exp, loss_pose
125
+
126
+ # def forward_train(
127
+ # self,
128
+ # pc: Optional[Tensor],
129
+ # ref_kps: Optional[Tensor],
130
+ # ori_kps: Optional[Tensor],
131
+ # aud_feat: Optional[Tensor],
132
+ # mode: str = 'train',
133
+ # return_intermediate_steps: bool = False
134
+ # ):
135
+ #
136
+ # # Normalize colors and convert to tensor
137
+ # x_0 = pc
138
+ # B, Nf, Np, D = x_0.shape# batch, nums of frames, nums of points, 3
139
+ #
140
+ # # ori_kps = torch.repeat_interleave(ori_kps.unsqueeze(1), Nf, dim=1) # B, Nf, 45
141
+ # #
142
+ # # ref_kps = ref_kps[:, :, 0]
143
+ # # ref_kps = torch.repeat_interleave(ref_kps.unsqueeze(1), Nf, dim=1) # B, Nf, 91
144
+ #
145
+ # x_0 = x_0[:,:,:,0]
146
+ #
147
+ # # Sample random noise
148
+ # noise = torch.randn_like(x_0)
149
+ #
150
+ # # Sample random timesteps for each point_cloud
151
+ # timestep = torch.randint(0, self.scheduler.num_train_timesteps, (B,),
152
+ # device=self.device, dtype=torch.long)
153
+ #
154
+ # # Add noise to points
155
+ # x_t = self.scheduler.add_noise(x_0, noise, timestep)
156
+ #
157
+ # # Conditioning
158
+ # ref_kps = ref_kps[:,:,0]
159
+ #
160
+ # # x_t_input = torch.cat([ref_kps.unsqueeze(1), x_t], dim=1)
161
+ #
162
+ # # x_0 = torch.cat([x_0, ref_kps, ori_kps], dim=2) # B, Nf, 91+91+45
163
+ #
164
+ # x_t_input = torch.cat([ref_kps.unsqueeze(1), x_t], dim=1)
165
+ # # x_t_input = torch.cat([ori_kps.unsqueeze(1), ref_kps.unsqueeze(1), x_t], dim=1)
166
+ #
167
+ # aud_feat = torch.cat([torch.zeros(B, 1, 512).cuda(), aud_feat], 1)
168
+ #
169
+ # # Augmentation for audio feature
170
+ # if mode in 'train':
171
+ # if torch.rand(1) > 0.3:
172
+ # mean = torch.mean(aud_feat)
173
+ # std = torch.std(aud_feat)
174
+ # sample = torch.normal(mean=torch.full(aud_feat.shape, mean), std=torch.full(aud_feat.shape, std)).cuda()
175
+ # aud_feat = sample + aud_feat
176
+ # else:
177
+ # pass
178
+ # else:
179
+ # pass
180
+ #
181
+ # # Forward
182
+ # noise_pred = self.point_model(x_t_input, timestep, context=aud_feat)
183
+ # noise_pred = noise_pred[:, 1:]
184
+ #
185
+ # # Check
186
+ # # if not noise_pred.shape == noise.shape:
187
+ # # raise ValueError(f'{noise_pred.shape=} and {noise.shape=}')
188
+ #
189
+ # # Loss
190
+ # loss = F.mse_loss(noise_pred, noise)
191
+ #
192
+ # # loss_kp = F.mse_loss(noise_pred[:, :, :45], noise[:, :, :45])
193
+ #
194
+ # # Whether to return intermediate steps
195
+ # if return_intermediate_steps:
196
+ # return loss, (x_0, x_t, noise, noise_pred)
197
+ #
198
+ # return loss
199
+
200
+ # @torch.no_grad()
201
+ # def forward_sample(
202
+ # self,
203
+ # num_points: int,
204
+ # ref_kps: Optional[Tensor],
205
+ # ori_kps: Optional[Tensor],
206
+ # aud_feat: Optional[Tensor],
207
+ # # Optional overrides
208
+ # scheduler: Optional[str] = 'ddpm',
209
+ # # Inference parameters
210
+ # num_inference_steps: Optional[int] = 1000,
211
+ # eta: Optional[float] = 0.0, # for DDIM
212
+ # # Whether to return all the intermediate steps in generation
213
+ # return_sample_every_n_steps: int = -1,
214
+ # # Whether to disable tqdm
215
+ # disable_tqdm: bool = False,
216
+ # ):
217
+ #
218
+ # # Get scheduler from mapping, or use self.scheduler if None
219
+ # scheduler = self.scheduler if scheduler is None else self.schedulers_map[scheduler]
220
+ #
221
+ # # Get the size of the noise
222
+ # Np = num_points
223
+ # Nf = aud_feat.size(1)
224
+ # B = 1
225
+ # D = 3
226
+ # device = self.device
227
+ #
228
+ # # Sample noise
229
+ # x_t = torch.randn(B, Nf, Np, D, device=device)
230
+ #
231
+ # x_t = x_t[:, :, :, 0]
232
+ #
233
+ # # ori_kps = torch.repeat_interleave(ori_kps.unsqueeze(1), Nf, dim=1) # B, Nf, 45
234
+ #
235
+ # ref_kps = ref_kps[:, :, 0]
236
+ # # ref_kps = torch.repeat_interleave(ref_kps.unsqueeze(1), Nf, dim=1) # B, Nf, 91
237
+ #
238
+ # # Set timesteps
239
+ # accepts_offset = "offset" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
240
+ # extra_set_kwargs = {"offset": 1} if accepts_offset else {}
241
+ # scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
242
+ #
243
+ # # Prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
244
+ # # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
245
+ # # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
246
+ # # and should be between [0, 1]
247
+ # accepts_eta = "eta" in set(inspect.signature(scheduler.step).parameters.keys())
248
+ # extra_step_kwargs = {"eta": eta} if accepts_eta else {}
249
+ #
250
+ # # Loop over timesteps
251
+ # all_outputs = []
252
+ # return_all_outputs = (return_sample_every_n_steps > 0)
253
+ # progress_bar = tqdm(scheduler.timesteps.to(device), desc=f'Sampling ({x_t.shape})', disable=disable_tqdm)
254
+ #
255
+ # # ori_kps = torch.repeat_interleave(ori_kps[:, 6:].unsqueeze(1), Nf + 1, dim=1)
256
+ # aud_feat = torch.cat([torch.zeros(B, 1, 512).cuda(), aud_feat], 1)
257
+ # # aud_feat = torch.cat([ori_kps, aud_feat], -1)
258
+ #
259
+ # # aud_feat = torch.cat([torch.zeros(B, 2, 512).cuda(), aud_feat], 1)
260
+ #
261
+ # for i, t in enumerate(progress_bar):
262
+ #
263
+ # # Conditioning
264
+ # x_t_input = torch.cat([ref_kps.unsqueeze(1).detach(), x_t], dim=1)
265
+ # # x_t_input = torch.cat([ori_kps.unsqueeze(1).detach(), ref_kps.unsqueeze(1).detach(), x_t], dim=1)
266
+ # # x_t_input = torch.cat([x_t, ref_kps, ori_kps], dim=2) # B, Nf, 91+91+45
267
+ #
268
+ # # Forward
269
+ # # noise_pred = self.point_model(x_t_input, t.reshape(1).expand(B), context=aud_feat)[:, 1:]
270
+ # noise_pred = self.point_model(x_t_input, t.reshape(1).expand(B), context=aud_feat)[:, 1:]
271
+ #
272
+ # # noise_pred = noise_pred[:, :, :51]
273
+ #
274
+ # # Step
275
+ # # x_t = x_t[:, :, :51]
276
+ # x_t = scheduler.step(noise_pred, t, x_t, **extra_step_kwargs).prev_sample
277
+ #
278
+ # # Append to output list if desired
279
+ # if (return_all_outputs and (i % return_sample_every_n_steps == 0 or i == len(scheduler.timesteps) - 1)):
280
+ # all_outputs.append(x_t)
281
+ #
282
+ # # Convert output back into a point cloud, undoing normalization and scaling
283
+ # output = x_t
284
+ # output = torch.stack([output, output, output], -1)
285
+ # if return_all_outputs:
286
+ # all_outputs = torch.stack(all_outputs, dim=1) # (B, sample_steps, N, D)
287
+ # return (output, all_outputs) if return_all_outputs else output
288
+
289
+
290
+ @torch.no_grad()
291
+ def forward_sample(
292
+ self,
293
+ num_points: int,
294
+ ref_kps: Optional[Tensor],
295
+ ori_kps: Optional[Tensor],
296
+ aud_feat: Optional[Tensor],
297
+ # Optional overrides
298
+ scheduler: Optional[str] = 'ddpm',
299
+ # Inference parameters
300
+ num_inference_steps: Optional[int] = 1000,
301
+ eta: Optional[float] = 0.0, # for DDIM
302
+ # Whether to return all the intermediate steps in generation
303
+ return_sample_every_n_steps: int = -1,
304
+ # Whether to disable tqdm
305
+ disable_tqdm: bool = False,
306
+ ):
307
+
308
+ # Get scheduler from mapping, or use self.scheduler if None
309
+ scheduler = self.scheduler if scheduler is None else self.schedulers_map[scheduler]
310
+
311
+ # Get the size of the noise
312
+ Np = num_points
313
+ Nf = aud_feat.size(1)
314
+ B = 1
315
+ D = 3
316
+ device = self.device
317
+
318
+ # Sample noise
319
+ x_t = torch.randn(B, Nf, Np, D, device=device)
320
+
321
+ x_t = x_t[:, :, :, 0]
322
+
323
+ ref_kps = ref_kps[:,:,0]
324
+
325
+ # Set timesteps
326
+ accepts_offset = "offset" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
327
+ extra_set_kwargs = {"offset": 1} if accepts_offset else {}
328
+ scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
329
+
330
+ # Prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
331
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
332
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
333
+ # and should be between [0, 1]
334
+ accepts_eta = "eta" in set(inspect.signature(scheduler.step).parameters.keys())
335
+ extra_step_kwargs = {"eta": eta} if accepts_eta else {}
336
+
337
+ # Loop over timesteps
338
+ all_outputs = []
339
+ return_all_outputs = (return_sample_every_n_steps > 0)
340
+ progress_bar = tqdm(scheduler.timesteps.to(device), desc=f'Sampling ({x_t.shape})', disable=disable_tqdm)
341
+
342
+ # ori_kps = torch.repeat_interleave(ori_kps[:, 6:].unsqueeze(1), Nf + 1, dim=1)
343
+ # aud_feat = torch.cat([torch.zeros(B, 1, 512).cuda(), aud_feat], 1)
344
+ # aud_feat = torch.cat([ori_kps, aud_feat], -1)
345
+
346
+ aud_feat = torch.cat([torch.zeros(B, 2, 512).cuda(), aud_feat], 1)
347
+
348
+ for i, t in enumerate(progress_bar):
349
+
350
+ # Conditioning
351
+ # x_t_input = torch.cat([ref_kps.unsqueeze(1), x_t], dim=1)
352
+ #
353
+ # ori_kps_repeat = torch.repeat_interleave(ori_kps.unsqueeze(1), repeats=Nf + 1, dim=1)
354
+ #
355
+ # x_t_input = torch.cat([x_t_input.detach(), ori_kps_repeat.detach()], dim=-1) # B, 32+1, 51+45
356
+
357
+
358
+ x_t_input = torch.cat([ori_kps.unsqueeze(1).detach(),ref_kps.unsqueeze(1).detach(), x_t], dim=1)
359
+ # x_t_input = torch.cat([ref_kps.unsqueeze(1).detach(), x_t], dim=1)
360
+
361
+
362
+ # Forward
363
+ # noise_pred = self.point_model(x_t_input, t.reshape(1).expand(B), context=aud_feat)[:, 1:]
364
+ noise_pred = self.point_model(x_t_input, t.reshape(1).expand(B), context=aud_feat)[:, 2:]
365
+
366
+
367
+ # Step
368
+ x_t = scheduler.step(noise_pred, t, x_t, **extra_step_kwargs).prev_sample
369
+
370
+ # Append to output list if desired
371
+ if (return_all_outputs and (i % return_sample_every_n_steps == 0 or i == len(scheduler.timesteps) - 1)):
372
+ all_outputs.append(x_t)
373
+
374
+ # Convert output back into a point cloud, undoing normalization and scaling
375
+ output = x_t
376
+ output = torch.stack([output,output,output],-1)
377
+ if return_all_outputs:
378
+ all_outputs = torch.stack(all_outputs, dim=1) # (B, sample_steps, N, D)
379
+ return (output, all_outputs) if return_all_outputs else output
380
+
381
+ def forward(self, batch: dict, mode: str = 'train', **kwargs):
382
+ """A wrapper around the forward method for training and inference"""
383
+
384
+ if mode == 'train':
385
+ return self.forward_train(
386
+ pc=batch['sequence_keypoints'],
387
+ ref_kps=batch['ref_keypoint'],
388
+ ori_kps=batch['ori_keypoint'],
389
+ aud_feat=batch['aud_feat'],
390
+ mode='train',
391
+ **kwargs)
392
+ elif mode == 'val':
393
+ return self.forward_train(
394
+ pc=batch['sequence_keypoints'],
395
+ ref_kps=batch['ref_keypoint'],
396
+ ori_kps=batch['ori_keypoint'],
397
+ aud_feat=batch['aud_feat'],
398
+ mode='val',
399
+ **kwargs)
400
+ elif mode == 'sample':
401
+ num_points = 68
402
+ return self.forward_sample(
403
+ num_points=num_points,
404
+ ref_kps=batch['ref_keypoint'],
405
+ ori_kps=batch['ori_keypoint'],
406
+ aud_feat=batch['aud_feat'],
407
+ **kwargs)
408
+ else:
409
+ raise NotImplementedError()
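
For orientation, here is a shape-level sketch of `forward_sample`, fed with dummy tensors matching what the inference script passes in (batch 1, 70-dimensional motion vectors, 512-dimensional wav2lip features). It assumes a CUDA device, since `forward_sample` hard-codes `.cuda()` on the audio padding, and it uses randomly initialised weights, so the output is only meaningful for checking shapes.

```python
# Shape-level sketch of forward_sample with dummy inputs; weights are random.
import torch
from difpoint.model import get_model

model = get_model().eval().cuda()

Nf = 64                                    # frames per sampling window
ref_kps = torch.randn(1, 70, 3).cuda()     # last predicted frame, values repeated on the last axis
ori_kps = torch.randn(1, 70).cuda()        # zeros(1, 7) + canonical keypoints (1, 63) in practice
aud_feat = torch.randn(1, Nf, 512).cuda()  # wav2lip hidden features

with torch.no_grad():
    out = model.forward_sample(70, ref_kps=ref_kps, ori_kps=ori_kps, aud_feat=aud_feat,
                               scheduler='ddim', num_inference_steps=50)
print(out.shape)  # (1, Nf, 70, 3): the 70-dim motion vector stacked three times
```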
difpoint/model/model_utils.py ADDED
@@ -0,0 +1,35 @@
1
+ import cv2
2
+ import numpy as np
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+
7
+
8
+ def set_requires_grad(module: nn.Module, requires_grad: bool):
9
+ for p in module.parameters():
10
+ p.requires_grad_(requires_grad)
11
+
12
+
13
+ def compute_distance_transform(mask: torch.Tensor):
14
+ image_size = mask.shape[-1]
15
+ distance_transform = torch.stack([
16
+ torch.from_numpy(cv2.distanceTransform(
17
+ (1 - m), distanceType=cv2.DIST_L2, maskSize=cv2.DIST_MASK_3
18
+ ) / (image_size / 2))
19
+ for m in mask.squeeze(1).detach().cpu().numpy().astype(np.uint8)
20
+ ]).unsqueeze(1).clip(0, 1).to(mask.device)
21
+ return distance_transform
22
+
23
+
24
+ def default(x, d):
25
+ return d if x is None else x
26
+
27
+ def get_custom_betas(beta_start: float, beta_end: float, warmup_frac: float = 0.3, num_train_timesteps: int = 1000):
28
+ """Custom beta schedule"""
29
+ betas = np.linspace(beta_start, beta_end, num_train_timesteps, dtype=np.float32)
30
+ warmup_frac = 0.3 # note: overrides the warmup_frac argument with its default value
31
+ warmup_time = int(num_train_timesteps * warmup_frac)
32
+ warmup_steps = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64)
33
+ warmup_time = min(warmup_time, num_train_timesteps)
34
+ betas[:warmup_time] = warmup_steps[:warmup_time]
35
+ return betas
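
The warmup behaviour of `get_custom_betas` is easier to see numerically: the first `warmup_frac` of the timesteps ramp all the way from `beta_start` to `beta_end`, after which the schedule falls back onto the plain linear ramp. The sketch below restates the function (minus the redundant reassignment noted above) so it runs standalone, and prints the boundary values using the defaults from `ConditionalPointCloudDiffusionModel` (1e-5 to 8e-3 over 1000 steps); those defaults come from `model.py`, not from this file.

```python
# Numeric look at the custom warmup beta schedule; restated here so it runs standalone.
import numpy as np

def get_custom_betas(beta_start, beta_end, warmup_frac=0.3, num_train_timesteps=1000):
    betas = np.linspace(beta_start, beta_end, num_train_timesteps, dtype=np.float32)
    warmup_time = int(num_train_timesteps * warmup_frac)
    warmup_steps = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64)
    betas[:warmup_time] = warmup_steps[:warmup_time]  # fast ramp over the first 30% of steps
    return betas

betas = get_custom_betas(1e-5, 8e-3)
print(betas[0], betas[299])    # warmup ramp: 1e-05 ... 0.008
print(betas[300], betas[999])  # then back to ~0.0024 on the plain linear ramp up to 0.008
```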
difpoint/model/point_model.py ADDED
@@ -0,0 +1,38 @@
1
+ import torch
2
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
3
+ from diffusers import ModelMixin
4
+ from torch import Tensor
5
+
6
+ from .temporaltrans.temptrans import SimpleTemperalPointModel, SimpleTransModel
7
+
8
+ class PointModel(ModelMixin, ConfigMixin):
9
+ @register_to_config
10
+ def __init__(
11
+ self,
12
+ model_type: str = 'pvcnn',
13
+ in_channels: int = 3,
14
+ out_channels: int = 3,
15
+ embed_dim: int = 64,
16
+ dropout: float = 0.1,
17
+ width_multiplier: int = 1,
18
+ voxel_resolution_multiplier: int = 1,
19
+ ):
20
+ super().__init__()
21
+ self.model_type = model_type
22
+ if self.model_type == 'simple':
23
+ self.autocast_context = torch.autocast('cuda', dtype=torch.float32)
24
+ self.model = SimpleTransModel(
25
+ embed_dim=embed_dim,
26
+ num_classes=out_channels,
27
+ extra_feature_channels=(in_channels - 3),
28
+ )
29
+ self.model.output_projection.bias.data.normal_(0, 1e-6)
30
+ self.model.output_projection.weight.data.normal_(0, 1e-6)
31
+ else:
32
+ raise NotImplementedError()
33
+
34
+ def forward(self, inputs: Tensor, t: Tensor, context=None) -> Tensor:
35
+ """ Receives input of shape (B, N, in_channels) and returns output
36
+ of shape (B, N, out_channels) """
37
+ with self.autocast_context:
38
+ return self.model(inputs, t, context)
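
`PointModel.forward` takes packed per-frame vectors of shape `(B, N, in_channels)` plus a `(B,)` timestep and an optional `(B, N, 512)` context, and returns `(B, N, out_channels)`. The stand-in module below only mirrors that interface so the shape contract can be checked without installing rotary-embedding-torch or einops-exts; it is not the real `SimpleTransModel`.

```python
# Stand-in with the same (inputs, t, context) interface as PointModel.forward above;
# only the tensor shapes are meant to match the real model.
import torch
import torch.nn as nn

class DummyPointModel(nn.Module):
    def __init__(self, in_channels=70, out_channels=70, context_dim=512):
        super().__init__()
        self.proj = nn.Linear(in_channels + context_dim + 1, out_channels)

    def forward(self, inputs, t, context=None):
        # broadcast the scalar timestep and concatenate the per-frame context
        t = t.float()[:, None, None].expand(-1, inputs.size(1), 1)
        return self.proj(torch.cat([inputs, context, t], dim=-1))

B, N = 1, 66                      # N = 2 conditioning frames + 64 noisy frames
x = torch.randn(B, N, 70)
timestep = torch.randint(0, 1000, (B,))
context = torch.randn(B, N, 512)  # padded wav2lip features
print(DummyPointModel()(x, timestep, context).shape)  # torch.Size([1, 66, 70])
```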
difpoint/model/temporaltrans/__pycache__/temptrans.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
difpoint/model/temporaltrans/__pycache__/temptrans.cpython-38.pyc ADDED
Binary file (11.1 kB). View file
 
difpoint/model/temporaltrans/__pycache__/transformer_utils.cpython-310.pyc ADDED
Binary file (5.09 kB). View file
 
difpoint/model/temporaltrans/__pycache__/transformer_utils.cpython-38.pyc ADDED
Binary file (5.09 kB). View file
 
difpoint/model/temporaltrans/pointnet_util.py ADDED
@@ -0,0 +1,311 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ from time import time
5
+ import numpy as np
6
+
7
+
8
+ # reference https://github.com/yanx27/Pointnet_Pointnet2_pytorch, modified by Yang You
9
+
10
+
11
+ def timeit(tag, t):
12
+ print("{}: {}s".format(tag, time() - t))
13
+ return time()
14
+
15
+ def pc_normalize(pc):
16
+ centroid = np.mean(pc, axis=0)
17
+ pc = pc - centroid
18
+ m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
19
+ pc = pc / m
20
+ return pc
21
+
22
+ def square_distance(src, dst):
23
+ """
24
+ Calculate Euclid distance between each two points.
25
+ src^T * dst = xn * xm + yn * ym + zn * zm;
26
+ sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
27
+ sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
28
+ dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
29
+ = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
30
+ Input:
31
+ src: source points, [B, N, C]
32
+ dst: target points, [B, M, C]
33
+ Output:
34
+ dist: per-point square distance, [B, N, M]
35
+ """
36
+ return torch.sum((src[:, :, None] - dst[:, None]) ** 2, dim=-1)
37
+
38
+
39
+ def index_points(points, idx):
40
+ """
41
+ Input:
42
+ points: input points data, [B, N, C]
43
+ idx: sample index data, [B, S, [K]]
44
+ Return:
45
+ new_points:, indexed points data, [B, S, [K], C]
46
+ """
47
+ raw_size = idx.size()
48
+ idx = idx.reshape(raw_size[0], -1)
49
+ res = torch.gather(points, 1, idx[..., None].expand(-1, -1, points.size(-1)))
50
+ return res.reshape(*raw_size, -1)
51
+
52
+
53
+ def farthest_point_sample(xyz, npoint):
54
+ """
55
+ Input:
56
+ xyz: pointcloud data, [B, N, 3]
57
+ npoint: number of samples
58
+ Return:
59
+ centroids: sampled pointcloud index, [B, npoint]
60
+ """
61
+ device = xyz.device
62
+ B, N, C = xyz.shape
63
+ centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
64
+ distance = torch.ones(B, N).to(device) * 1e10
65
+ farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
66
+ batch_indices = torch.arange(B, dtype=torch.long).to(device)
67
+ for i in range(npoint):
68
+ centroids[:, i] = farthest
69
+ centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
70
+ dist = torch.sum((xyz - centroid) ** 2, -1)
71
+ distance = torch.min(distance, dist)
72
+ farthest = torch.max(distance, -1)[1]
73
+ return centroids
74
+
75
+
76
+ def query_ball_point(radius, nsample, xyz, new_xyz):
77
+ """
78
+ Input:
79
+ radius: local region radius
80
+ nsample: max sample number in local region
81
+ xyz: all points, [B, N, 3]
82
+ new_xyz: query points, [B, S, 3]
83
+ Return:
84
+ group_idx: grouped points index, [B, S, nsample]
85
+ """
86
+ device = xyz.device
87
+ B, N, C = xyz.shape
88
+ _, S, _ = new_xyz.shape
89
+ group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
90
+ sqrdists = square_distance(new_xyz, xyz)
91
+ group_idx[sqrdists > radius ** 2] = N
92
+ group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
93
+ group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
94
+ mask = group_idx == N
95
+ group_idx[mask] = group_first[mask]
96
+ return group_idx
97
+
98
+
99
+ def sample_and_group(npoint, radius, nsample, xyz, points, returnfps=False, knn=False):
100
+ """
101
+ Input:
102
+ npoint:
103
+ radius:
104
+ nsample:
105
+ xyz: input points position data, [B, N, 3]
106
+ points: input points data, [B, N, D]
107
+ Return:
108
+ new_xyz: sampled points position data, [B, npoint, nsample, 3]
109
+ new_points: sampled points data, [B, npoint, nsample, 3+D]
110
+ """
111
+ B, N, C = xyz.shape
112
+ S = npoint
113
+ fps_idx = farthest_point_sample(xyz, npoint) # [B, npoint]
114
+ torch.cuda.empty_cache()
115
+ new_xyz = index_points(xyz, fps_idx)
116
+ torch.cuda.empty_cache()
117
+ if knn:
118
+ dists = square_distance(new_xyz, xyz) # B x npoint x N
119
+ idx = dists.argsort()[:, :, :nsample] # B x npoint x K
120
+ else:
121
+ idx = query_ball_point(radius, nsample, xyz, new_xyz)
122
+ torch.cuda.empty_cache()
123
+ grouped_xyz = index_points(xyz, idx) # [B, npoint, nsample, C]
124
+ torch.cuda.empty_cache()
125
+ grouped_xyz_norm = grouped_xyz - new_xyz.view(B, S, 1, C)
126
+ torch.cuda.empty_cache()
127
+
128
+ if points is not None:
129
+ grouped_points = index_points(points, idx)
130
+ new_points = torch.cat([grouped_xyz_norm, grouped_points], dim=-1) # [B, npoint, nsample, C+D]
131
+ else:
132
+ new_points = grouped_xyz_norm
133
+ if returnfps:
134
+ return new_xyz, new_points, grouped_xyz, fps_idx
135
+ else:
136
+ return new_xyz, new_points
137
+
138
+
139
+ def sample_and_group_all(xyz, points):
140
+ """
141
+ Input:
142
+ xyz: input points position data, [B, N, 3]
143
+ points: input points data, [B, N, D]
144
+ Return:
145
+ new_xyz: sampled points position data, [B, 1, 3]
146
+ new_points: sampled points data, [B, 1, N, 3+D]
147
+ """
148
+ device = xyz.device
149
+ B, N, C = xyz.shape
150
+ new_xyz = torch.zeros(B, 1, C).to(device)
151
+ grouped_xyz = xyz.view(B, 1, N, C)
152
+ if points is not None:
153
+ new_points = torch.cat([grouped_xyz, points.view(B, 1, N, -1)], dim=-1)
154
+ else:
155
+ new_points = grouped_xyz
156
+ return new_xyz, new_points
157
+
158
+
159
+ class PointNetSetAbstraction(nn.Module):
160
+ def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all, knn=False):
161
+ super(PointNetSetAbstraction, self).__init__()
162
+ self.npoint = npoint
163
+ self.radius = radius
164
+ self.nsample = nsample
165
+ self.knn = knn
166
+ self.mlp_convs = nn.ModuleList()
167
+ self.mlp_bns = nn.ModuleList()
168
+ last_channel = in_channel
169
+ for out_channel in mlp:
170
+ self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1))
171
+ self.mlp_bns.append(nn.BatchNorm2d(out_channel))
172
+ last_channel = out_channel
173
+ self.group_all = group_all
174
+
175
+ def forward(self, xyz, points):
176
+ """
177
+ Input:
178
+ xyz: input points position data, [B, N, C]
179
+ points: input points data, [B, N, C]
180
+ Return:
181
+ new_xyz: sampled points position data, [B, S, C]
182
+ new_points_concat: sample points feature data, [B, S, D']
183
+ """
184
+ if self.group_all:
185
+ new_xyz, new_points = sample_and_group_all(xyz, points)
186
+ else:
187
+ new_xyz, new_points = sample_and_group(self.npoint, self.radius, self.nsample, xyz, points, knn=self.knn)
188
+ # new_xyz: sampled points position data, [B, npoint, C]
189
+ # new_points: sampled points data, [B, npoint, nsample, C+D]
190
+ new_points = new_points.permute(0, 3, 2, 1) # [B, C+D, nsample,npoint]
191
+ for i, conv in enumerate(self.mlp_convs):
192
+ bn = self.mlp_bns[i]
193
+ new_points = F.relu(bn(conv(new_points)))
194
+
195
+ new_points = torch.max(new_points, 2)[0].transpose(1, 2)
196
+ return new_xyz, new_points
197
+
198
+
199
+ class PointNetSetAbstractionMsg(nn.Module):
200
+ def __init__(self, npoint, radius_list, nsample_list, in_channel, mlp_list, knn=False):
201
+ super(PointNetSetAbstractionMsg, self).__init__()
202
+ self.npoint = npoint
203
+ self.radius_list = radius_list
204
+ self.nsample_list = nsample_list
205
+ self.knn = knn
206
+ self.conv_blocks = nn.ModuleList()
207
+ self.bn_blocks = nn.ModuleList()
208
+ for i in range(len(mlp_list)):
209
+ convs = nn.ModuleList()
210
+ bns = nn.ModuleList()
211
+ last_channel = in_channel + 3
212
+ for out_channel in mlp_list[i]:
213
+ convs.append(nn.Conv2d(last_channel, out_channel, 1))
214
+ bns.append(nn.BatchNorm2d(out_channel))
215
+ last_channel = out_channel
216
+ self.conv_blocks.append(convs)
217
+ self.bn_blocks.append(bns)
218
+
219
+ def forward(self, xyz, points, seed_idx=None):
220
+ """
221
+ Input:
222
+ xyz: input points position data, [B, C, N]
223
+ points: input points data, [B, D, N]
224
+ Return:
225
+ new_xyz: sampled points position data, [B, C, S]
226
+ new_points_concat: sample points feature data, [B, D', S]
227
+ """
228
+
229
+ B, N, C = xyz.shape
230
+ S = self.npoint
231
+ new_xyz = index_points(xyz, farthest_point_sample(xyz, S) if seed_idx is None else seed_idx)
232
+ new_points_list = []
233
+ for i, radius in enumerate(self.radius_list):
234
+ K = self.nsample_list[i]
235
+ if self.knn:
236
+ dists = square_distance(new_xyz, xyz) # B x npoint x N
237
+ group_idx = dists.argsort()[:, :, :K] # B x npoint x K
238
+ else:
239
+ group_idx = query_ball_point(radius, K, xyz, new_xyz)
240
+ grouped_xyz = index_points(xyz, group_idx)
241
+ grouped_xyz -= new_xyz.view(B, S, 1, C)
242
+ if points is not None:
243
+ grouped_points = index_points(points, group_idx)
244
+ grouped_points = torch.cat([grouped_points, grouped_xyz], dim=-1)
245
+ else:
246
+ grouped_points = grouped_xyz
247
+
248
+ grouped_points = grouped_points.permute(0, 3, 2, 1) # [B, D, K, S]
249
+ for j in range(len(self.conv_blocks[i])):
250
+ conv = self.conv_blocks[i][j]
251
+ bn = self.bn_blocks[i][j]
252
+ grouped_points = F.relu(bn(conv(grouped_points)))
253
+ new_points = torch.max(grouped_points, 2)[0] # [B, D', S]
254
+ new_points_list.append(new_points)
255
+
256
+ new_points_concat = torch.cat(new_points_list, dim=1).transpose(1, 2)
257
+ return new_xyz, new_points_concat
258
+
259
+
260
+ # Note: this function swaps N and C
261
+ class PointNetFeaturePropagation(nn.Module):
262
+ def __init__(self, in_channel, mlp):
263
+ super(PointNetFeaturePropagation, self).__init__()
264
+ self.mlp_convs = nn.ModuleList()
265
+ self.mlp_bns = nn.ModuleList()
266
+ last_channel = in_channel
267
+ for out_channel in mlp:
268
+ self.mlp_convs.append(nn.Conv1d(last_channel, out_channel, 1))
269
+ self.mlp_bns.append(nn.BatchNorm1d(out_channel))
270
+ last_channel = out_channel
271
+
272
+ def forward(self, xyz1, xyz2, points1, points2):
273
+ """
274
+ Input:
275
+ xyz1: input points position data, [B, C, N]
276
+ xyz2: sampled input points position data, [B, C, S]
277
+ points1: input points data, [B, D, N]
278
+ points2: input points data, [B, D, S]
279
+ Return:
280
+ new_points: upsampled points data, [B, D', N]
281
+ """
282
+ xyz1 = xyz1.permute(0, 2, 1)
283
+ xyz2 = xyz2.permute(0, 2, 1)
284
+
285
+ points2 = points2.permute(0, 2, 1)
286
+ B, N, C = xyz1.shape
287
+ _, S, _ = xyz2.shape
288
+
289
+ if S == 1:
290
+ interpolated_points = points2.repeat(1, N, 1)
291
+ else:
292
+ dists = square_distance(xyz1, xyz2)
293
+ dists, idx = dists.sort(dim=-1)
294
+ dists, idx = dists[:, :, :3], idx[:, :, :3] # [B, N, 3]
295
+
296
+ dist_recip = 1.0 / (dists + 1e-8)
297
+ norm = torch.sum(dist_recip, dim=2, keepdim=True)
298
+ weight = dist_recip / norm
299
+ interpolated_points = torch.sum(index_points(points2, idx) * weight.view(B, N, 3, 1), dim=2)
300
+
301
+ if points1 is not None:
302
+ points1 = points1.permute(0, 2, 1)
303
+ new_points = torch.cat([points1, interpolated_points], dim=-1)
304
+ else:
305
+ new_points = interpolated_points
306
+
307
+ new_points = new_points.permute(0, 2, 1)
308
+ for i, conv in enumerate(self.mlp_convs):
309
+ bn = self.mlp_bns[i]
310
+ new_points = F.relu(bn(conv(new_points)))
311
+ return new_points
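
A small CPU-only walkthrough of the sampling and grouping helpers above, assuming the `difpoint` package is on the Python path (the functions themselves need only torch and numpy). The sizes are arbitrary; the shapes follow the docstrings.

```python
# Farthest point sampling + ball query on a toy cloud, using the helpers defined above.
import torch
from difpoint.model.temporaltrans.pointnet_util import (
    square_distance, index_points, farthest_point_sample, query_ball_point)

xyz = torch.rand(2, 128, 3)                          # [B, N, 3]
fps_idx = farthest_point_sample(xyz, 16)             # [B, 16] indices of well-spread seeds
new_xyz = index_points(xyz, fps_idx)                 # [B, 16, 3] seed coordinates
group_idx = query_ball_point(0.3, 8, xyz, new_xyz)   # [B, 16, 8] neighbours within radius 0.3
dists = square_distance(new_xyz, xyz)                # [B, 16, 128] pairwise squared distances
print(fps_idx.shape, new_xyz.shape, group_idx.shape, dists.shape)
```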
difpoint/model/temporaltrans/pointtransformerv2.py ADDED
@@ -0,0 +1,250 @@
1
+ from .transformer_utils import BaseTemperalPointModel
2
+ from copy import deepcopy
3
+ import torch
4
+ import einops
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ from torch import nn
8
+ from einops import rearrange
9
+ import pointops
10
+ from pointcept.models.utils import offset2batch, batch2offset
+ from torch.utils.checkpoint import checkpoint # used by Block when enable_checkpoint is set
+ from timm.models.layers import DropPath # used by Block's stochastic depth
11
+ class PointBatchNorm(nn.Module):
12
+ """
13
+ Batch Normalization for Point Clouds data in shape of [B*N, C], [B*N, L, C]
14
+ """
15
+
16
+ def __init__(self, embed_channels):
17
+ super().__init__()
18
+ self.norm = nn.BatchNorm1d(embed_channels)
19
+
20
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
21
+ if input.dim() == 3:
22
+ return (
23
+ self.norm(input.transpose(1, 2).contiguous())
24
+ .transpose(1, 2)
25
+ .contiguous()
26
+ )
27
+ elif input.dim() == 2:
28
+ return self.norm(input)
29
+ else:
30
+ raise NotImplementedError
31
+ #https://github.com/Pointcept/Pointcept/blob/main/pointcept/models/point_transformer_v2/point_transformer_v2m2_base.py
32
+ class GroupedVectorAttention(nn.Module):
33
+ def __init__(
34
+ self,
35
+ embed_channels,
36
+ groups,
37
+ attn_drop_rate=0.0,
38
+ qkv_bias=True,
39
+ pe_multiplier=False,
40
+ pe_bias=True,
41
+ ):
42
+ super(GroupedVectorAttention, self).__init__()
43
+ self.embed_channels = embed_channels
44
+ self.groups = groups
45
+ assert embed_channels % groups == 0
46
+ self.attn_drop_rate = attn_drop_rate
47
+ self.qkv_bias = qkv_bias
48
+ self.pe_multiplier = pe_multiplier
49
+ self.pe_bias = pe_bias
50
+
51
+ self.linear_q = nn.Sequential(
52
+ nn.Linear(embed_channels, embed_channels, bias=qkv_bias),
53
+ PointBatchNorm(embed_channels),
54
+ nn.ReLU(inplace=True),
55
+ )
56
+ self.linear_k = nn.Sequential(
57
+ nn.Linear(embed_channels, embed_channels, bias=qkv_bias),
58
+ PointBatchNorm(embed_channels),
59
+ nn.ReLU(inplace=True),
60
+ )
61
+
62
+ self.linear_v = nn.Linear(embed_channels, embed_channels, bias=qkv_bias)
63
+
64
+ if self.pe_multiplier:
65
+ self.linear_p_multiplier = nn.Sequential(
66
+ nn.Linear(3, embed_channels),
67
+ PointBatchNorm(embed_channels),
68
+ nn.ReLU(inplace=True),
69
+ nn.Linear(embed_channels, embed_channels),
70
+ )
71
+ if self.pe_bias:
72
+ self.linear_p_bias = nn.Sequential(
73
+ nn.Linear(3, embed_channels),
74
+ PointBatchNorm(embed_channels),
75
+ nn.ReLU(inplace=True),
76
+ nn.Linear(embed_channels, embed_channels),
77
+ )
78
+ self.weight_encoding = nn.Sequential(
79
+ nn.Linear(embed_channels, groups),
80
+ PointBatchNorm(groups),
81
+ nn.ReLU(inplace=True),
82
+ nn.Linear(groups, groups),
83
+ )
84
+ self.softmax = nn.Softmax(dim=1)
85
+ self.attn_drop = nn.Dropout(attn_drop_rate)
86
+
87
+ def forward(self, feat, coord, reference_index):
88
+ query, key, value = (
89
+ self.linear_q(feat),
90
+ self.linear_k(feat),
91
+ self.linear_v(feat),
92
+ )
93
+ key = pointops.grouping(reference_index, key, coord, with_xyz=True)
94
+ value = pointops.grouping(reference_index, value, coord, with_xyz=False)
95
+ pos, key = key[:, :, 0:3], key[:, :, 3:]
96
+ relation_qk = key - query.unsqueeze(1)
97
+ if self.pe_multiplier:
98
+ pem = self.linear_p_multiplier(pos)
99
+ relation_qk = relation_qk * pem
100
+ if self.pe_bias:
101
+ peb = self.linear_p_bias(pos)
102
+ relation_qk = relation_qk + peb
103
+ value = value + peb
104
+
105
+ weight = self.weight_encoding(relation_qk)
106
+ weight = self.attn_drop(self.softmax(weight))
107
+
108
+ mask = torch.sign(reference_index + 1)
109
+ weight = torch.einsum("n s g, n s -> n s g", weight, mask)
110
+ value = einops.rearrange(value, "n ns (g i) -> n ns g i", g=self.groups)
111
+ feat = torch.einsum("n s g i, n s g -> n g i", value, weight)
112
+ feat = einops.rearrange(feat, "n g i -> n (g i)")
113
+ return feat
114
+
115
+ class BlockSequence(nn.Module):
116
+ def __init__(
117
+ self,
118
+ depth,
119
+ embed_channels,
120
+ groups,
121
+ neighbours=16,
122
+ qkv_bias=True,
123
+ pe_multiplier=False,
124
+ pe_bias=True,
125
+ attn_drop_rate=0.0,
126
+ drop_path_rate=0.0,
127
+ enable_checkpoint=False,
128
+ ):
129
+ super(BlockSequence, self).__init__()
130
+
131
+ if isinstance(drop_path_rate, list):
132
+ drop_path_rates = drop_path_rate
133
+ assert len(drop_path_rates) == depth
134
+ elif isinstance(drop_path_rate, float):
135
+ drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)]
136
+ else:
137
+ drop_path_rates = [0.0 for _ in range(depth)]
138
+
139
+ self.neighbours = neighbours
140
+ self.blocks = nn.ModuleList()
141
+ for i in range(depth):
142
+ block = Block(
143
+ embed_channels=embed_channels,
144
+ groups=groups,
145
+ qkv_bias=qkv_bias,
146
+ pe_multiplier=pe_multiplier,
147
+ pe_bias=pe_bias,
148
+ attn_drop_rate=attn_drop_rate,
149
+ drop_path_rate=drop_path_rates[i],
150
+ enable_checkpoint=enable_checkpoint,
151
+ )
152
+ self.blocks.append(block)
153
+
154
+ def forward(self, points):
155
+ coord, feat, offset = points
156
+ # reference index query of neighbourhood attention
157
+ # for windows attention, modify reference index query method
158
+ reference_index, _ = pointops.knn_query(self.neighbours, coord, offset)
159
+ for block in self.blocks:
160
+ points = block(points, reference_index)
161
+ return points
162
+
163
+ class GVAPatchEmbed(nn.Module):
164
+ def __init__(
165
+ self,
166
+ depth,
167
+ in_channels,
168
+ embed_channels,
169
+ groups,
170
+ neighbours=16,
171
+ qkv_bias=True,
172
+ pe_multiplier=False,
173
+ pe_bias=True,
174
+ attn_drop_rate=0.0,
175
+ drop_path_rate=0.0,
176
+ enable_checkpoint=False,
177
+ ):
178
+ super(GVAPatchEmbed, self).__init__()
179
+ self.in_channels = in_channels
180
+ self.embed_channels = embed_channels
181
+ self.proj = nn.Sequential(
182
+ nn.Linear(in_channels, embed_channels, bias=False),
183
+ PointBatchNorm(embed_channels),
184
+ nn.ReLU(inplace=True),
185
+ )
186
+ self.blocks = BlockSequence(
187
+ depth=depth,
188
+ embed_channels=embed_channels,
189
+ groups=groups,
190
+ neighbours=neighbours,
191
+ qkv_bias=qkv_bias,
192
+ pe_multiplier=pe_multiplier,
193
+ pe_bias=pe_bias,
194
+ attn_drop_rate=attn_drop_rate,
195
+ drop_path_rate=drop_path_rate,
196
+ enable_checkpoint=enable_checkpoint,
197
+ )
198
+
199
+ def forward(self, points):
200
+ coord, feat, offset = points
201
+ feat = self.proj(feat)
202
+ return self.blocks([coord, feat, offset])
203
+
204
+
205
+ class Block(nn.Module):
206
+ def __init__(
207
+ self,
208
+ embed_channels,
209
+ groups,
210
+ qkv_bias=True,
211
+ pe_multiplier=False,
212
+ pe_bias=True,
213
+ attn_drop_rate=0.0,
214
+ drop_path_rate=0.0,
215
+ enable_checkpoint=False,
216
+ ):
217
+ super(Block, self).__init__()
218
+ self.attn = GroupedVectorAttention(
219
+ embed_channels=embed_channels,
220
+ groups=groups,
221
+ qkv_bias=qkv_bias,
222
+ attn_drop_rate=attn_drop_rate,
223
+ pe_multiplier=pe_multiplier,
224
+ pe_bias=pe_bias,
225
+ )
226
+ self.fc1 = nn.Linear(embed_channels, embed_channels, bias=False)
227
+ self.fc3 = nn.Linear(embed_channels, embed_channels, bias=False)
228
+ self.norm1 = PointBatchNorm(embed_channels)
229
+ self.norm2 = PointBatchNorm(embed_channels)
230
+ self.norm3 = PointBatchNorm(embed_channels)
231
+ self.act = nn.ReLU(inplace=True)
232
+ self.enable_checkpoint = enable_checkpoint
233
+ self.drop_path = (
234
+ DropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
235
+ )
236
+
237
+ def forward(self, points, reference_index):
238
+ coord, feat, offset = points
239
+ identity = feat
240
+ feat = self.act(self.norm1(self.fc1(feat)))
241
+ feat = (
242
+ self.attn(feat, coord, reference_index)
243
+ if not self.enable_checkpoint
244
+ else checkpoint(self.attn, feat, coord, reference_index)
245
+ )
246
+ feat = self.act(self.norm2(feat))
247
+ feat = self.norm3(self.fc3(feat))
248
+ feat = identity + self.drop_path(feat)
249
+ feat = self.act(feat)
250
+ return [coord, feat, offset]
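
The PointTransformerV2 blocks above keep features packed as `[B*N, C]` (or `[B*N, L, C]`) rather than batched `[B, N, C]`. Because the module imports `pointops` and `pointcept` at import time, the snippet below mirrors just `PointBatchNorm` standalone to show the two accepted ranks; it is a reimplementation for illustration, not an import of the class above.

```python
# Standalone mirror of PointBatchNorm for packed point features.
import torch
import torch.nn as nn

class PointBatchNormMirror(nn.Module):
    """BatchNorm1d over the channel axis for packed point features."""
    def __init__(self, embed_channels):
        super().__init__()
        self.norm = nn.BatchNorm1d(embed_channels)

    def forward(self, x):
        if x.dim() == 3:    # [B*N, L, C] -> normalize over C
            return self.norm(x.transpose(1, 2).contiguous()).transpose(1, 2).contiguous()
        elif x.dim() == 2:  # [B*N, C]
            return self.norm(x)
        raise NotImplementedError

bn = PointBatchNormMirror(32)
print(bn(torch.randn(1024, 32)).shape)      # torch.Size([1024, 32])
print(bn(torch.randn(1024, 16, 32)).shape)  # torch.Size([1024, 16, 32])
```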
difpoint/model/temporaltrans/temptrans.py ADDED
@@ -0,0 +1,347 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ from torch import nn
5
+ from einops import rearrange
6
+ from .transformer_utils import BaseTemperalPointModel
7
+ import math
8
+ from einops_exts import check_shape, rearrange_many
9
+ from functools import partial
10
+
11
+ class SinusoidalPosEmb(nn.Module):
12
+ def __init__(self, dim):
13
+ super().__init__()
14
+ self.dim = dim
15
+
16
+ def forward(self, x):
17
+ device = x.device
18
+ half_dim = self.dim // 2
19
+ emb = math.log(10000) / (half_dim - 1)
20
+ emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
21
+ emb = x[:, None] * emb[None, :]
22
+ emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
23
+ return emb
24
+
25
+ class RelativePositionBias(nn.Module):
26
+ def __init__(
27
+ self,
28
+ heads = 8,
29
+ num_buckets = 32,
30
+ max_distance = 128
31
+ ):
32
+ super().__init__()
33
+ self.num_buckets = num_buckets
34
+ self.max_distance = max_distance
35
+ self.relative_attention_bias = nn.Embedding(num_buckets, heads)
36
+
37
+ @staticmethod
38
+ def _relative_position_bucket(relative_position, num_buckets = 32, max_distance = 128):
39
+ ret = 0
40
+ n = -relative_position
41
+
42
+ num_buckets //= 2
43
+ ret += (n < 0).long() * num_buckets
44
+ n = torch.abs(n)
45
+
46
+ max_exact = num_buckets // 2
47
+ is_small = n < max_exact
48
+
49
+ val_if_large = max_exact + (
50
+ torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
51
+ ).long()
52
+ val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
53
+
54
+ ret += torch.where(is_small, n, val_if_large)
55
+ return ret
56
+
57
+ def forward(self, n, device):
58
+ q_pos = torch.arange(n, dtype = torch.long, device = device)
59
+ k_pos = torch.arange(n, dtype = torch.long, device = device)
60
+ rel_pos = rearrange(k_pos, 'j -> 1 j') - rearrange(q_pos, 'i -> i 1')
61
+ rp_bucket = self._relative_position_bucket(rel_pos, num_buckets = self.num_buckets, max_distance = self.max_distance)
62
+ values = self.relative_attention_bias(rp_bucket)
63
+ return rearrange(values, 'i j h -> h i j')
64
+ def exists(x):
65
+ return x is not None
66
+
67
+ class Residual(nn.Module):
68
+ def __init__(self, fn):
69
+ super().__init__()
70
+ self.fn = fn
71
+
72
+ def forward(self, x, *args, **kwargs):
73
+ return self.fn(x, *args, **kwargs) + x
74
+ class LayerNorm(nn.Module):
75
+ def __init__(self, dim, eps = 1e-5):
76
+ super().__init__()
77
+ self.eps = eps
78
+ self.gamma = nn.Parameter(torch.ones(1, 1, dim))
79
+ self.beta = nn.Parameter(torch.zeros(1, 1, dim))
80
+
81
+ def forward(self, x):
82
+ var = torch.var(x, dim = -1, unbiased = False, keepdim = True)
83
+ mean = torch.mean(x, dim = -1, keepdim = True)
84
+ return (x - mean) / (var + self.eps).sqrt() * self.gamma + self.beta
85
+
86
+ class PreNorm(nn.Module):
87
+ def __init__(self, dim, fn):
88
+ super().__init__()
89
+ self.fn = fn
90
+ self.norm = LayerNorm(dim)
91
+
92
+ def forward(self, x, **kwargs):
93
+ x = self.norm(x)
94
+ return self.fn(x, **kwargs)
95
+
96
+
97
+ class EinopsToAndFrom(nn.Module):
98
+ def __init__(self, from_einops, to_einops, fn):
99
+ super().__init__()
100
+ self.from_einops = from_einops
101
+ self.to_einops = to_einops
102
+ self.fn = fn
103
+
104
+ def forward(self, x, **kwargs):
105
+ shape = x.shape
106
+ reconstitute_kwargs = dict(tuple(zip(self.from_einops.split(' '), shape)))
107
+ x = rearrange(x, f'{self.from_einops} -> {self.to_einops}')
108
+ x = self.fn(x, **kwargs)
109
+ x = rearrange(x, f'{self.to_einops} -> {self.from_einops}', **reconstitute_kwargs)
110
+ return x
111
+
112
+ class Attention(nn.Module):
113
+ def __init__(
114
+ self, dim, heads=4, attn_head_dim=None, casual_attn=False,rotary_emb = None):
115
+ super().__init__()
116
+ self.num_heads = heads
117
+ head_dim = dim // heads
118
+ self.casual_attn = casual_attn
119
+
120
+ if attn_head_dim is not None:
121
+ head_dim = attn_head_dim
122
+
123
+ all_head_dim = head_dim * self.num_heads
124
+ self.scale = head_dim ** -0.5
125
+ self.to_qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
126
+ self.proj = nn.Linear(all_head_dim, dim)
127
+ self.rotary_emb = rotary_emb
128
+
129
+ def forward(self, x, pos_bias = None):
130
+ N, device = x.shape[-2], x.device
131
+ qkv = self.to_qkv(x).chunk(3, dim = -1)
132
+
133
+ q, k, v = rearrange_many(qkv, '... n (h d) -> ... h n d', h=self.num_heads)
134
+
135
+ q = q * self.scale
136
+
137
+ if exists(self.rotary_emb):
138
+ q = self.rotary_emb.rotate_queries_or_keys(q)
139
+ k = self.rotary_emb.rotate_queries_or_keys(k)
140
+
141
+ sim = torch.einsum('... h i d, ... h j d -> ... h i j', q, k)
142
+
143
+ if exists(pos_bias):
144
+ sim = sim + pos_bias
145
+
146
+ if self.casual_attn:
147
+ mask = torch.tril(torch.ones(sim.size(-1), sim.size(-2))).to(device)
148
+ sim = sim.masked_fill(mask[..., :, :] == 0, float('-inf'))
149
+
150
+ attn = sim.softmax(dim = -1)
151
+ x = torch.einsum('... h i j, ... h j d -> ... h i d', attn, v)
152
+ x = rearrange(x, '... h n d -> ... n (h d)')
153
+ x = self.proj(x)
154
+ return x
155
+
156
+
157
+ class Block(nn.Module):
+     def __init__(self, dim, dim_out):
+         super().__init__()
+         self.proj = nn.Linear(dim, dim_out)
+         self.norm = LayerNorm(dim)
+         self.act = nn.SiLU()
+
+     def forward(self, x, scale_shift=None):
+         x = self.proj(x)
+
+         if exists(scale_shift):
+             x = self.norm(x)
+             scale, shift = scale_shift
+             x = x * (scale + 1) + shift
+         return self.act(x)
+
+
+ class ResnetBlock(nn.Module):
+     def __init__(self, dim, dim_out, cond_dim=None):
+         super().__init__()
+         self.mlp = nn.Sequential(
+             nn.SiLU(),
+             nn.Linear(cond_dim, dim_out * 2)
+         ) if exists(cond_dim) else None
+
+         self.block1 = Block(dim, dim_out)
+         self.block2 = Block(dim_out, dim_out)
+
+     def forward(self, x, cond_emb=None):
+         scale_shift = None
+         if exists(self.mlp):
+             assert exists(cond_emb), 'time emb must be passed in'
+             cond_emb = self.mlp(cond_emb)
+             # cond_emb = rearrange(cond_emb, 'b f c -> b f 1 c')
+             scale_shift = cond_emb.chunk(2, dim=-1)
+
+         h = self.block1(x, scale_shift=scale_shift)
+         h = self.block2(h)
+         return h + x
+
+
+ from rotary_embedding_torch import RotaryEmbedding
+
+
+ class SimpleTransModel(BaseTemperalPointModel):
+     """
+     A simple model that processes a point cloud by applying a series of MLPs to each point
+     individually, along with some pooled global features.
+     """
+
+     def get_layers(self):
+         # self.input_projection = nn.Linear(
+         #     in_features=51,
+         #     out_features=self.dim
+         # )
+
+         self.input_projection = nn.Linear(
+             in_features=70,
+             out_features=self.dim
+         )
+
+         cond_dim = 512 + self.timestep_embed_dim
+
+         num_head = self.dim // 64
+         rotary_emb = RotaryEmbedding(min(32, num_head))
+
+         self.time_rel_pos_bias = RelativePositionBias(heads=num_head, max_distance=128)  # realistically will not be able to generate that many frames of video... yet
+
+         temporal_casual_attn = lambda dim: Attention(dim, heads=num_head, casual_attn=False, rotary_emb=rotary_emb)
+
+         cond_block = partial(ResnetBlock, cond_dim=cond_dim)
+
+         layers = nn.ModuleList([])
+
+         for _ in range(self.num_layers):
+             layers.append(nn.ModuleList([
+                 cond_block(self.dim, self.dim),
+                 cond_block(self.dim, self.dim),
+                 Residual(PreNorm(self.dim, temporal_casual_attn(self.dim)))
+             ]))
+
+         return layers
+
+     def forward(self, inputs: torch.Tensor, timesteps: torch.Tensor, context=None):
+         """
+         Apply the model to an input batch.
+         :param inputs: an [N x F x C] Tensor of inputs.
+         :param timesteps: a 1-D batch of timesteps.
+         :param context: conditioning plugged in via crossattn
+         """
+         # Prepare inputs
+         batch, num_frames, channels = inputs.size()
+         device = inputs.device
+         # assert channels == 3
+
+         # Positional encoding of point coords
+         # inputs = rearrange(inputs, 'b f p c -> (b f) p c')
+         # pos_emb = self.positional_encoding(inputs)
+         x = self.input_projection(inputs)
+         # x = rearrange(x, '(b f) p c -> b f p c', b=batch)
+
+         t_emb = self.time_mlp(timesteps) if exists(self.time_mlp) else None
+         t_emb = t_emb[:, None, :].expand(-1, num_frames, -1)  # b f c
+         if context is not None:
+             t_emb = torch.cat([t_emb, context], -1)
+
+         time_rel_pos_bias = self.time_rel_pos_bias(num_frames, device=device)
+
+         for block1, block2, temporal_casual_attn in self.layers:
+             x = block1(x, t_emb)
+             x = block2(x, t_emb)
+             x = temporal_casual_attn(x, pos_bias=time_rel_pos_bias)
+
+         # Project
+         x = self.output_projection(x)
+         return x
+
+
+ class SimpleTemperalPointModel(BaseTemperalPointModel):
+     """
+     A simple model that processes a point cloud by applying a series of MLPs to each point
+     individually, along with some pooled global features.
+     """
+
+     def get_layers(self):
+         audio_dim = 512
+         cond_dim = audio_dim + self.timestep_embed_dim
+
+         num_head = 4
+         rotary_emb = RotaryEmbedding(min(32, num_head))
+         self.time_rel_pos_bias = RelativePositionBias(heads=num_head, max_distance=128)  # realistically will not be able to generate that many frames of video... yet
+
+         temporal_casual_attn = lambda dim: EinopsToAndFrom('b f p c', 'b p f c', Attention(dim, heads=num_head, casual_attn=False, rotary_emb=rotary_emb))
+
+         spatial_kp_attn = lambda dim: EinopsToAndFrom('b f p c', 'b f p c', Attention(dim, heads=num_head))
+
+         cond_block = partial(ResnetBlock, cond_dim=cond_dim)
+
+         layers = nn.ModuleList([])
+
+         for _ in range(self.num_layers):
+             layers.append(nn.ModuleList([
+                 cond_block(self.dim, self.dim),
+                 cond_block(self.dim, self.dim),
+                 Residual(PreNorm(self.dim, spatial_kp_attn(self.dim))),
+                 Residual(PreNorm(self.dim, temporal_casual_attn(self.dim)))
+             ]))
+
+         return layers
+
+     def forward(self, inputs: torch.Tensor, timesteps: torch.Tensor, context=None):
+         """
+         Apply the model to an input batch.
+         :param inputs: an [N x F x P x C] Tensor of inputs.
+         :param timesteps: a 1-D batch of timesteps.
+         :param context: conditioning plugged in via crossattn
+         """
+         # Prepare inputs
+         batch, num_frames, num_points, channels = inputs.size()
+         device = inputs.device
+         # assert channels == 3
+
+         # Positional encoding of point coords
+         inputs = rearrange(inputs, 'b f p c -> (b f) p c')
+         pos_emb = self.positional_encoding(inputs)
+         x = self.input_projection(torch.cat([inputs, pos_emb], -1))
+         x = rearrange(x, '(b f) p c -> b f p c', b=batch)
+
+         t_emb = self.time_mlp(timesteps) if exists(self.time_mlp) else None
+         t_emb = t_emb[:, None, :].expand(-1, num_frames, -1)  # b f c
+         if context is not None:
+             t_emb = torch.cat([t_emb, context], -1)
+
+         time_rel_pos_bias = self.time_rel_pos_bias(num_frames, device=device)
+
+         for block1, block2, spatial_kp_attn, temporal_casual_attn in self.layers:
+             x = block1(x, t_emb)
+             x = block2(x, t_emb)
+             x = spatial_kp_attn(x)
+             x = temporal_casual_attn(x, pos_bias=time_rel_pos_bias)
+
+         # Project
+         x = self.output_projection(x)
+         return x
+
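And a small smoke test for `SimpleTransModel` as defined above. The constructor arguments come from `BaseTemperalPointModel` (see `transformer_utils.py` below); the concrete sizes (70 keypoint channels, a 512-dimensional per-frame context) mirror the hard-coded values in `get_layers`, but are otherwise illustrative assumptions rather than values taken from the training pipeline.

```python
import torch
from difpoint.model.temporaltrans.temptrans import SimpleTransModel

model = SimpleTransModel(
    num_classes=70,            # output channels per frame (matches the 70-d input projection)
    embed_dim=70,              # required by the base class, unused by this subclass
    extra_feature_channels=0,
    dim=256,                   # num_head = dim // 64, so dim should be a multiple of 64
    num_layers=2,
)

x = torch.randn(4, 32, 70)           # (batch, frames, keypoint features)
t = torch.randint(0, 1000, (4,))     # diffusion timesteps, one per sample
audio = torch.randn(4, 32, 512)      # per-frame conditioning, e.g. audio embeddings

out = model(x, t, context=audio)     # (4, 32, 70), same layout as the input
```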
difpoint/model/temporaltrans/transformer_utils.py ADDED
@@ -0,0 +1,146 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import math
+ from einops import rearrange
+ from einops_exts import check_shape, rearrange_many
+ from torch import Size, Tensor
+
+
+ class SinusoidalPosEmb(nn.Module):
+     def __init__(self, dim):
+         super().__init__()
+         self.dim = dim
+
+     def forward(self, x):
+         device = x.device
+         half_dim = self.dim // 2
+         emb = math.log(10000) / (half_dim - 1)
+         emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
+         emb = x[:, None] * emb[None, :]
+         emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
+         return emb
+
+
+ def map_positional_encoding(v: Tensor, freq_bands: Tensor) -> Tensor:
+     """Map v to positional encoding representation phi(v)
+
+     Arguments:
+         v (Tensor): input features (B, IFeatures)
+         freq_bands (Tensor): frequency bands (N_freqs, )
+
+     Returns:
+         phi(v) (Tensor): Fourier features (B, 3 + (2 * N_freqs) * 3)
+     """
+     pe = [v]
+     for freq in freq_bands:
+         fv = freq * v
+         pe += [torch.sin(fv), torch.cos(fv)]
+     return torch.cat(pe, dim=-1)
+
+
+ class FeatureMapping(nn.Module):
+     """FeatureMapping nn.Module
+
+     Maps v to features following transformation phi(v)
+
+     Arguments:
+         i_dim (int): input dimensions
+         o_dim (int): output dimensions
+     """
+
+     def __init__(self, i_dim: int, o_dim: int) -> None:
+         super().__init__()
+         self.i_dim = i_dim
+         self.o_dim = o_dim
+
+     def forward(self, v: Tensor) -> Tensor:
+         """FeatureMapping forward pass
+
+         Arguments:
+             v (Tensor): input features (B, IFeatures)
+
+         Returns:
+             phi(v) (Tensor): mapped features (B, OFeatures)
+         """
+         raise NotImplementedError("Forward pass not implemented yet!")
+
+
+ class PositionalEncoding(FeatureMapping):
+     """PositionalEncoding module
+
+     Maps v to positional encoding representation phi(v)
+
+     Arguments:
+         i_dim (int): input dimension for v
+         N_freqs (int): number of frequencies to sample (default: 10)
+     """
+
+     def __init__(
+         self,
+         i_dim: int,
+         N_freqs: int = 10,
+     ) -> None:
+         super().__init__(i_dim, 3 + (2 * N_freqs) * 3)
+         self.N_freqs = N_freqs
+
+         a, b = 1, self.N_freqs - 1
+         freq_bands = 2 ** torch.linspace(a, b, self.N_freqs)
+         self.register_buffer("freq_bands", freq_bands)
+
+     def forward(self, v: Tensor) -> Tensor:
+         """Map v to positional encoding representation phi(v)
+
+         Arguments:
+             v (Tensor): input features (B, IFeatures)
+
+         Returns:
+             phi(v) (Tensor): Fourier features (B, 3 + (2 * N_freqs) * 3)
+         """
+         return map_positional_encoding(v, self.freq_bands)
+
+
+ class BaseTemperalPointModel(nn.Module):
+     """A base class providing useful methods for point cloud processing."""
+
+     def __init__(
+         self,
+         *,
+         num_classes,
+         embed_dim,
+         extra_feature_channels,
+         dim: int = 768,
+         num_layers: int = 6
+     ):
+         super().__init__()
+
+         self.extra_feature_channels = extra_feature_channels
+         self.timestep_embed_dim = 256
+         self.output_dim = num_classes
+         self.dim = dim
+         self.num_layers = num_layers
+
+         self.time_mlp = nn.Sequential(
+             SinusoidalPosEmb(dim),
+             nn.Linear(dim, self.timestep_embed_dim),
+             nn.SiLU(),
+             nn.Linear(self.timestep_embed_dim, self.timestep_embed_dim)
+         )
+
+         self.positional_encoding = PositionalEncoding(i_dim=3, N_freqs=10)
+         positional_encoding_d_out = 3 + (2 * 10) * 3
+
+         # Input projection (point coords, point coord encodings, other features, and timestep embeddings)
+         self.input_projection = nn.Linear(
+             in_features=(3 + positional_encoding_d_out),
+             out_features=self.dim
+         )  # b f p c
+
+         # Transformer layers
+         self.layers = self.get_layers()
+
+         # Output projection
+         self.output_projection = nn.Linear(self.dim, self.output_dim)
+
+     def get_layers(self):
+         raise NotImplementedError('This method should be implemented by subclasses')
+
+     def forward(self, inputs: torch.Tensor, t: torch.Tensor):
+         raise NotImplementedError('This method should be implemented by subclasses')
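A quick shape check for the two embedding helpers above (it assumes the repository root is on `PYTHONPATH`; sizes are illustrative):

```python
import torch
from difpoint.model.temporaltrans.transformer_utils import PositionalEncoding, SinusoidalPosEmb

# Fourier features for 3-D coordinates: output dim = 3 + 2 * N_freqs * 3 = 63
pe = PositionalEncoding(i_dim=3, N_freqs=10)
pts = torch.randn(128, 3)
print(pe(pts).shape)        # torch.Size([128, 63])

# Sinusoidal timestep embedding used by BaseTemperalPointModel.time_mlp
temb = SinusoidalPosEmb(256)
t = torch.randint(0, 1000, (4,))
print(temb(t).shape)        # torch.Size([4, 256])
```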
difpoint/src/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # -*- coding: utf-8 -*-
+ # @Author : wenshao
+ # @Email : [email protected]
+ # @Project : FasterLivePortrait
+ # @FileName: __init__.py.py
difpoint/src/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (144 Bytes). View file
 
difpoint/src/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (157 Bytes). View file
 
difpoint/src/__pycache__/live_portrait_pipeline.cpython-310.pyc ADDED
Binary file (8.08 kB). View file
 
difpoint/src/__pycache__/live_portrait_wrapper.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
difpoint/src/config/__init__.py ADDED
File without changes
difpoint/src/config/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (137 Bytes). View file
 
difpoint/src/config/__pycache__/argument_config.cpython-310.pyc ADDED
Binary file (1.82 kB). View file
 
difpoint/src/config/__pycache__/base_config.cpython-310.pyc ADDED
Binary file (1.08 kB). View file
 
difpoint/src/config/__pycache__/crop_config.cpython-310.pyc ADDED
Binary file (1.14 kB). View file
 
difpoint/src/config/__pycache__/inference_config.cpython-310.pyc ADDED
Binary file (2.39 kB). View file
 
difpoint/src/config/argument_config.py ADDED
@@ -0,0 +1,48 @@
+ # coding: utf-8
+
+ """
+ All configs for user
+ """
+
+ from dataclasses import dataclass
+ import tyro
+ from typing_extensions import Annotated
+ from typing import Optional
+ from .base_config import PrintableConfig, make_abs_path
+
+
+ @dataclass(repr=False)  # use repr from PrintableConfig
+ class ArgumentConfig(PrintableConfig):
+     ########## input arguments ##########
+     source_image: Annotated[str, tyro.conf.arg(aliases=["-s"])] = make_abs_path('../../assets/examples/source/s6.jpg')  # path to the source portrait
+     driving_info: Annotated[str, tyro.conf.arg(aliases=["-d"])] = make_abs_path('../../assets/examples/driving/d12.mp4')  # path to the driving video or template (.pkl format)
+     output_dir: Annotated[str, tyro.conf.arg(aliases=["-o"])] = 'animations/'  # directory to save the output video
+
+     ########## inference arguments ##########
+     flag_use_half_precision: bool = False  # whether to use half precision (FP16); if black boxes appear, it might be due to GPU incompatibility, so set this to False
+     flag_crop_driving_video: bool = False  # whether to crop the driving video when the given driving info is a video
+     device_id: int = 0  # gpu device id
+     flag_force_cpu: bool = False  # force cpu inference, WIP!
+     flag_lip_zero: bool = False  # whether to set the lips to the closed state before animation; only takes effect when flag_eye_retargeting and flag_lip_retargeting are False
+     flag_eye_retargeting: bool = False  # not recommended to be True, WIP
+     flag_lip_retargeting: bool = False  # not recommended to be True, WIP
+     flag_stitching: bool = False  # recommended True if head movement is small, False if head movement is large
+     flag_relative_motion: bool = False  # whether to use relative motion
+     flag_pasteback: bool = False  # whether to paste back / stitch the animated face crop from the face-cropping space into the original image space
+     flag_do_crop: bool = False  # whether to crop the source portrait to the face-cropping space
+     flag_do_rot: bool = False  # whether to apply rotation when flag_do_crop is True
+
+     ########## crop arguments ##########
+     scale: float = 2.3  # the larger the scale, the smaller the area the face occupies in the crop
+     vx_ratio: float = 0  # the ratio to move the face left or right in the cropping space
+     vy_ratio: float = -0.125  # the ratio to move the face up or down in the cropping space
+
+     scale_crop_video: float = 2.2  # scale factor for cropping the driving video
+     vx_ratio_crop_video: float = 0.  # adjust the horizontal (x) offset when cropping the driving video
+     vy_ratio_crop_video: float = -0.1  # adjust the vertical (y) offset when cropping the driving video
+
+     ########## gradio arguments ##########
+     server_port: Annotated[int, tyro.conf.arg(aliases=["-p"])] = 8890  # port for the gradio server
+     share: bool = False  # whether to share the server publicly
+     server_name: Optional[str] = "127.0.0.1"  # local server name; use "0.0.0.0" to listen on all interfaces
+     flag_do_torch_compile: bool = False  # whether to use torch.compile to accelerate generation
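The `Annotated[..., tyro.conf.arg(aliases=...)]` fields suggest this dataclass is meant to be parsed from the command line with `tyro`; a minimal sketch of such an entry point (a hypothetical script, not part of this upload):

```python
# hypothetical entry point; assumes the repository root is on PYTHONPATH
import tyro
from difpoint.src.config.argument_config import ArgumentConfig

if __name__ == "__main__":
    # e.g. python demo.py -s my_face.jpg -d my_driving.mp4 -o out/
    args = tyro.cli(ArgumentConfig)
    print(args)  # PrintableConfig.__repr__ prints one "field: value" line per entry
```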
difpoint/src/config/base_config.py ADDED
@@ -0,0 +1,29 @@
+ # coding: utf-8
+
+ """
+ pretty printing class
+ """
+
+ from __future__ import annotations
+ import os.path as osp
+ from typing import Tuple
+
+
+ def make_abs_path(fn):
+     return osp.join(osp.dirname(osp.realpath(__file__)), fn)
+
+
+ class PrintableConfig:  # pylint: disable=too-few-public-methods
+     """Printable Config defining str function"""
+
+     def __repr__(self):
+         lines = [self.__class__.__name__ + ":"]
+         for key, val in vars(self).items():
+             if isinstance(val, Tuple):
+                 flattened_val = "["
+                 for item in val:
+                     flattened_val += str(item) + "\n"
+                 flattened_val = flattened_val.rstrip("\n")
+                 val = flattened_val + "]"
+             lines += f"{key}: {str(val)}".split("\n")
+         return "\n ".join(lines)
difpoint/src/config/crop_config.py ADDED
@@ -0,0 +1,29 @@
+ # coding: utf-8
+
+ """
+ parameters used for cropping faces
+ """
+
+ from dataclasses import dataclass
+
+ from .base_config import PrintableConfig
+
+
+ @dataclass(repr=False)  # use repr from PrintableConfig
+ class CropConfig(PrintableConfig):
+     insightface_root: str = "./downloaded_repo/pretrained_weights/insightface"
+     landmark_ckpt_path: str = "./downloaded_repo/pretrained_weights/liveportrait/landmark.onnx"
+     device_id: int = 0  # gpu device id
+     flag_force_cpu: bool = False  # force cpu inference, WIP
+     ########## source image cropping options ##########
+     dsize: int = 512  # crop size
+     scale: float = 2.0  # scale factor
+     vx_ratio: float = 0  # vx ratio
+     vy_ratio: float = -0.125  # vy ratio; +up, -down
+     max_face_num: int = 0  # max face number, 0 means no limit
+
+     ########## driving video auto-cropping options ##########
+     scale_crop_video: float = 2.2  # 2.0 # scale factor for cropping the driving video
+     vx_ratio_crop_video: float = 0.0  # adjust the horizontal (x) offset when cropping the driving video
+     vy_ratio_crop_video: float = -0.1  # adjust the vertical (y) offset when cropping the driving video
+     direction: str = "large-small"  # direction of cropping
difpoint/src/config/inference_config.py ADDED
@@ -0,0 +1,52 @@
+ # coding: utf-8
+
+ """
+ config dataclass used for inference
+ """
+
+ import os.path as osp
+ import cv2
+ from numpy import ndarray
+ from dataclasses import dataclass
+ from typing import Literal, Tuple
+ from .base_config import PrintableConfig, make_abs_path
+
+
+ @dataclass(repr=False)  # use repr from PrintableConfig
+ class InferenceConfig(PrintableConfig):
+     # MODEL CONFIG, NOT EXPORTED PARAMS
+     models_config: str = make_abs_path('./models.yaml')  # portrait animation config
+     checkpoint_F: str = './downloaded_repo/pretrained_weights/liveportrait/base_models/appearance_feature_extractor.pth'  # path to checkpoint of F
+     checkpoint_M: str = './downloaded_repo/pretrained_weights/liveportrait/base_models/motion_extractor.pth'  # path to checkpoint of M
+     checkpoint_G: str = './downloaded_repo/pretrained_weights/liveportrait/base_models/spade_generator.pth'  # path to checkpoint of G
+     checkpoint_W: str = './downloaded_repo/pretrained_weights/liveportrait/base_models/warping_module.pth'  # path to checkpoint of W
+     checkpoint_S: str = './downloaded_repo/pretrained_weights/liveportrait/base_models/retargeting_models/stitching_retargeting_module.pth'  # path to checkpoint of S and R_eyes, R_lip
+
+     # EXPORTED PARAMS
+     flag_use_half_precision: bool = True
+     flag_crop_driving_video: bool = False
+     device_id: int = 0
+     flag_lip_zero: bool = False
+     flag_eye_retargeting: bool = False
+     flag_lip_retargeting: bool = False
+     flag_stitching: bool = False
+     flag_relative_motion: bool = False
+     flag_pasteback: bool = False
+     flag_do_crop: bool = False
+     flag_do_rot: bool = False
+     flag_force_cpu: bool = False
+     flag_do_torch_compile: bool = False
+
+     # NOT EXPORTED PARAMS
+     lip_zero_threshold: float = 0.03  # threshold for flag_lip_zero
+     anchor_frame: int = 0  # TO IMPLEMENT
+
+     input_shape: Tuple[int, int] = (256, 256)  # input shape
+     output_format: Literal['mp4', 'gif'] = 'mp4'  # output video format
+     crf: int = 15  # crf for the output video
+     output_fps: int = 25  # default output fps
+
+     mask_crop: ndarray = cv2.imread(make_abs_path('../utils/resources/mask_template.png'), cv2.IMREAD_COLOR)
+     size_gif: int = 256  # default gif size, TO IMPLEMENT
+     source_max_dim: int = 1280  # the max dimension (height or width) of the source image
+     source_division: int = 2  # ensure the height and width of the source image are divisible by this number
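Since `InferenceConfig` is a plain dataclass, the exported flags can be overridden at construction time; a minimal sketch (checkpoint paths stay at their defaults and are not validated here):

```python
# assumes the repository root is on PYTHONPATH; note that mask_crop is loaded
# with cv2.imread at class-definition time and is None if the template is missing
from difpoint.src.config.inference_config import InferenceConfig

cfg = InferenceConfig(flag_use_half_precision=False, output_fps=30)
print(cfg.input_shape)    # (256, 256)
print(cfg.output_format)  # mp4
```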