Upload 5 files
difpoint/configs/onnx_infer.yaml
ADDED
@@ -0,0 +1,105 @@
models:
  warping_spade:
    name: "WarpingSpadeModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/warping_spade.onnx"
  motion_extractor:
    name: "MotionExtractorModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/motion_extractor.onnx"
  landmark:
    name: "LandmarkModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/landmark.onnx"
  face_analysis:
    name: "FaceAnalysisModel"
    predict_type: "ort"
    model_path:
      - "./difpoint/checkpoints/liveportrait_onnx/retinaface_det_static.onnx"
      - "./difpoint/checkpoints/liveportrait_onnx/face_2dpose_106_static.onnx"
  app_feat_extractor:
    name: "AppearanceFeatureExtractorModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/appearance_feature_extractor.onnx"
  stitching:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/stitching.onnx"
  stitching_eye_retarget:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/stitching_eye.onnx"
  stitching_lip_retarget:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/stitching_lip.onnx"

animal_models:
  warping_spade:
    name: "WarpingSpadeModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/warping_spade.onnx"
  motion_extractor:
    name: "MotionExtractorModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/motion_extractor.onnx"
  app_feat_extractor:
    name: "AppearanceFeatureExtractorModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/appearance_feature_extractor.onnx"
  stitching:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/stitching.onnx"
  stitching_eye_retarget:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/stitching_eye.onnx"
  stitching_lip_retarget:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/stitching_lip.onnx"
  landmark:
    name: "LandmarkModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/landmark.onnx"
  face_analysis:
    name: "FaceAnalysisModel"
    predict_type: "ort"
    model_path:
      - "./difpoint/checkpoints/liveportrait_onnx/retinaface_det_static.onnx"
      - "./difpoint/checkpoints/liveportrait_onnx/face_2dpose_106_static.onnx"

crop_params:
  src_dsize: 512
  src_scale: 2.3
  src_vx_ratio: 0.0
  src_vy_ratio: -0.125
  dri_scale: 2.2
  dri_vx_ratio: 0.0
  dri_vy_ratio: -0.1


infer_params:
  flag_crop_driving_video: False
  flag_normalize_lip: True
  flag_source_video_eye_retargeting: False
  flag_video_editing_head_rotation: False
  flag_eye_retargeting: False
  flag_lip_retargeting: False
  flag_stitching: True
  flag_relative_motion: True
  flag_pasteback: True
  flag_do_crop: True
  flag_do_rot: True

  # NOT EXPORTED PARAMS
  lip_normalize_threshold: 0.03 # threshold for flag_normalize_lip
  source_video_eye_retargeting_threshold: 0.18 # threshold for eye retargeting when the input is a source video
  driving_smooth_observation_variance: 1e-7 # smoothing strength for animation driven by a source video; larger values give a smoother result, but too much smoothing loses motion accuracy
  anchor_frame: 0 # TO IMPLEMENT
  mask_crop_path: "./assets/mask_template.png"
  driving_multiplier: 1.0

  source_max_dim: 1280 # maximum height/width of the source image
  source_division: 2 # the source image height and width must be divisible by this number
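Each entry under models pairs a wrapper class (name) with a backend (predict_type: "ort" for ONNX Runtime) and one or more ONNX files; face_analysis is the only entry that lists two files. The sketch below shows one plausible way such a config could be consumed. It is illustrative only: the dictionary-of-sessions layout and the provider choice are assumptions, not the repository's actual loader.

# Hypothetical loader sketch: read onnx_infer.yaml and open one ONNX Runtime
# session per configured model. Dispatch on the "name" field is omitted here.
import yaml
import onnxruntime as ort

with open("difpoint/configs/onnx_infer.yaml") as f:
    cfg = yaml.safe_load(f)

sessions = {}
for key, spec in cfg["models"].items():
    if spec["predict_type"] != "ort":
        continue  # only the ONNX Runtime backend is handled in this sketch
    paths = spec["model_path"]
    paths = paths if isinstance(paths, list) else [paths]  # face_analysis lists two files
    sessions[key] = [
        ort.InferenceSession(p, providers=["CUDAExecutionProvider", "CPUExecutionProvider"])
        for p in paths
    ]

print({k: [s.get_inputs()[0].name for s in v] for k, v in sessions.items()})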
difpoint/configs/onnx_mp_infer.yaml
ADDED
@@ -0,0 +1,99 @@
models:
  warping_spade:
    name: "WarpingSpadeModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/warping_spade.onnx"
  motion_extractor:
    name: "MotionExtractorModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/motion_extractor.onnx"
  landmark:
    name: "LandmarkModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/landmark.onnx"
  face_analysis:
    name: "MediaPipeFaceModel"
    predict_type: "mp"
  app_feat_extractor:
    name: "AppearanceFeatureExtractorModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/appearance_feature_extractor.onnx"
  stitching:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/stitching.onnx"
  stitching_eye_retarget:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/stitching_eye.onnx"
  stitching_lip_retarget:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/stitching_lip.onnx"

animal_models:
  warping_spade:
    name: "WarpingSpadeModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/warping_spade.onnx"
  motion_extractor:
    name: "MotionExtractorModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/motion_extractor.onnx"
  app_feat_extractor:
    name: "AppearanceFeatureExtractorModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/appearance_feature_extractor.onnx"
  stitching:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/stitching.onnx"
  stitching_eye_retarget:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/stitching_eye.onnx"
  stitching_lip_retarget:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/stitching_lip.onnx"
  landmark:
    name: "LandmarkModel"
    predict_type: "ort"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/landmark.onnx"
  face_analysis:
    name: "MediaPipeFaceModel"
    predict_type: "mp"

crop_params:
  src_dsize: 512
  src_scale: 2.3
  src_vx_ratio: 0.0
  src_vy_ratio: -0.125
  dri_scale: 2.2
  dri_vx_ratio: 0.0
  dri_vy_ratio: -0.1


infer_params:
  flag_crop_driving_video: False
  flag_normalize_lip: True
  flag_source_video_eye_retargeting: False
  flag_video_editing_head_rotation: False
  flag_eye_retargeting: False
  flag_lip_retargeting: False
  flag_stitching: True
  flag_relative_motion: True
  flag_pasteback: True
  flag_do_crop: True
  flag_do_rot: True

  # NOT EXPORTED PARAMS
  lip_normalize_threshold: 0.03 # threshold for flag_normalize_lip
  source_video_eye_retargeting_threshold: 0.18 # threshold for eye retargeting when the input is a source video
  driving_smooth_observation_variance: 1e-7 # smoothing strength for animation driven by a source video; larger values give a smoother result, but too much smoothing loses motion accuracy
  anchor_frame: 0 # TO IMPLEMENT
  mask_crop_path: "./assets/mask_template.png"
  driving_multiplier: 1.0

  source_max_dim: 1280 # maximum height/width of the source image
  source_division: 2 # the source image height and width must be divisible by this number
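The only change from onnx_infer.yaml is face_analysis, which swaps the RetinaFace detection and 106-point pose ONNX pair for MediaPipe (predict_type: "mp"), so no model_path is required. Below is a rough sketch of what an "mp" face backend might look like, assuming the standard MediaPipe FaceMesh API; the actual MediaPipeFaceModel wrapper in this repository may differ.

# Hypothetical "mp" backend sketch using MediaPipe FaceMesh instead of ONNX detectors.
import cv2
import mediapipe as mp

face_mesh = mp.solutions.face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1)

def detect_landmarks(bgr_image):
    """Return normalized (x, y) landmarks for the first detected face, or None."""
    rgb = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    result = face_mesh.process(rgb)
    if not result.multi_face_landmarks:
        return None
    return [(lm.x, lm.y) for lm in result.multi_face_landmarks[0].landmark]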
difpoint/configs/trt_infer.yaml
ADDED
@@ -0,0 +1,105 @@
models:
  warping_spade:
    name: "WarpingSpadeModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/warping_spade-fix.trt"
  motion_extractor:
    name: "MotionExtractorModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/motion_extractor.trt"
  landmark:
    name: "LandmarkModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/landmark.trt"
  face_analysis:
    name: "FaceAnalysisModel"
    predict_type: "trt"
    model_path:
      - "./difpoint/checkpoints/liveportrait_onnx/retinaface_det_static.trt"
      - "./difpoint/checkpoints/liveportrait_onnx/face_2dpose_106_static.trt"
  app_feat_extractor:
    name: "AppearanceFeatureExtractorModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/appearance_feature_extractor.trt"
  stitching:
    name: "StitchingModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/stitching.trt"
  stitching_eye_retarget:
    name: "StitchingModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/stitching_eye.trt"
  stitching_lip_retarget:
    name: "StitchingModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/stitching_lip.trt"

animal_models:
  warping_spade:
    name: "WarpingSpadeModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/warping_spade-fix.trt"
  motion_extractor:
    name: "MotionExtractorModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/motion_extractor.trt"
  app_feat_extractor:
    name: "AppearanceFeatureExtractorModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/appearance_feature_extractor.trt"
  stitching:
    name: "StitchingModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/stitching.trt"
  stitching_eye_retarget:
    name: "StitchingModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/stitching_eye.trt"
  stitching_lip_retarget:
    name: "StitchingModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/stitching_lip.trt"
  landmark:
    name: "LandmarkModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/landmark.trt"
  face_analysis:
    name: "FaceAnalysisModel"
    predict_type: "trt"
    model_path:
      - "./difpoint/checkpoints/liveportrait_onnx/retinaface_det_static.trt"
      - "./difpoint/checkpoints/liveportrait_onnx/face_2dpose_106_static.trt"

crop_params:
  src_dsize: 512
  src_scale: 2.3
  src_vx_ratio: 0.0
  src_vy_ratio: -0.125
  dri_scale: 2.2
  dri_vx_ratio: 0.0
  dri_vy_ratio: -0.1


infer_params:
  flag_crop_driving_video: False
  flag_normalize_lip: False
  flag_source_video_eye_retargeting: False
  flag_video_editing_head_rotation: False
  flag_eye_retargeting: False
  flag_lip_retargeting: False
  flag_stitching: True
  flag_relative_motion: True
  flag_pasteback: True
  flag_do_crop: True
  flag_do_rot: True

  # NOT EXPORTED PARAMS
  lip_normalize_threshold: 0.03 # threshold for flag_normalize_lip
  source_video_eye_retargeting_threshold: 0.18 # threshold for eye retargeting when the input is a source video
  driving_smooth_observation_variance: 1e-7 # smoothing strength for animation driven by a source video; larger values give a smoother result, but too much smoothing loses motion accuracy
  anchor_frame: 0 # TO IMPLEMENT
  mask_crop_path: "./assets/mask_template.png"
  driving_multiplier: 1.0

  source_max_dim: 1280 # maximum height/width of the source image
  source_division: 2 # the source image height and width must be divisible by this number
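trt_infer.yaml points every model at a pre-built TensorRT engine (.trt) in the same checkpoint folders; it also turns flag_normalize_lip off. TensorRT engines are GPU- and version-specific, so they are typically rebuilt locally from the ONNX files referenced by onnx_infer.yaml. The sketch below shows one common way to do that with NVIDIA's trtexec tool; it is an assumption of mine, not part of this upload, and it does not explain the "warping_spade-fix.trt" naming, so outputs may need renaming to match the config.

# Hypothetical engine-build sketch: convert each ONNX checkpoint into a TensorRT
# engine with trtexec (must be on PATH). FP16 is an arbitrary choice here.
import subprocess
from pathlib import Path

onnx_dir = Path("difpoint/checkpoints/liveportrait_onnx")
for onnx_path in sorted(onnx_dir.glob("*.onnx")):
    engine_path = onnx_path.with_suffix(".trt")
    subprocess.run(
        ["trtexec", f"--onnx={onnx_path}", f"--saveEngine={engine_path}", "--fp16"],
        check=True,
    )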
difpoint/configs/trt_mp_infer.yaml
ADDED
@@ -0,0 +1,99 @@
models:
  warping_spade:
    name: "WarpingSpadeModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/warping_spade-fix.trt"
  motion_extractor:
    name: "MotionExtractorModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/motion_extractor.trt"
  landmark:
    name: "LandmarkModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/landmark.trt"
  face_analysis:
    name: "MediaPipeFaceModel"
    predict_type: "mp"
  app_feat_extractor:
    name: "AppearanceFeatureExtractorModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/appearance_feature_extractor.trt"
  stitching:
    name: "StitchingModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/stitching.trt"
  stitching_eye_retarget:
    name: "StitchingModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/stitching_eye.trt"
  stitching_lip_retarget:
    name: "StitchingModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/stitching_lip.trt"

animal_models:
  warping_spade:
    name: "WarpingSpadeModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/warping_spade-fix.trt"
  motion_extractor:
    name: "MotionExtractorModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/motion_extractor.trt"
  app_feat_extractor:
    name: "AppearanceFeatureExtractorModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/appearance_feature_extractor.trt"
  stitching:
    name: "StitchingModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/stitching.trt"
  stitching_eye_retarget:
    name: "StitchingModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/stitching_eye.trt"
  stitching_lip_retarget:
    name: "StitchingModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_animal_onnx/stitching_lip.trt"
  landmark:
    name: "LandmarkModel"
    predict_type: "trt"
    model_path: "./difpoint/checkpoints/liveportrait_onnx/landmark.trt"
  face_analysis:
    name: "MediaPipeFaceModel"
    predict_type: "mp"

crop_params:
  src_dsize: 512
  src_scale: 2.0
  src_vx_ratio: 0.0
  src_vy_ratio: -0.125
  dri_scale: 2.2
  dri_vx_ratio: 0.0
  dri_vy_ratio: -0.1


infer_params:
  flag_crop_driving_video: False
  flag_normalize_lip: False
  flag_source_video_eye_retargeting: False
  flag_video_editing_head_rotation: False
  flag_eye_retargeting: False
  flag_lip_retargeting: False
  flag_stitching: False
  flag_relative_motion: False
  flag_pasteback: False
  flag_do_crop: False
  flag_do_rot: False

  # NOT EXPORTED PARAMS
  lip_normalize_threshold: 0.03 # threshold for flag_normalize_lip
  source_video_eye_retargeting_threshold: 0.18 # threshold for eye retargeting when the input is a source video
  driving_smooth_observation_variance: 1e-7 # smoothing strength for animation driven by a source video; larger values give a smoother result, but too much smoothing loses motion accuracy
  anchor_frame: 0 # TO IMPLEMENT
  mask_crop_path: "./assets/mask_template.png"
  driving_multiplier: 1.0

  source_max_dim: 1280 # maximum height/width of the source image
  source_division: 2 # the source image height and width must be divisible by this number
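The four configs differ only in backend (ort vs. trt) and face detector (RetinaFace ONNX vs. MediaPipe), plus the looser defaults in this file (src_scale 2.0 and most infer flags disabled). A small sketch of picking one of the four files at runtime based on what is installed follows; the selection logic is illustrative and not something this upload defines.

# Hypothetical config selection: prefer TensorRT when the tensorrt package is
# importable, else ONNX Runtime; prefer MediaPipe face analysis when available.
import importlib.util

def pick_config() -> str:
    has_trt = importlib.util.find_spec("tensorrt") is not None
    has_mp = importlib.util.find_spec("mediapipe") is not None
    backend = "trt" if has_trt else "onnx"
    suffix = "_mp" if has_mp else ""
    return f"difpoint/configs/{backend}{suffix}_infer.yaml"

print(pick_config())  # e.g. "difpoint/configs/onnx_mp_infer.yaml"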
difpoint/datasets/norm_info_d6.5_c8.5_vox1_train.npz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9422e503e75df9d1bd455d8e0f9f5e2826b12956cdedbb5566097c0151bddafb
size 5580
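This .npz is stored as a Git LFS pointer, so the 5,580-byte payload (normalization statistics, going by the filename) only appears after an LFS fetch. A minimal sketch for inspecting it once downloaded; the array names inside are not visible from the pointer and are not assumed here.

# Inspect the LFS-backed stats file after `git lfs pull` has fetched the real content.
import numpy as np

data = np.load("difpoint/datasets/norm_info_d6.5_c8.5_vox1_train.npz")
for key in data.files:  # actual array names are unknown from the pointer alone
    print(key, data[key].shape, data[key].dtype)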