Upload folder using huggingface_hub
- .gitattributes +1 -0
- .github/FUNDING.yml +1 -0
- .github/preview.png +3 -0
- .github/workflows/ci.yml +58 -0
- .gitignore +6 -0
- facefusion/processors/choices.py +171 -9
- facefusion/processors/modules/age_modifier.py +254 -0
- facefusion/processors/modules/deep_swapper.py +464 -0
- facefusion/processors/modules/expression_restorer.py +298 -0
- facefusion/processors/modules/face_debugger.py +228 -0
- facefusion/processors/modules/face_editor.py +533 -0
- facefusion/processors/modules/lip_syncer.py +348 -0
- facefusion/uis/layouts/default.py +43 -43
- facefusion/uis/types.py +15 -15
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+.github/preview.png filter=lfs diff=lfs merge=lfs -text
.github/FUNDING.yml
ADDED
@@ -0,0 +1 @@
+custom: [ buymeacoffee.com/facefusion, ko-fi.com/facefusion ]
.github/preview.png
ADDED
(binary image, tracked via Git LFS)
.github/workflows/ci.yml
ADDED
@@ -0,0 +1,58 @@
name: ci

on: [ push, pull_request ]

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
    - name: Checkout
      uses: actions/checkout@v4
    - name: Set up Python 3.12
      uses: actions/setup-python@v5
      with:
        python-version: '3.12'
    - run: pip install flake8
    - run: pip install flake8-import-order
    - run: pip install mypy
    - run: flake8 facefusion.py install.py
    - run: flake8 facefusion tests
    - run: mypy facefusion.py install.py
    - run: mypy facefusion tests
  test:
    strategy:
      matrix:
        os: [ macos-latest, ubuntu-latest, windows-latest ]
    runs-on: ${{ matrix.os }}
    steps:
    - name: Checkout
      uses: actions/checkout@v4
    - name: Set up FFmpeg
      uses: AnimMouse/setup-ffmpeg@v1
    - name: Set up Python 3.12
      uses: actions/setup-python@v5
      with:
        python-version: '3.12'
    - run: python install.py --onnxruntime default --skip-conda
    - run: pip install pytest
    - run: pytest
  report:
    needs: test
    runs-on: ubuntu-latest
    steps:
    - name: Checkout
      uses: actions/checkout@v4
    - name: Set up FFmpeg
      uses: FedericoCarboni/setup-ffmpeg@v3
    - name: Set up Python 3.12
      uses: actions/setup-python@v5
      with:
        python-version: '3.12'
    - run: python install.py --onnxruntime default --skip-conda
    - run: pip install coveralls
    - run: pip install pytest
    - run: pip install pytest-cov
    - run: pytest tests --cov facefusion
    - run: coveralls --service github
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
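Note: the lint job above is plain flake8 and mypy invocations, so the same checks can be reproduced outside of GitHub Actions. A minimal local sketch (assuming flake8, flake8-import-order and mypy are already installed; the target paths are taken verbatim from the workflow steps):

import subprocess

# Run the same lint and type-check commands as the ci.yml lint job.
for command in (
    [ 'flake8', 'facefusion.py', 'install.py' ],
    [ 'flake8', 'facefusion', 'tests' ],
    [ 'mypy', 'facefusion.py', 'install.py' ],
    [ 'mypy', 'facefusion', 'tests' ]
):
    subprocess.run(command, check = True)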
.gitignore
ADDED
@@ -0,0 +1,6 @@
__pycache__
.assets
.caches
.jobs
.idea
.vscode
facefusion/processors/choices.py
CHANGED
@@ -2,7 +2,169 @@ from typing import List, Sequence
 
 from facefusion.common_helper import create_float_range, create_int_range
 from facefusion.filesystem import get_file_name, resolve_file_paths, resolve_relative_path
-from facefusion.processors.types import
+from facefusion.processors.types import AgeModifierModel, DeepSwapperModel, ExpressionRestorerModel, FaceDebuggerItem, FaceEditorModel, FaceEnhancerModel, FaceSwapperModel, FaceSwapperSet, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel
+
+age_modifier_models : List[AgeModifierModel] = [ 'styleganex_age' ]
+deep_swapper_models : List[DeepSwapperModel] =\
+[
+    'druuzil/adam_levine_320',
+    'druuzil/adrianne_palicki_384',
+    'druuzil/agnetha_falskog_224',
+    'druuzil/alan_ritchson_320',
+    'druuzil/alicia_vikander_320',
+    'druuzil/amber_midthunder_320',
+    'druuzil/andras_arato_384',
+    'druuzil/andrew_tate_320',
+    'druuzil/angelina_jolie_384',
+    'druuzil/anne_hathaway_320',
+    'druuzil/anya_chalotra_320',
+    'druuzil/arnold_schwarzenegger_320',
+    'druuzil/benjamin_affleck_320',
+    'druuzil/benjamin_stiller_384',
+    'druuzil/bradley_pitt_224',
+    'druuzil/brie_larson_384',
+    'druuzil/bruce_campbell_384',
+    'druuzil/bryan_cranston_320',
+    'druuzil/catherine_blanchett_352',
+    'druuzil/christian_bale_320',
+    'druuzil/christopher_hemsworth_320',
+    'druuzil/christoph_waltz_384',
+    'druuzil/cillian_murphy_320',
+    'druuzil/cobie_smulders_256',
+    'druuzil/dwayne_johnson_384',
+    'druuzil/edward_norton_320',
+    'druuzil/elisabeth_shue_320',
+    'druuzil/elizabeth_olsen_384',
+    'druuzil/elon_musk_320',
+    'druuzil/emily_blunt_320',
+    'druuzil/emma_stone_384',
+    'druuzil/emma_watson_320',
+    'druuzil/erin_moriarty_384',
+    'druuzil/eva_green_320',
+    'druuzil/ewan_mcgregor_320',
+    'druuzil/florence_pugh_320',
+    'druuzil/freya_allan_320',
+    'druuzil/gary_cole_224',
+    'druuzil/gigi_hadid_224',
+    'druuzil/harrison_ford_384',
+    'druuzil/hayden_christensen_320',
+    'druuzil/heath_ledger_320',
+    'druuzil/henry_cavill_448',
+    'druuzil/hugh_jackman_384',
+    'druuzil/idris_elba_320',
+    'druuzil/jack_nicholson_320',
+    'druuzil/james_carrey_384',
+    'druuzil/james_mcavoy_320',
+    'druuzil/james_varney_320',
+    'druuzil/jason_momoa_320',
+    'druuzil/jason_statham_320',
+    'druuzil/jennifer_connelly_384',
+    'druuzil/jimmy_donaldson_320',
+    'druuzil/jordan_peterson_384',
+    'druuzil/karl_urban_224',
+    'druuzil/kate_beckinsale_384',
+    'druuzil/laurence_fishburne_384',
+    'druuzil/lili_reinhart_320',
+    'druuzil/luke_evans_384',
+    'druuzil/mads_mikkelsen_384',
+    'druuzil/mary_winstead_320',
+    'druuzil/margaret_qualley_384',
+    'druuzil/melina_juergens_320',
+    'druuzil/michael_fassbender_320',
+    'druuzil/michael_fox_320',
+    'druuzil/millie_bobby_brown_320',
+    'druuzil/morgan_freeman_320',
+    'druuzil/patrick_stewart_224',
+    'druuzil/rachel_weisz_384',
+    'druuzil/rebecca_ferguson_320',
+    'druuzil/scarlett_johansson_320',
+    'druuzil/shannen_doherty_384',
+    'druuzil/seth_macfarlane_384',
+    'druuzil/thomas_cruise_320',
+    'druuzil/thomas_hanks_384',
+    'druuzil/william_murray_384',
+    'druuzil/zoe_saldana_384',
+    'edel/emma_roberts_224',
+    'edel/ivanka_trump_224',
+    'edel/lize_dzjabrailova_224',
+    'edel/sidney_sweeney_224',
+    'edel/winona_ryder_224',
+    'iperov/alexandra_daddario_224',
+    'iperov/alexei_navalny_224',
+    'iperov/amber_heard_224',
+    'iperov/dilraba_dilmurat_224',
+    'iperov/elon_musk_224',
+    'iperov/emilia_clarke_224',
+    'iperov/emma_watson_224',
+    'iperov/erin_moriarty_224',
+    'iperov/jackie_chan_224',
+    'iperov/james_carrey_224',
+    'iperov/jason_statham_320',
+    'iperov/keanu_reeves_320',
+    'iperov/margot_robbie_224',
+    'iperov/natalie_dormer_224',
+    'iperov/nicolas_coppola_224',
+    'iperov/robert_downey_224',
+    'iperov/rowan_atkinson_224',
+    'iperov/ryan_reynolds_224',
+    'iperov/scarlett_johansson_224',
+    'iperov/sylvester_stallone_224',
+    'iperov/thomas_cruise_224',
+    'iperov/thomas_holland_224',
+    'iperov/vin_diesel_224',
+    'iperov/vladimir_putin_224',
+    'jen/angelica_trae_288',
+    'jen/ella_freya_224',
+    'jen/emma_myers_320',
+    'jen/evie_pickerill_224',
+    'jen/kang_hyewon_320',
+    'jen/maddie_mead_224',
+    'jen/nicole_turnbull_288',
+    'mats/alica_schmidt_320',
+    'mats/ashley_alexiss_224',
+    'mats/billie_eilish_224',
+    'mats/brie_larson_224',
+    'mats/cara_delevingne_224',
+    'mats/carolin_kebekus_224',
+    'mats/chelsea_clinton_224',
+    'mats/claire_boucher_224',
+    'mats/corinna_kopf_224',
+    'mats/florence_pugh_224',
+    'mats/hillary_clinton_224',
+    'mats/jenna_fischer_224',
+    'mats/kim_jisoo_320',
+    'mats/mica_suarez_320',
+    'mats/shailene_woodley_224',
+    'mats/shraddha_kapoor_320',
+    'mats/yu_jimin_352',
+    'rumateus/alison_brie_224',
+    'rumateus/amber_heard_224',
+    'rumateus/angelina_jolie_224',
+    'rumateus/aubrey_plaza_224',
+    'rumateus/bridget_regan_224',
+    'rumateus/cobie_smulders_224',
+    'rumateus/deborah_woll_224',
+    'rumateus/dua_lipa_224',
+    'rumateus/emma_stone_224',
+    'rumateus/hailee_steinfeld_224',
+    'rumateus/hilary_duff_224',
+    'rumateus/jessica_alba_224',
+    'rumateus/jessica_biel_224',
+    'rumateus/john_cena_224',
+    'rumateus/kim_kardashian_224',
+    'rumateus/kristen_bell_224',
+    'rumateus/lucy_liu_224',
+    'rumateus/margot_robbie_224',
+    'rumateus/megan_fox_224',
+    'rumateus/meghan_markle_224',
+    'rumateus/millie_bobby_brown_224',
+    'rumateus/natalie_portman_224',
+    'rumateus/nicki_minaj_224',
+    'rumateus/olivia_wilde_224',
+    'rumateus/shay_mitchell_224',
+    'rumateus/sophie_turner_224',
+    'rumateus/taylor_swift_224'
+]
 
 custom_model_file_paths = resolve_file_paths(resolve_relative_path('../.assets/models/custom'))
 
@@ -12,9 +174,9 @@ if custom_model_file_paths:
         model_id = '/'.join([ 'custom', get_file_name(model_file_path) ])
         deep_swapper_models.append(model_id)
 
-
-
-
+expression_restorer_models : List[ExpressionRestorerModel] = [ 'live_portrait' ]
+face_debugger_items : List[FaceDebuggerItem] = [ 'bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask', 'face-detector-score', 'face-landmarker-score', 'age', 'gender', 'race' ]
+face_editor_models : List[FaceEditorModel] = [ 'live_portrait' ]
 face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'gpen_bfr_1024', 'gpen_bfr_2048', 'restoreformer_plus_plus' ]
 face_swapper_set : FaceSwapperSet =\
 {
@@ -36,11 +198,11 @@ face_swapper_models : List[FaceSwapperModel] = list(face_swapper_set.keys())
 frame_colorizer_models : List[FrameColorizerModel] = [ 'ddcolor', 'ddcolor_artistic', 'deoldify', 'deoldify_artistic', 'deoldify_stable' ]
 frame_colorizer_sizes : List[str] = [ '192x192', '256x256', '384x384', '512x512' ]
 frame_enhancer_models : List[FrameEnhancerModel] = [ 'clear_reality_x4', 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x2', 'real_esrgan_x2_fp16', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_esrgan_x8', 'real_esrgan_x8_fp16', 'real_hatgan_x4', 'real_web_photo_x4', 'realistic_rescaler_x4', 'remacri_x4', 'siax_x4', 'span_kendata_x4', 'swin2_sr_x4', 'ultra_sharp_x4', 'ultra_sharp_2_x4' ]
-
+lip_syncer_models : List[LipSyncerModel] = [ 'edtalk_256', 'wav2lip_96', 'wav2lip_gan_96' ]
 
-
-
-
+age_modifier_direction_range : Sequence[int] = create_int_range(-100, 100, 1)
+deep_swapper_morph_range : Sequence[int] = create_int_range(0, 100, 1)
+expression_restorer_factor_range : Sequence[int] = create_int_range(0, 100, 1)
 face_editor_eyebrow_direction_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
 face_editor_eye_gaze_horizontal_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
 face_editor_eye_gaze_vertical_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
@@ -59,4 +221,4 @@ face_enhancer_blend_range : Sequence[int] = create_int_range(0, 100, 1)
 face_enhancer_weight_range : Sequence[float] = create_float_range(0.0, 1.0, 0.05)
 frame_colorizer_blend_range : Sequence[int] = create_int_range(0, 100, 1)
 frame_enhancer_blend_range : Sequence[int] = create_int_range(0, 100, 1)
-
+lip_syncer_weight_range : Sequence[float] = create_float_range(0.0, 1.0, 0.05)
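Note: the new *_range choices above are plain stepped sequences produced by create_int_range and create_float_range. A minimal sketch of what such a helper yields; this is a hypothetical stand-in, not the actual facefusion.common_helper implementation, which may differ in signature or rounding:

# Hypothetical stand-in for create_int_range: an inclusive stepped range.
def create_int_range(start : int, stop : int, step : int) -> list:
    return list(range(start, stop + step, step))

# age_modifier_direction_range would then be [-100, -99, ..., 100]
# and deep_swapper_morph_range [0, 1, ..., 100].
print(create_int_range(-100, 100, 1)[:3], create_int_range(-100, 100, 1)[-1])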
facefusion/processors/modules/age_modifier.py
ADDED
@@ -0,0 +1,254 @@
from argparse import ArgumentParser
from functools import lru_cache
from typing import List

import cv2
import numpy

import facefusion.choices
import facefusion.jobs.job_manager
import facefusion.jobs.job_store
import facefusion.processors.core as processors
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, video_manager, wording
from facefusion.common_helper import create_int_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.execution import has_execution_provider
from facefusion.face_analyser import get_many_faces, get_one_face
from facefusion.face_helper import merge_matrix, paste_back, scale_face_landmark_5, warp_face_by_face_landmark_5
from facefusion.face_masker import create_box_mask, create_occlusion_mask
from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
from facefusion.face_store import get_reference_faces
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.types import AgeModifierDirection, AgeModifierInputs
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import thread_semaphore
from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import match_frame_color, read_image, read_static_image, write_image


@lru_cache(maxsize = None)
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
    return\
    {
        'styleganex_age':
        {
            'hashes':
            {
                'age_modifier':
                {
                    'url': resolve_download_url('models-3.1.0', 'styleganex_age.hash'),
                    'path': resolve_relative_path('../.assets/models/styleganex_age.hash')
                }
            },
            'sources':
            {
                'age_modifier':
                {
                    'url': resolve_download_url('models-3.1.0', 'styleganex_age.onnx'),
                    'path': resolve_relative_path('../.assets/models/styleganex_age.onnx')
                }
            },
            'templates':
            {
                'target': 'ffhq_512',
                'target_with_background': 'styleganex_384'
            },
            'sizes':
            {
                'target': (256, 256),
                'target_with_background': (384, 384)
            }
        }
    }


def get_inference_pool() -> InferencePool:
    model_names = [ state_manager.get_item('age_modifier_model') ]
    model_source_set = get_model_options().get('sources')

    return inference_manager.get_inference_pool(__name__, model_names, model_source_set)


def clear_inference_pool() -> None:
    model_names = [ state_manager.get_item('age_modifier_model') ]
    inference_manager.clear_inference_pool(__name__, model_names)


def get_model_options() -> ModelOptions:
    model_name = state_manager.get_item('age_modifier_model')
    return create_static_model_set('full').get(model_name)


def register_args(program : ArgumentParser) -> None:
    group_processors = find_argument_group(program, 'processors')
    if group_processors:
        group_processors.add_argument('--age-modifier-model', help = wording.get('help.age_modifier_model'), default = config.get_str_value('processors', 'age_modifier_model', 'styleganex_age'), choices = processors_choices.age_modifier_models)
        group_processors.add_argument('--age-modifier-direction', help = wording.get('help.age_modifier_direction'), type = int, default = config.get_int_value('processors', 'age_modifier_direction', '0'), choices = processors_choices.age_modifier_direction_range, metavar = create_int_metavar(processors_choices.age_modifier_direction_range))
        facefusion.jobs.job_store.register_step_keys([ 'age_modifier_model', 'age_modifier_direction' ])


def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
    apply_state_item('age_modifier_model', args.get('age_modifier_model'))
    apply_state_item('age_modifier_direction', args.get('age_modifier_direction'))


def pre_check() -> bool:
    model_hash_set = get_model_options().get('hashes')
    model_source_set = get_model_options().get('sources')

    return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)


def pre_process(mode : ProcessMode) -> bool:
    if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
        logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
        return False
    if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
        logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
        return False
    if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
        logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
        return False
    return True


def post_process() -> None:
    read_static_image.cache_clear()
    video_manager.clear_video_pool()
    if state_manager.get_item('video_memory_strategy') in [ 'strict', 'moderate' ]:
        clear_inference_pool()
    if state_manager.get_item('video_memory_strategy') == 'strict':
        content_analyser.clear_inference_pool()
        face_classifier.clear_inference_pool()
        face_detector.clear_inference_pool()
        face_landmarker.clear_inference_pool()
        face_masker.clear_inference_pool()
        face_recognizer.clear_inference_pool()


def modify_age(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
    model_templates = get_model_options().get('templates')
    model_sizes = get_model_options().get('sizes')
    face_landmark_5 = target_face.landmark_set.get('5/68').copy()
    crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, face_landmark_5, model_templates.get('target'), model_sizes.get('target'))
    extend_face_landmark_5 = scale_face_landmark_5(face_landmark_5, 0.875)
    extend_vision_frame, extend_affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, extend_face_landmark_5, model_templates.get('target_with_background'), model_sizes.get('target_with_background'))
    extend_vision_frame_raw = extend_vision_frame.copy()
    box_mask = create_box_mask(extend_vision_frame, state_manager.get_item('face_mask_blur'), (0, 0, 0, 0))
    crop_masks =\
    [
        box_mask
    ]

    if 'occlusion' in state_manager.get_item('face_mask_types'):
        occlusion_mask = create_occlusion_mask(crop_vision_frame)
        combined_matrix = merge_matrix([ extend_affine_matrix, cv2.invertAffineTransform(affine_matrix) ])
        occlusion_mask = cv2.warpAffine(occlusion_mask, combined_matrix, model_sizes.get('target_with_background'))
        crop_masks.append(occlusion_mask)

    crop_vision_frame = prepare_vision_frame(crop_vision_frame)
    extend_vision_frame = prepare_vision_frame(extend_vision_frame)
    age_modifier_direction = numpy.array(numpy.interp(state_manager.get_item('age_modifier_direction'), [ -100, 100 ], [ 2.5, -2.5 ])).astype(numpy.float32)
    extend_vision_frame = forward(crop_vision_frame, extend_vision_frame, age_modifier_direction)
    extend_vision_frame = normalize_extend_frame(extend_vision_frame)
    extend_vision_frame = match_frame_color(extend_vision_frame_raw, extend_vision_frame)
    extend_affine_matrix *= (model_sizes.get('target')[0] * 4) / model_sizes.get('target_with_background')[0]
    crop_mask = numpy.minimum.reduce(crop_masks).clip(0, 1)
    crop_mask = cv2.resize(crop_mask, (model_sizes.get('target')[0] * 4, model_sizes.get('target')[1] * 4))
    paste_vision_frame = paste_back(temp_vision_frame, extend_vision_frame, crop_mask, extend_affine_matrix)
    return paste_vision_frame


def forward(crop_vision_frame : VisionFrame, extend_vision_frame : VisionFrame, age_modifier_direction : AgeModifierDirection) -> VisionFrame:
    age_modifier = get_inference_pool().get('age_modifier')
    age_modifier_inputs = {}

    if has_execution_provider('coreml'):
        age_modifier.set_providers([ facefusion.choices.execution_provider_set.get('cpu') ])

    for age_modifier_input in age_modifier.get_inputs():
        if age_modifier_input.name == 'target':
            age_modifier_inputs[age_modifier_input.name] = crop_vision_frame
        if age_modifier_input.name == 'target_with_background':
            age_modifier_inputs[age_modifier_input.name] = extend_vision_frame
        if age_modifier_input.name == 'direction':
            age_modifier_inputs[age_modifier_input.name] = age_modifier_direction

    with thread_semaphore():
        crop_vision_frame = age_modifier.run(None, age_modifier_inputs)[0][0]

    return crop_vision_frame


def prepare_vision_frame(vision_frame : VisionFrame) -> VisionFrame:
    vision_frame = vision_frame[:, :, ::-1] / 255.0
    vision_frame = (vision_frame - 0.5) / 0.5
    vision_frame = numpy.expand_dims(vision_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
    return vision_frame


def normalize_extend_frame(extend_vision_frame : VisionFrame) -> VisionFrame:
    model_sizes = get_model_options().get('sizes')
    extend_vision_frame = numpy.clip(extend_vision_frame, -1, 1)
    extend_vision_frame = (extend_vision_frame + 1) / 2
    extend_vision_frame = extend_vision_frame.transpose(1, 2, 0).clip(0, 255)
    extend_vision_frame = (extend_vision_frame * 255.0)
    extend_vision_frame = extend_vision_frame.astype(numpy.uint8)[:, :, ::-1]
    extend_vision_frame = cv2.resize(extend_vision_frame, (model_sizes.get('target')[0] * 4, model_sizes.get('target')[1] * 4), interpolation = cv2.INTER_AREA)
    return extend_vision_frame


def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
    return modify_age(target_face, temp_vision_frame)


def process_frame(inputs : AgeModifierInputs) -> VisionFrame:
    reference_faces = inputs.get('reference_faces')
    target_vision_frame = inputs.get('target_vision_frame')
    many_faces = sort_and_filter_faces(get_many_faces([ target_vision_frame ]))

    if state_manager.get_item('face_selector_mode') == 'many':
        if many_faces:
            for target_face in many_faces:
                target_vision_frame = modify_age(target_face, target_vision_frame)
    if state_manager.get_item('face_selector_mode') == 'one':
        target_face = get_one_face(many_faces)
        if target_face:
            target_vision_frame = modify_age(target_face, target_vision_frame)
    if state_manager.get_item('face_selector_mode') == 'reference':
        similar_faces = find_similar_faces(many_faces, reference_faces, state_manager.get_item('reference_face_distance'))
        if similar_faces:
            for similar_face in similar_faces:
                target_vision_frame = modify_age(similar_face, target_vision_frame)
    return target_vision_frame


def process_frames(source_path : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None:
    reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None

    for queue_payload in process_manager.manage(queue_payloads):
        target_vision_path = queue_payload['frame_path']
        target_vision_frame = read_image(target_vision_path)
        output_vision_frame = process_frame(
        {
            'reference_faces': reference_faces,
            'target_vision_frame': target_vision_frame
        })
        write_image(target_vision_path, output_vision_frame)
        update_progress(1)


def process_image(source_path : str, target_path : str, output_path : str) -> None:
    reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None
    target_vision_frame = read_static_image(target_path)
    output_vision_frame = process_frame(
    {
        'reference_faces': reference_faces,
        'target_vision_frame': target_vision_frame
    })
    write_image(output_path, output_vision_frame)


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
    processors.multi_process_frames(None, temp_frame_paths, process_frames)
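Note: in modify_age() the user-facing --age-modifier-direction value in [-100, 100] is remapped, with an inverted sign, onto the model's direction input. A short worked sketch of that interpolation, mirroring the numpy.interp call in the module:

import numpy

# Same mapping as in modify_age(): [-100, 100] -> [2.5, -2.5].
for user_value in (-100, 0, 50, 100):
    direction = numpy.interp(user_value, [ -100, 100 ], [ 2.5, -2.5 ])
    print(user_value, float(direction))
# prints: -100 2.5, 0 0.0, 50 -1.25, 100 -2.5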
facefusion/processors/modules/deep_swapper.py
ADDED
@@ -0,0 +1,464 @@
from argparse import ArgumentParser
from functools import lru_cache
from typing import List, Tuple

import cv2
import numpy
from cv2.typing import Size

import facefusion.jobs.job_manager
import facefusion.jobs.job_store
import facefusion.processors.core as processors
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, video_manager, wording
from facefusion.common_helper import create_int_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url_by_provider
from facefusion.face_analyser import get_many_faces, get_one_face
from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5
from facefusion.face_masker import create_area_mask, create_box_mask, create_occlusion_mask, create_region_mask
from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
from facefusion.face_store import get_reference_faces
from facefusion.filesystem import get_file_name, in_directory, is_image, is_video, resolve_file_paths, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.types import DeepSwapperInputs, DeepSwapperMorph
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import thread_semaphore
from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, InferencePool, Mask, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import conditional_match_frame_color, read_image, read_static_image, write_image


@lru_cache(maxsize = None)
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
    model_config = []

    if download_scope == 'full':
        model_config.extend(
        [
            ('druuzil', 'adam_levine_320'),
            ('druuzil', 'adrianne_palicki_384'),
            ('druuzil', 'agnetha_falskog_224'),
            ('druuzil', 'alan_ritchson_320'),
            ('druuzil', 'alicia_vikander_320'),
            ('druuzil', 'amber_midthunder_320'),
            ('druuzil', 'andras_arato_384'),
            ('druuzil', 'andrew_tate_320'),
            ('druuzil', 'angelina_jolie_384'),
            ('druuzil', 'anne_hathaway_320'),
            ('druuzil', 'anya_chalotra_320'),
            ('druuzil', 'arnold_schwarzenegger_320'),
            ('druuzil', 'benjamin_affleck_320'),
            ('druuzil', 'benjamin_stiller_384'),
            ('druuzil', 'bradley_pitt_224'),
            ('druuzil', 'brie_larson_384'),
            ('druuzil', 'bruce_campbell_384'),
            ('druuzil', 'bryan_cranston_320'),
            ('druuzil', 'catherine_blanchett_352'),
            ('druuzil', 'christian_bale_320'),
            ('druuzil', 'christopher_hemsworth_320'),
            ('druuzil', 'christoph_waltz_384'),
            ('druuzil', 'cillian_murphy_320'),
            ('druuzil', 'cobie_smulders_256'),
            ('druuzil', 'dwayne_johnson_384'),
            ('druuzil', 'edward_norton_320'),
            ('druuzil', 'elisabeth_shue_320'),
            ('druuzil', 'elizabeth_olsen_384'),
            ('druuzil', 'elon_musk_320'),
            ('druuzil', 'emily_blunt_320'),
            ('druuzil', 'emma_stone_384'),
            ('druuzil', 'emma_watson_320'),
            ('druuzil', 'erin_moriarty_384'),
            ('druuzil', 'eva_green_320'),
            ('druuzil', 'ewan_mcgregor_320'),
            ('druuzil', 'florence_pugh_320'),
            ('druuzil', 'freya_allan_320'),
            ('druuzil', 'gary_cole_224'),
            ('druuzil', 'gigi_hadid_224'),
            ('druuzil', 'harrison_ford_384'),
            ('druuzil', 'hayden_christensen_320'),
            ('druuzil', 'heath_ledger_320'),
            ('druuzil', 'henry_cavill_448'),
            ('druuzil', 'hugh_jackman_384'),
            ('druuzil', 'idris_elba_320'),
            ('druuzil', 'jack_nicholson_320'),
            ('druuzil', 'james_carrey_384'),
            ('druuzil', 'james_mcavoy_320'),
            ('druuzil', 'james_varney_320'),
            ('druuzil', 'jason_momoa_320'),
            ('druuzil', 'jason_statham_320'),
            ('druuzil', 'jennifer_connelly_384'),
            ('druuzil', 'jimmy_donaldson_320'),
            ('druuzil', 'jordan_peterson_384'),
            ('druuzil', 'karl_urban_224'),
            ('druuzil', 'kate_beckinsale_384'),
            ('druuzil', 'laurence_fishburne_384'),
            ('druuzil', 'lili_reinhart_320'),
            ('druuzil', 'luke_evans_384'),
            ('druuzil', 'mads_mikkelsen_384'),
            ('druuzil', 'mary_winstead_320'),
            ('druuzil', 'margaret_qualley_384'),
            ('druuzil', 'melina_juergens_320'),
            ('druuzil', 'michael_fassbender_320'),
            ('druuzil', 'michael_fox_320'),
            ('druuzil', 'millie_bobby_brown_320'),
            ('druuzil', 'morgan_freeman_320'),
            ('druuzil', 'patrick_stewart_224'),
            ('druuzil', 'rachel_weisz_384'),
            ('druuzil', 'rebecca_ferguson_320'),
            ('druuzil', 'scarlett_johansson_320'),
            ('druuzil', 'shannen_doherty_384'),
            ('druuzil', 'seth_macfarlane_384'),
            ('druuzil', 'thomas_cruise_320'),
            ('druuzil', 'thomas_hanks_384'),
            ('druuzil', 'william_murray_384'),
            ('druuzil', 'zoe_saldana_384'),
            ('edel', 'emma_roberts_224'),
            ('edel', 'ivanka_trump_224'),
            ('edel', 'lize_dzjabrailova_224'),
            ('edel', 'sidney_sweeney_224'),
            ('edel', 'winona_ryder_224')
        ])
    if download_scope in [ 'lite', 'full' ]:
        model_config.extend(
        [
            ('iperov', 'alexandra_daddario_224'),
            ('iperov', 'alexei_navalny_224'),
            ('iperov', 'amber_heard_224'),
            ('iperov', 'dilraba_dilmurat_224'),
            ('iperov', 'elon_musk_224'),
            ('iperov', 'emilia_clarke_224'),
            ('iperov', 'emma_watson_224'),
            ('iperov', 'erin_moriarty_224'),
            ('iperov', 'jackie_chan_224'),
            ('iperov', 'james_carrey_224'),
            ('iperov', 'jason_statham_320'),
            ('iperov', 'keanu_reeves_320'),
            ('iperov', 'margot_robbie_224'),
            ('iperov', 'natalie_dormer_224'),
            ('iperov', 'nicolas_coppola_224'),
            ('iperov', 'robert_downey_224'),
            ('iperov', 'rowan_atkinson_224'),
            ('iperov', 'ryan_reynolds_224'),
            ('iperov', 'scarlett_johansson_224'),
            ('iperov', 'sylvester_stallone_224'),
            ('iperov', 'thomas_cruise_224'),
            ('iperov', 'thomas_holland_224'),
            ('iperov', 'vin_diesel_224'),
            ('iperov', 'vladimir_putin_224')
        ])
    if download_scope == 'full':
        model_config.extend(
        [
            ('jen', 'angelica_trae_288'),
            ('jen', 'ella_freya_224'),
            ('jen', 'emma_myers_320'),
            ('jen', 'evie_pickerill_224'),
            ('jen', 'kang_hyewon_320'),
            ('jen', 'maddie_mead_224'),
            ('jen', 'nicole_turnbull_288'),
            ('mats', 'alica_schmidt_320'),
            ('mats', 'ashley_alexiss_224'),
            ('mats', 'billie_eilish_224'),
            ('mats', 'brie_larson_224'),
            ('mats', 'cara_delevingne_224'),
            ('mats', 'carolin_kebekus_224'),
            ('mats', 'chelsea_clinton_224'),
            ('mats', 'claire_boucher_224'),
            ('mats', 'corinna_kopf_224'),
            ('mats', 'florence_pugh_224'),
            ('mats', 'hillary_clinton_224'),
            ('mats', 'jenna_fischer_224'),
            ('mats', 'kim_jisoo_320'),
            ('mats', 'mica_suarez_320'),
            ('mats', 'shailene_woodley_224'),
            ('mats', 'shraddha_kapoor_320'),
            ('mats', 'yu_jimin_352'),
            ('rumateus', 'alison_brie_224'),
            ('rumateus', 'amber_heard_224'),
            ('rumateus', 'angelina_jolie_224'),
            ('rumateus', 'aubrey_plaza_224'),
            ('rumateus', 'bridget_regan_224'),
            ('rumateus', 'cobie_smulders_224'),
            ('rumateus', 'deborah_woll_224'),
            ('rumateus', 'dua_lipa_224'),
            ('rumateus', 'emma_stone_224'),
            ('rumateus', 'hailee_steinfeld_224'),
            ('rumateus', 'hilary_duff_224'),
            ('rumateus', 'jessica_alba_224'),
            ('rumateus', 'jessica_biel_224'),
            ('rumateus', 'john_cena_224'),
            ('rumateus', 'kim_kardashian_224'),
            ('rumateus', 'kristen_bell_224'),
            ('rumateus', 'lucy_liu_224'),
            ('rumateus', 'margot_robbie_224'),
            ('rumateus', 'megan_fox_224'),
            ('rumateus', 'meghan_markle_224'),
            ('rumateus', 'millie_bobby_brown_224'),
            ('rumateus', 'natalie_portman_224'),
            ('rumateus', 'nicki_minaj_224'),
            ('rumateus', 'olivia_wilde_224'),
            ('rumateus', 'shay_mitchell_224'),
            ('rumateus', 'sophie_turner_224'),
            ('rumateus', 'taylor_swift_224')
        ])
    model_set : ModelSet = {}

    for model_scope, model_name in model_config:
        model_id = '/'.join([ model_scope, model_name ])

        model_set[model_id] =\
        {
            'hashes':
            {
                'deep_swapper':
                {
                    'url': resolve_download_url_by_provider('huggingface', 'deepfacelive-models-' + model_scope, model_name + '.hash'),
                    'path': resolve_relative_path('../.assets/models/' + model_scope + '/' + model_name + '.hash')
                }
            },
            'sources':
            {
                'deep_swapper':
                {
                    'url': resolve_download_url_by_provider('huggingface', 'deepfacelive-models-' + model_scope, model_name + '.dfm'),
                    'path': resolve_relative_path('../.assets/models/' + model_scope + '/' + model_name + '.dfm')
                }
            },
            'template': 'dfl_whole_face'
        }

    custom_model_file_paths = resolve_file_paths(resolve_relative_path('../.assets/models/custom'))

    if custom_model_file_paths:

        for model_file_path in custom_model_file_paths:
            model_id = '/'.join([ 'custom', get_file_name(model_file_path) ])

            model_set[model_id] =\
            {
                'sources':
                {
                    'deep_swapper':
                    {
                        'path': resolve_relative_path(model_file_path)
                    }
                },
                'template': 'dfl_whole_face'
            }

    return model_set


def get_inference_pool() -> InferencePool:
    model_names = [ state_manager.get_item('deep_swapper_model') ]
    model_source_set = get_model_options().get('sources')

    return inference_manager.get_inference_pool(__name__, model_names, model_source_set)


def clear_inference_pool() -> None:
    model_names = [ state_manager.get_item('deep_swapper_model') ]
    inference_manager.clear_inference_pool(__name__, model_names)


def get_model_options() -> ModelOptions:
    model_name = state_manager.get_item('deep_swapper_model')
    return create_static_model_set('full').get(model_name)


def get_model_size() -> Size:
    deep_swapper = get_inference_pool().get('deep_swapper')

    for deep_swapper_input in deep_swapper.get_inputs():
        if deep_swapper_input.name == 'in_face:0':
            return deep_swapper_input.shape[1:3]

    return 0, 0


def register_args(program : ArgumentParser) -> None:
    group_processors = find_argument_group(program, 'processors')
    if group_processors:
        group_processors.add_argument('--deep-swapper-model', help = wording.get('help.deep_swapper_model'), default = config.get_str_value('processors', 'deep_swapper_model', 'iperov/elon_musk_224'), choices = processors_choices.deep_swapper_models)
        group_processors.add_argument('--deep-swapper-morph', help = wording.get('help.deep_swapper_morph'), type = int, default = config.get_int_value('processors', 'deep_swapper_morph', '100'), choices = processors_choices.deep_swapper_morph_range, metavar = create_int_metavar(processors_choices.deep_swapper_morph_range))
        facefusion.jobs.job_store.register_step_keys([ 'deep_swapper_model', 'deep_swapper_morph' ])


def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
    apply_state_item('deep_swapper_model', args.get('deep_swapper_model'))
    apply_state_item('deep_swapper_morph', args.get('deep_swapper_morph'))


def pre_check() -> bool:
    model_hash_set = get_model_options().get('hashes')
    model_source_set = get_model_options().get('sources')

    if model_hash_set and model_source_set:
        return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)
    return True


def pre_process(mode : ProcessMode) -> bool:
    if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
        logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
        return False
    if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
        logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
        return False
    if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
        logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
        return False
    return True


def post_process() -> None:
    read_static_image.cache_clear()
    video_manager.clear_video_pool()
    if state_manager.get_item('video_memory_strategy') in [ 'strict', 'moderate' ]:
        clear_inference_pool()
    if state_manager.get_item('video_memory_strategy') == 'strict':
        content_analyser.clear_inference_pool()
        face_classifier.clear_inference_pool()
        face_detector.clear_inference_pool()
        face_landmarker.clear_inference_pool()
        face_masker.clear_inference_pool()
        face_recognizer.clear_inference_pool()


def swap_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
    model_template = get_model_options().get('template')
    model_size = get_model_size()
    crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmark_set.get('5/68'), model_template, model_size)
    crop_vision_frame_raw = crop_vision_frame.copy()
    box_mask = create_box_mask(crop_vision_frame, state_manager.get_item('face_mask_blur'), state_manager.get_item('face_mask_padding'))
    crop_masks =\
    [
        box_mask
    ]

    if 'occlusion' in state_manager.get_item('face_mask_types'):
        occlusion_mask = create_occlusion_mask(crop_vision_frame)
        crop_masks.append(occlusion_mask)

    crop_vision_frame = prepare_crop_frame(crop_vision_frame)
    deep_swapper_morph = numpy.array([ numpy.interp(state_manager.get_item('deep_swapper_morph'), [ 0, 100 ], [ 0, 1 ]) ]).astype(numpy.float32)
    crop_vision_frame, crop_source_mask, crop_target_mask = forward(crop_vision_frame, deep_swapper_morph)
    crop_vision_frame = normalize_crop_frame(crop_vision_frame)
    crop_vision_frame = conditional_match_frame_color(crop_vision_frame_raw, crop_vision_frame)
    crop_masks.append(prepare_crop_mask(crop_source_mask, crop_target_mask))

    if 'area' in state_manager.get_item('face_mask_types'):
        face_landmark_68 = cv2.transform(target_face.landmark_set.get('68').reshape(1, -1, 2), affine_matrix).reshape(-1, 2)
        area_mask = create_area_mask(crop_vision_frame, face_landmark_68, state_manager.get_item('face_mask_areas'))
        crop_masks.append(area_mask)

    if 'region' in state_manager.get_item('face_mask_types'):
        region_mask = create_region_mask(crop_vision_frame, state_manager.get_item('face_mask_regions'))
        crop_masks.append(region_mask)

    crop_mask = numpy.minimum.reduce(crop_masks).clip(0, 1)
    paste_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, crop_mask, affine_matrix)
    return paste_vision_frame


def forward(crop_vision_frame : VisionFrame, deep_swapper_morph : DeepSwapperMorph) -> Tuple[VisionFrame, Mask, Mask]:
    deep_swapper = get_inference_pool().get('deep_swapper')
    deep_swapper_inputs = {}

    for deep_swapper_input in deep_swapper.get_inputs():
        if deep_swapper_input.name == 'in_face:0':
            deep_swapper_inputs[deep_swapper_input.name] = crop_vision_frame
        if deep_swapper_input.name == 'morph_value:0':
            deep_swapper_inputs[deep_swapper_input.name] = deep_swapper_morph

    with thread_semaphore():
        crop_target_mask, crop_vision_frame, crop_source_mask = deep_swapper.run(None, deep_swapper_inputs)

    return crop_vision_frame[0], crop_source_mask[0], crop_target_mask[0]


def has_morph_input() -> bool:
    deep_swapper = get_inference_pool().get('deep_swapper')

    for deep_swapper_input in deep_swapper.get_inputs():
        if deep_swapper_input.name == 'morph_value:0':
            return True

    return False


def prepare_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
    crop_vision_frame = cv2.addWeighted(crop_vision_frame, 1.75, cv2.GaussianBlur(crop_vision_frame, (0, 0), 2), -0.75, 0)
    crop_vision_frame = crop_vision_frame / 255.0
    crop_vision_frame = numpy.expand_dims(crop_vision_frame, axis = 0).astype(numpy.float32)
    return crop_vision_frame


def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
    crop_vision_frame = (crop_vision_frame * 255.0).clip(0, 255)
    crop_vision_frame = crop_vision_frame.astype(numpy.uint8)
    return crop_vision_frame


def prepare_crop_mask(crop_source_mask : Mask, crop_target_mask : Mask) -> Mask:
    model_size = get_model_size()
    blur_size = 6.25
    kernel_size = 3
    crop_mask = numpy.minimum.reduce([ crop_source_mask, crop_target_mask ])
    crop_mask = crop_mask.reshape(model_size).clip(0, 1)
    crop_mask = cv2.erode(crop_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size)), iterations = 2)
    crop_mask = cv2.GaussianBlur(crop_mask, (0, 0), blur_size)
    return crop_mask


def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
    return swap_face(target_face, temp_vision_frame)


def process_frame(inputs : DeepSwapperInputs) -> VisionFrame:
    reference_faces = inputs.get('reference_faces')
    target_vision_frame = inputs.get('target_vision_frame')
    many_faces = sort_and_filter_faces(get_many_faces([ target_vision_frame ]))

    if state_manager.get_item('face_selector_mode') == 'many':
        if many_faces:
            for target_face in many_faces:
                target_vision_frame = swap_face(target_face, target_vision_frame)
    if state_manager.get_item('face_selector_mode') == 'one':
        target_face = get_one_face(many_faces)
        if target_face:
            target_vision_frame = swap_face(target_face, target_vision_frame)
    if state_manager.get_item('face_selector_mode') == 'reference':
        similar_faces = find_similar_faces(many_faces, reference_faces, state_manager.get_item('reference_face_distance'))
        if similar_faces:
            for similar_face in similar_faces:
                target_vision_frame = swap_face(similar_face, target_vision_frame)
    return target_vision_frame


def process_frames(source_path : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None:
    reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None

    for queue_payload in process_manager.manage(queue_payloads):
        target_vision_path = queue_payload['frame_path']
        target_vision_frame = read_image(target_vision_path)
        output_vision_frame = process_frame(
        {
            'reference_faces': reference_faces,
            'target_vision_frame': target_vision_frame
        })
        write_image(target_vision_path, output_vision_frame)
        update_progress(1)


def process_image(source_path : str, target_path : str, output_path : str) -> None:
    reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None
    target_vision_frame = read_static_image(target_path)
    output_vision_frame = process_frame(
    {
        'reference_faces': reference_faces,
        'target_vision_frame': target_vision_frame
    })
    write_image(output_path, output_vision_frame)


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
    processors.multi_process_frames(None, temp_frame_paths, process_frames)
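Note: swap_face() scales the user-facing --deep-swapper-morph percentage into the [0, 1] morph value that the DFM models expect, and forward() only feeds it when the model actually exposes a 'morph_value:0' input (see has_morph_input()). A short sketch of that scaling, mirroring the module's own numpy.interp call:

import numpy

# Same mapping as in swap_face(): [0, 100] -> [0, 1], shaped as a float32 array.
deep_swapper_morph = numpy.array([ numpy.interp(75, [ 0, 100 ], [ 0, 1 ]) ]).astype(numpy.float32)
print(deep_swapper_morph)  # [0.75]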
facefusion/processors/modules/expression_restorer.py
ADDED
@@ -0,0 +1,298 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from argparse import ArgumentParser
from functools import lru_cache
from typing import List, Tuple

import cv2
import numpy

import facefusion.jobs.job_manager
import facefusion.jobs.job_store
import facefusion.processors.core as processors
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, video_manager, wording
from facefusion.common_helper import create_int_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_analyser import get_many_faces, get_one_face
from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5
from facefusion.face_masker import create_box_mask, create_occlusion_mask
from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
from facefusion.face_store import get_reference_faces
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.live_portrait import create_rotation, limit_expression
from facefusion.processors.types import ExpressionRestorerInputs, LivePortraitExpression, LivePortraitFeatureVolume, LivePortraitMotionPoints, LivePortraitPitch, LivePortraitRoll, LivePortraitScale, LivePortraitTranslation, LivePortraitYaw
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore, thread_semaphore
from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import read_image, read_static_image, read_video_frame, write_image


@lru_cache(maxsize = None)
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
	return\
	{
		'live_portrait':
		{
			'hashes':
			{
				'feature_extractor':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.hash'),
					'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.hash')
				},
				'motion_extractor':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.hash'),
					'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.hash')
				},
				'generator':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.hash'),
					'path': resolve_relative_path('../.assets/models/live_portrait_generator.hash')
				}
			},
			'sources':
			{
				'feature_extractor':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.onnx'),
					'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.onnx')
				},
				'motion_extractor':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.onnx'),
					'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.onnx')
				},
				'generator':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.onnx'),
					'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx')
				}
			},
			'template': 'arcface_128',
			'size': (512, 512)
		}
	}


def get_inference_pool() -> InferencePool:
	model_names = [ state_manager.get_item('expression_restorer_model') ]
	model_source_set = get_model_options().get('sources')

	return inference_manager.get_inference_pool(__name__, model_names, model_source_set)


def clear_inference_pool() -> None:
	model_names = [ state_manager.get_item('expression_restorer_model') ]
	inference_manager.clear_inference_pool(__name__, model_names)


def get_model_options() -> ModelOptions:
	model_name = state_manager.get_item('expression_restorer_model')
	return create_static_model_set('full').get(model_name)


def register_args(program : ArgumentParser) -> None:
	group_processors = find_argument_group(program, 'processors')
	if group_processors:
		group_processors.add_argument('--expression-restorer-model', help = wording.get('help.expression_restorer_model'), default = config.get_str_value('processors', 'expression_restorer_model', 'live_portrait'), choices = processors_choices.expression_restorer_models)
		group_processors.add_argument('--expression-restorer-factor', help = wording.get('help.expression_restorer_factor'), type = int, default = config.get_int_value('processors', 'expression_restorer_factor', '80'), choices = processors_choices.expression_restorer_factor_range, metavar = create_int_metavar(processors_choices.expression_restorer_factor_range))
		facefusion.jobs.job_store.register_step_keys([ 'expression_restorer_model', 'expression_restorer_factor' ])


def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
	apply_state_item('expression_restorer_model', args.get('expression_restorer_model'))
	apply_state_item('expression_restorer_factor', args.get('expression_restorer_factor'))


def pre_check() -> bool:
	model_hash_set = get_model_options().get('hashes')
	model_source_set = get_model_options().get('sources')

	return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)


def pre_process(mode : ProcessMode) -> bool:
	if mode == 'stream':
		logger.error(wording.get('stream_not_supported') + wording.get('exclamation_mark'), __name__)
		return False
	if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
		logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
		return False
	if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
		logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
		return False
	if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
		logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
		return False
	return True


def post_process() -> None:
	read_static_image.cache_clear()
	video_manager.clear_video_pool()
	if state_manager.get_item('video_memory_strategy') in [ 'strict', 'moderate' ]:
		clear_inference_pool()
	if state_manager.get_item('video_memory_strategy') == 'strict':
		content_analyser.clear_inference_pool()
		face_classifier.clear_inference_pool()
		face_detector.clear_inference_pool()
		face_landmarker.clear_inference_pool()
		face_masker.clear_inference_pool()
		face_recognizer.clear_inference_pool()
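register_args and apply_args above are the whole option surface of this module. A small self-contained sketch of that flow, using a plain ArgumentParser instead of the real program and a dict standing in for state_manager (both are illustrative assumptions):

# Hypothetical sketch: how the registered options flow into state.
from argparse import ArgumentParser

program = ArgumentParser()
program.add_argument('--expression-restorer-model', default = 'live_portrait')
program.add_argument('--expression-restorer-factor', type = int, default = 80)
args = vars(program.parse_args([ '--expression-restorer-factor', '60' ]))

collected = {}
apply_state_item = collected.__setitem__ # stand-in for the state_manager writes
apply_state_item('expression_restorer_model', args.get('expression_restorer_model'))
apply_state_item('expression_restorer_factor', args.get('expression_restorer_factor'))
assert collected['expression_restorer_factor'] == 60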
def restore_expression(source_vision_frame : VisionFrame, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
	model_template = get_model_options().get('template')
	model_size = get_model_options().get('size')
	expression_restorer_factor = float(numpy.interp(float(state_manager.get_item('expression_restorer_factor')), [ 0, 100 ], [ 0, 1.2 ]))
	source_vision_frame = cv2.resize(source_vision_frame, temp_vision_frame.shape[:2][::-1])
	source_crop_vision_frame, _ = warp_face_by_face_landmark_5(source_vision_frame, target_face.landmark_set.get('5/68'), model_template, model_size)
	target_crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmark_set.get('5/68'), model_template, model_size)
	box_mask = create_box_mask(target_crop_vision_frame, state_manager.get_item('face_mask_blur'), (0, 0, 0, 0))
	crop_masks =\
	[
		box_mask
	]

	if 'occlusion' in state_manager.get_item('face_mask_types'):
		occlusion_mask = create_occlusion_mask(target_crop_vision_frame)
		crop_masks.append(occlusion_mask)

	source_crop_vision_frame = prepare_crop_frame(source_crop_vision_frame)
	target_crop_vision_frame = prepare_crop_frame(target_crop_vision_frame)
	target_crop_vision_frame = apply_restore(source_crop_vision_frame, target_crop_vision_frame, expression_restorer_factor)
	target_crop_vision_frame = normalize_crop_frame(target_crop_vision_frame)
	crop_mask = numpy.minimum.reduce(crop_masks).clip(0, 1)
	temp_vision_frame = paste_back(temp_vision_frame, target_crop_vision_frame, crop_mask, affine_matrix)
	return temp_vision_frame


def apply_restore(source_crop_vision_frame : VisionFrame, target_crop_vision_frame : VisionFrame, expression_restorer_factor : float) -> VisionFrame:
	feature_volume = forward_extract_feature(target_crop_vision_frame)
	source_expression = forward_extract_motion(source_crop_vision_frame)[5]
	pitch, yaw, roll, scale, translation, target_expression, motion_points = forward_extract_motion(target_crop_vision_frame)
	rotation = create_rotation(pitch, yaw, roll)
	source_expression[:, [ 0, 4, 5, 8, 9 ]] = target_expression[:, [ 0, 4, 5, 8, 9 ]]
	source_expression = source_expression * expression_restorer_factor + target_expression * (1 - expression_restorer_factor)
	source_expression = limit_expression(source_expression)
	source_motion_points = scale * (motion_points @ rotation.T + source_expression) + translation
	target_motion_points = scale * (motion_points @ rotation.T + target_expression) + translation
	crop_vision_frame = forward_generate_frame(feature_volume, source_motion_points, target_motion_points)
	return crop_vision_frame
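The core of apply_restore is a per-keypoint linear blend between the source and target expressions, with a few rows pinned to the target before blending. A standalone numpy sketch of just that step; the (1, 21, 3) shape is illustrative of the LivePortrait motion offsets, not pulled from the diff:

import numpy

def blend_expression(source_expression : numpy.ndarray, target_expression : numpy.ndarray, factor : float) -> numpy.ndarray:
	# keep selected rows from the target, as apply_restore does for indices [0, 4, 5, 8, 9]
	blended = source_expression.copy()
	blended[:, [ 0, 4, 5, 8, 9 ]] = target_expression[:, [ 0, 4, 5, 8, 9 ]]
	# linear interpolation between source and target; factor lands in [0, 1.2] after numpy.interp
	return blended * factor + target_expression * (1 - factor)

source_expression = numpy.zeros((1, 21, 3), dtype = numpy.float32) # illustrative shapes only
target_expression = numpy.zeros((1, 21, 3), dtype = numpy.float32)
blended_expression = blend_expression(source_expression, target_expression, 0.8)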
def forward_extract_feature(crop_vision_frame : VisionFrame) -> LivePortraitFeatureVolume:
	feature_extractor = get_inference_pool().get('feature_extractor')

	with conditional_thread_semaphore():
		feature_volume = feature_extractor.run(None,
		{
			'input': crop_vision_frame
		})[0]

	return feature_volume


def forward_extract_motion(crop_vision_frame : VisionFrame) -> Tuple[LivePortraitPitch, LivePortraitYaw, LivePortraitRoll, LivePortraitScale, LivePortraitTranslation, LivePortraitExpression, LivePortraitMotionPoints]:
	motion_extractor = get_inference_pool().get('motion_extractor')

	with conditional_thread_semaphore():
		pitch, yaw, roll, scale, translation, expression, motion_points = motion_extractor.run(None,
		{
			'input': crop_vision_frame
		})

	return pitch, yaw, roll, scale, translation, expression, motion_points


def forward_generate_frame(feature_volume : LivePortraitFeatureVolume, source_motion_points : LivePortraitMotionPoints, target_motion_points : LivePortraitMotionPoints) -> VisionFrame:
	generator = get_inference_pool().get('generator')

	with thread_semaphore():
		crop_vision_frame = generator.run(None,
		{
			'feature_volume': feature_volume,
			'source': source_motion_points,
			'target': target_motion_points
		})[0][0]

	return crop_vision_frame


def prepare_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
	model_size = get_model_options().get('size')
	prepare_size = (model_size[0] // 2, model_size[1] // 2)
	crop_vision_frame = cv2.resize(crop_vision_frame, prepare_size, interpolation = cv2.INTER_AREA)
	crop_vision_frame = crop_vision_frame[:, :, ::-1] / 255.0
	crop_vision_frame = numpy.expand_dims(crop_vision_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
	return crop_vision_frame


def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
	crop_vision_frame = crop_vision_frame.transpose(1, 2, 0).clip(0, 1)
	crop_vision_frame = crop_vision_frame * 255.0
	crop_vision_frame = crop_vision_frame.astype(numpy.uint8)[:, :, ::-1]
	return crop_vision_frame


def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
	pass


def process_frame(inputs : ExpressionRestorerInputs) -> VisionFrame:
	reference_faces = inputs.get('reference_faces')
	source_vision_frame = inputs.get('source_vision_frame')
	target_vision_frame = inputs.get('target_vision_frame')
	many_faces = sort_and_filter_faces(get_many_faces([ target_vision_frame ]))

	if state_manager.get_item('face_selector_mode') == 'many':
		if many_faces:
			for target_face in many_faces:
				target_vision_frame = restore_expression(source_vision_frame, target_face, target_vision_frame)
	if state_manager.get_item('face_selector_mode') == 'one':
		target_face = get_one_face(many_faces)
		if target_face:
			target_vision_frame = restore_expression(source_vision_frame, target_face, target_vision_frame)
	if state_manager.get_item('face_selector_mode') == 'reference':
		similar_faces = find_similar_faces(many_faces, reference_faces, state_manager.get_item('reference_face_distance'))
		if similar_faces:
			for similar_face in similar_faces:
				target_vision_frame = restore_expression(source_vision_frame, similar_face, target_vision_frame)
	return target_vision_frame


def process_frames(source_path : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None:
	reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None

	for queue_payload in process_manager.manage(queue_payloads):
		frame_number = queue_payload.get('frame_number')
		if state_manager.get_item('trim_frame_start'):
			frame_number += state_manager.get_item('trim_frame_start')
		source_vision_frame = read_video_frame(state_manager.get_item('target_path'), frame_number)
		target_vision_path = queue_payload.get('frame_path')
		target_vision_frame = read_image(target_vision_path)
		output_vision_frame = process_frame(
		{
			'reference_faces': reference_faces,
			'source_vision_frame': source_vision_frame,
			'target_vision_frame': target_vision_frame
		})
		write_image(target_vision_path, output_vision_frame)
		update_progress(1)


def process_image(source_path : str, target_path : str, output_path : str) -> None:
	reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None
	source_vision_frame = read_static_image(state_manager.get_item('target_path'))
	target_vision_frame = read_static_image(target_path)
	output_vision_frame = process_frame(
	{
		'reference_faces': reference_faces,
		'source_vision_frame': source_vision_frame,
		'target_vision_frame': target_vision_frame
	})
	write_image(output_path, output_vision_frame)


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
	processors.multi_process_frames(None, temp_frame_paths, process_frames)
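prepare_crop_frame and normalize_crop_frame above are inverses apart from the initial half-size resize: BGR to RGB, [0, 255] to [0, 1], HWC to NCHW, and back again. A minimal round-trip sketch on a dummy frame (the cv2.INTER_AREA resize is omitted so the shapes match exactly):

import numpy

frame = numpy.random.randint(0, 255, (512, 512, 3), dtype = numpy.uint8) # dummy BGR frame

prepared = frame[:, :, ::-1] / 255.0 # BGR -> RGB, [0, 1]
prepared = numpy.expand_dims(prepared.transpose(2, 0, 1), axis = 0) # HWC -> NCHW
prepared = prepared.astype(numpy.float32)

restored = prepared[0].transpose(1, 2, 0).clip(0, 1) # CHW -> HWC
restored = (restored * 255.0).astype(numpy.uint8)[:, :, ::-1] # RGB -> BGR

assert restored.shape == frame.shape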
facefusion/processors/modules/face_debugger.py
ADDED
@@ -0,0 +1,228 @@
from argparse import ArgumentParser
from typing import List

import cv2
import numpy

import facefusion.jobs.job_manager
import facefusion.jobs.job_store
import facefusion.processors.core as processors
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, logger, process_manager, state_manager, video_manager, wording
from facefusion.face_analyser import get_many_faces, get_one_face
from facefusion.face_helper import warp_face_by_face_landmark_5
from facefusion.face_masker import create_area_mask, create_box_mask, create_occlusion_mask, create_region_mask
from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
from facefusion.face_store import get_reference_faces
from facefusion.filesystem import in_directory, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.types import FaceDebuggerInputs
from facefusion.program_helper import find_argument_group
from facefusion.types import ApplyStateItem, Args, Face, InferencePool, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import read_image, read_static_image, write_image


def get_inference_pool() -> InferencePool:
	pass


def clear_inference_pool() -> None:
	pass


def register_args(program : ArgumentParser) -> None:
	group_processors = find_argument_group(program, 'processors')
	if group_processors:
		group_processors.add_argument('--face-debugger-items', help = wording.get('help.face_debugger_items').format(choices = ', '.join(processors_choices.face_debugger_items)), default = config.get_str_list('processors', 'face_debugger_items', 'face-landmark-5/68 face-mask'), choices = processors_choices.face_debugger_items, nargs = '+', metavar = 'FACE_DEBUGGER_ITEMS')
		facefusion.jobs.job_store.register_step_keys([ 'face_debugger_items' ])


def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
	apply_state_item('face_debugger_items', args.get('face_debugger_items'))


def pre_check() -> bool:
	return True


def pre_process(mode : ProcessMode) -> bool:
	if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
		logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
		return False
	if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
		logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
		return False
	return True


def post_process() -> None:
	read_static_image.cache_clear()
	video_manager.clear_video_pool()
	if state_manager.get_item('video_memory_strategy') == 'strict':
		content_analyser.clear_inference_pool()
		face_classifier.clear_inference_pool()
		face_detector.clear_inference_pool()
		face_landmarker.clear_inference_pool()
		face_masker.clear_inference_pool()
		face_recognizer.clear_inference_pool()
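debug_face below draws its overlays with plain OpenCV primitives; the colour constants are BGR tuples, so (0, 0, 255) is red, (0, 255, 0) is green and (255, 255, 0) is cyan. A tiny sketch of the same drawing calls on a blank canvas, purely for illustration:

import cv2
import numpy

canvas = numpy.zeros((128, 128, 3), dtype = numpy.uint8)
cv2.rectangle(canvas, (8, 8), (120, 120), (0, 0, 255), 2) # red bounding box, BGR order
cv2.putText(canvas, '0.99', (12, 28), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) # green score text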
def debug_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
	primary_color = (0, 0, 255)
	primary_light_color = (100, 100, 255)
	secondary_color = (0, 255, 0)
	tertiary_color = (255, 255, 0)
	bounding_box = target_face.bounding_box.astype(numpy.int32)
	temp_vision_frame = temp_vision_frame.copy()
	has_face_landmark_5_fallback = numpy.array_equal(target_face.landmark_set.get('5'), target_face.landmark_set.get('5/68'))
	has_face_landmark_68_fallback = numpy.array_equal(target_face.landmark_set.get('68'), target_face.landmark_set.get('68/5'))
	face_debugger_items = state_manager.get_item('face_debugger_items')

	if 'bounding-box' in face_debugger_items:
		x1, y1, x2, y2 = bounding_box
		cv2.rectangle(temp_vision_frame, (x1, y1), (x2, y2), primary_color, 2)

		if target_face.angle == 0:
			cv2.line(temp_vision_frame, (x1, y1), (x2, y1), primary_light_color, 3)
		if target_face.angle == 180:
			cv2.line(temp_vision_frame, (x1, y2), (x2, y2), primary_light_color, 3)
		if target_face.angle == 90:
			cv2.line(temp_vision_frame, (x2, y1), (x2, y2), primary_light_color, 3)
		if target_face.angle == 270:
			cv2.line(temp_vision_frame, (x1, y1), (x1, y2), primary_light_color, 3)

	if 'face-mask' in face_debugger_items:
		crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmark_set.get('5/68'), 'arcface_128', (512, 512))
		inverse_matrix = cv2.invertAffineTransform(affine_matrix)
		temp_size = temp_vision_frame.shape[:2][::-1]
		crop_masks = []

		if 'box' in state_manager.get_item('face_mask_types'):
			box_mask = create_box_mask(crop_vision_frame, 0, state_manager.get_item('face_mask_padding'))
			crop_masks.append(box_mask)

		if 'occlusion' in state_manager.get_item('face_mask_types'):
			occlusion_mask = create_occlusion_mask(crop_vision_frame)
			crop_masks.append(occlusion_mask)

		if 'area' in state_manager.get_item('face_mask_types'):
			face_landmark_68 = cv2.transform(target_face.landmark_set.get('68').reshape(1, -1, 2), affine_matrix).reshape(-1, 2)
			area_mask = create_area_mask(crop_vision_frame, face_landmark_68, state_manager.get_item('face_mask_areas'))
			crop_masks.append(area_mask)

		if 'region' in state_manager.get_item('face_mask_types'):
			region_mask = create_region_mask(crop_vision_frame, state_manager.get_item('face_mask_regions'))
			crop_masks.append(region_mask)

		crop_mask = numpy.minimum.reduce(crop_masks).clip(0, 1)
		crop_mask = (crop_mask * 255).astype(numpy.uint8)
		inverse_vision_frame = cv2.warpAffine(crop_mask, inverse_matrix, temp_size)
		inverse_vision_frame = cv2.threshold(inverse_vision_frame, 100, 255, cv2.THRESH_BINARY)[1]
		inverse_vision_frame[inverse_vision_frame > 0] = 255 #type:ignore[operator]
		inverse_contours = cv2.findContours(inverse_vision_frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0]
		cv2.drawContours(temp_vision_frame, inverse_contours, -1, tertiary_color if has_face_landmark_5_fallback else secondary_color, 2)

	if 'face-landmark-5' in face_debugger_items and numpy.any(target_face.landmark_set.get('5')):
		face_landmark_5 = target_face.landmark_set.get('5').astype(numpy.int32)
		for index in range(face_landmark_5.shape[0]):
			cv2.circle(temp_vision_frame, (face_landmark_5[index][0], face_landmark_5[index][1]), 3, primary_color, -1)

	if 'face-landmark-5/68' in face_debugger_items and numpy.any(target_face.landmark_set.get('5/68')):
		face_landmark_5_68 = target_face.landmark_set.get('5/68').astype(numpy.int32)
		for index in range(face_landmark_5_68.shape[0]):
			cv2.circle(temp_vision_frame, (face_landmark_5_68[index][0], face_landmark_5_68[index][1]), 3, tertiary_color if has_face_landmark_5_fallback else secondary_color, -1)

	if 'face-landmark-68' in face_debugger_items and numpy.any(target_face.landmark_set.get('68')):
		face_landmark_68 = target_face.landmark_set.get('68').astype(numpy.int32)
		for index in range(face_landmark_68.shape[0]):
			cv2.circle(temp_vision_frame, (face_landmark_68[index][0], face_landmark_68[index][1]), 3, tertiary_color if has_face_landmark_68_fallback else secondary_color, -1)

	if 'face-landmark-68/5' in face_debugger_items and numpy.any(target_face.landmark_set.get('68')):
		face_landmark_68 = target_face.landmark_set.get('68/5').astype(numpy.int32)
		for index in range(face_landmark_68.shape[0]):
			cv2.circle(temp_vision_frame, (face_landmark_68[index][0], face_landmark_68[index][1]), 3, tertiary_color, -1)

	if bounding_box[3] - bounding_box[1] > 50 and bounding_box[2] - bounding_box[0] > 50:
		top = bounding_box[1]
		left = bounding_box[0] - 20

		if 'face-detector-score' in face_debugger_items:
			face_score_text = str(round(target_face.score_set.get('detector'), 2))
			top = top + 20
			cv2.putText(temp_vision_frame, face_score_text, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.5, primary_color, 2)

		if 'face-landmarker-score' in face_debugger_items:
			face_score_text = str(round(target_face.score_set.get('landmarker'), 2))
			top = top + 20
			cv2.putText(temp_vision_frame, face_score_text, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.5, tertiary_color if has_face_landmark_5_fallback else secondary_color, 2)

		if 'age' in face_debugger_items:
			face_age_text = str(target_face.age.start) + '-' + str(target_face.age.stop)
			top = top + 20
			cv2.putText(temp_vision_frame, face_age_text, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.5, primary_color, 2)

		if 'gender' in face_debugger_items:
			face_gender_text = target_face.gender
			top = top + 20
			cv2.putText(temp_vision_frame, face_gender_text, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.5, primary_color, 2)

		if 'race' in face_debugger_items:
			face_race_text = target_face.race
			top = top + 20
			cv2.putText(temp_vision_frame, face_race_text, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.5, primary_color, 2)

	return temp_vision_frame
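The face-mask branch above projects the combined crop-space mask back into frame space with the inverted affine matrix and outlines it. A cut-down sketch of that warp, threshold, findContours, drawContours chain on synthetic data (the affine matrix and sizes are made up for illustration):

import cv2
import numpy

frame = numpy.zeros((720, 1280, 3), dtype = numpy.uint8) # stand-in for temp_vision_frame
crop_mask = numpy.zeros((512, 512), dtype = numpy.uint8)
cv2.circle(crop_mask, (256, 256), 180, 255, -1) # synthetic crop-space mask

affine_matrix = numpy.array([ [ 1.0, 0.0, -400.0 ], [ 0.0, 1.0, -200.0 ] ], dtype = numpy.float32) # frame -> crop (illustrative)
inverse_matrix = cv2.invertAffineTransform(affine_matrix)
inverse_mask = cv2.warpAffine(crop_mask, inverse_matrix, frame.shape[:2][::-1]) # crop -> frame
inverse_mask = cv2.threshold(inverse_mask, 100, 255, cv2.THRESH_BINARY)[1]
contours = cv2.findContours(inverse_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0]
cv2.drawContours(frame, contours, -1, (0, 255, 0), 2)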
def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
	pass


def process_frame(inputs : FaceDebuggerInputs) -> VisionFrame:
	reference_faces = inputs.get('reference_faces')
	target_vision_frame = inputs.get('target_vision_frame')
	many_faces = sort_and_filter_faces(get_many_faces([ target_vision_frame ]))

	if state_manager.get_item('face_selector_mode') == 'many':
		if many_faces:
			for target_face in many_faces:
				target_vision_frame = debug_face(target_face, target_vision_frame)
	if state_manager.get_item('face_selector_mode') == 'one':
		target_face = get_one_face(many_faces)
		if target_face:
			target_vision_frame = debug_face(target_face, target_vision_frame)
	if state_manager.get_item('face_selector_mode') == 'reference':
		similar_faces = find_similar_faces(many_faces, reference_faces, state_manager.get_item('reference_face_distance'))
		if similar_faces:
			for similar_face in similar_faces:
				target_vision_frame = debug_face(similar_face, target_vision_frame)
	return target_vision_frame


def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None:
	reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None

	for queue_payload in process_manager.manage(queue_payloads):
		target_vision_path = queue_payload['frame_path']
		target_vision_frame = read_image(target_vision_path)
		output_vision_frame = process_frame(
		{
			'reference_faces': reference_faces,
			'target_vision_frame': target_vision_frame
		})
		write_image(target_vision_path, output_vision_frame)
		update_progress(1)


def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
	reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None
	target_vision_frame = read_static_image(target_path)
	output_vision_frame = process_frame(
	{
		'reference_faces': reference_faces,
		'target_vision_frame': target_vision_frame
	})
	write_image(output_path, output_vision_frame)


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
	processors.multi_process_frames(source_paths, temp_frame_paths, process_frames)
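The many / one / reference dispatch seen in process_frame recurs across the processor modules in this upload. A hypothetical helper expressing the same selection logic once; the name and signature are illustrative and not part of the codebase:

from typing import Callable, List

def select_faces(face_selector_mode : str, many_faces : List, reference_faces, find_similar : Callable, get_one : Callable) -> List:
	# mirrors the branches of process_frame: all faces, the single best face, or faces similar to the reference
	if face_selector_mode == 'many':
		return many_faces
	if face_selector_mode == 'one':
		one_face = get_one(many_faces)
		return [ one_face ] if one_face else []
	if face_selector_mode == 'reference':
		return find_similar(many_faces, reference_faces) or []
	return []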
facefusion/processors/modules/face_editor.py
ADDED
@@ -0,0 +1,533 @@
from argparse import ArgumentParser
from functools import lru_cache
from typing import List, Tuple

import cv2
import numpy

import facefusion.jobs.job_manager
import facefusion.jobs.job_store
import facefusion.processors.core as processors
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, video_manager, wording
from facefusion.common_helper import create_float_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_analyser import get_many_faces, get_one_face
from facefusion.face_helper import paste_back, scale_face_landmark_5, warp_face_by_face_landmark_5
from facefusion.face_masker import create_box_mask
from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
from facefusion.face_store import get_reference_faces
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.live_portrait import create_rotation, limit_euler_angles, limit_expression
from facefusion.processors.types import FaceEditorInputs, LivePortraitExpression, LivePortraitFeatureVolume, LivePortraitMotionPoints, LivePortraitPitch, LivePortraitRoll, LivePortraitRotation, LivePortraitScale, LivePortraitTranslation, LivePortraitYaw
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore, thread_semaphore
from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, FaceLandmark68, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import read_image, read_static_image, write_image


@lru_cache(maxsize = None)
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
	return\
	{
		'live_portrait':
		{
			'hashes':
			{
				'feature_extractor':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.hash'),
					'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.hash')
				},
				'motion_extractor':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.hash'),
					'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.hash')
				},
				'eye_retargeter':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_eye_retargeter.hash'),
					'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.hash')
				},
				'lip_retargeter':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_lip_retargeter.hash'),
					'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.hash')
				},
				'stitcher':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_stitcher.hash'),
					'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.hash')
				},
				'generator':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.hash'),
					'path': resolve_relative_path('../.assets/models/live_portrait_generator.hash')
				}
			},
			'sources':
			{
				'feature_extractor':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.onnx'),
					'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.onnx')
				},
				'motion_extractor':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.onnx'),
					'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.onnx')
				},
				'eye_retargeter':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_eye_retargeter.onnx'),
					'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.onnx')
				},
				'lip_retargeter':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_lip_retargeter.onnx'),
					'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.onnx')
				},
				'stitcher':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_stitcher.onnx'),
					'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.onnx')
				},
				'generator':
				{
					'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.onnx'),
					'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx')
				}
			},
			'template': 'ffhq_512',
			'size': (512, 512)
		}
	}


def get_inference_pool() -> InferencePool:
	model_names = [ state_manager.get_item('face_editor_model') ]
	model_source_set = get_model_options().get('sources')

	return inference_manager.get_inference_pool(__name__, model_names, model_source_set)


def clear_inference_pool() -> None:
	model_names = [ state_manager.get_item('face_editor_model') ]
	inference_manager.clear_inference_pool(__name__, model_names)


def get_model_options() -> ModelOptions:
	model_name = state_manager.get_item('face_editor_model')
	return create_static_model_set('full').get(model_name)


def register_args(program : ArgumentParser) -> None:
	group_processors = find_argument_group(program, 'processors')
	if group_processors:
		group_processors.add_argument('--face-editor-model', help = wording.get('help.face_editor_model'), default = config.get_str_value('processors', 'face_editor_model', 'live_portrait'), choices = processors_choices.face_editor_models)
		group_processors.add_argument('--face-editor-eyebrow-direction', help = wording.get('help.face_editor_eyebrow_direction'), type = float, default = config.get_float_value('processors', 'face_editor_eyebrow_direction', '0'), choices = processors_choices.face_editor_eyebrow_direction_range, metavar = create_float_metavar(processors_choices.face_editor_eyebrow_direction_range))
		group_processors.add_argument('--face-editor-eye-gaze-horizontal', help = wording.get('help.face_editor_eye_gaze_horizontal'), type = float, default = config.get_float_value('processors', 'face_editor_eye_gaze_horizontal', '0'), choices = processors_choices.face_editor_eye_gaze_horizontal_range, metavar = create_float_metavar(processors_choices.face_editor_eye_gaze_horizontal_range))
		group_processors.add_argument('--face-editor-eye-gaze-vertical', help = wording.get('help.face_editor_eye_gaze_vertical'), type = float, default = config.get_float_value('processors', 'face_editor_eye_gaze_vertical', '0'), choices = processors_choices.face_editor_eye_gaze_vertical_range, metavar = create_float_metavar(processors_choices.face_editor_eye_gaze_vertical_range))
		group_processors.add_argument('--face-editor-eye-open-ratio', help = wording.get('help.face_editor_eye_open_ratio'), type = float, default = config.get_float_value('processors', 'face_editor_eye_open_ratio', '0'), choices = processors_choices.face_editor_eye_open_ratio_range, metavar = create_float_metavar(processors_choices.face_editor_eye_open_ratio_range))
		group_processors.add_argument('--face-editor-lip-open-ratio', help = wording.get('help.face_editor_lip_open_ratio'), type = float, default = config.get_float_value('processors', 'face_editor_lip_open_ratio', '0'), choices = processors_choices.face_editor_lip_open_ratio_range, metavar = create_float_metavar(processors_choices.face_editor_lip_open_ratio_range))
		group_processors.add_argument('--face-editor-mouth-grim', help = wording.get('help.face_editor_mouth_grim'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_grim', '0'), choices = processors_choices.face_editor_mouth_grim_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_grim_range))
		group_processors.add_argument('--face-editor-mouth-pout', help = wording.get('help.face_editor_mouth_pout'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_pout', '0'), choices = processors_choices.face_editor_mouth_pout_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_pout_range))
		group_processors.add_argument('--face-editor-mouth-purse', help = wording.get('help.face_editor_mouth_purse'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_purse', '0'), choices = processors_choices.face_editor_mouth_purse_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_purse_range))
		group_processors.add_argument('--face-editor-mouth-smile', help = wording.get('help.face_editor_mouth_smile'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_smile', '0'), choices = processors_choices.face_editor_mouth_smile_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_smile_range))
		group_processors.add_argument('--face-editor-mouth-position-horizontal', help = wording.get('help.face_editor_mouth_position_horizontal'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_position_horizontal', '0'), choices = processors_choices.face_editor_mouth_position_horizontal_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_position_horizontal_range))
		group_processors.add_argument('--face-editor-mouth-position-vertical', help = wording.get('help.face_editor_mouth_position_vertical'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_position_vertical', '0'), choices = processors_choices.face_editor_mouth_position_vertical_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_position_vertical_range))
		group_processors.add_argument('--face-editor-head-pitch', help = wording.get('help.face_editor_head_pitch'), type = float, default = config.get_float_value('processors', 'face_editor_head_pitch', '0'), choices = processors_choices.face_editor_head_pitch_range, metavar = create_float_metavar(processors_choices.face_editor_head_pitch_range))
		group_processors.add_argument('--face-editor-head-yaw', help = wording.get('help.face_editor_head_yaw'), type = float, default = config.get_float_value('processors', 'face_editor_head_yaw', '0'), choices = processors_choices.face_editor_head_yaw_range, metavar = create_float_metavar(processors_choices.face_editor_head_yaw_range))
		group_processors.add_argument('--face-editor-head-roll', help = wording.get('help.face_editor_head_roll'), type = float, default = config.get_float_value('processors', 'face_editor_head_roll', '0'), choices = processors_choices.face_editor_head_roll_range, metavar = create_float_metavar(processors_choices.face_editor_head_roll_range))
		facefusion.jobs.job_store.register_step_keys([ 'face_editor_model', 'face_editor_eyebrow_direction', 'face_editor_eye_gaze_horizontal', 'face_editor_eye_gaze_vertical', 'face_editor_eye_open_ratio', 'face_editor_lip_open_ratio', 'face_editor_mouth_grim', 'face_editor_mouth_pout', 'face_editor_mouth_purse', 'face_editor_mouth_smile', 'face_editor_mouth_position_horizontal', 'face_editor_mouth_position_vertical', 'face_editor_head_pitch', 'face_editor_head_yaw', 'face_editor_head_roll' ])


def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
	apply_state_item('face_editor_model', args.get('face_editor_model'))
	apply_state_item('face_editor_eyebrow_direction', args.get('face_editor_eyebrow_direction'))
	apply_state_item('face_editor_eye_gaze_horizontal', args.get('face_editor_eye_gaze_horizontal'))
	apply_state_item('face_editor_eye_gaze_vertical', args.get('face_editor_eye_gaze_vertical'))
	apply_state_item('face_editor_eye_open_ratio', args.get('face_editor_eye_open_ratio'))
	apply_state_item('face_editor_lip_open_ratio', args.get('face_editor_lip_open_ratio'))
	apply_state_item('face_editor_mouth_grim', args.get('face_editor_mouth_grim'))
	apply_state_item('face_editor_mouth_pout', args.get('face_editor_mouth_pout'))
	apply_state_item('face_editor_mouth_purse', args.get('face_editor_mouth_purse'))
	apply_state_item('face_editor_mouth_smile', args.get('face_editor_mouth_smile'))
	apply_state_item('face_editor_mouth_position_horizontal', args.get('face_editor_mouth_position_horizontal'))
	apply_state_item('face_editor_mouth_position_vertical', args.get('face_editor_mouth_position_vertical'))
	apply_state_item('face_editor_head_pitch', args.get('face_editor_head_pitch'))
	apply_state_item('face_editor_head_yaw', args.get('face_editor_head_yaw'))
	apply_state_item('face_editor_head_roll', args.get('face_editor_head_roll'))


def pre_check() -> bool:
	model_hash_set = get_model_options().get('hashes')
	model_source_set = get_model_options().get('sources')

	return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)


def pre_process(mode : ProcessMode) -> bool:
	if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
		logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
		return False
	if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
		logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
		return False
	if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
		logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
		return False
	return True


def post_process() -> None:
	read_static_image.cache_clear()
	video_manager.clear_video_pool()
	if state_manager.get_item('video_memory_strategy') in [ 'strict', 'moderate' ]:
		clear_inference_pool()
	if state_manager.get_item('video_memory_strategy') == 'strict':
		content_analyser.clear_inference_pool()
		face_classifier.clear_inference_pool()
		face_detector.clear_inference_pool()
		face_landmarker.clear_inference_pool()
		face_masker.clear_inference_pool()
		face_recognizer.clear_inference_pool()


def edit_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
	model_template = get_model_options().get('template')
	model_size = get_model_options().get('size')
	face_landmark_5 = scale_face_landmark_5(target_face.landmark_set.get('5/68'), 1.5)
	crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, face_landmark_5, model_template, model_size)
	box_mask = create_box_mask(crop_vision_frame, state_manager.get_item('face_mask_blur'), (0, 0, 0, 0))
	crop_vision_frame = prepare_crop_frame(crop_vision_frame)
	crop_vision_frame = apply_edit(crop_vision_frame, target_face.landmark_set.get('68'))
	crop_vision_frame = normalize_crop_frame(crop_vision_frame)
	temp_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, box_mask, affine_matrix)
	return temp_vision_frame


def apply_edit(crop_vision_frame : VisionFrame, face_landmark_68 : FaceLandmark68) -> VisionFrame:
	feature_volume = forward_extract_feature(crop_vision_frame)
	pitch, yaw, roll, scale, translation, expression, motion_points = forward_extract_motion(crop_vision_frame)
	rotation = create_rotation(pitch, yaw, roll)
	motion_points_target = scale * (motion_points @ rotation.T + expression) + translation
	expression = edit_eye_gaze(expression)
	expression = edit_mouth_grim(expression)
	expression = edit_mouth_position(expression)
	expression = edit_mouth_pout(expression)
	expression = edit_mouth_purse(expression)
	expression = edit_mouth_smile(expression)
	expression = edit_eyebrow_direction(expression)
	expression = limit_expression(expression)
	rotation = edit_head_rotation(pitch, yaw, roll)
	motion_points_source = motion_points @ rotation.T
	motion_points_source += expression
	motion_points_source *= scale
	motion_points_source += translation
	motion_points_source += edit_eye_open(motion_points_target, face_landmark_68)
	motion_points_source += edit_lip_open(motion_points_target, face_landmark_68)
	motion_points_source = forward_stitch_motion_points(motion_points_source, motion_points_target)
	crop_vision_frame = forward_generate_frame(feature_volume, motion_points_source, motion_points_target)
	return crop_vision_frame


def forward_extract_feature(crop_vision_frame : VisionFrame) -> LivePortraitFeatureVolume:
	feature_extractor = get_inference_pool().get('feature_extractor')

	with conditional_thread_semaphore():
		feature_volume = feature_extractor.run(None,
		{
			'input': crop_vision_frame
		})[0]

	return feature_volume


def forward_extract_motion(crop_vision_frame : VisionFrame) -> Tuple[LivePortraitPitch, LivePortraitYaw, LivePortraitRoll, LivePortraitScale, LivePortraitTranslation, LivePortraitExpression, LivePortraitMotionPoints]:
	motion_extractor = get_inference_pool().get('motion_extractor')

	with conditional_thread_semaphore():
		pitch, yaw, roll, scale, translation, expression, motion_points = motion_extractor.run(None,
		{
			'input': crop_vision_frame
		})

	return pitch, yaw, roll, scale, translation, expression, motion_points


def forward_retarget_eye(eye_motion_points : LivePortraitMotionPoints) -> LivePortraitMotionPoints:
	eye_retargeter = get_inference_pool().get('eye_retargeter')

	with conditional_thread_semaphore():
		eye_motion_points = eye_retargeter.run(None,
		{
			'input': eye_motion_points
		})[0]

	return eye_motion_points


def forward_retarget_lip(lip_motion_points : LivePortraitMotionPoints) -> LivePortraitMotionPoints:
	lip_retargeter = get_inference_pool().get('lip_retargeter')

	with conditional_thread_semaphore():
		lip_motion_points = lip_retargeter.run(None,
		{
			'input': lip_motion_points
		})[0]

	return lip_motion_points


def forward_stitch_motion_points(source_motion_points : LivePortraitMotionPoints, target_motion_points : LivePortraitMotionPoints) -> LivePortraitMotionPoints:
	stitcher = get_inference_pool().get('stitcher')

	with thread_semaphore():
		motion_points = stitcher.run(None,
		{
			'source': source_motion_points,
			'target': target_motion_points
		})[0]

	return motion_points


def forward_generate_frame(feature_volume : LivePortraitFeatureVolume, source_motion_points : LivePortraitMotionPoints, target_motion_points : LivePortraitMotionPoints) -> VisionFrame:
	generator = get_inference_pool().get('generator')

	with thread_semaphore():
		crop_vision_frame = generator.run(None,
		{
			'feature_volume': feature_volume,
			'source': source_motion_points,
			'target': target_motion_points
		})[0][0]

	return crop_vision_frame


def edit_eyebrow_direction(expression : LivePortraitExpression) -> LivePortraitExpression:
	face_editor_eyebrow = state_manager.get_item('face_editor_eyebrow_direction')

	if face_editor_eyebrow > 0:
		expression[0, 1, 1] += numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.015, 0.015 ])
		expression[0, 2, 1] -= numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.020, 0.020 ])
	else:
		expression[0, 1, 0] -= numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.015, 0.015 ])
		expression[0, 2, 0] += numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.020, 0.020 ])
		expression[0, 1, 1] += numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.005, 0.005 ])
		expression[0, 2, 1] -= numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.005, 0.005 ])
	return expression


def edit_eye_gaze(expression : LivePortraitExpression) -> LivePortraitExpression:
	face_editor_eye_gaze_horizontal = state_manager.get_item('face_editor_eye_gaze_horizontal')
	face_editor_eye_gaze_vertical = state_manager.get_item('face_editor_eye_gaze_vertical')

	if face_editor_eye_gaze_horizontal > 0:
		expression[0, 11, 0] += numpy.interp(face_editor_eye_gaze_horizontal, [ -1, 1 ], [ -0.015, 0.015 ])
		expression[0, 15, 0] += numpy.interp(face_editor_eye_gaze_horizontal, [ -1, 1 ], [ -0.020, 0.020 ])
	else:
		expression[0, 11, 0] += numpy.interp(face_editor_eye_gaze_horizontal, [ -1, 1 ], [ -0.020, 0.020 ])
		expression[0, 15, 0] += numpy.interp(face_editor_eye_gaze_horizontal, [ -1, 1 ], [ -0.015, 0.015 ])
	expression[0, 1, 1] += numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.0025, 0.0025 ])
	expression[0, 2, 1] -= numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.0025, 0.0025 ])
	expression[0, 11, 1] -= numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.010, 0.010 ])
	expression[0, 13, 1] -= numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.005, 0.005 ])
	expression[0, 15, 1] -= numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.010, 0.010 ])
	expression[0, 16, 1] -= numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.005, 0.005 ])
	return expression


def edit_eye_open(motion_points : LivePortraitMotionPoints, face_landmark_68 : FaceLandmark68) -> LivePortraitMotionPoints:
	face_editor_eye_open_ratio = state_manager.get_item('face_editor_eye_open_ratio')
	left_eye_ratio = calc_distance_ratio(face_landmark_68, 37, 40, 39, 36)
	right_eye_ratio = calc_distance_ratio(face_landmark_68, 43, 46, 45, 42)

	if face_editor_eye_open_ratio < 0:
		eye_motion_points = numpy.concatenate([ motion_points.ravel(), [ left_eye_ratio, right_eye_ratio, 0.0 ] ])
	else:
		eye_motion_points = numpy.concatenate([ motion_points.ravel(), [ left_eye_ratio, right_eye_ratio, 0.6 ] ])
	eye_motion_points = eye_motion_points.reshape(1, -1).astype(numpy.float32)
	eye_motion_points = forward_retarget_eye(eye_motion_points) * numpy.abs(face_editor_eye_open_ratio)
	eye_motion_points = eye_motion_points.reshape(-1, 21, 3)
	return eye_motion_points


def edit_lip_open(motion_points : LivePortraitMotionPoints, face_landmark_68 : FaceLandmark68) -> LivePortraitMotionPoints:
	face_editor_lip_open_ratio = state_manager.get_item('face_editor_lip_open_ratio')
	lip_ratio = calc_distance_ratio(face_landmark_68, 62, 66, 54, 48)

	if face_editor_lip_open_ratio < 0:
		lip_motion_points = numpy.concatenate([ motion_points.ravel(), [ lip_ratio, 0.0 ] ])
	else:
		lip_motion_points = numpy.concatenate([ motion_points.ravel(), [ lip_ratio, 1.0 ] ])
	lip_motion_points = lip_motion_points.reshape(1, -1).astype(numpy.float32)
	lip_motion_points = forward_retarget_lip(lip_motion_points) * numpy.abs(face_editor_lip_open_ratio)
	lip_motion_points = lip_motion_points.reshape(-1, 21, 3)
	return lip_motion_points


def edit_mouth_grim(expression : LivePortraitExpression) -> LivePortraitExpression:
	face_editor_mouth_grim = state_manager.get_item('face_editor_mouth_grim')
	if face_editor_mouth_grim > 0:
		expression[0, 17, 2] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.005, 0.005 ])
		expression[0, 19, 2] += numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.01, 0.01 ])
		expression[0, 20, 1] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.06, 0.06 ])
		expression[0, 20, 2] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.03, 0.03 ])
	else:
		expression[0, 19, 1] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.05, 0.05 ])
		expression[0, 19, 2] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.02, 0.02 ])
		expression[0, 20, 2] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.03, 0.03 ])
	return expression


def edit_mouth_position(expression : LivePortraitExpression) -> LivePortraitExpression:
	face_editor_mouth_position_horizontal = state_manager.get_item('face_editor_mouth_position_horizontal')
	face_editor_mouth_position_vertical = state_manager.get_item('face_editor_mouth_position_vertical')
	expression[0, 19, 0] += numpy.interp(face_editor_mouth_position_horizontal, [ -1, 1 ], [ -0.05, 0.05 ])
	expression[0, 20, 0] += numpy.interp(face_editor_mouth_position_horizontal, [ -1, 1 ], [ -0.04, 0.04 ])
	if face_editor_mouth_position_vertical > 0:
		expression[0, 19, 1] -= numpy.interp(face_editor_mouth_position_vertical, [ -1, 1 ], [ -0.04, 0.04 ])
		expression[0, 20, 1] -= numpy.interp(face_editor_mouth_position_vertical, [ -1, 1 ], [ -0.02, 0.02 ])
	else:
		expression[0, 19, 1] -= numpy.interp(face_editor_mouth_position_vertical, [ -1, 1 ], [ -0.05, 0.05 ])
		expression[0, 20, 1] -= numpy.interp(face_editor_mouth_position_vertical, [ -1, 1 ], [ -0.04, 0.04 ])
	return expression


def edit_mouth_pout(expression : LivePortraitExpression) -> LivePortraitExpression:
	face_editor_mouth_pout = state_manager.get_item('face_editor_mouth_pout')
	if face_editor_mouth_pout > 0:
		expression[0, 19, 1] -= numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.022, 0.022 ])
		expression[0, 19, 2] += numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.025, 0.025 ])
		expression[0, 20, 2] -= numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.002, 0.002 ])
	else:
		expression[0, 19, 1] += numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.022, 0.022 ])
		expression[0, 19, 2] += numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.025, 0.025 ])
		expression[0, 20, 2] -= numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.002, 0.002 ])
expression[0, 20, 2] -= numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.002, 0.002 ])
|
410 |
+
return expression
|
411 |
+
|
412 |
+
|
413 |
+
def edit_mouth_purse(expression : LivePortraitExpression) -> LivePortraitExpression:
|
414 |
+
face_editor_mouth_purse = state_manager.get_item('face_editor_mouth_purse')
|
415 |
+
if face_editor_mouth_purse > 0:
|
416 |
+
expression[0, 19, 1] -= numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.04, 0.04 ])
|
417 |
+
expression[0, 19, 2] -= numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.02, 0.02 ])
|
418 |
+
else:
|
419 |
+
expression[0, 14, 1] -= numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.02, 0.02 ])
|
420 |
+
expression[0, 17, 2] += numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.01, 0.01 ])
|
421 |
+
expression[0, 19, 2] -= numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.015, 0.015 ])
|
422 |
+
expression[0, 20, 2] -= numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.002, 0.002 ])
|
423 |
+
return expression
|
424 |
+
|
425 |
+
|
426 |
+
def edit_mouth_smile(expression : LivePortraitExpression) -> LivePortraitExpression:
|
427 |
+
face_editor_mouth_smile = state_manager.get_item('face_editor_mouth_smile')
|
428 |
+
if face_editor_mouth_smile > 0:
|
429 |
+
expression[0, 20, 1] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.015, 0.015 ])
|
430 |
+
expression[0, 14, 1] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.025, 0.025 ])
|
431 |
+
expression[0, 17, 1] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.01, 0.01 ])
|
432 |
+
expression[0, 17, 2] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.004, 0.004 ])
|
433 |
+
expression[0, 3, 1] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.0045, 0.0045 ])
|
434 |
+
expression[0, 7, 1] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.0045, 0.0045 ])
|
435 |
+
else:
|
436 |
+
expression[0, 14, 1] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.02, 0.02 ])
|
437 |
+
expression[0, 17, 1] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.003, 0.003 ])
|
438 |
+
expression[0, 19, 1] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.02, 0.02 ])
|
439 |
+
expression[0, 19, 2] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.005, 0.005 ])
|
440 |
+
expression[0, 20, 2] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.01, 0.01 ])
|
441 |
+
expression[0, 3, 1] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.0045, 0.0045 ])
|
442 |
+
expression[0, 7, 1] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.0045, 0.0045 ])
|
443 |
+
return expression
|
444 |
+
|
445 |
+
|
446 |
+
def edit_head_rotation(pitch : LivePortraitPitch, yaw : LivePortraitYaw, roll : LivePortraitRoll) -> LivePortraitRotation:
|
447 |
+
face_editor_head_pitch = state_manager.get_item('face_editor_head_pitch')
|
448 |
+
face_editor_head_yaw = state_manager.get_item('face_editor_head_yaw')
|
449 |
+
face_editor_head_roll = state_manager.get_item('face_editor_head_roll')
|
450 |
+
edit_pitch = pitch + float(numpy.interp(face_editor_head_pitch, [ -1, 1 ], [ 20, -20 ]))
|
451 |
+
edit_yaw = yaw + float(numpy.interp(face_editor_head_yaw, [ -1, 1 ], [ 60, -60 ]))
|
452 |
+
edit_roll = roll + float(numpy.interp(face_editor_head_roll, [ -1, 1 ], [ -15, 15 ]))
|
453 |
+
edit_pitch, edit_yaw, edit_roll = limit_euler_angles(pitch, yaw, roll, edit_pitch, edit_yaw, edit_roll)
|
454 |
+
rotation = create_rotation(edit_pitch, edit_yaw, edit_roll)
|
455 |
+
return rotation
|
456 |
+
|
457 |
+
|
458 |
+
def calc_distance_ratio(face_landmark_68 : FaceLandmark68, top_index : int, bottom_index : int, left_index : int, right_index : int) -> float:
|
459 |
+
vertical_direction = face_landmark_68[top_index] - face_landmark_68[bottom_index]
|
460 |
+
horizontal_direction = face_landmark_68[left_index] - face_landmark_68[right_index]
|
461 |
+
distance_ratio = float(numpy.linalg.norm(vertical_direction) / (numpy.linalg.norm(horizontal_direction) + 1e-6))
|
462 |
+
return distance_ratio
|
463 |
+
|
464 |
+
|
465 |
+
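Note: calc_distance_ratio expresses how open an eye or the mouth is as the vertical landmark distance divided by the horizontal one. A minimal worked example with hypothetical coordinates (not real landmarks):

# Minimal sketch: openness ratio from two landmark pairs.
import numpy

face_landmark_68 = numpy.zeros((68, 2))
face_landmark_68[37] = (10.0, 4.0)   # upper eyelid
face_landmark_68[40] = (10.0, 10.0)  # lower eyelid
face_landmark_68[39] = (16.0, 7.0)   # inner eye corner
face_landmark_68[36] = (4.0, 7.0)    # outer eye corner

vertical = numpy.linalg.norm(face_landmark_68[37] - face_landmark_68[40])    # 6.0
horizontal = numpy.linalg.norm(face_landmark_68[39] - face_landmark_68[36])  # 12.0
print(vertical / (horizontal + 1e-6))  # ~0.5, a half open eye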
def prepare_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
    model_size = get_model_options().get('size')
    prepare_size = (model_size[0] // 2, model_size[1] // 2)
    crop_vision_frame = cv2.resize(crop_vision_frame, prepare_size, interpolation = cv2.INTER_AREA)
    crop_vision_frame = crop_vision_frame[:, :, ::-1] / 255.0
    crop_vision_frame = numpy.expand_dims(crop_vision_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
    return crop_vision_frame


def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
    crop_vision_frame = crop_vision_frame.transpose(1, 2, 0).clip(0, 1)
    crop_vision_frame = (crop_vision_frame * 255.0)
    crop_vision_frame = crop_vision_frame.astype(numpy.uint8)[:, :, ::-1]
    return crop_vision_frame


def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
    pass
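Note: prepare_crop_frame turns a BGR uint8 crop into a normalized NCHW float tensor and normalize_crop_frame reverses that conversion. A minimal standalone round trip with a hypothetical 4x4 frame (numpy only):

# Minimal sketch: BGR uint8 HxWx3 -> RGB float32 1x3xHxW and back.
import numpy

bgr_frame = numpy.random.randint(0, 255, (4, 4, 3), dtype = numpy.uint8)
tensor = numpy.expand_dims((bgr_frame[:, :, ::-1] / 255.0).transpose(2, 0, 1), axis = 0).astype(numpy.float32)
print(tensor.shape)  # (1, 3, 4, 4)

restored = (tensor[0].transpose(1, 2, 0).clip(0, 1) * 255.0).astype(numpy.uint8)[:, :, ::-1]
print(numpy.max(numpy.abs(bgr_frame.astype(int) - restored.astype(int))))  # at most 1, from float truncation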
def process_frame(inputs : FaceEditorInputs) -> VisionFrame:
    reference_faces = inputs.get('reference_faces')
    target_vision_frame = inputs.get('target_vision_frame')
    many_faces = sort_and_filter_faces(get_many_faces([ target_vision_frame ]))

    if state_manager.get_item('face_selector_mode') == 'many':
        if many_faces:
            for target_face in many_faces:
                target_vision_frame = edit_face(target_face, target_vision_frame)
    if state_manager.get_item('face_selector_mode') == 'one':
        target_face = get_one_face(many_faces)
        if target_face:
            target_vision_frame = edit_face(target_face, target_vision_frame)
    if state_manager.get_item('face_selector_mode') == 'reference':
        similar_faces = find_similar_faces(many_faces, reference_faces, state_manager.get_item('reference_face_distance'))
        if similar_faces:
            for similar_face in similar_faces:
                target_vision_frame = edit_face(similar_face, target_vision_frame)
    return target_vision_frame


def process_frames(source_path : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None:
    reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None

    for queue_payload in process_manager.manage(queue_payloads):
        target_vision_path = queue_payload['frame_path']
        target_vision_frame = read_image(target_vision_path)
        output_vision_frame = process_frame(
        {
            'reference_faces': reference_faces,
            'target_vision_frame': target_vision_frame
        })
        write_image(target_vision_path, output_vision_frame)
        update_progress(1)


def process_image(source_path : str, target_path : str, output_path : str) -> None:
    reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None
    target_vision_frame = read_static_image(target_path)
    output_vision_frame = process_frame(
    {
        'reference_faces': reference_faces,
        'target_vision_frame': target_vision_frame
    })
    write_image(output_path, output_vision_frame)


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
    processors.multi_process_frames(None, temp_frame_paths, process_frames)
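Note: process_frame dispatches on the face_selector_mode setting ('many', 'one' or 'reference') before applying the edit. A minimal standalone sketch of the same dispatch pattern with placeholder functions (hypothetical names, not the real face analyser API):

# Minimal sketch of the selector dispatch used above.
from typing import Callable, List

def apply_to_selected(mode : str, faces : List[str], edit : Callable[[str], None]) -> None:
    if mode == 'many':
        for face in faces:
            edit(face)
    if mode == 'one' and faces:
        edit(faces[0])
    if mode == 'reference':
        for face in faces:
            if face.startswith('ref'):  # stand-in for the embedding distance check
                edit(face)

apply_to_selected('one', [ 'face_a', 'face_b' ], lambda face : print('editing', face))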
facefusion/processors/modules/lip_syncer.py
ADDED
@@ -0,0 +1,348 @@
from argparse import ArgumentParser
from functools import lru_cache
from typing import List

import cv2
import numpy

import facefusion.jobs.job_manager
import facefusion.jobs.job_store
import facefusion.processors.core as processors
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, video_manager, voice_extractor, wording
from facefusion.audio import create_empty_audio_frame, get_voice_frame, read_static_voice
from facefusion.common_helper import create_float_metavar
from facefusion.common_helper import get_first
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_analyser import get_many_faces, get_one_face
from facefusion.face_helper import create_bounding_box, paste_back, warp_face_by_bounding_box, warp_face_by_face_landmark_5
from facefusion.face_masker import create_area_mask, create_box_mask, create_occlusion_mask
from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
from facefusion.face_store import get_reference_faces
from facefusion.filesystem import filter_audio_paths, has_audio, in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.types import LipSyncerInputs, LipSyncerWeight
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore
from facefusion.types import ApplyStateItem, Args, AudioFrame, BoundingBox, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import read_image, read_static_image, restrict_video_fps, write_image


@lru_cache(maxsize = None)
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
    return\
    {
        'edtalk_256':
        {
            'hashes':
            {
                'lip_syncer':
                {
                    'url': resolve_download_url('models-3.3.0', 'edtalk_256.hash'),
                    'path': resolve_relative_path('../.assets/models/edtalk_256.hash')
                }
            },
            'sources':
            {
                'lip_syncer':
                {
                    'url': resolve_download_url('models-3.3.0', 'edtalk_256.onnx'),
                    'path': resolve_relative_path('../.assets/models/edtalk_256.onnx')
                }
            },
            'type': 'edtalk',
            'size': (256, 256)
        },
        'wav2lip_96':
        {
            'hashes':
            {
                'lip_syncer':
                {
                    'url': resolve_download_url('models-3.0.0', 'wav2lip_96.hash'),
                    'path': resolve_relative_path('../.assets/models/wav2lip_96.hash')
                }
            },
            'sources':
            {
                'lip_syncer':
                {
                    'url': resolve_download_url('models-3.0.0', 'wav2lip_96.onnx'),
                    'path': resolve_relative_path('../.assets/models/wav2lip_96.onnx')
                }
            },
            'type': 'wav2lip',
            'size': (96, 96)
        },
        'wav2lip_gan_96':
        {
            'hashes':
            {
                'lip_syncer':
                {
                    'url': resolve_download_url('models-3.0.0', 'wav2lip_gan_96.hash'),
                    'path': resolve_relative_path('../.assets/models/wav2lip_gan_96.hash')
                }
            },
            'sources':
            {
                'lip_syncer':
                {
                    'url': resolve_download_url('models-3.0.0', 'wav2lip_gan_96.onnx'),
                    'path': resolve_relative_path('../.assets/models/wav2lip_gan_96.onnx')
                }
            },
            'type': 'wav2lip',
            'size': (96, 96)
        }
    }
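Note: the model set maps each lip syncer model name to its download hashes and sources plus the input type and size used later in the module. A minimal sketch of querying such an entry (plain dict, not the cached module function):

# Minimal sketch: reading the options the rest of the module relies on.
model_set = {
    'edtalk_256': { 'type': 'edtalk', 'size': (256, 256) },
    'wav2lip_gan_96': { 'type': 'wav2lip', 'size': (96, 96) }
}

options = model_set.get('wav2lip_gan_96')
print(options.get('type'), options.get('size'))  # wav2lip (96, 96)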
def get_inference_pool() -> InferencePool:
    model_names = [ state_manager.get_item('lip_syncer_model') ]
    model_source_set = get_model_options().get('sources')

    return inference_manager.get_inference_pool(__name__, model_names, model_source_set)


def clear_inference_pool() -> None:
    model_names = [ state_manager.get_item('lip_syncer_model') ]
    inference_manager.clear_inference_pool(__name__, model_names)


def get_model_options() -> ModelOptions:
    model_name = state_manager.get_item('lip_syncer_model')
    return create_static_model_set('full').get(model_name)


def register_args(program : ArgumentParser) -> None:
    group_processors = find_argument_group(program, 'processors')
    if group_processors:
        group_processors.add_argument('--lip-syncer-model', help = wording.get('help.lip_syncer_model'), default = config.get_str_value('processors', 'lip_syncer_model', 'wav2lip_gan_96'), choices = processors_choices.lip_syncer_models)
        group_processors.add_argument('--lip-syncer-weight', help = wording.get('help.lip_syncer_weight'), type = float, default = config.get_float_value('processors', 'lip_syncer_weight', '0.5'), choices = processors_choices.lip_syncer_weight_range, metavar = create_float_metavar(processors_choices.lip_syncer_weight_range))
        facefusion.jobs.job_store.register_step_keys([ 'lip_syncer_model', 'lip_syncer_weight' ])


def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
    apply_state_item('lip_syncer_model', args.get('lip_syncer_model'))
    apply_state_item('lip_syncer_weight', args.get('lip_syncer_weight'))


def pre_check() -> bool:
    model_hash_set = get_model_options().get('hashes')
    model_source_set = get_model_options().get('sources')

    return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)


def pre_process(mode : ProcessMode) -> bool:
    if not has_audio(state_manager.get_item('source_paths')):
        logger.error(wording.get('choose_audio_source') + wording.get('exclamation_mark'), __name__)
        return False
    if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
        logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
        return False
    if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
        logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
        return False
    if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
        logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
        return False
    return True


def post_process() -> None:
    read_static_image.cache_clear()
    read_static_voice.cache_clear()
    video_manager.clear_video_pool()
    if state_manager.get_item('video_memory_strategy') in [ 'strict', 'moderate' ]:
        clear_inference_pool()
    if state_manager.get_item('video_memory_strategy') == 'strict':
        content_analyser.clear_inference_pool()
        face_classifier.clear_inference_pool()
        face_detector.clear_inference_pool()
        face_landmarker.clear_inference_pool()
        face_masker.clear_inference_pool()
        face_recognizer.clear_inference_pool()
        voice_extractor.clear_inference_pool()


def sync_lip(target_face : Face, temp_audio_frame : AudioFrame, temp_vision_frame : VisionFrame) -> VisionFrame:
    model_type = get_model_options().get('type')
    model_size = get_model_options().get('size')
    temp_audio_frame = prepare_audio_frame(temp_audio_frame)
    crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmark_set.get('5/68'), 'ffhq_512', (512, 512))
    crop_masks = []

    if 'occlusion' in state_manager.get_item('face_mask_types'):
        occlusion_mask = create_occlusion_mask(crop_vision_frame)
        crop_masks.append(occlusion_mask)

    if model_type == 'edtalk':
        lip_syncer_weight = numpy.array([ state_manager.get_item('lip_syncer_weight') ]).astype(numpy.float32)
        box_mask = create_box_mask(crop_vision_frame, state_manager.get_item('face_mask_blur'), state_manager.get_item('face_mask_padding'))
        crop_masks.append(box_mask)
        crop_vision_frame = prepare_crop_frame(crop_vision_frame)
        crop_vision_frame = forward_edtalk(temp_audio_frame, crop_vision_frame, lip_syncer_weight)
        crop_vision_frame = normalize_crop_frame(crop_vision_frame)
    if model_type == 'wav2lip':
        face_landmark_68 = cv2.transform(target_face.landmark_set.get('68').reshape(1, -1, 2), affine_matrix).reshape(-1, 2)
        area_mask = create_area_mask(crop_vision_frame, face_landmark_68, [ 'lower-face' ])
        crop_masks.append(area_mask)
        bounding_box = create_bounding_box(face_landmark_68)
        bounding_box = resize_bounding_box(bounding_box, 1 / 8)
        area_vision_frame, area_matrix = warp_face_by_bounding_box(crop_vision_frame, bounding_box, model_size)
        area_vision_frame = prepare_crop_frame(area_vision_frame)
        area_vision_frame = forward_wav2lip(temp_audio_frame, area_vision_frame)
        area_vision_frame = normalize_crop_frame(area_vision_frame)
        crop_vision_frame = cv2.warpAffine(area_vision_frame, cv2.invertAffineTransform(area_matrix), (512, 512), borderMode = cv2.BORDER_REPLICATE)

    crop_mask = numpy.minimum.reduce(crop_masks)
    paste_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, crop_mask, affine_matrix)
    return paste_vision_frame
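Note: sync_lip collects several masks (occlusion, box or lower face area) and combines them with an element-wise minimum before pasting the synced crop back. A minimal numpy sketch of that combination with hypothetical 1x4 masks:

# Minimal sketch: the combined mask keeps a pixel only where every mask allows it.
import numpy

occlusion_mask = numpy.array([ 1.0, 1.0, 0.2, 0.0 ])
area_mask = numpy.array([ 0.0, 1.0, 1.0, 1.0 ])
crop_mask = numpy.minimum.reduce([ occlusion_mask, area_mask ])
print(crop_mask)  # [0.  1.  0.2 0. ]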
def forward_edtalk(temp_audio_frame : AudioFrame, crop_vision_frame : VisionFrame, lip_syncer_weight : LipSyncerWeight) -> VisionFrame:
    lip_syncer = get_inference_pool().get('lip_syncer')

    with conditional_thread_semaphore():
        crop_vision_frame = lip_syncer.run(None,
        {
            'source': temp_audio_frame,
            'target': crop_vision_frame,
            'weight': lip_syncer_weight
        })[0]

    return crop_vision_frame


def forward_wav2lip(temp_audio_frame : AudioFrame, area_vision_frame : VisionFrame) -> VisionFrame:
    lip_syncer = get_inference_pool().get('lip_syncer')

    with conditional_thread_semaphore():
        area_vision_frame = lip_syncer.run(None,
        {
            'source': temp_audio_frame,
            'target': area_vision_frame
        })[0]

    return area_vision_frame


def prepare_audio_frame(temp_audio_frame : AudioFrame) -> AudioFrame:
    model_type = get_model_options().get('type')
    temp_audio_frame = numpy.maximum(numpy.exp(-5 * numpy.log(10)), temp_audio_frame)
    temp_audio_frame = numpy.log10(temp_audio_frame) * 1.6 + 3.2
    temp_audio_frame = temp_audio_frame.clip(-4, 4).astype(numpy.float32)

    if model_type == 'wav2lip':
        temp_audio_frame = temp_audio_frame * state_manager.get_item('lip_syncer_weight') * 2.0

    temp_audio_frame = numpy.expand_dims(temp_audio_frame, axis = (0, 1))
    return temp_audio_frame
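Note: prepare_audio_frame floors the spectrogram values at 1e-5 (that is exp(-5 ln 10)), rescales them as log10 * 1.6 + 3.2 and clips to [ -4, 4 ]. A tiny worked example of that scaling (numpy only):

# Minimal worked example of the audio scaling above.
import numpy

mel_values = numpy.array([ 0.0, 0.01, 1.0 ])
mel_values = numpy.maximum(numpy.exp(-5 * numpy.log(10)), mel_values)  # floor of 1e-5
mel_values = numpy.log10(mel_values) * 1.6 + 3.2
print(mel_values.clip(-4, 4))  # [-4.   0.   3.2]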
def prepare_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
    model_type = get_model_options().get('type')
    model_size = get_model_options().get('size')

    if model_type == 'edtalk':
        crop_vision_frame = cv2.resize(crop_vision_frame, model_size, interpolation = cv2.INTER_AREA)
        crop_vision_frame = crop_vision_frame[:, :, ::-1] / 255.0
        crop_vision_frame = numpy.expand_dims(crop_vision_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
    if model_type == 'wav2lip':
        crop_vision_frame = numpy.expand_dims(crop_vision_frame, axis = 0)
        prepare_vision_frame = crop_vision_frame.copy()
        prepare_vision_frame[:, model_size[0] // 2:] = 0
        crop_vision_frame = numpy.concatenate((prepare_vision_frame, crop_vision_frame), axis = 3)
        crop_vision_frame = crop_vision_frame.transpose(0, 3, 1, 2).astype('float32') / 255.0

    return crop_vision_frame


def resize_bounding_box(bounding_box : BoundingBox, aspect_ratio : float) -> BoundingBox:
    x1, y1, x2, y2 = bounding_box
    y1 -= numpy.abs(y2 - y1) * aspect_ratio
    bounding_box[1] = max(y1, 0)
    return bounding_box
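Note: for wav2lip the crop is duplicated into a reference copy whose lower half is zeroed, and the two copies are stacked into a 6 channel NCHW tensor. A minimal shape sketch with a hypothetical 96x96 crop:

# Minimal sketch: masked copy plus original crop become a 6 channel input.
import numpy

crop = numpy.random.randint(0, 255, (1, 96, 96, 3), dtype = numpy.uint8)
masked = crop.copy()
masked[:, 48:] = 0  # zero the lower half of the face
stacked = numpy.concatenate((masked, crop), axis = 3)
tensor = stacked.transpose(0, 3, 1, 2).astype('float32') / 255.0
print(tensor.shape)  # (1, 6, 96, 96)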
def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
    model_type = get_model_options().get('type')
    crop_vision_frame = crop_vision_frame[0].transpose(1, 2, 0)
    crop_vision_frame = crop_vision_frame.clip(0, 1) * 255
    crop_vision_frame = crop_vision_frame.astype(numpy.uint8)

    if model_type == 'edtalk':
        crop_vision_frame = crop_vision_frame[:, :, ::-1]
        crop_vision_frame = cv2.resize(crop_vision_frame, (512, 512), interpolation = cv2.INTER_CUBIC)

    return crop_vision_frame


def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
    pass


def process_frame(inputs : LipSyncerInputs) -> VisionFrame:
    reference_faces = inputs.get('reference_faces')
    source_audio_frame = inputs.get('source_audio_frame')
    target_vision_frame = inputs.get('target_vision_frame')
    many_faces = sort_and_filter_faces(get_many_faces([ target_vision_frame ]))

    if state_manager.get_item('face_selector_mode') == 'many':
        if many_faces:
            for target_face in many_faces:
                target_vision_frame = sync_lip(target_face, source_audio_frame, target_vision_frame)
    if state_manager.get_item('face_selector_mode') == 'one':
        target_face = get_one_face(many_faces)
        if target_face:
            target_vision_frame = sync_lip(target_face, source_audio_frame, target_vision_frame)
    if state_manager.get_item('face_selector_mode') == 'reference':
        similar_faces = find_similar_faces(many_faces, reference_faces, state_manager.get_item('reference_face_distance'))
        if similar_faces:
            for similar_face in similar_faces:
                target_vision_frame = sync_lip(similar_face, source_audio_frame, target_vision_frame)
    return target_vision_frame


def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None:
    reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None
    source_audio_path = get_first(filter_audio_paths(source_paths))
    temp_video_fps = restrict_video_fps(state_manager.get_item('target_path'), state_manager.get_item('output_video_fps'))

    for queue_payload in process_manager.manage(queue_payloads):
        frame_number = queue_payload.get('frame_number')
        target_vision_path = queue_payload.get('frame_path')
        source_audio_frame = get_voice_frame(source_audio_path, temp_video_fps, frame_number)
        if not numpy.any(source_audio_frame):
            source_audio_frame = create_empty_audio_frame()
        target_vision_frame = read_image(target_vision_path)
        output_vision_frame = process_frame(
        {
            'reference_faces': reference_faces,
            'source_audio_frame': source_audio_frame,
            'target_vision_frame': target_vision_frame
        })
        write_image(target_vision_path, output_vision_frame)
        update_progress(1)


def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
    reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None
    source_audio_frame = create_empty_audio_frame()
    target_vision_frame = read_static_image(target_path)
    output_vision_frame = process_frame(
    {
        'reference_faces': reference_faces,
        'source_audio_frame': source_audio_frame,
        'target_vision_frame': target_vision_frame
    })
    write_image(output_path, output_vision_frame)


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
    source_audio_paths = filter_audio_paths(state_manager.get_item('source_paths'))
    temp_video_fps = restrict_video_fps(state_manager.get_item('target_path'), state_manager.get_item('output_video_fps'))
    for source_audio_path in source_audio_paths:
        read_static_voice(source_audio_path, temp_video_fps)
    processors.multi_process_frames(source_paths, temp_frame_paths, process_frames)
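Note: process_frames pulls the voice frame that matches each video frame number and falls back to an empty, silent frame when nothing is returned. A minimal sketch of that fallback (the mel layout below is an assumption, not the module's actual frame shape):

# Minimal sketch of the silence fallback used above.
import numpy

def pick_audio_frame(voice_frame):
    if voice_frame is None or not numpy.any(voice_frame):
        return numpy.zeros((80, 16), dtype = numpy.float32)  # assumed mel layout, silent
    return voice_frame

print(pick_audio_frame(numpy.zeros((80, 16))).sum())  # 0.0, silent fallback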
facefusion/uis/layouts/default.py
CHANGED
@@ -1,7 +1,7 @@
 import gradio

 from facefusion import state_manager
-from facefusion.uis.components import about,
+from facefusion.uis.components import about, age_modifier_options, common_options, deep_swapper_options, download, execution, execution_queue_count, execution_thread_count, expression_restorer_options, face_debugger_options, face_detector, face_editor_options, face_enhancer_options, face_landmarker, face_masker, face_selector, face_swapper_options, frame_colorizer_options, frame_enhancer_options, instant_runner, job_manager, job_runner, lip_syncer_options, memory, output, output_options, preview, processors, source, target, temp_frame, terminal, trim_frame, ui_workflow


 def pre_check() -> bool:
@@ -16,16 +16,16 @@ def render() -> gradio.Blocks:
     about.render()
   with gradio.Blocks():
     processors.render()
-
-
-
-
-
-
-
-
-
-
+  with gradio.Blocks():
+    age_modifier_options.render()
+  with gradio.Blocks():
+    deep_swapper_options.render()
+  with gradio.Blocks():
+    expression_restorer_options.render()
+  with gradio.Blocks():
+    face_debugger_options.render()
+  with gradio.Blocks():
+    face_editor_options.render()
   with gradio.Blocks():
     face_enhancer_options.render()
   with gradio.Blocks():
@@ -34,18 +34,18 @@ def render() -> gradio.Blocks:
     frame_colorizer_options.render()
   with gradio.Blocks():
     frame_enhancer_options.render()
-
-
-
-
-
-
+  with gradio.Blocks():
+    lip_syncer_options.render()
+  with gradio.Blocks():
+    execution.render()
+    execution_thread_count.render()
+    execution_queue_count.render()
   with gradio.Blocks():
     download.render()
-
-
-
-
+  with gradio.Blocks():
+    memory.render()
+  with gradio.Blocks():
+    temp_frame.render()
   with gradio.Blocks():
     output_options.render()
   with gradio.Column(scale = 4):
@@ -57,11 +57,11 @@ def render() -> gradio.Blocks:
     output.render()
   with gradio.Blocks():
     terminal.render()
-
-
-
-
-
+  with gradio.Blocks():
+    ui_workflow.render()
+    instant_runner.render()
+    job_runner.render()
+    job_manager.render()
   with gradio.Column(scale = 7):
     with gradio.Blocks():
       preview.render()
@@ -73,8 +73,8 @@ def render() -> gradio.Blocks:
     face_masker.render()
   with gradio.Blocks():
     face_detector.render()
-
-
+  with gradio.Blocks():
+    face_landmarker.render()
   with gradio.Blocks():
     common_options.render()
   return layout
@@ -82,36 +82,36 @@ def render() -> gradio.Blocks:

 def listen() -> None:
   processors.listen()
-
-
-
-
-
+  age_modifier_options.listen()
+  deep_swapper_options.listen()
+  expression_restorer_options.listen()
+  face_debugger_options.listen()
+  face_editor_options.listen()
   face_enhancer_options.listen()
   face_swapper_options.listen()
   frame_colorizer_options.listen()
   frame_enhancer_options.listen()
-
-
-
-
+  lip_syncer_options.listen()
+  execution.listen()
+  execution_thread_count.listen()
+  execution_queue_count.listen()
   download.listen()
-
-
+  memory.listen()
+  temp_frame.listen()
   output_options.listen()
   source.listen()
   target.listen()
   output.listen()
-
-
-
+  instant_runner.listen()
+  job_runner.listen()
+  job_manager.listen()
   terminal.listen()
   preview.listen()
   trim_frame.listen()
   face_selector.listen()
   face_masker.listen()
   face_detector.listen()
-
+  face_landmarker.listen()
   common_options.listen()
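Note: the layout wires each UI component module through a render() call inside a gradio.Blocks context and a matching listen() call that binds its events. A minimal self-contained sketch of the same pattern with a hypothetical component (assuming only that gradio is installed, not one of the FaceFusion modules):

# Minimal sketch of the render/listen pattern used by the layout.
import gradio

def render() -> gradio.Blocks:
    with gradio.Blocks() as layout:
        textbox = gradio.Textbox(label = 'name')
        button = gradio.Button('greet')
        output = gradio.Textbox(label = 'greeting')
        # listen(): bind events once the components exist
        button.click(lambda name : 'hello ' + name, inputs = textbox, outputs = output)
    return layout

if __name__ == '__main__':
    render().launch()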
facefusion/uis/types.py
CHANGED
@@ -3,15 +3,15 @@ from typing import Any, Dict, IO, Literal, TypeAlias

 File : TypeAlias = IO[Any]
 ComponentName = Literal\
 [
-
-
-
-
-
-
-
-
-
+    'age_modifier_direction_slider',
+    'age_modifier_model_dropdown',
+    'benchmark_cycle_count_slider',
+    'benchmark_resolutions_checkbox_group',
+    'deep_swapper_model_dropdown',
+    'deep_swapper_morph_slider',
+    'expression_restorer_factor_slider',
+    'expression_restorer_model_dropdown',
+    'face_debugger_items_checkbox_group',
     'face_detector_angles_checkbox_group',
     'face_detector_model_dropdown',
     'face_detector_score_slider',
@@ -34,8 +34,8 @@ ComponentName = Literal\
     'face_enhancer_blend_slider',
     'face_enhancer_model_dropdown',
     'face_enhancer_weight_slider',
-
-
+    'face_landmarker_model_dropdown',
+    'face_landmarker_score_slider',
     'face_mask_types_checkbox_group',
     'face_mask_areas_checkbox_group',
     'face_mask_regions_checkbox_group',
@@ -73,10 +73,10 @@ ComponentName = Literal\
     'target_image',
     'target_video',
     'ui_workflow_dropdown',
-
-
-
-
+    'webcam_device_id_dropdown',
+    'webcam_fps_slider',
+    'webcam_mode_radio',
+    'webcam_resolution_dropdown'
 ]
 Component : TypeAlias = Any
 ComponentOptions : TypeAlias = Dict[str, Any]
|