Delete facefusion/processors/modules/face_editor.py
facefusion/processors/modules/face_editor.py
DELETED
@@ -1,533 +0,0 @@
from argparse import ArgumentParser
from functools import lru_cache
from typing import List, Tuple

import cv2
import numpy

import facefusion.jobs.job_manager
import facefusion.jobs.job_store
import facefusion.processors.core as processors
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, video_manager, wording
from facefusion.common_helper import create_float_metavar
from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_analyser import get_many_faces, get_one_face
from facefusion.face_helper import paste_back, scale_face_landmark_5, warp_face_by_face_landmark_5
from facefusion.face_masker import create_box_mask
from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
from facefusion.face_store import get_reference_faces
from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
from facefusion.processors import choices as processors_choices
from facefusion.processors.live_portrait import create_rotation, limit_euler_angles, limit_expression
from facefusion.processors.types import FaceEditorInputs, LivePortraitExpression, LivePortraitFeatureVolume, LivePortraitMotionPoints, LivePortraitPitch, LivePortraitRoll, LivePortraitRotation, LivePortraitScale, LivePortraitTranslation, LivePortraitYaw
from facefusion.program_helper import find_argument_group
from facefusion.thread_helper import conditional_thread_semaphore, thread_semaphore
from facefusion.types import ApplyStateItem, Args, DownloadScope, Face, FaceLandmark68, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
from facefusion.vision import read_image, read_static_image, write_image


@lru_cache(maxsize = None)
def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
    return\
    {
        'live_portrait':
        {
            'hashes':
            {
                'feature_extractor':
                {
                    'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.hash'),
                    'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.hash')
                },
                'motion_extractor':
                {
                    'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.hash'),
                    'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.hash')
                },
                'eye_retargeter':
                {
                    'url': resolve_download_url('models-3.0.0', 'live_portrait_eye_retargeter.hash'),
                    'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.hash')
                },
                'lip_retargeter':
                {
                    'url': resolve_download_url('models-3.0.0', 'live_portrait_lip_retargeter.hash'),
                    'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.hash')
                },
                'stitcher':
                {
                    'url': resolve_download_url('models-3.0.0', 'live_portrait_stitcher.hash'),
                    'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.hash')
                },
                'generator':
                {
                    'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.hash'),
                    'path': resolve_relative_path('../.assets/models/live_portrait_generator.hash')
                }
            },
            'sources':
            {
                'feature_extractor':
                {
                    'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.onnx'),
                    'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.onnx')
                },
                'motion_extractor':
                {
                    'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.onnx'),
                    'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.onnx')
                },
                'eye_retargeter':
                {
                    'url': resolve_download_url('models-3.0.0', 'live_portrait_eye_retargeter.onnx'),
                    'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.onnx')
                },
                'lip_retargeter':
                {
                    'url': resolve_download_url('models-3.0.0', 'live_portrait_lip_retargeter.onnx'),
                    'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.onnx')
                },
                'stitcher':
                {
                    'url': resolve_download_url('models-3.0.0', 'live_portrait_stitcher.onnx'),
                    'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.onnx')
                },
                'generator':
                {
                    'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.onnx'),
                    'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx')
                }
            },
            'template': 'ffhq_512',
            'size': (512, 512)
        }
    }


def get_inference_pool() -> InferencePool:
    model_names = [ state_manager.get_item('face_editor_model') ]
    model_source_set = get_model_options().get('sources')

    return inference_manager.get_inference_pool(__name__, model_names, model_source_set)


def clear_inference_pool() -> None:
    model_names = [ state_manager.get_item('face_editor_model') ]
    inference_manager.clear_inference_pool(__name__, model_names)


def get_model_options() -> ModelOptions:
    model_name = state_manager.get_item('face_editor_model')
    return create_static_model_set('full').get(model_name)


def register_args(program : ArgumentParser) -> None:
    group_processors = find_argument_group(program, 'processors')
    if group_processors:
        group_processors.add_argument('--face-editor-model', help = wording.get('help.face_editor_model'), default = config.get_str_value('processors', 'face_editor_model', 'live_portrait'), choices = processors_choices.face_editor_models)
        group_processors.add_argument('--face-editor-eyebrow-direction', help = wording.get('help.face_editor_eyebrow_direction'), type = float, default = config.get_float_value('processors', 'face_editor_eyebrow_direction', '0'), choices = processors_choices.face_editor_eyebrow_direction_range, metavar = create_float_metavar(processors_choices.face_editor_eyebrow_direction_range))
        group_processors.add_argument('--face-editor-eye-gaze-horizontal', help = wording.get('help.face_editor_eye_gaze_horizontal'), type = float, default = config.get_float_value('processors', 'face_editor_eye_gaze_horizontal', '0'), choices = processors_choices.face_editor_eye_gaze_horizontal_range, metavar = create_float_metavar(processors_choices.face_editor_eye_gaze_horizontal_range))
        group_processors.add_argument('--face-editor-eye-gaze-vertical', help = wording.get('help.face_editor_eye_gaze_vertical'), type = float, default = config.get_float_value('processors', 'face_editor_eye_gaze_vertical', '0'), choices = processors_choices.face_editor_eye_gaze_vertical_range, metavar = create_float_metavar(processors_choices.face_editor_eye_gaze_vertical_range))
        group_processors.add_argument('--face-editor-eye-open-ratio', help = wording.get('help.face_editor_eye_open_ratio'), type = float, default = config.get_float_value('processors', 'face_editor_eye_open_ratio', '0'), choices = processors_choices.face_editor_eye_open_ratio_range, metavar = create_float_metavar(processors_choices.face_editor_eye_open_ratio_range))
        group_processors.add_argument('--face-editor-lip-open-ratio', help = wording.get('help.face_editor_lip_open_ratio'), type = float, default = config.get_float_value('processors', 'face_editor_lip_open_ratio', '0'), choices = processors_choices.face_editor_lip_open_ratio_range, metavar = create_float_metavar(processors_choices.face_editor_lip_open_ratio_range))
        group_processors.add_argument('--face-editor-mouth-grim', help = wording.get('help.face_editor_mouth_grim'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_grim', '0'), choices = processors_choices.face_editor_mouth_grim_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_grim_range))
        group_processors.add_argument('--face-editor-mouth-pout', help = wording.get('help.face_editor_mouth_pout'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_pout', '0'), choices = processors_choices.face_editor_mouth_pout_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_pout_range))
        group_processors.add_argument('--face-editor-mouth-purse', help = wording.get('help.face_editor_mouth_purse'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_purse', '0'), choices = processors_choices.face_editor_mouth_purse_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_purse_range))
        group_processors.add_argument('--face-editor-mouth-smile', help = wording.get('help.face_editor_mouth_smile'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_smile', '0'), choices = processors_choices.face_editor_mouth_smile_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_smile_range))
        group_processors.add_argument('--face-editor-mouth-position-horizontal', help = wording.get('help.face_editor_mouth_position_horizontal'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_position_horizontal', '0'), choices = processors_choices.face_editor_mouth_position_horizontal_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_position_horizontal_range))
        group_processors.add_argument('--face-editor-mouth-position-vertical', help = wording.get('help.face_editor_mouth_position_vertical'), type = float, default = config.get_float_value('processors', 'face_editor_mouth_position_vertical', '0'), choices = processors_choices.face_editor_mouth_position_vertical_range, metavar = create_float_metavar(processors_choices.face_editor_mouth_position_vertical_range))
        group_processors.add_argument('--face-editor-head-pitch', help = wording.get('help.face_editor_head_pitch'), type = float, default = config.get_float_value('processors', 'face_editor_head_pitch', '0'), choices = processors_choices.face_editor_head_pitch_range, metavar = create_float_metavar(processors_choices.face_editor_head_pitch_range))
        group_processors.add_argument('--face-editor-head-yaw', help = wording.get('help.face_editor_head_yaw'), type = float, default = config.get_float_value('processors', 'face_editor_head_yaw', '0'), choices = processors_choices.face_editor_head_yaw_range, metavar = create_float_metavar(processors_choices.face_editor_head_yaw_range))
        group_processors.add_argument('--face-editor-head-roll', help = wording.get('help.face_editor_head_roll'), type = float, default = config.get_float_value('processors', 'face_editor_head_roll', '0'), choices = processors_choices.face_editor_head_roll_range, metavar = create_float_metavar(processors_choices.face_editor_head_roll_range))
        facefusion.jobs.job_store.register_step_keys([ 'face_editor_model', 'face_editor_eyebrow_direction', 'face_editor_eye_gaze_horizontal', 'face_editor_eye_gaze_vertical', 'face_editor_eye_open_ratio', 'face_editor_lip_open_ratio', 'face_editor_mouth_grim', 'face_editor_mouth_pout', 'face_editor_mouth_purse', 'face_editor_mouth_smile', 'face_editor_mouth_position_horizontal', 'face_editor_mouth_position_vertical', 'face_editor_head_pitch', 'face_editor_head_yaw', 'face_editor_head_roll' ])


def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
    apply_state_item('face_editor_model', args.get('face_editor_model'))
    apply_state_item('face_editor_eyebrow_direction', args.get('face_editor_eyebrow_direction'))
    apply_state_item('face_editor_eye_gaze_horizontal', args.get('face_editor_eye_gaze_horizontal'))
    apply_state_item('face_editor_eye_gaze_vertical', args.get('face_editor_eye_gaze_vertical'))
    apply_state_item('face_editor_eye_open_ratio', args.get('face_editor_eye_open_ratio'))
    apply_state_item('face_editor_lip_open_ratio', args.get('face_editor_lip_open_ratio'))
    apply_state_item('face_editor_mouth_grim', args.get('face_editor_mouth_grim'))
    apply_state_item('face_editor_mouth_pout', args.get('face_editor_mouth_pout'))
    apply_state_item('face_editor_mouth_purse', args.get('face_editor_mouth_purse'))
    apply_state_item('face_editor_mouth_smile', args.get('face_editor_mouth_smile'))
    apply_state_item('face_editor_mouth_position_horizontal', args.get('face_editor_mouth_position_horizontal'))
    apply_state_item('face_editor_mouth_position_vertical', args.get('face_editor_mouth_position_vertical'))
    apply_state_item('face_editor_head_pitch', args.get('face_editor_head_pitch'))
    apply_state_item('face_editor_head_yaw', args.get('face_editor_head_yaw'))
    apply_state_item('face_editor_head_roll', args.get('face_editor_head_roll'))


def pre_check() -> bool:
    model_hash_set = get_model_options().get('hashes')
    model_source_set = get_model_options().get('sources')

    return conditional_download_hashes(model_hash_set) and conditional_download_sources(model_source_set)


def pre_process(mode : ProcessMode) -> bool:
    if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')):
        logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__)
        return False
    if mode == 'output' and not in_directory(state_manager.get_item('output_path')):
        logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__)
        return False
    if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')):
        logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__)
        return False
    return True


def post_process() -> None:
    read_static_image.cache_clear()
    video_manager.clear_video_pool()
    if state_manager.get_item('video_memory_strategy') in [ 'strict', 'moderate' ]:
        clear_inference_pool()
    if state_manager.get_item('video_memory_strategy') == 'strict':
        content_analyser.clear_inference_pool()
        face_classifier.clear_inference_pool()
        face_detector.clear_inference_pool()
        face_landmarker.clear_inference_pool()
        face_masker.clear_inference_pool()
        face_recognizer.clear_inference_pool()


def edit_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
    model_template = get_model_options().get('template')
    model_size = get_model_options().get('size')
    face_landmark_5 = scale_face_landmark_5(target_face.landmark_set.get('5/68'), 1.5)
    crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, face_landmark_5, model_template, model_size)
    box_mask = create_box_mask(crop_vision_frame, state_manager.get_item('face_mask_blur'), (0, 0, 0, 0))
    crop_vision_frame = prepare_crop_frame(crop_vision_frame)
    crop_vision_frame = apply_edit(crop_vision_frame, target_face.landmark_set.get('68'))
    crop_vision_frame = normalize_crop_frame(crop_vision_frame)
    temp_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, box_mask, affine_matrix)
    return temp_vision_frame


def apply_edit(crop_vision_frame : VisionFrame, face_landmark_68 : FaceLandmark68) -> VisionFrame:
    feature_volume = forward_extract_feature(crop_vision_frame)
    pitch, yaw, roll, scale, translation, expression, motion_points = forward_extract_motion(crop_vision_frame)
    rotation = create_rotation(pitch, yaw, roll)
    motion_points_target = scale * (motion_points @ rotation.T + expression) + translation
    expression = edit_eye_gaze(expression)
    expression = edit_mouth_grim(expression)
    expression = edit_mouth_position(expression)
    expression = edit_mouth_pout(expression)
    expression = edit_mouth_purse(expression)
    expression = edit_mouth_smile(expression)
    expression = edit_eyebrow_direction(expression)
    expression = limit_expression(expression)
    rotation = edit_head_rotation(pitch, yaw, roll)
    motion_points_source = motion_points @ rotation.T
    motion_points_source += expression
    motion_points_source *= scale
    motion_points_source += translation
    motion_points_source += edit_eye_open(motion_points_target, face_landmark_68)
    motion_points_source += edit_lip_open(motion_points_target, face_landmark_68)
    motion_points_source = forward_stitch_motion_points(motion_points_source, motion_points_target)
    crop_vision_frame = forward_generate_frame(feature_volume, motion_points_source, motion_points_target)
    return crop_vision_frame
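

# Illustrative sketch, not part of the original module: the keypoint transform at
# the heart of apply_edit() above, run on toy data. LivePortrait describes a face
# as 21 implicit 3D keypoints and applies a pose as
# x' = scale * (x @ R.T + expression) + translation.
# The rotation matrix, shapes and values below are assumptions for demonstration only.
def _sketch_keypoint_transform() -> None:
    rotation = numpy.float32(
    [
        [ 0.985, 0.0, 0.174 ],
        [ 0.0, 1.0, 0.0 ],
        [ -0.174, 0.0, 0.985 ]
    ]) # roughly a 10 degree yaw
    motion_points = numpy.zeros((1, 21, 3), numpy.float32) # canonical keypoints
    expression = numpy.full((1, 21, 3), 0.01, numpy.float32) # per-keypoint offsets
    scale = numpy.float32(1.2)
    translation = numpy.float32([ 0.01, -0.02, 0.0 ])
    motion_points_target = scale * (motion_points @ rotation.T + expression) + translation
    assert motion_points_target.shape == (1, 21, 3)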


def forward_extract_feature(crop_vision_frame : VisionFrame) -> LivePortraitFeatureVolume:
    feature_extractor = get_inference_pool().get('feature_extractor')

    with conditional_thread_semaphore():
        feature_volume = feature_extractor.run(None,
        {
            'input': crop_vision_frame
        })[0]

    return feature_volume


def forward_extract_motion(crop_vision_frame : VisionFrame) -> Tuple[LivePortraitPitch, LivePortraitYaw, LivePortraitRoll, LivePortraitScale, LivePortraitTranslation, LivePortraitExpression, LivePortraitMotionPoints]:
    motion_extractor = get_inference_pool().get('motion_extractor')

    with conditional_thread_semaphore():
        pitch, yaw, roll, scale, translation, expression, motion_points = motion_extractor.run(None,
        {
            'input': crop_vision_frame
        })

    return pitch, yaw, roll, scale, translation, expression, motion_points


def forward_retarget_eye(eye_motion_points : LivePortraitMotionPoints) -> LivePortraitMotionPoints:
    eye_retargeter = get_inference_pool().get('eye_retargeter')

    with conditional_thread_semaphore():
        eye_motion_points = eye_retargeter.run(None,
        {
            'input': eye_motion_points
        })[0]

    return eye_motion_points


def forward_retarget_lip(lip_motion_points : LivePortraitMotionPoints) -> LivePortraitMotionPoints:
    lip_retargeter = get_inference_pool().get('lip_retargeter')

    with conditional_thread_semaphore():
        lip_motion_points = lip_retargeter.run(None,
        {
            'input': lip_motion_points
        })[0]

    return lip_motion_points


def forward_stitch_motion_points(source_motion_points : LivePortraitMotionPoints, target_motion_points : LivePortraitMotionPoints) -> LivePortraitMotionPoints:
    stitcher = get_inference_pool().get('stitcher')

    with thread_semaphore():
        motion_points = stitcher.run(None,
        {
            'source': source_motion_points,
            'target': target_motion_points
        })[0]

    return motion_points


def forward_generate_frame(feature_volume : LivePortraitFeatureVolume, source_motion_points : LivePortraitMotionPoints, target_motion_points : LivePortraitMotionPoints) -> VisionFrame:
    generator = get_inference_pool().get('generator')

    with thread_semaphore():
        crop_vision_frame = generator.run(None,
        {
            'feature_volume': feature_volume,
            'source': source_motion_points,
            'target': target_motion_points
        })[0][0]

    return crop_vision_frame


def edit_eyebrow_direction(expression : LivePortraitExpression) -> LivePortraitExpression:
    face_editor_eyebrow = state_manager.get_item('face_editor_eyebrow_direction')

    if face_editor_eyebrow > 0:
        expression[0, 1, 1] += numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.015, 0.015 ])
        expression[0, 2, 1] -= numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.020, 0.020 ])
    else:
        expression[0, 1, 0] -= numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.015, 0.015 ])
        expression[0, 2, 0] += numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.020, 0.020 ])
        expression[0, 1, 1] += numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.005, 0.005 ])
        expression[0, 2, 1] -= numpy.interp(face_editor_eyebrow, [ -1, 1 ], [ -0.005, 0.005 ])
    return expression
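

# Illustrative note, not part of the original module: every slider above and below
# maps a user value in [ -1, 1 ] onto a small keypoint offset via numpy.interp.
# For example, a half-raised value of 0.5 on a +/- 0.015 channel yields
# -0.015 + 0.75 * 0.03 = 0.0075. The assert below just demonstrates the mapping.
def _sketch_slider_mapping() -> None:
    offset = numpy.interp(0.5, [ -1, 1 ], [ -0.015, 0.015 ])
    assert abs(offset - 0.0075) < 1e-9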


def edit_eye_gaze(expression : LivePortraitExpression) -> LivePortraitExpression:
    face_editor_eye_gaze_horizontal = state_manager.get_item('face_editor_eye_gaze_horizontal')
    face_editor_eye_gaze_vertical = state_manager.get_item('face_editor_eye_gaze_vertical')

    if face_editor_eye_gaze_horizontal > 0:
        expression[0, 11, 0] += numpy.interp(face_editor_eye_gaze_horizontal, [ -1, 1 ], [ -0.015, 0.015 ])
        expression[0, 15, 0] += numpy.interp(face_editor_eye_gaze_horizontal, [ -1, 1 ], [ -0.020, 0.020 ])
    else:
        expression[0, 11, 0] += numpy.interp(face_editor_eye_gaze_horizontal, [ -1, 1 ], [ -0.020, 0.020 ])
        expression[0, 15, 0] += numpy.interp(face_editor_eye_gaze_horizontal, [ -1, 1 ], [ -0.015, 0.015 ])
    expression[0, 1, 1] += numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.0025, 0.0025 ])
    expression[0, 2, 1] -= numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.0025, 0.0025 ])
    expression[0, 11, 1] -= numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.010, 0.010 ])
    expression[0, 13, 1] -= numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.005, 0.005 ])
    expression[0, 15, 1] -= numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.010, 0.010 ])
    expression[0, 16, 1] -= numpy.interp(face_editor_eye_gaze_vertical, [ -1, 1 ], [ -0.005, 0.005 ])
    return expression


def edit_eye_open(motion_points : LivePortraitMotionPoints, face_landmark_68 : FaceLandmark68) -> LivePortraitMotionPoints:
    face_editor_eye_open_ratio = state_manager.get_item('face_editor_eye_open_ratio')
    left_eye_ratio = calc_distance_ratio(face_landmark_68, 37, 40, 39, 36)
    right_eye_ratio = calc_distance_ratio(face_landmark_68, 43, 46, 45, 42)

    if face_editor_eye_open_ratio < 0:
        eye_motion_points = numpy.concatenate([ motion_points.ravel(), [ left_eye_ratio, right_eye_ratio, 0.0 ] ])
    else:
        eye_motion_points = numpy.concatenate([ motion_points.ravel(), [ left_eye_ratio, right_eye_ratio, 0.6 ] ])
    eye_motion_points = eye_motion_points.reshape(1, -1).astype(numpy.float32)
    eye_motion_points = forward_retarget_eye(eye_motion_points) * numpy.abs(face_editor_eye_open_ratio)
    eye_motion_points = eye_motion_points.reshape(-1, 21, 3)
    return eye_motion_points


def edit_lip_open(motion_points : LivePortraitMotionPoints, face_landmark_68 : FaceLandmark68) -> LivePortraitMotionPoints:
    face_editor_lip_open_ratio = state_manager.get_item('face_editor_lip_open_ratio')
    lip_ratio = calc_distance_ratio(face_landmark_68, 62, 66, 54, 48)

    if face_editor_lip_open_ratio < 0:
        lip_motion_points = numpy.concatenate([ motion_points.ravel(), [ lip_ratio, 0.0 ] ])
    else:
        lip_motion_points = numpy.concatenate([ motion_points.ravel(), [ lip_ratio, 1.0 ] ])
    lip_motion_points = lip_motion_points.reshape(1, -1).astype(numpy.float32)
    lip_motion_points = forward_retarget_lip(lip_motion_points) * numpy.abs(face_editor_lip_open_ratio)
    lip_motion_points = lip_motion_points.reshape(-1, 21, 3)
    return lip_motion_points


def edit_mouth_grim(expression : LivePortraitExpression) -> LivePortraitExpression:
    face_editor_mouth_grim = state_manager.get_item('face_editor_mouth_grim')
    if face_editor_mouth_grim > 0:
        expression[0, 17, 2] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.005, 0.005 ])
        expression[0, 19, 2] += numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.01, 0.01 ])
        expression[0, 20, 1] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.06, 0.06 ])
        expression[0, 20, 2] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.03, 0.03 ])
    else:
        expression[0, 19, 1] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.05, 0.05 ])
        expression[0, 19, 2] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.02, 0.02 ])
        expression[0, 20, 2] -= numpy.interp(face_editor_mouth_grim, [ -1, 1 ], [ -0.03, 0.03 ])
    return expression


def edit_mouth_position(expression : LivePortraitExpression) -> LivePortraitExpression:
    face_editor_mouth_position_horizontal = state_manager.get_item('face_editor_mouth_position_horizontal')
    face_editor_mouth_position_vertical = state_manager.get_item('face_editor_mouth_position_vertical')
    expression[0, 19, 0] += numpy.interp(face_editor_mouth_position_horizontal, [ -1, 1 ], [ -0.05, 0.05 ])
    expression[0, 20, 0] += numpy.interp(face_editor_mouth_position_horizontal, [ -1, 1 ], [ -0.04, 0.04 ])
    if face_editor_mouth_position_vertical > 0:
        expression[0, 19, 1] -= numpy.interp(face_editor_mouth_position_vertical, [ -1, 1 ], [ -0.04, 0.04 ])
        expression[0, 20, 1] -= numpy.interp(face_editor_mouth_position_vertical, [ -1, 1 ], [ -0.02, 0.02 ])
    else:
        expression[0, 19, 1] -= numpy.interp(face_editor_mouth_position_vertical, [ -1, 1 ], [ -0.05, 0.05 ])
        expression[0, 20, 1] -= numpy.interp(face_editor_mouth_position_vertical, [ -1, 1 ], [ -0.04, 0.04 ])
    return expression


def edit_mouth_pout(expression : LivePortraitExpression) -> LivePortraitExpression:
    face_editor_mouth_pout = state_manager.get_item('face_editor_mouth_pout')
    if face_editor_mouth_pout > 0:
        expression[0, 19, 1] -= numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.022, 0.022 ])
        expression[0, 19, 2] += numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.025, 0.025 ])
        expression[0, 20, 2] -= numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.002, 0.002 ])
    else:
        expression[0, 19, 1] += numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.022, 0.022 ])
        expression[0, 19, 2] += numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.025, 0.025 ])
        expression[0, 20, 2] -= numpy.interp(face_editor_mouth_pout, [ -1, 1 ], [ -0.002, 0.002 ])
    return expression


def edit_mouth_purse(expression : LivePortraitExpression) -> LivePortraitExpression:
    face_editor_mouth_purse = state_manager.get_item('face_editor_mouth_purse')
    if face_editor_mouth_purse > 0:
        expression[0, 19, 1] -= numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.04, 0.04 ])
        expression[0, 19, 2] -= numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.02, 0.02 ])
    else:
        expression[0, 14, 1] -= numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.02, 0.02 ])
        expression[0, 17, 2] += numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.01, 0.01 ])
        expression[0, 19, 2] -= numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.015, 0.015 ])
        expression[0, 20, 2] -= numpy.interp(face_editor_mouth_purse, [ -1, 1 ], [ -0.002, 0.002 ])
    return expression


def edit_mouth_smile(expression : LivePortraitExpression) -> LivePortraitExpression:
    face_editor_mouth_smile = state_manager.get_item('face_editor_mouth_smile')
    if face_editor_mouth_smile > 0:
        expression[0, 20, 1] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.015, 0.015 ])
        expression[0, 14, 1] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.025, 0.025 ])
        expression[0, 17, 1] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.01, 0.01 ])
        expression[0, 17, 2] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.004, 0.004 ])
        expression[0, 3, 1] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.0045, 0.0045 ])
        expression[0, 7, 1] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.0045, 0.0045 ])
    else:
        expression[0, 14, 1] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.02, 0.02 ])
        expression[0, 17, 1] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.003, 0.003 ])
        expression[0, 19, 1] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.02, 0.02 ])
        expression[0, 19, 2] -= numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.005, 0.005 ])
        expression[0, 20, 2] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.01, 0.01 ])
        expression[0, 3, 1] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.0045, 0.0045 ])
        expression[0, 7, 1] += numpy.interp(face_editor_mouth_smile, [ -1, 1 ], [ -0.0045, 0.0045 ])
    return expression


def edit_head_rotation(pitch : LivePortraitPitch, yaw : LivePortraitYaw, roll : LivePortraitRoll) -> LivePortraitRotation:
    face_editor_head_pitch = state_manager.get_item('face_editor_head_pitch')
    face_editor_head_yaw = state_manager.get_item('face_editor_head_yaw')
    face_editor_head_roll = state_manager.get_item('face_editor_head_roll')
    edit_pitch = pitch + float(numpy.interp(face_editor_head_pitch, [ -1, 1 ], [ 20, -20 ]))
    edit_yaw = yaw + float(numpy.interp(face_editor_head_yaw, [ -1, 1 ], [ 60, -60 ]))
    edit_roll = roll + float(numpy.interp(face_editor_head_roll, [ -1, 1 ], [ -15, 15 ]))
    edit_pitch, edit_yaw, edit_roll = limit_euler_angles(pitch, yaw, roll, edit_pitch, edit_yaw, edit_roll)
    rotation = create_rotation(edit_pitch, edit_yaw, edit_roll)
    return rotation


def calc_distance_ratio(face_landmark_68 : FaceLandmark68, top_index : int, bottom_index : int, left_index : int, right_index : int) -> float:
    vertical_direction = face_landmark_68[top_index] - face_landmark_68[bottom_index]
    horizontal_direction = face_landmark_68[left_index] - face_landmark_68[right_index]
    distance_ratio = float(numpy.linalg.norm(vertical_direction) / (numpy.linalg.norm(horizontal_direction) + 1e-6))
    return distance_ratio
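

# Illustrative sketch, not part of the original module: calc_distance_ratio() above
# measures openness as the vertical gap over the horizontal extent. With hypothetical
# eye landmarks whose lids sit 4 px apart and whose corners sit 20 px apart, the
# ratio is 4 / 20 = 0.2. All coordinates are invented for demonstration.
def _sketch_distance_ratio() -> None:
    face_landmark_68 = numpy.zeros((68, 2), numpy.float32)
    face_landmark_68[37] = (10, 8) # upper eyelid (hypothetical)
    face_landmark_68[40] = (10, 12) # lower eyelid (hypothetical)
    face_landmark_68[36] = (0, 10) # outer eye corner (hypothetical)
    face_landmark_68[39] = (20, 10) # inner eye corner (hypothetical)
    assert abs(calc_distance_ratio(face_landmark_68, 37, 40, 39, 36) - 0.2) < 1e-3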


def prepare_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
    model_size = get_model_options().get('size')
    prepare_size = (model_size[0] // 2, model_size[1] // 2)
    crop_vision_frame = cv2.resize(crop_vision_frame, prepare_size, interpolation = cv2.INTER_AREA)
    crop_vision_frame = crop_vision_frame[:, :, ::-1] / 255.0
    crop_vision_frame = numpy.expand_dims(crop_vision_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
    return crop_vision_frame


def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
    crop_vision_frame = crop_vision_frame.transpose(1, 2, 0).clip(0, 1)
    crop_vision_frame = (crop_vision_frame * 255.0)
    crop_vision_frame = crop_vision_frame.astype(numpy.uint8)[:, :, ::-1]
    return crop_vision_frame
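

# Illustrative sketch, not part of the original module: prepare_crop_frame() and
# normalize_crop_frame() above are near-inverses around the ONNX models. The resize
# to half the template size is mirrored inline here to avoid the state-managed model
# lookup; shapes assume the 'ffhq_512' template (512 x 512).
def _sketch_frame_round_trip() -> None:
    crop_vision_frame = numpy.random.randint(0, 255, (512, 512, 3)).astype(numpy.uint8)
    model_input = cv2.resize(crop_vision_frame, (256, 256), interpolation = cv2.INTER_AREA)
    model_input = model_input[:, :, ::-1] / 255.0 # BGR to RGB, scale to [0, 1]
    model_input = numpy.expand_dims(model_input.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
    assert model_input.shape == (1, 3, 256, 256) # NCHW tensor the models consume
    restored = normalize_crop_frame(model_input[0]) # CHW float back to HWC uint8 BGR
    assert restored.shape == (256, 256, 3) and restored.dtype == numpy.uint8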


def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
    pass


def process_frame(inputs : FaceEditorInputs) -> VisionFrame:
    reference_faces = inputs.get('reference_faces')
    target_vision_frame = inputs.get('target_vision_frame')
    many_faces = sort_and_filter_faces(get_many_faces([ target_vision_frame ]))

    if state_manager.get_item('face_selector_mode') == 'many':
        if many_faces:
            for target_face in many_faces:
                target_vision_frame = edit_face(target_face, target_vision_frame)
    if state_manager.get_item('face_selector_mode') == 'one':
        target_face = get_one_face(many_faces)
        if target_face:
            target_vision_frame = edit_face(target_face, target_vision_frame)
    if state_manager.get_item('face_selector_mode') == 'reference':
        similar_faces = find_similar_faces(many_faces, reference_faces, state_manager.get_item('reference_face_distance'))
        if similar_faces:
            for similar_face in similar_faces:
                target_vision_frame = edit_face(similar_face, target_vision_frame)
    return target_vision_frame
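

# Illustrative sketch, not part of the original module: how one frame would flow
# through process_frame() above once the program state (model choice, selector mode,
# slider values) has been applied elsewhere. The file paths are hypothetical.
def _sketch_process_single_frame() -> None:
    target_vision_frame = read_static_image('target.jpg') # hypothetical input path
    output_vision_frame = process_frame(
    {
        'reference_faces': None,
        'target_vision_frame': target_vision_frame
    })
    write_image('output.jpg', output_vision_frame) # hypothetical output path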


def process_frames(source_path : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None:
    reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None

    for queue_payload in process_manager.manage(queue_payloads):
        target_vision_path = queue_payload['frame_path']
        target_vision_frame = read_image(target_vision_path)
        output_vision_frame = process_frame(
        {
            'reference_faces': reference_faces,
            'target_vision_frame': target_vision_frame
        })
        write_image(target_vision_path, output_vision_frame)
        update_progress(1)


def process_image(source_path : str, target_path : str, output_path : str) -> None:
    reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None
    target_vision_frame = read_static_image(target_path)
    output_vision_frame = process_frame(
    {
        'reference_faces': reference_faces,
        'target_vision_frame': target_vision_frame
    })
    write_image(output_path, output_vision_frame)


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
    processors.multi_process_frames(None, temp_frame_paths, process_frames)