v0.0.2.beta
app.py
CHANGED
@@ -25,6 +25,12 @@ def sadtalker_demo():
                     <a style='font-size:18px;color: #efefef' href='https://sadtalker.github.io'>Homepage</a> \
                     <a style='font-size:18px;color: #efefef' href='https://github.com/Winfredy/SadTalker'> Github </div>")

+
+    gr.Markdown("""
+        <b>You may duplicate the space and upgrade to GPU in settings for better performance and faster inference without waiting in the queue. <a style='display:inline-block' href="https://huggingface.co/spaces/vinthony/SadTalker?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a></b> \
+        <br/><b>Alternatively, try our GitHub <a href=https://github.com/Winfredy/SadTalker> code </a> on your own GPU. </b> <a style='display:inline-block' href="https://github.com/Winfredy/SadTalker"><img src="https://img.shields.io/github/stars/Winfredy/SadTalker?style=social"/></a> \
+        """)
+
     with gr.Row().style(equal_height=False):
         with gr.Column(variant='panel'):
             with gr.Tabs(elem_id="sadtalker_source_image"):
@@ -130,7 +136,7 @@ def sadtalker_demo():
                             enhancer],
                     outputs=[gen_video],
                     fn=sad_talker.test,
-                    cache_examples=
+                    cache_examples=os.getenv('SYSTEM') == 'spaces') #

     submit.click(
         fn=sad_talker.test,
@@ -148,6 +154,7 @@ def sadtalker_demo():
 if __name__ == "__main__":

     demo = sadtalker_demo()
-    demo.
+    demo.queue(max_size=10)
+    demo.launch(debug=True)

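
For orientation, a minimal, self-contained sketch of the Gradio Blocks pattern these hunks touch, assuming Gradio 3.x (consistent with the .style(equal_height=False) call above). The echo function and component names are placeholders, not code from app.py.

import os
import gradio as gr

def echo(text):              # placeholder standing in for sad_talker.test
    return text

with gr.Blocks() as demo:
    gr.Markdown("banner text goes here, as in the added hunk above")
    inp = gr.Textbox(label="input")
    out = gr.Textbox(label="output")
    gr.Button("Generate").click(fn=echo, inputs=[inp], outputs=[out])
    # cache_examples pre-computes fn over the examples at startup; gating it
    # on os.getenv('SYSTEM') == 'spaces' enables caching only on Hugging Face
    # Spaces, which sets SYSTEM=spaces in the runtime environment.
    gr.Examples(examples=[["hello"]], inputs=[inp], outputs=[out],
                fn=echo, cache_examples=os.getenv('SYSTEM') == 'spaces')

if __name__ == "__main__":
    demo.queue(max_size=10)  # serialize requests; at most 10 may wait in the queue
    demo.launch(debug=True)  # debug=True blocks and prints errors to the console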
src/face3d/__pycache__/extract_kp_videos.cpython-38.pyc
CHANGED
Binary files a/src/face3d/__pycache__/extract_kp_videos.cpython-38.pyc and b/src/face3d/__pycache__/extract_kp_videos.cpython-38.pyc differ
src/face3d/extract_kp_videos.py
CHANGED
@@ -13,7 +13,7 @@ from torch.multiprocessing import Pool, Process, set_start_method

 class KeypointExtractor():
     def __init__(self, device):
-        self.detector = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device=
+        self.detector = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device=device)

     def extract_keypoint(self, images, name=None, info=True):
         if isinstance(images, list):
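
The fix above passes the caller-supplied device argument through to face_alignment.FaceAlignment rather than a fixed value. A hedged usage sketch, assuming a face_alignment release where LandmarksType._2D still exists (newer releases renamed it TWO_D); the image path is hypothetical:

import face_alignment
from skimage import io

# Mirrors the patched constructor: the device string comes from the caller
# ('cpu' or 'cuda'), matching KeypointExtractor.__init__ above.
detector = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device='cpu')

image = io.imread('examples/face.png')     # hypothetical input image
landmarks = detector.get_landmarks(image)  # list of (68, 2) arrays, or None if no face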
src/gradio_demo.py
CHANGED
@@ -42,8 +42,6 @@ class SadTalker():

         self.free_view_checkpoint = os.path.join( checkpoint_path, 'facevid2vid_00189-model.pth.tar')

-
-
         self.lazy_load = lazy_load

         if not self.lazy_load:
@@ -75,6 +73,7 @@ class SadTalker():
         self.mapping_checkpoint = os.path.join(self.checkpoint_path, 'mapping_00229-model.pth.tar')
         self.facerender_yaml_path = os.path.join(self.config_path, 'facerender.yaml')

+        print(self.mapping_checkpoint)
         print(self.free_view_checkpoint)
         self.animate_from_coeff = AnimateFromCoeff(self.free_view_checkpoint, self.mapping_checkpoint,
                                                     self.facerender_yaml_path, self.device)
@@ -117,7 +116,7 @@ class SadTalker():
         batch = get_data(first_coeff_path, audio_path, self.device, ref_eyeblink_coeff_path=None, still=still_mode) # longer audio?
         coeff_path = self.audio_to_coeff.generate(batch, save_dir, pose_style)
         #coeff2video
-        batch_size =
+        batch_size = 8
         data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path, batch_size, still_mode=still_mode, preprocess=preprocess)
         return_path = self.animate_from_coeff.generate(data, save_dir, pic_path, crop_info, enhancer='gfpgan' if use_enhancer else None, preprocess=preprocess)
         video_name = data['video_name']
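
On the batch_size = 8 change: the value is handed to get_facerender_data, which presumably groups that many frames of coefficients per forward pass of the face renderer, trading GPU memory for throughput. A small hypothetical variant if you want it tunable without editing the file (FACERENDER_BATCH_SIZE is not an existing variable in the repo):

import os

# Hypothetical: fall back to the commit's value of 8 when the env var is unset.
batch_size = int(os.getenv('FACERENDER_BATCH_SIZE', '8'))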