Spaces:
Running
on
Zero
Running
on
Zero
Update difpoint/inference.py
Browse files — difpoint/inference.py (+3 −3)
difpoint/inference.py
CHANGED
@@ -162,19 +162,19 @@ class Inferencer(object):
|
|
162 |
self.device = 'cuda'
|
163 |
from difpoint.model import get_model
|
164 |
self.point_diffusion = get_model()
|
165 |
-
ckpt = torch.load('/… [removed line truncated in page extraction; original checkpoint path not recoverable]
|
166 |
|
167 |
self.point_diffusion.load_state_dict(ckpt['model'])
|
168 |
print('model', self.point_diffusion.children())
|
169 |
self.point_diffusion.eval()
|
170 |
self.point_diffusion.to(self.device)
|
171 |
|
172 |
-
lm_croper_checkpoint = … [removed line truncated in page extraction; original checkpoint path not recoverable]
|
173 |
self.croper = Croper(lm_croper_checkpoint)
|
174 |
|
175 |
self.norm_info = dict(np.load(r'difpoint/datasets/norm_info_d6.5_c8.5_vox1_train.npz'))
|
176 |
|
177 |
-
wav2lip_checkpoint = '… [removed line truncated in page extraction; original checkpoint path not recoverable]
|
178 |
self.wav2lip_model = AudioEncoder(wav2lip_checkpoint, 'cuda')
|
179 |
self.wav2lip_model.cuda()
|
180 |
self.wav2lip_model.eval()
|
|
|
162 |
self.device = 'cuda'
|
163 |
from difpoint.model import get_model
|
164 |
self.point_diffusion = get_model()
|
165 |
+
ckpt = torch.load('./downloaded_repo/ckpts/KDTalker.pth', weights_only=False)
|
166 |
|
167 |
self.point_diffusion.load_state_dict(ckpt['model'])
|
168 |
print('model', self.point_diffusion.children())
|
169 |
self.point_diffusion.eval()
|
170 |
self.point_diffusion.to(self.device)
|
171 |
|
172 |
+
lm_croper_checkpoint = './downloaded_repo/ckpts/shape_predictor_68_face_landmarks.dat'
|
173 |
self.croper = Croper(lm_croper_checkpoint)
|
174 |
|
175 |
self.norm_info = dict(np.load(r'difpoint/datasets/norm_info_d6.5_c8.5_vox1_train.npz'))
|
176 |
|
177 |
+
wav2lip_checkpoint = './downloaded_repo/ckpts/wav2lip.pth'
|
178 |
self.wav2lip_model = AudioEncoder(wav2lip_checkpoint, 'cuda')
|
179 |
self.wav2lip_model.cuda()
|
180 |
self.wav2lip_model.eval()
|