YinuoGuo27 committed
Commit 32d77e3 · verified · 1 Parent(s): 63ad0a5

Update difpoint/inference.py

Files changed (1)
  1. difpoint/inference.py +3 -5
difpoint/inference.py CHANGED
@@ -169,7 +169,7 @@ class Inferencer(object):
         self.point_diffusion.to(self.device)
 
         self.lm_croper_checkpoint = './downloaded_repo/ckpts/shape_predictor_68_face_landmarks.dat'
-        #self.croper = Croper(lm_croper_checkpoint)
+        self.croper = Croper(lm_croper_checkpoint)
 
         self.norm_info = dict(np.load(r'difpoint/datasets/norm_info_d6.5_c8.5_vox1_train.npz'))
 
@@ -182,7 +182,7 @@ class Inferencer(object):
         self.inf_cfg = OmegaConf.load("difpoint/configs/onnx_infer.yaml")
         self.inf_cfg.infer_params.flag_pasteback = False
 
-        #self.live_portrait_pipeline = FasterLivePortraitPipeline(cfg=self.inf_cfg, is_animal=False)
+        self.live_portrait_pipeline = FasterLivePortraitPipeline(cfg=self.inf_cfg, is_animal=False)
         #ret = self.live_portrait_pipeline.prepare_source(source_image)
 
         print('#'*25+f'End initialization, cost time {time.time()-st}'+'#'*25)
@@ -273,9 +273,7 @@ class Inferencer(object):
     # 2024.06.26
     @torch.no_grad()
     def generate_with_audio_img(self, upload_audio_path, tts_audio_path, audio_type, image_path, smoothed_pitch, smoothed_yaw, smoothed_roll, smoothed_t, save_path='results'):
-        self.live_portrait_pipeline = FasterLivePortraitPipeline(cfg=self.inf_cfg, is_animal=False)
-        self.croper = Croper(self.lm_croper_checkpoint)
-        self.live_portrait_pipeline.init()
+
         if audio_type == 'upload':
             audio_path = upload_audio_path
         elif audio_type == 'tts':
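
In effect, the commit uncomments the Croper and FasterLivePortraitPipeline construction in Inferencer.__init__ and drops the per-call construction (including the live_portrait_pipeline.init() call) from generate_with_audio_img, so both helpers are built once at initialization and reused across generations. Note that the uncommented line passes lm_croper_checkpoint without the self. prefix; unless a local variable of that name exists outside the visible context, self.lm_croper_checkpoint (the attribute assigned one line above, and what the removed per-call code used) is presumably intended. Below is a minimal, self-contained sketch of the resulting pattern, not the repository's code: Croper and FasterLivePortraitPipeline are stubbed and the method signature is trimmed; only the placement of the two constructions mirrors the diff.

# Illustrative sketch of the pattern this commit adopts: construct the heavy
# helpers once in __init__ instead of on every generate_with_audio_img call.
# Croper and FasterLivePortraitPipeline are stand-ins; their real
# definitions live elsewhere in the repository.

class Croper:
    """Stand-in for the dlib-landmark-based cropper."""
    def __init__(self, checkpoint_path):
        self.checkpoint_path = checkpoint_path


class FasterLivePortraitPipeline:
    """Stand-in for the ONNX LivePortrait pipeline."""
    def __init__(self, cfg=None, is_animal=False):
        self.cfg = cfg
        self.is_animal = is_animal


class Inferencer:
    def __init__(self):
        self.lm_croper_checkpoint = './downloaded_repo/ckpts/shape_predictor_68_face_landmarks.dat'
        # Built once at start-up (previously commented out here and rebuilt
        # inside generate_with_audio_img on every call).
        self.croper = Croper(self.lm_croper_checkpoint)
        self.live_portrait_pipeline = FasterLivePortraitPipeline(cfg=None, is_animal=False)

    def generate_with_audio_img(self, upload_audio_path, tts_audio_path, audio_type):
        # After the commit there is no per-call construction here; the
        # instances created in __init__ are reused.
        return upload_audio_path if audio_type == 'upload' else tts_audio_path

Building these objects once avoids reloading the landmark predictor and the ONNX pipeline on every request, at the cost of keeping them in memory for the lifetime of the Inferencer.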