jhj0517 committed
Commit e5db983 · 1 Parent(s): f742699

Auto cast torch for faster speed

modules/live_portrait/live_portrait_inferencer.py CHANGED
@@ -278,49 +278,50 @@ class LivePortraitInferencer:
         d_0_es = None
 
         psi = None
-        for i in range(total_length):
-
-            if i == 0:
-                psi = self.psi_list[i]
-                s_info = psi.x_s_info
-                s_es = ExpressionSet(erst=(s_info['kp'] + s_info['exp'], torch.Tensor([0, 0, 0]), s_info['scale'], s_info['t']))
-
-            new_es = ExpressionSet(es=s_es)
-
-            if i < driving_length:
-                d_i_info = self.driving_values[i]
-                d_i_r = torch.Tensor([d_i_info['pitch'], d_i_info['yaw'], d_i_info['roll']]) # .float().to(device="cuda:0")
-
-                if d_0_es is None:
-                    d_0_es = ExpressionSet(erst = (d_i_info['exp'], d_i_r, d_i_info['scale'], d_i_info['t']))
-
-                    self.retargeting(s_es.e, d_0_es.e, retargeting_eyes, (11, 13, 15, 16))
-                    self.retargeting(s_es.e, d_0_es.e, retargeting_mouth, (14, 17, 19, 20))
-
-                new_es.e += d_i_info['exp'] - d_0_es.e
-                new_es.r += d_i_r - d_0_es.r
-                new_es.t += d_i_info['t'] - d_0_es.t
-
-            r_new = get_rotation_matrix(
-                s_info['pitch'] + new_es.r[0], s_info['yaw'] + new_es.r[1], s_info['roll'] + new_es.r[2])
-            d_new = new_es.s * (new_es.e @ r_new) + new_es.t
-            d_new = self.pipeline.stitching(psi.x_s_user, d_new)
-            crop_out = self.pipeline.warp_decode(psi.f_s_user, psi.x_s_user, d_new)
-            crop_out = self.pipeline.parse_output(crop_out['out'])[0]
-
-            crop_with_fullsize = cv2.warpAffine(crop_out, psi.crop_trans_m, get_rgb_size(psi.src_rgb),
-                                                cv2.INTER_LINEAR)
-            out = np.clip(psi.mask_ori * crop_with_fullsize + (1 - psi.mask_ori) * psi.src_rgb, 0, 255).astype(
-                np.uint8)
-
-            out_frame_path = get_auto_incremental_file_path(os.path.join(self.output_dir, "temp", "video_frames", "out"), "png")
-            save_image(out, out_frame_path)
-
-            progress(i/total_length, desc=f"Generating frames {i}/{total_length} ..")
-
-        video_path = create_video_from_frames(TEMP_VIDEO_OUT_FRAMES_DIR, frame_rate=vid_info.frame_rate, output_dir=os.path.join(self.output_dir, "videos"))
-
-        return video_path
+        with torch.autocast(device_type=self.device, enabled=(self.device == "cuda")):
+            for i in range(total_length):
+
+                if i == 0:
+                    psi = self.psi_list[i]
+                    s_info = psi.x_s_info
+                    s_es = ExpressionSet(erst=(s_info['kp'] + s_info['exp'], torch.Tensor([0, 0, 0]), s_info['scale'], s_info['t']))
+
+                new_es = ExpressionSet(es=s_es)
+
+                if i < driving_length:
+                    d_i_info = self.driving_values[i]
+                    d_i_r = torch.Tensor([d_i_info['pitch'], d_i_info['yaw'], d_i_info['roll']]) # .float().to(device="cuda:0")
+
+                    if d_0_es is None:
+                        d_0_es = ExpressionSet(erst = (d_i_info['exp'], d_i_r, d_i_info['scale'], d_i_info['t']))
+
+                        self.retargeting(s_es.e, d_0_es.e, retargeting_eyes, (11, 13, 15, 16))
+                        self.retargeting(s_es.e, d_0_es.e, retargeting_mouth, (14, 17, 19, 20))
+
+                    new_es.e += d_i_info['exp'] - d_0_es.e
+                    new_es.r += d_i_r - d_0_es.r
+                    new_es.t += d_i_info['t'] - d_0_es.t
+
+                r_new = get_rotation_matrix(
+                    s_info['pitch'] + new_es.r[0], s_info['yaw'] + new_es.r[1], s_info['roll'] + new_es.r[2])
+                d_new = new_es.s * (new_es.e @ r_new) + new_es.t
+                d_new = self.pipeline.stitching(psi.x_s_user, d_new)
+                crop_out = self.pipeline.warp_decode(psi.f_s_user, psi.x_s_user, d_new)
+                crop_out = self.pipeline.parse_output(crop_out['out'])[0]
+
+                crop_with_fullsize = cv2.warpAffine(crop_out, psi.crop_trans_m, get_rgb_size(psi.src_rgb),
+                                                    cv2.INTER_LINEAR)
+                out = np.clip(psi.mask_ori * crop_with_fullsize + (1 - psi.mask_ori) * psi.src_rgb, 0, 255).astype(
+                    np.uint8)
+
+                out_frame_path = get_auto_incremental_file_path(os.path.join(self.output_dir, "temp", "video_frames", "out"), "png")
+                save_image(out, out_frame_path)
+
+                progress(i/total_length, desc=f"Generating frames {i}/{total_length} ..")
+
+            video_path = create_video_from_frames(TEMP_VIDEO_OUT_FRAMES_DIR, frame_rate=vid_info.frame_rate, output_dir=os.path.join(self.output_dir, "videos"))
+
+            return video_path
 
     def download_if_no_models(self,
                               model_type: str = ModelType.HUMAN.value,
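
A note on what the change does: torch.autocast makes eligible ops inside the context (matmuls, convolutions) run in a lower-precision dtype, float16 by default on CUDA, which is where the speedup comes from. Passing enabled=(self.device == "cuda") turns the context into a no-op when the inferencer runs on CPU, so non-CUDA behavior is unchanged. Below is a minimal, self-contained sketch of the same pattern; the model and inputs are illustrative stand-ins, not code from this repository.

import torch

# Hypothetical stand-ins for the inferencer's pipeline and per-frame inputs.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.nn.Linear(64, 64).to(device).eval()
frames = [torch.randn(1, 64, device=device) for _ in range(4)]

outputs = []
with torch.no_grad():
    # Same guard as the commit: autocast is active only on CUDA, where it
    # dispatches matmuls/convs in float16; elsewhere the context does nothing.
    with torch.autocast(device_type=device, enabled=(device == "cuda")):
        for x in frames:
            outputs.append(model(x))

One consequence worth keeping in mind: tensors produced inside an autocast region may come out as float16 even though the model weights stay float32, so downstream code that converts them to images (as parse_output does above) should tolerate the narrower dtype.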