jhj0517 committed
Commit e5db983 · 1 Parent(s): f742699

Auto cast torch for faster speed
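The change wraps the per-frame generation loop in torch.autocast, so that on CUDA the matmul- and convolution-heavy pipeline calls run in reduced precision (float16) while precision-sensitive ops stay in float32, which typically speeds up inference. A minimal sketch of the same pattern; the model and input below are illustrative stand-ins, not code from this repository:

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.nn.Linear(512, 512).to(device)   # stand-in for the LivePortrait pipeline
x = torch.randn(8, 512, device=device)

# enabled=(device == "cuda") mirrors the guard used in this commit:
# the context is a no-op off-GPU, so CPU inference keeps full float32.
with torch.autocast(device_type=device, enabled=(device == "cuda")):
    y = model(x)

print(y.dtype)  # torch.float16 under CUDA autocast, torch.float32 on CPU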
modules/live_portrait/live_portrait_inferencer.py
CHANGED
@@ -278,49 +278,50 @@ class LivePortraitInferencer:
         d_0_es = None

         psi = None
-        for i in range(total_length):
+        with torch.autocast(device_type=self.device, enabled=(self.device == "cuda")):
+            for i in range(total_length):

-            if i == 0:
-                psi = self.psi_list[i]
-                s_info = psi.x_s_info
-                s_es = ExpressionSet(erst=(s_info['kp'] + s_info['exp'], torch.Tensor([0, 0, 0]), s_info['scale'], s_info['t']))
+                if i == 0:
+                    psi = self.psi_list[i]
+                    s_info = psi.x_s_info
+                    s_es = ExpressionSet(erst=(s_info['kp'] + s_info['exp'], torch.Tensor([0, 0, 0]), s_info['scale'], s_info['t']))

-            new_es = ExpressionSet(es=s_es)
+                new_es = ExpressionSet(es=s_es)

-            if i < driving_length:
-                d_i_info = self.driving_values[i]
-                d_i_r = torch.Tensor([d_i_info['pitch'], d_i_info['yaw'], d_i_info['roll']]) # .float().to(device="cuda:0")
+                if i < driving_length:
+                    d_i_info = self.driving_values[i]
+                    d_i_r = torch.Tensor([d_i_info['pitch'], d_i_info['yaw'], d_i_info['roll']]) # .float().to(device="cuda:0")

-                if d_0_es is None:
-                    d_0_es = ExpressionSet(erst = (d_i_info['exp'], d_i_r, d_i_info['scale'], d_i_info['t']))
+                    if d_0_es is None:
+                        d_0_es = ExpressionSet(erst = (d_i_info['exp'], d_i_r, d_i_info['scale'], d_i_info['t']))

-                    self.retargeting(s_es.e, d_0_es.e, retargeting_eyes, (11, 13, 15, 16))
-                    self.retargeting(s_es.e, d_0_es.e, retargeting_mouth, (14, 17, 19, 20))
+                        self.retargeting(s_es.e, d_0_es.e, retargeting_eyes, (11, 13, 15, 16))
+                        self.retargeting(s_es.e, d_0_es.e, retargeting_mouth, (14, 17, 19, 20))

-                new_es.e += d_i_info['exp'] - d_0_es.e
-                new_es.r += d_i_r - d_0_es.r
-                new_es.t += d_i_info['t'] - d_0_es.t
+                    new_es.e += d_i_info['exp'] - d_0_es.e
+                    new_es.r += d_i_r - d_0_es.r
+                    new_es.t += d_i_info['t'] - d_0_es.t

-            r_new = get_rotation_matrix(
-                s_info['pitch'] + new_es.r[0], s_info['yaw'] + new_es.r[1], s_info['roll'] + new_es.r[2])
-            d_new = new_es.s * (new_es.e @ r_new) + new_es.t
-            d_new = self.pipeline.stitching(psi.x_s_user, d_new)
-            crop_out = self.pipeline.warp_decode(psi.f_s_user, psi.x_s_user, d_new)
-            crop_out = self.pipeline.parse_output(crop_out['out'])[0]
+                r_new = get_rotation_matrix(
+                    s_info['pitch'] + new_es.r[0], s_info['yaw'] + new_es.r[1], s_info['roll'] + new_es.r[2])
+                d_new = new_es.s * (new_es.e @ r_new) + new_es.t
+                d_new = self.pipeline.stitching(psi.x_s_user, d_new)
+                crop_out = self.pipeline.warp_decode(psi.f_s_user, psi.x_s_user, d_new)
+                crop_out = self.pipeline.parse_output(crop_out['out'])[0]

-            crop_with_fullsize = cv2.warpAffine(crop_out, psi.crop_trans_m, get_rgb_size(psi.src_rgb),
-                                                cv2.INTER_LINEAR)
-            out = np.clip(psi.mask_ori * crop_with_fullsize + (1 - psi.mask_ori) * psi.src_rgb, 0, 255).astype(
-                np.uint8)
+                crop_with_fullsize = cv2.warpAffine(crop_out, psi.crop_trans_m, get_rgb_size(psi.src_rgb),
+                                                    cv2.INTER_LINEAR)
+                out = np.clip(psi.mask_ori * crop_with_fullsize + (1 - psi.mask_ori) * psi.src_rgb, 0, 255).astype(
+                    np.uint8)

-            out_frame_path = get_auto_incremental_file_path(os.path.join(self.output_dir, "temp", "video_frames", "out"), "png")
-            save_image(out, out_frame_path)
+                out_frame_path = get_auto_incremental_file_path(os.path.join(self.output_dir, "temp", "video_frames", "out"), "png")
+                save_image(out, out_frame_path)

-            progress(i/total_length, desc=f"Generating frames {i}/{total_length} ..")
+                progress(i/total_length, desc=f"Generating frames {i}/{total_length} ..")

-        video_path = create_video_from_frames(TEMP_VIDEO_OUT_FRAMES_DIR, frame_rate=vid_info.frame_rate, output_dir=os.path.join(self.output_dir, "videos"))
+            video_path = create_video_from_frames(TEMP_VIDEO_OUT_FRAMES_DIR, frame_rate=vid_info.frame_rate, output_dir=os.path.join(self.output_dir, "videos"))

-        return video_path
+            return video_path

     def download_if_no_models(self,
                               model_type: str = ModelType.HUMAN.value,
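Note that the tail of the method (the create_video_from_frames call and return video_path) is marked changed as well, consistent with it being indented into the with block rather than edited; the enabled=(self.device == "cuda") guard makes the whole context a no-op on non-CUDA devices, so CPU runs keep full float32 precision and only the GPU path trades precision for throughput.

For reference, the compositing step near the end of the loop is a plain alpha blend: the warped crop is pasted back over the source frame wherever the crop mask is set. A toy NumPy sketch of the same formula, with illustrative shapes and values standing in for the real frames:

import numpy as np

h, w = 4, 4
src_rgb = np.full((h, w, 3), 200, dtype=np.float32)    # stand-in for psi.src_rgb
crop_full = np.full((h, w, 3), 50, dtype=np.float32)   # stand-in for crop_with_fullsize
mask = np.zeros((h, w, 1), dtype=np.float32)           # stand-in for psi.mask_ori
mask[1:3, 1:3] = 1.0                                   # the crop wins inside this region

# Same formula as in the loop: mask * crop + (1 - mask) * source
out = np.clip(mask * crop_full + (1 - mask) * src_rgb, 0, 255).astype(np.uint8)
print(out[0, 0], out[1, 1])   # [200 200 200] outside the mask, [50 50 50] inside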