Spaces:
Running
on
Zero
Running
on
Zero
刘虹雨
committed on
Commit
·
23f9e19
1
Parent(s):
cd8a54f
update code
Browse files
app.py
CHANGED
@@ -433,15 +433,10 @@ def model_define():
|
|
433 |
# Load average latent vector
|
434 |
ws_avg = torch.load(default_config.ws_avg_pkl).to(device)[0]
|
435 |
|
436 |
-
|
437 |
-
base_coff = np.load(
|
438 |
-
'pretrained_model/temp.npy').astype(
|
439 |
-
np.float32)
|
440 |
-
base_coff = torch.from_numpy(base_coff).float()
|
441 |
-
Faceverse = Faceverse_manager(device=device, base_coeff=base_coff)
|
442 |
|
443 |
return motion_aware_render_model, sample_steps, DiT_model, \
|
444 |
-
vae_triplane, image_encoder, dinov2, dino_img_processor, clip_image_processor, triplane_std, triplane_mean, ws_avg,
|
445 |
|
446 |
|
447 |
def duplicate_batch(tensor, batch_size=2):
|
@@ -482,9 +477,15 @@ def avatar_generation(items, save_path_base, video_path_input, source_type, is_s
|
|
482 |
image_encoder.to(device)
|
483 |
vae_triplane.to(device)
|
484 |
dinov2.to(device)
|
485 |
-
Faceverse.to(device)
|
486 |
ws_avg = ws_avg.to(device)
|
487 |
DiT_model = DiT_model.to(device)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
488 |
if source_type == 'example':
|
489 |
input_img_fvid = './demo_data/source_img/img_generate_different_domain/coeffs/demo_imgs'
|
490 |
input_img_motion = './demo_data/source_img/img_generate_different_domain/motions/demo_imgs'
|
@@ -999,7 +1000,7 @@ if __name__ == '__main__':
|
|
999 |
image_folder = "./demo_data/source_img/img_generate_different_domain/images512x512/demo_imgs"
|
1000 |
example_img_names = os.listdir(image_folder)
|
1001 |
render_model, sample_steps, DiT_model, \
|
1002 |
-
vae_triplane, image_encoder, dinov2, dino_img_processor, clip_image_processor, std, mean, ws_avg,
|
1003 |
controlnet_path = './pretrained_model/control'
|
1004 |
controlnet = ControlNetModel.from_pretrained(
|
1005 |
controlnet_path, torch_dtype=torch.float16
|
|
|
433 |
# Load average latent vector
|
434 |
ws_avg = torch.load(default_config.ws_avg_pkl).to(device)[0]
|
435 |
|
436 |
+
|
|
|
|
|
|
|
|
|
|
|
437 |
|
438 |
return motion_aware_render_model, sample_steps, DiT_model, \
|
439 |
+
vae_triplane, image_encoder, dinov2, dino_img_processor, clip_image_processor, triplane_std, triplane_mean, ws_avg, device, input_process_model
|
440 |
|
441 |
|
442 |
def duplicate_batch(tensor, batch_size=2):
|
|
|
477 |
image_encoder.to(device)
|
478 |
vae_triplane.to(device)
|
479 |
dinov2.to(device)
|
|
|
480 |
ws_avg = ws_avg.to(device)
|
481 |
DiT_model = DiT_model.to(device)
|
482 |
+
# Set up FaceVerse for animation
|
483 |
+
base_coff = np.load(
|
484 |
+
'pretrained_model/temp.npy').astype(
|
485 |
+
np.float32)
|
486 |
+
base_coff = torch.from_numpy(base_coff).float()
|
487 |
+
Faceverse = Faceverse_manager(device=device, base_coeff=base_coff)
|
488 |
+
|
489 |
if source_type == 'example':
|
490 |
input_img_fvid = './demo_data/source_img/img_generate_different_domain/coeffs/demo_imgs'
|
491 |
input_img_motion = './demo_data/source_img/img_generate_different_domain/motions/demo_imgs'
|
|
|
1000 |
image_folder = "./demo_data/source_img/img_generate_different_domain/images512x512/demo_imgs"
|
1001 |
example_img_names = os.listdir(image_folder)
|
1002 |
render_model, sample_steps, DiT_model, \
|
1003 |
+
vae_triplane, image_encoder, dinov2, dino_img_processor, clip_image_processor, std, mean, ws_avg, device, input_process_model = model_define()
|
1004 |
controlnet_path = './pretrained_model/control'
|
1005 |
controlnet = ControlNetModel.from_pretrained(
|
1006 |
controlnet_path, torch_dtype=torch.float16
|