Spaces: Running on Zero

刘虹雨 committed · Commit ee74488 · 1 Parent(s): 6dce7fe · "update code"

app.py CHANGED
@@ -480,11 +480,7 @@ def avatar_generation(items, save_path_base, video_path_input, source_type, is_s
     # ws_avg.to(device)
     # DiT_model.to(device)
     # Set up face verse for animation
-    base_coff = np.load(
-        'pretrained_model/temp.npy').astype(
-        np.float32)
-    base_coff = torch.from_numpy(base_coff).float()
-    Faceverse = Faceverse_manager(device=device, base_coeff=base_coff)
+

     if source_type == 'example':
         input_img_fvid = './demo_data/source_img/img_generate_different_domain/coeffs/demo_imgs'
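This hunk removes the FaceVerse setup from `avatar_generation`; the same lines reappear under `if __name__ == '__main__':` in the final hunk, so the base coefficients are loaded once at startup rather than on every call to the GPU-decorated handler. A minimal sketch of that load-once pattern, with a hypothetical `DummyFaceverseManager` and `generate` standing in for the real `Faceverse_manager` and `avatar_generation`; only the move to `__main__` comes from the diff, the names and the placeholder tensor shape are assumptions:

```python
import torch


class DummyFaceverseManager:
    """Stand-in for Faceverse_manager: holds the base coefficients on a device."""

    def __init__(self, device, base_coeff):
        self.base_coeff = base_coeff.to(device)


def generate(manager, item):
    """Stand-in for avatar_generation: reuses the manager built at startup."""
    # No file loading or manager construction here any more.
    return item, tuple(manager.base_coeff.shape)


if __name__ == '__main__':
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Built once before the app launches, mirroring the block moved under __main__.
    base_coeff = torch.zeros(1, 257)  # placeholder for the array read from temp.npy
    Faceverse = DummyFaceverseManager(device=device, base_coeff=base_coeff)
    print(generate(Faceverse, item='demo'))
```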
@@ -527,9 +523,9 @@ def avatar_generation(items, save_path_base, video_path_input, source_type, is_s
     samples = samples * std + mean
     torch.cuda.empty_cache()
     torch.cuda.ipc_collect()
-    save_frames_path_out = os.path.join(save_path_base, image_name, 'out')
-    save_frames_path_outshow = os.path.join(save_path_base, image_name, 'out_show')
-    save_frames_path_depth = os.path.join(save_path_base, image_name, 'depth')
+    save_frames_path_out = os.path.join(save_path_base, image_name, video_name, 'out')
+    save_frames_path_outshow = os.path.join(save_path_base, image_name, video_name, 'out_show')
+    save_frames_path_depth = os.path.join(save_path_base, image_name, video_name, 'depth')

     os.makedirs(save_frames_path_out, exist_ok=True)
     os.makedirs(save_frames_path_outshow, exist_ok=True)
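The frame dump directories now include `video_name`, so runs of the same source image driven by different videos no longer share (and overwrite) the same `out`, `out_show`, and `depth` folders. A small sketch of the resulting layout, with placeholder values for the three name components:

```python
import os
import tempfile

# Placeholder values; in app.py these come from the function arguments and the driving video.
save_path_base = tempfile.mkdtemp()
image_name, video_name = 'img001', 'driveA'

for sub in ('out', 'out_show', 'depth'):
    path = os.path.join(save_path_base, image_name, video_name, sub)
    os.makedirs(path, exist_ok=True)
    print(path)  # e.g. <save_path_base>/img001/driveA/out
```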
@@ -611,12 +607,10 @@ def avatar_generation(items, save_path_base, video_path_input, source_type, is_s

     # Generate videos
     images_to_video(save_frames_path_out, os.path.join(save_path_base, image_name + '_out.mp4'))
-    images_to_video(save_frames_path_outshow, os.path.join(save_path_base, image_name + '_outshow.mp4'))
     images_to_video(save_frames_path_depth, os.path.join(save_path_base, image_name + '_depth.mp4'))

     logging.info(f"✅ Video generation completed successfully!")
-    return os.path.join(save_path_base, image_name + '_out.mp4'), os.path.join(save_path_base,
-        image_name + '_outshow.mp4'), os.path.join(save_path_base, image_name + '_depth.mp4')
+    return os.path.join(save_path_base, image_name + video_name + '_out.mp4'), os.path.join(save_path_base, image_name + video_name + '_depth.mp4')


 def get_image_base64(path):
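`avatar_generation` now returns two paths (main render and depth) instead of three, and the returned file names gain the `video_name` component, concatenated directly onto `image_name`. Note that the `images_to_video` targets two lines above still use `image_name + '_out.mp4'` without `video_name`; the sketch below only illustrates the returned naming, with placeholder values:

```python
import os

# Placeholder values; in app.py these come from the source image and the driving video.
save_path_base, image_name, video_name = '/tmp/results', 'img001', 'driveA'

# Shape of the value avatar_generation now returns: (out_path, depth_path).
out_path = os.path.join(save_path_base, image_name + video_name + '_out.mp4')
depth_path = os.path.join(save_path_base, image_name + video_name + '_depth.mp4')
print(out_path)    # /tmp/results/img001driveA_out.mp4
print(depth_path)  # /tmp/results/img001driveA_depth.mp4
```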
@@ -630,7 +624,6 @@ def assert_input_image(input_image):
         raise gr.Error("No image selected or uploaded!")

 @spaces.GPU(duration=100)
-@torch.no_grad()
 def process_image(input_image, source_type, is_style, save_dir):
     """ 🎯 Process input_image: run different logic depending on whether it is an example image """
     process_img_input_dir = os.path.join(save_dir, 'input_image')
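The `@torch.no_grad()` decorator is dropped from the `@spaces.GPU`-decorated `process_image`. If gradient tracking still has to be suppressed for the inference calls inside the function, an inline context manager is an equivalent, more local alternative; a sketch of the two forms on a toy model (not the repo's code):

```python
import torch


@torch.no_grad()            # decorator form: disables grad for the whole function
def infer_decorated(model, x):
    return model(x)


def infer_inline(model, x):
    # context-manager form: scopes the no-grad region to just the forward pass
    with torch.no_grad():
        return model(x)


if __name__ == '__main__':
    model = torch.nn.Linear(4, 2)
    x = torch.randn(1, 4)
    y1, y2 = infer_decorated(model, x), infer_inline(model, x)
    print(y1.requires_grad, y2.requires_grad)  # False False
```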
@@ -934,13 +927,7 @@ def launch_gradio_app():
                 autoplay=True
             )

-            output_video_2 = gr.Video(
-                label="Generated Animation Rotate View",
-                format="mp4", height=512, width=512,
-                autoplay=True
-            )
-
-            output_video_3 = gr.Video(
+            output_video_1 = gr.Video(
                 label="Generated Animation Rotate View Depth",
                 format="mp4", height=512, width=512,
                 autoplay=True
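The three result players collapse to two: the existing `output_video` plus `output_video_1` for the depth render; the separate "Rotate View" player is removed. A standalone sketch of the reduced output area; the `gr.Row` wrapper and the first player's label are assumptions, only the `output_video_1` arguments are copied from the diff:

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        output_video = gr.Video(
            label="Generated Animation",  # assumed label; defined above this hunk in app.py
            format="mp4", height=512, width=512,
            autoplay=True
        )
        output_video_1 = gr.Video(
            label="Generated Animation Rotate View Depth",
            format="mp4", height=512, width=512,
            autoplay=True
        )

if __name__ == '__main__':
    demo.launch()
```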
@@ -971,7 +958,7 @@ def launch_gradio_app():
         submit.click(
             fn=avatar_generation,
             inputs=[processed_image, working_dir, video_input, source_type, is_styled, style_image],
-            outputs=[output_video, output_video_2, output_video_3],
+            outputs=[output_video, output_video_1],  # ⏳ videos are shown later
             queue=True
         )

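With the handler now returning two values, the `outputs` list must name exactly two components; Gradio maps the returned tuple onto them positionally. A self-contained sketch of that wiring, using a stub function and `gr.Textbox` components so it runs without any video files:

```python
import gradio as gr


def fake_avatar_generation(name):
    # Stub standing in for avatar_generation: returns one value per output component.
    return f"{name}_out.mp4", f"{name}_depth.mp4"


with gr.Blocks() as demo:
    name = gr.Textbox(label="image_name")
    out_path = gr.Textbox(label="out video path")
    depth_path = gr.Textbox(label="depth video path")
    submit = gr.Button("Generate")
    # Two returned values -> two output components, mapped positionally.
    submit.click(fn=fake_avatar_generation, inputs=[name],
                 outputs=[out_path, depth_path], queue=True)

if __name__ == '__main__':
    demo.launch()
```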
@@ -1011,4 +998,9 @@ if __name__ == '__main__':
         use_safetensors=True, controlnet=controlnet, variant="fp16"
     ).to(device)
     demo_cam = False
+    base_coff = np.load(
+        'pretrained_model/temp.npy').astype(
+        np.float32)
+    base_coff = torch.from_numpy(base_coff).float()
+    Faceverse = Faceverse_manager(device=device, base_coeff=base_coff)
     launch_gradio_app()
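The base coefficients are now prepared in the `__main__` block before `launch_gradio_app()`: a float32 NumPy array converted to a torch tensor and handed to `Faceverse_manager`. A sketch of just that conversion step, with a placeholder array standing in for `pretrained_model/temp.npy` (the real shape depends on the model):

```python
import numpy as np
import torch

# Placeholder for np.load('pretrained_model/temp.npy'); shape here is made up.
base_coff_np = np.zeros((1, 257), dtype=np.float64)

# Mirror the committed conversion: cast to float32, then wrap as a float torch tensor.
base_coff = torch.from_numpy(base_coff_np.astype(np.float32)).float()
print(base_coff.dtype, base_coff.shape)  # torch.float32 torch.Size([1, 257])
```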
|