Commit 23af8a2 · Parent(s): ab06a25
Author: 刘虹雨
update code
.DS_Store
CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
app.py
CHANGED
@@ -271,6 +271,8 @@ def prepare_working_dir(dir, style):
 def launch_pretrained():
     from huggingface_hub import hf_hub_download, snapshot_download
     hf_hub_download(repo_id="KumaPower/AvatarArtist", repo_type='model', local_dir="./pretrained_model")
+    hf_hub_download(repo_id="stabilityai/stable-diffusion-2-1-base", repo_type='model', local_dir="./pretrained_model/sd21")
+    hf_hub_download(repo_id="CrucibleAI/ControlNetMediaPipeFace", repo_type='model', local_dir="./pretrained_model/control")
 
 
 def prepare_image_list(img_dir, selected_img):
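A note on the download calls above: in huggingface_hub, hf_hub_download retrieves a single file and takes a required filename argument, whereas snapshot_download mirrors an entire repository into local_dir. If the intent is to pull whole repos, a minimal sketch along these lines may be closer to it (repo ids and target directories are copied from the hunk; the wrapper name is hypothetical):

```python
from huggingface_hub import snapshot_download

def fetch_pretrained():
    # Whole-repo mirror of the AvatarArtist checkpoints (hypothetical wrapper).
    snapshot_download(repo_id="KumaPower/AvatarArtist", repo_type="model",
                      local_dir="./pretrained_model")
    # Stable Diffusion 2.1 base weights, used by the img2img pipeline in __main__.
    snapshot_download(repo_id="stabilityai/stable-diffusion-2-1-base", repo_type="model",
                      local_dir="./pretrained_model/sd21")
    # MediaPipe-face ControlNet weights.
    snapshot_download(repo_id="CrucibleAI/ControlNetMediaPipeFace", repo_type="model",
                      local_dir="./pretrained_model/control")
```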
@@ -394,8 +396,8 @@ def avatar_generation(items, save_path_base, video_path_input, source_type, is_s
     label_file_test = os.path.join(target_path, 'images512x512/dataset_realcam.json')
 
     if source_type == 'example':
-        input_img_fvid = './demo_data/source_img/img_generate_different_domain/coeffs/
-        input_img_motion = './demo_data/source_img/img_generate_different_domain/motions/
+        input_img_fvid = './demo_data/source_img/img_generate_different_domain/coeffs/demo_imgs'
+        input_img_motion = './demo_data/source_img/img_generate_different_domain/motions/demo_imgs'
     elif source_type == 'custom':
         input_img_fvid = os.path.join(save_path_base, 'processed_img/dataset/coeffs/input_image')
         input_img_motion = os.path.join(save_path_base, 'processed_img/dataset/motions/input_image')
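The example branch now points at concrete demo_imgs directories, while the custom branch derives its paths from save_path_base. A small illustrative sketch of the same selection with an early existence check (the helper and the guard are additions for illustration, not part of the commit):

```python
import os

def resolve_input_dirs(source_type, save_path_base):
    """Illustrative helper mirroring the branch above; not part of the commit."""
    if source_type == 'example':
        base = './demo_data/source_img/img_generate_different_domain'
        input_img_fvid = os.path.join(base, 'coeffs/demo_imgs')
        input_img_motion = os.path.join(base, 'motions/demo_imgs')
    elif source_type == 'custom':
        input_img_fvid = os.path.join(save_path_base, 'processed_img/dataset/coeffs/input_image')
        input_img_motion = os.path.join(save_path_base, 'processed_img/dataset/motions/input_image')
    else:
        raise ValueError(f"unknown source_type: {source_type!r}")
    # Fail early with a clear message instead of a downstream FileNotFoundError.
    for d in (input_img_fvid, input_img_motion):
        if not os.path.isdir(d):
            raise FileNotFoundError(f"expected directory is missing: {d}")
    return input_img_fvid, input_img_motion
```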
@@ -704,7 +706,7 @@ def launch_gradio_app():
     """
     )
     # DISPLAY
-    image_folder = "./demo_data/source_img/img_generate_different_domain/images512x512/
+    image_folder = "./demo_data/source_img/img_generate_different_domain/images512x512/demo_imgs"
     video_folder = "./demo_data/target_video"
 
     examples_images = sorted(
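The corrected image_folder feeds the examples_images = sorted(...) expression, whose body is not shown in this hunk. A hedged sketch of how such a list is typically assembled for a Gradio examples gallery (the glob patterns and the gr.Examples wiring are assumptions):

```python
import glob
import os

image_folder = "./demo_data/source_img/img_generate_different_domain/images512x512/demo_imgs"

# Collect the demo images in a stable order; the extensions are an assumption.
examples_images = sorted(
    glob.glob(os.path.join(image_folder, "*.png"))
    + glob.glob(os.path.join(image_folder, "*.jpg"))
)

# Gradio's gr.Examples expects one row per example, hence the nested lists:
# gr.Examples(examples=[[p] for p in examples_images], inputs=image_input)
```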
@@ -886,17 +888,17 @@ def launch_gradio_app():
 
 if __name__ == '__main__':
     import torch.multiprocessing as mp
-
     mp.set_start_method('spawn', force=True)
-
+    launch_pretrained()
+    image_folder = "./demo_data/source_img/img_generate_different_domain/images512x512/demo_imgs"
     example_img_names = os.listdir(image_folder)
     render_model, sample_steps, DiT_model, \
         vae_triplane, image_encoder, dinov2, dino_img_processor, clip_image_processor, std, mean, ws_avg, Faceverse, device, input_process_model = model_define()
-    controlnet_path = '/
+    controlnet_path = './pretrained_model/control'
     controlnet = ControlNetModel.from_pretrained(
         controlnet_path, torch_dtype=torch.float16
     )
-    sd_path = '/
+    sd_path = './pretrained_model/sd21'
     pipeline_sd = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
         sd_path, torch_dtype=torch.float16,
         use_safetensors=True, controlnet=controlnet, variant="fp16"
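On mp.set_start_method('spawn', force=True) in the hunk above: the CUDA runtime does not support being forked after initialization, so any subprocess that touches the GPU must be created with the spawn start method. A minimal, self-contained illustration (the worker function is hypothetical):

```python
import torch
import torch.multiprocessing as mp

def _worker(rank):
    # Each spawned process can safely create its own CUDA context.
    if torch.cuda.is_available():
        torch.cuda.set_device(rank % torch.cuda.device_count())
        print(f"worker {rank} using device", torch.cuda.current_device())

if __name__ == '__main__':
    # 'fork' would inherit an already-initialized CUDA runtime, which CUDA
    # does not support; 'spawn' starts each child from a fresh interpreter.
    mp.set_start_method('spawn', force=True)
    mp.spawn(_worker, nprocs=2)
```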
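For reference, a usage sketch of the pipeline assembled in __main__, loading the ControlNet and Stable Diffusion 2.1 weights from the local directories populated by launch_pretrained(); the input images, prompt, and sampler settings below are placeholders, not values from this repo:

```python
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from diffusers.utils import load_image

# Local directories filled by launch_pretrained() in the diff above.
controlnet = ControlNetModel.from_pretrained(
    "./pretrained_model/control", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "./pretrained_model/sd21", torch_dtype=torch.float16,
    use_safetensors=True, controlnet=controlnet, variant="fp16"
).to("cuda")

source = load_image("input.png")        # image to restyle (placeholder path)
control = load_image("landmarks.png")   # MediaPipe face-landmark render (placeholder)
result = pipe(
    prompt="a portrait in a new artistic style",  # placeholder prompt
    image=source, control_image=control,
    strength=0.7, num_inference_steps=30,
).images[0]
result.save("stylized.png")
```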