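# Gradio demo for MuseV image-to-video and video-to-video generation.
# On startup the script clones MuseV, installs its dependencies, downloads the
# checkpoints, patches the upstream configs, and then launches the UI.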
import subprocess
import os
import gradio as gr


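# Fetch MuseV and install its Python / OpenMMLab dependencies at startup.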
subprocess.run(["git", "clone", "https://github.com/fat-ai/MuseV.git"])

os.chdir("./MuseV")
subprocess.run(["pip", "install", "-r", "requirements.txt"])
subprocess.run(["pip", "install", "--no-cache-dir", "-U", "openmim"])
subprocess.run(["mim", "install", "mmengine"])
subprocess.run(["mim", "install", "mmcv>=2.0.1"])
subprocess.run(["mim", "install", "mmdet>=3.1.0"])
subprocess.run(["mim", "install", "mmpose>=1.1.0"])
subprocess.run(["git", "clone", "--recursive", "https://github.com/fat-ai/MuseV.git"])
subprocess.run(["git", "clone", "https://huggingface.co/TMElyralab/MuseV", "./checkpoints"])

os.chdir("..")
# The upstream notebook ran `python -c "sys.path.append(...)"`, which only changed
# a throwaway interpreter. Export PYTHONPATH instead so the inference subprocesses
# below can import MuseV's bundled MMCM, diffusers and controlnet_aux packages.
os.environ["PYTHONPATH"] = os.pathsep.join([
    "./MuseV/MuseV",
    "./MuseV/MuseV/MMCM",
    "./MuseV/MuseV/diffusers/src",
    "./MuseV/MuseV/controlnet_aux/src",
    os.environ.get("PYTHONPATH", ""),
])

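# Move the inference entry points to the repo root so they can be invoked directly.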
subprocess.run(["mv", "./MuseV/scripts/inference/text2video.py", "./MuseV/text2video.py"])
subprocess.run(["mv", "./MuseV/scripts/inference/video2video.py", "./MuseV/video2video.py"])

# Point the model configs at the checkpoints cloned above (the upstream
# configs hard-code Colab's /content paths).
for config_path in [
    "./MuseV/configs/model/motion_model.py",
    "./MuseV/configs/model/ip_adapter.py",
    "./MuseV/configs/model/T2I_all_model.py",
]:
    with open(config_path, "r+") as scrip:
        s = scrip.read().replace("/content/MuseV/checkpoints", "/home/user/app/MuseV/checkpoints")
        scrip.seek(0)
        scrip.write(s)
        scrip.truncate()

from PIL import Image

def add_new_image(prompt, image):
    # Save the uploaded frame and write a matching img2vid task entry into
    # MuseV's example.yaml. Paths assume the Space runs from /home/user/app
    # (the upstream Colab notebook saved to /content/img.png).
    image = Image.fromarray(image)
    height = image.height
    width = image.width
    lr = 0.5
    ip_img = "${.condition_images}"
    image.save("/home/user/app/img.png")
    img_settings = f"""- condition_images: /home/user/app/img.png
  eye_blinks_factor: 1.8
  height: {height}
  img_length_ratio: {lr}
  ipadapter_image: {ip_img}
  name: image
  prompt: {prompt}
  refer_image: {ip_img}
  video_path: null
  width: {width}"""
    print(img_settings)
    # Overwrite the task file so only this entry is run.
    with open("/home/user/app/MuseV/configs/tasks/example.yaml", "w") as configs:
        configs.write(img_settings)

def add_new_video(video):
    # Write a vid2vid task entry into example.yaml. The uploaded video is
    # currently ignored; the entry points at the demo pose sequence and
    # condition image bundled with MuseV.
    print(video)
    lr = 1.0
    ip_img = "${.condition_images}"
    img_settings = f"""- name: "dance2"
  prompt: "(best quality), ((masterpiece)), (highres), illustration, original, extremely detailed wallpaper"
  video_path: ./MuseV/data/source_video/video1_girl_poseseq.mp4
  condition_images: ./MuseV/data/images/cyber_girl.png
  refer_image: {ip_img}
  ipadapter_image: {ip_img}
  height: 960
  width: 512
  img_length_ratio: {lr}
  video_is_middle: True"""
    print(img_settings)
    # The task config lives under the cloned repo, not Colab's /content.
    with open("/home/user/app/MuseV/configs/tasks/example.yaml", "w") as configs:
        configs.write(img_settings)

def run(frames, fps):
    # Gradio Number inputs arrive as floats; subprocess arguments must be strings.
    frames = str(int(frames))
    fps = str(int(fps))
    #subprocess.run(["python", "./MuseV/text2video.py", "--sd_model_name", "majicmixRealv6Fp16", "--unet_model_name", "musev", "-test_data_path", "./MuseV/configs/tasks/example.yaml", "--n_batch", "1", "--target_datas", "image", "--vae_model_path", "./MuseV/checkpoints/vae/sd-vae-ft-mse", "--motion_speed", "5.0", "--time_size", "12", "--fps", "12"])
    subprocess.run(["python", "./MuseV/text2video.py", "--sd_model_name", "majicmixRealv6Fp16", "--unet_model_name", "musev_referencenet", "--referencenet_model_name", "musev_referencenet", "--ip_adapter_model_name", "musev_referencenet", "-test_data_path", "./MuseV/configs/tasks/example.yaml", "--output_dir", "./MuseV", "--n_batch", "1", "--target_datas", "image", "--vision_clip_extractor_class_name", "ImageClipVisionFeatureExtractor", "--vision_clip_model_path", "./MuseV/checkpoints/IP-Adapter/models/image_encoder", "--motion_speed", "5.0", "--vae_model_path", "./MuseV/checkpoints/vae/sd-vae-ft-mse", "--time_size", frames, "--fps", fps])
    return "./output.mp4"

def run_video():
    # --target_datas must match the task name written by add_new_video ("dance2").
    subprocess.run(["python", "./MuseV/video2video.py", "--sd_model_name", "fantasticmix_v10", "--unet_model_name", "musev", "-test_data_path", "./MuseV/configs/tasks/example.yaml", "--output_dir", "./output", "--n_batch", "1", "--controlnet_name", "dwpose_body_hand", "--which2video", "video_middle", "--target_datas", "dance2", "--fps", "12", "--time_size", "12"])
    return "./output.mp4"
    
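# Minimal UI: save an image or video first, then trigger the matching generation.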
with gr.Blocks() as demo:
  title = gr.Markdown("""# MuseV Image2Vid & Vid2Vid """)
  subtitle1 = gr.Markdown("""Image2Vid""")
  image = gr.Image()
  button1 = gr.Button(value="Save Image")
  frames = gr.Number(value=12)
  fps = gr.Number(value=12)
  prompt = gr.Text(value="(masterpiece, best quality, highres:1),(1person, solo:1),(eye blinks:1.8),(head wave:1.3)")
  button1.click(fn=add_new_image,inputs=[prompt,image])
  button2 = gr.Button(value="Generate Img2Vid")
  video = gr.Video()
  button2.click(fn=run,inputs=[frames,fps],outputs=video)
  subtitle2 = gr.Markdown("""Vid2Vid""")
  video_in = gr.Video()
  button3 = gr.Button(value="Save Video")
  button3.click(fn=add_new_video,inputs=[video_in])
  button4 = gr.Button(value="Generate Vid2Vid")
  video_out = gr.Video()
  button4.click(fn=run_video,outputs=video_out)  

demo.launch(share=True)