File size: 3,630 Bytes
da2362d
547bc6e
b877500
f8b1e61
da2362d
 
547bc6e
d8647a5
547bc6e
 
 
 
 
 
cecc2bb
b877500
cecc2bb
b877500
cecc2bb
 
33948f0
c21ae8a
 
 
33948f0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fe59321
33948f0
 
 
 
cecc2bb
099d06e
692f7c8
cb806f5
692f7c8
 
cb806f5
 
 
 
 
 
 
 
 
 
 
 
 
f8b1e61
3126ea4
e6dd72d
cad05d8
1dac678
93216ff
b877500
cb806f5
 
 
b877500
 
cecc2bb
d62198c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
import subprocess
import os
import gradio as gr
import spaces

# --- one-time environment setup: clone MuseV, install deps, patch paths ---
subprocess.run(["git", "clone", "https://github.com/fat-ai/MuseV.git"])

os.chdir("./MuseV")
subprocess.run(["pip", "install", "-r", "requirements.txt"])
subprocess.run(["pip", "install", "--no-cache-dir", "-U", "openmim"])
subprocess.run(["mim", "install", "mmengine"])
subprocess.run(["mim", "install", "mmcv>=2.0.1"])
subprocess.run(["mim", "install", "mmdet>=3.1.0"])
subprocess.run(["mim", "install", "mmpose>=1.1.0"])
# Nested clone: the sys.path entries referenced below expect ./MuseV/MuseV/... to exist.
subprocess.run(["git", "clone", "--recursive", "https://github.com/fat-ai/MuseV.git"])
subprocess.run(["git", "clone", "https://huggingface.co/TMElyralab/MuseV", "./checkpoints"])

os.chdir("..")
# NOTE(review): this subprocess is a no-op — sys.path changes made in a child
# interpreter do not persist into this process, and the extra escaped quotes
# make `python -c` evaluate a bare string literal anyway. Kept for fidelity;
# safe to delete once confirmed nothing depends on it.
command = "\"import sys; sys.path.append('./MuseV/MuseV'); sys.path.append('./MuseV/MuseV/MMCM'); sys.path.append('./MuseV/MuseV/diffusers/src'); sys.path.append('./MuseV/MuseV/controlnet_aux/src')\""
subprocess.run(["python","-c",command])

subprocess.run(["mv", "./MuseV/scripts/inference/text2video.py", "./MuseV/text2video.py"])
subprocess.run(["mv", "./MuseV/scripts/inference/video2video.py", "./MuseV/video2video.py"])


def _patch_checkpoint_paths(config_path):
    """Rewrite Colab checkpoint paths in *config_path* to the Spaces layout.

    Fixes the original read-modify-write sequence: ``read()`` left the file
    pointer at EOF, so the subsequent ``write(s)`` *appended* a patched copy
    of the module after the original text instead of replacing it
    (``truncate()`` at EOF was a no-op, and ``seek(0)`` came too late).
    """
    with open(config_path, "r+") as fh:
        patched = fh.read().replace(
            "/content/MuseV/checkpoints", "/home/user/app/MuseV/checkpoints"
        )
        fh.seek(0)      # rewind so we overwrite in place rather than append
        fh.write(patched)
        fh.truncate()   # drop any leftover tail if the file shrank


for _cfg in (
    "./MuseV/configs/model/motion_model.py",
    "./MuseV/configs/model/ip_adapter.py",
    "./MuseV/configs/model/T2I_all_model.py",
):
    _patch_checkpoint_paths(_cfg)

from PIL import Image

def add_new_image(image):
    """Write the task config consumed by MuseV's text2video script.

    Called by gradio on every image upload/change.

    NOTE(review): the uploaded *image* is currently ignored — the settings
    below hard-code ``./MuseV/data/images/yongen.jpeg`` as the condition
    image. The original also did ``Image.open(image)`` and discarded the
    result (and that call would fail if gradio delivers a numpy array
    rather than a filepath); that dead call is removed here.
    TODO: point ``condition_images`` at the uploaded file instead.
    """
    print(image)  # debug: show what gradio handed us (path or array)
    img_settings = """- condition_images: ./MuseV/data/images/yongen.jpeg
  eye_blinks_factor: 1.8
  height: 1308
  img_length_ratio: 0.957
  ipadapter_image: ${.condition_images}
  name: yongen
  prompt: (masterpiece, best quality, highres:1),(1boy, solo:1),(eye blinks:1.8),(head wave:1.3)
  refer_image: ${.condition_images}
  video_path: null
  width: 736"""
    # Open in "w" so the file is truncated first. The original "r+" wrote at
    # position 0 without truncating: any longer previous contents survived
    # past the new text, yielding a corrupt YAML file.
    with open("./MuseV/configs/tasks/run.yaml", "w") as configs:
        configs.write(img_settings)
        
@spaces.GPU
def run(duration=180):
    """Run MuseV text2video inference and return the output video path.

    NOTE(review): *duration* is never read inside the function. If it was
    meant to size the ZeroGPU allocation, spaces takes that via the
    decorator (``@spaces.GPU(duration=...)``) — confirm intent.
    """
    # NOTE(review): "-test_data_path" uses a single leading dash for a long
    # option — verify text2video.py's CLI actually accepts that spelling
    # (argparse long options conventionally need "--").
    subprocess.run(["python", "./MuseV/text2video.py", "--sd_model_name", "majicmixRealv6Fp16", "--unet_model_name", "musev", "-test_data_path", "./MuseV/configs/tasks/example.yaml", "--n_batch", "1", "--target_datas", "yongen", "--vae_model_path", "./MuseV/checkpoints/vae/sd-vae-ft-mse", "--time_size", "12", "--fps", "12"])
    #subprocess.run(["python", "./MuseV/text2video.py", "--sd_model_name", "majicmixRealv6Fp16", "--unet_model_name", "musev_referencenet", "--referencenet_model_name", "musev_referencenet", "--ip_adapter_model_name", "musev_referencenet", "-test_data_path", "./MuseV/configs/tasks/example.yaml", "--output_dir", "./MuseV",  "--n_batch", "1",  "--target_datas", "jinkesi2",  "--vision_clip_extractor_class_name", "ImageClipVisionFeatureExtractor", "--vision_clip_model_path", "./MuseV/checkpoints/IP-Adapter/models/image_encoder", "--motion_speed", "5.0", "--vae_model_path", "./MuseV/checkpoints/vae/sd-vae-ft-mse", "--time_size", "120", "--fps", "24"])
    # NOTE(review): assumes the inference script writes ./output.mp4 into the
    # current working directory — confirm against text2video.py's output
    # handling (the commented variant passes an explicit --output_dir).
    return "./output.mp4"

# Minimal UI: upload an image (rewrites the task config), then click to
# run inference and show the resulting video.
with gr.Blocks() as demo:
    source_image = gr.Image()
    generate = gr.Button(label="Generate Video")
    output_video = gr.Video()

    # Wire events after all components exist.
    source_image.change(fn=add_new_image, inputs=source_image)
    generate.click(fn=run, outputs=output_video)

demo.launch()