Muhammad Taqi Raza
committed on
Commit
·
cdb41ad
1
Parent(s):
7c99c8e
adding files
Browse files
- download/download.py +1 -1
- download/download_models.sh +0 -13
- gradio_app.py +2 -2
- inference/cli_demo_camera_i2v_pcd.py +1 -0
- inference/v2v_data/inference.py +1 -1
download/download.py
CHANGED
@@ -17,7 +17,7 @@ def download_model():
|
|
17 |
local_dir_use_symlinks=False,
|
18 |
)
|
19 |
snapshot_download(
|
20 |
-
repo_id="THUDM/
|
21 |
local_dir="/app/pretrained/CogVideoX-5b-I2V",
|
22 |
local_dir_use_symlinks=False,
|
23 |
)
|
|
|
17 |
local_dir_use_symlinks=False,
|
18 |
)
|
19 |
snapshot_download(
|
20 |
+
repo_id="THUDM/CogVideoX1.5-5B-SAT",
|
21 |
local_dir="/app/pretrained/CogVideoX-5b-I2V",
|
22 |
local_dir_use_symlinks=False,
|
23 |
)
|
download/download_models.sh
CHANGED
@@ -1,16 +1,3 @@
|
|
1 |
-
|
2 |
mkdir -p /app/pretrained/RAFT
|
3 |
-
echo "=== Current Directory ==="
|
4 |
-
pwd
|
5 |
-
|
6 |
-
echo -e "\n=== Path: ./ ==="
|
7 |
-
realpath ./
|
8 |
-
|
9 |
-
echo -e "\n=== Path: ../ ==="
|
10 |
-
realpath ../
|
11 |
-
|
12 |
-
echo -e "\n=== Path: ../../ ==="
|
13 |
-
realpath ../../
|
14 |
-
|
15 |
gdown 1MqDajR89k-xLV0HIrmJ0k-n8ZpG6_suM -O /app/pretrained/RAFT/raft-things.pth
|
16 |
python /app/download/download.py
|
|
|
|
|
1 |
mkdir -p /app/pretrained/RAFT
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
gdown 1MqDajR89k-xLV0HIrmJ0k-n8ZpG6_suM -O /app/pretrained/RAFT/raft-things.pth
|
3 |
python /app/download/download.py
|
gradio_app.py
CHANGED
@@ -115,7 +115,7 @@ def inference(
|
|
115 |
seed, height, width, downscale_coef, vae_channels,
|
116 |
controlnet_input_channels, controlnet_transformer_num_layers
|
117 |
):
|
118 |
-
|
119 |
ckpt_path = "/app/out/EPiC_pretrained/checkpoint-500.pt"
|
120 |
video_root_dir = "/app/output_anchor"
|
121 |
out_dir = "/app/output"
|
@@ -123,7 +123,7 @@ def inference(
|
|
123 |
command = [
|
124 |
"python", "/app/inference/cli_demo_camera_i2v_pcd.py",
|
125 |
"--video_root_dir", video_root_dir,
|
126 |
-
"--base_model_path",
|
127 |
"--controlnet_model_path", ckpt_path,
|
128 |
"--output_path", out_dir,
|
129 |
"--controlnet_weights", str(controlnet_weights),
|
|
|
115 |
seed, height, width, downscale_coef, vae_channels,
|
116 |
controlnet_input_channels, controlnet_transformer_num_layers
|
117 |
):
|
118 |
+
model_path = "/app/pretrained/CogVideoX-5b-I2V"
|
119 |
ckpt_path = "/app/out/EPiC_pretrained/checkpoint-500.pt"
|
120 |
video_root_dir = "/app/output_anchor"
|
121 |
out_dir = "/app/output"
|
|
|
123 |
command = [
|
124 |
"python", "/app/inference/cli_demo_camera_i2v_pcd.py",
|
125 |
"--video_root_dir", video_root_dir,
|
126 |
+
"--base_model_path", model_path,
|
127 |
"--controlnet_model_path", ckpt_path,
|
128 |
"--output_path", out_dir,
|
129 |
"--controlnet_weights", str(controlnet_weights),
|
inference/cli_demo_camera_i2v_pcd.py
CHANGED
@@ -252,6 +252,7 @@ def generate_video(
|
|
252 |
controlnet=controlnet,
|
253 |
scheduler=scheduler,
|
254 |
).to('cuda')
|
|
|
255 |
# If you're using with lora, add this code
|
256 |
if lora_path:
|
257 |
pipe.load_lora_weights(lora_path, weight_name="pytorch_lora_weights.safetensors", adapter_name="test_1")
|
|
|
252 |
controlnet=controlnet,
|
253 |
scheduler=scheduler,
|
254 |
).to('cuda')
|
255 |
+
|
256 |
# If you're using with lora, add this code
|
257 |
if lora_path:
|
258 |
pipe.load_lora_weights(lora_path, weight_name="pytorch_lora_weights.safetensors", adapter_name="test_1")
|
inference/v2v_data/inference.py
CHANGED
@@ -109,7 +109,7 @@ def get_parser():
|
|
109 |
default='DDIM_Origin',
|
110 |
help='Choose the sampler',
|
111 |
)
|
112 |
-
# parser.add_argument('--transformer_path', type=str,
|
113 |
parser.add_argument(
|
114 |
'--transformer_path',
|
115 |
type=str,
|
|
|
109 |
default='DDIM_Origin',
|
110 |
help='Choose the sampler',
|
111 |
)
|
112 |
+
# parser.add_argument('--transformer_path', type=str,kdefault='checkpoints/TrajectoryCrafter/crosstransformer', help='Path to the pretrained transformer model')
|
113 |
parser.add_argument(
|
114 |
'--transformer_path',
|
115 |
type=str,
|