Muhammad Taqi Raza committed
Commit 987bf72 · 1 Parent(s): e80810e

setting gradio path

Dockerfile CHANGED
@@ -3,8 +3,8 @@ FROM pytorch/pytorch:2.2.2-cuda12.1-cudnn8-runtime
 SHELL ["/bin/bash", "-c"]
 
 # Environment variables for Hugging Face cache
-ENV HF_HOME=/home/user/EPiC/hf_cache
-ENV TRANSFORMERS_CACHE=/home/user/EPiC/hf_cache
+ENV HF_HOME=/home/user/app/hf_cache
+ENV TRANSFORMERS_CACHE=/home/user/app/hf_cache
 ENV HF_TOKEN=${HF_TOKEN}
 ENV PATH=/opt/conda/bin:$PATH
 # Install system dependencies
@@ -19,16 +19,16 @@ WORKDIR /app
 COPY . /app
 
 # Fix permissions for all subdirectories
-RUN mkdir -p /home/user/EPiC/pretrained /home/user/EPiC/hf_cache /.cache/gdown && \
+RUN mkdir -p /home/user/app/pretrained /home/user/app/hf_cache /.cache/gdown && \
     chmod -R 777 /app && \
     chmod -R 777 /.cache && \
     chmod -R 777 /root
 
 # Create conda environment and install dependencies
-COPY requirements.txt /home/user/EPiC/requirements.txt
+COPY requirements.txt /home/user/app/requirements.txt
 RUN conda create -n epic python=3.10 -y && \
     conda run -n epic pip install --upgrade pip && \
-    conda run -n epic pip install -r /home/user/EPiC/requirements.txt
+    conda run -n epic pip install -r /home/user/app/requirements.txt
 
 RUN chmod -R 777 /app /workspace
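One note while these ENV lines are being touched: recent transformers releases deprecate TRANSFORMERS_CACHE in favor of HF_HOME, so keeping both set is harmless but the HF_HOME line is the one doing the work. A minimal sanity check, not part of the commit, that can be run inside the built container:

# check_cache_env.py — sketch only; confirms the cache now resolves under /home/user/app
import os

for var in ("HF_HOME", "TRANSFORMERS_CACHE"):
    # Expected output for both after this commit: /home/user/app/hf_cache
    print(var, "=", os.environ.get(var))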
 
download/download.py CHANGED
@@ -3,22 +3,22 @@ from huggingface_hub import snapshot_download
 def download_model():
     snapshot_download(
         repo_id="tencent/DepthCrafter",
-        local_dir="/home/user/EPiC/pretrained/DepthCrafter",
+        local_dir="/home/user/app/pretrained/DepthCrafter",
         local_dir_use_symlinks=False,
     )
     snapshot_download(
         repo_id="stabilityai/stable-video-diffusion-img2vid",
-        local_dir="/home/user/EPiC/pretrained/stable-video-diffusion-img2vid",
+        local_dir="/home/user/app/pretrained/stable-video-diffusion-img2vid",
         local_dir_use_symlinks=False,
     )
     snapshot_download(
         repo_id= "Qwen/Qwen2.5-VL-7B-Instruct",
-        local_dir="/home/user/EPiC/pretrained/Qwen2.5-VL-7B-Instruct",
+        local_dir="/home/user/app/pretrained/Qwen2.5-VL-7B-Instruct",
         local_dir_use_symlinks=False,
     )
     snapshot_download(
         repo_id="THUDM/CogVideoX1.5-5B-SAT",
-        local_dir="/home/user/EPiC/pretrained/CogVideoX-5b-I2V",
+        local_dir="/home/user/app/pretrained/CogVideoX-5b-I2V",
         local_dir_use_symlinks=False,
     )
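Two side notes on this file: recent huggingface_hub releases deprecate local_dir_use_symlinks (it is ignored when local_dir is given), and deriving every target from one base-directory constant would make the next path migration a one-line change. A hedged sketch of that shape, where EPIC_HOME is a hypothetical override this commit does not define:

import os
from huggingface_hub import snapshot_download

# Hypothetical env override; falls back to the path used in the diff above.
BASE_DIR = os.environ.get("EPIC_HOME", "/home/user/app")

# repo_id -> subdirectory under pretrained/ (names taken from the diff above)
MODELS = {
    "tencent/DepthCrafter": "DepthCrafter",
    "stabilityai/stable-video-diffusion-img2vid": "stable-video-diffusion-img2vid",
    "Qwen/Qwen2.5-VL-7B-Instruct": "Qwen2.5-VL-7B-Instruct",
    "THUDM/CogVideoX1.5-5B-SAT": "CogVideoX-5b-I2V",
}

def download_model():
    for repo_id, subdir in MODELS.items():
        # local_dir_use_symlinks omitted: deprecated/ignored in recent releases
        snapshot_download(repo_id=repo_id,
                          local_dir=os.path.join(BASE_DIR, "pretrained", subdir))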
 
download/download_models.sh CHANGED
@@ -1,3 +1,3 @@
-mkdir -p /home/user/EPiC/pretrained/RAFT
-gdown 1MqDajR89k-xLV0HIrmJ0k-n8ZpG6_suM -O /home/user/EPiC/pretrained/RAFT/raft-things.pth
-python /home/user/EPiC/download/download.py
+mkdir -p /home/user/app/pretrained/RAFT
+gdown 1MqDajR89k-xLV0HIrmJ0k-n8ZpG6_suM -O /home/user/app/pretrained/RAFT/raft-things.pth
+python /home/user/app/download/download.py
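As written, the gdown line re-fetches the RAFT weights on every run. A sketch of an idempotent equivalent using gdown's Python API (assuming gdown >= 4, which accepts a file id directly; the requirements file is not shown here):

import os
import gdown

RAFT_PATH = "/home/user/app/pretrained/RAFT/raft-things.pth"
os.makedirs(os.path.dirname(RAFT_PATH), exist_ok=True)
if not os.path.exists(RAFT_PATH):
    # Same file id as the shell script above
    gdown.download(id="1MqDajR89k-xLV0HIrmJ0k-n8ZpG6_suM", output=RAFT_PATH)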
gradio_app.py CHANGED
@@ -4,11 +4,11 @@ from datetime import datetime
 from pathlib import Path
 import gradio as gr
 import numpy as np
-
+import os
 # -----------------------------
 # Setup paths and env
 # -----------------------------
-HF_HOME = "/home/user/EPiC/hf_cache"
+HF_HOME = "/home/user/app/hf_cache"
 os.environ["HF_HOME"] = HF_HOME
 os.environ["TRANSFORMERS_CACHE"] = HF_HOME
 os.makedirs(HF_HOME, exist_ok=True)
@@ -16,7 +16,7 @@ os.makedirs(HF_HOME, exist_ok=True)
 # hf_hub_download(repo_id="ai-forever/Real-ESRGAN", filename="RealESRGAN_x4.pth", local_dir="model_real_esran")
 # snapshot_download(repo_id="AlexWortega/RIFE", local_dir="model_rife")
 
-PRETRAINED_DIR = "/home/user/EPiC/pretrained"
+PRETRAINED_DIR = "/home/user/app/pretrained"
 os.makedirs(PRETRAINED_DIR, exist_ok=True)
 
 # -----------------------------
@@ -48,8 +48,8 @@ def get_anchor_video(video_path, fps, num_frames, target_pose, mode,
                      seed_input, height, width, aspect_ratio_inputs,
                      init_dx, init_dy, init_dz):
 
-    temp_input_path = "/home/user/EPiC/temp_input.mp4"
-    output_dir = "/home/user/EPiC/output_anchor"
+    temp_input_path = "/home/user/app/temp_input.mp4"
+    output_dir = "/home/user/app/output_anchor"
     video_output_path = f"{output_dir}/masked_videos/output.mp4"
 
     if video_path:
@@ -64,7 +64,7 @@ def get_anchor_video(video_path, fps, num_frames, target_pose, mode,
     h_s, w_s = sample_size.strip().split(",")
 
     command = [
-        "python", "/home/user/EPiC/inference/v2v_data/inference.py",
+        "python", "/home/user/app/inference/v2v_data/inference.py",
         "--video_path", temp_input_path,
         "--stride", "1",
         "--out_dir", output_dir,
@@ -116,13 +116,13 @@ def inference(
     seed, height, width, downscale_coef, vae_channels,
     controlnet_input_channels, controlnet_transformer_num_layers
 ):
-    model_path = "/home/user/EPiC/pretrained/CogVideoX-5b-I2V"
-    ckpt_path = "/home/user/EPiC/out/EPiC_pretrained/checkpoint-500.pt"
-    video_root_dir = "/home/user/EPiC/output_anchor"
-    out_dir = "/home/user/EPiC/output"
+    model_path = "/home/user/app/pretrained/CogVideoX-5b-I2V"
+    ckpt_path = "/home/user/app/out/EPiC_pretrained/checkpoint-500.pt"
+    video_root_dir = "/home/user/app/output_anchor"
+    out_dir = "/home/user/app/output"
 
     command = [
-        "python", "/home/user/EPiC/inference/cli_demo_camera_i2v_pcd.py",
+        "python", "/home/user/app/inference/cli_demo_camera_i2v_pcd.py",
         "--video_root_dir", video_root_dir,
         "--base_model_path", model_path,
         "--controlnet_model_path", ckpt_path,
@@ -256,5 +256,6 @@ with demo:
     )
 
 if __name__ == "__main__":
+    print("Current working directory:", os.getcwd())
     download_models()
     demo.launch(server_name="0.0.0.0", server_port=7860)
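The command lists built in get_anchor_video() and inference() are presumably handed to subprocess; a hedged sketch of a runner that surfaces stderr in the Gradio logs, so a stale /home/user/EPiC path fails loudly rather than silently (run_step is a hypothetical helper, not part of this commit):

import subprocess

def run_step(command):
    # Run one stage of the pipeline and raise with the captured stderr
    # instead of discarding it.
    result = subprocess.run(command, capture_output=True, text=True)
    if result.returncode != 0:
        raise RuntimeError(f"{command[1]} failed:\n{result.stderr}")
    return result.stdout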
inference/v2v_data/get_anchor_videos.sh CHANGED
@@ -6,7 +6,7 @@ target_pose_str="0_30_-0.6_0_0"
 traj_name="loop1"
 traj_txt="test/trajs/${traj_name}.txt"
 
-video="/home/user/EPiC/data/test_v2v/videos/amalfi-coast_traj_loop2.mp4"
+video="/home/user/app/data/test_v2v/videos/amalfi-coast_traj_loop2.mp4"
 
 processed_data_name=$1
 # filename=$(basename "$video" .mp4)
inference/v2v_data/inference.py CHANGED
@@ -99,7 +99,7 @@ def get_parser():
     parser.add_argument(
         '--model_name',
         type=str,
-        default='/home/user/EPiC/pretrained/CogVideoX-Fun-V1.1-5b-InP',
+        default='/home/user/app/pretrained/CogVideoX-Fun-V1.1-5b-InP',
         help='Path to the model',
     )
     parser.add_argument(
@@ -113,7 +113,7 @@ def get_parser():
     parser.add_argument(
         '--transformer_path',
         type=str,
-        default="/home/user/EPiC/pretrained/TrajectoryCrafter",
+        default="/home/user/app/pretrained/TrajectoryCrafter",
         help='Path to the pretrained transformer model',
     )
     parser.add_argument(
@@ -150,14 +150,14 @@ def get_parser():
         default=". The video is of high quality, and the view is very clear. ",
         help='Prompt for video generation',
     )
-    parser.add_argument('--qwen_path', type=str, default="/home/user/EPiC/pretrained/Qwen2.5-VL-7B-Instruct")
+    parser.add_argument('--qwen_path', type=str, default="/home/user/app/pretrained/Qwen2.5-VL-7B-Instruct")
 
     ## depth
     # parser.add_argument('--unet_path', type=str, default='checkpoints/DepthCrafter', help='Path to the UNet model')
     parser.add_argument(
         '--unet_path',
         type=str,
-        default="/home/user/EPiC/pretrained/DepthCrafter",
+        default="/home/user/app/pretrained/DepthCrafter",
         help='Path to the UNet model',
    )
 
@@ -165,7 +165,7 @@ def get_parser():
     parser.add_argument(
         '--pre_train_path',
         type=str,
-        default="/home/user/EPiC/pretrained/stable-video-diffusion-img2vid",
+        default="/home/user/app/pretrained/stable-video-diffusion-img2vid",
         help='Path to the pre-trained model',
     )
     parser.add_argument(
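Since this commit edits six argparse defaults sharing the same prefix, one hedged alternative is to route them through a single helper so the next move touches one line; PRETRAINED_ROOT below is a hypothetical convention, not something the commit introduces:

import os

# Hypothetical env override; defaults to the path the diff above settles on.
PRETRAINED_ROOT = os.environ.get("PRETRAINED_ROOT", "/home/user/app/pretrained")

def pretrained(name: str) -> str:
    # Resolve a model directory under the shared pretrained root.
    return os.path.join(PRETRAINED_ROOT, name)

# Usage inside get_parser(), e.g.:
#   parser.add_argument('--unet_path', type=str, default=pretrained("DepthCrafter"))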