roll-ai committed
Commit 99da118 · verified · 1 Parent(s): e0c2329

Update app.py

Files changed (1):
  1. app.py +59 -13
app.py CHANGED
@@ -8,20 +8,25 @@ import sys
  import traceback
  from huggingface_hub import hf_hub_download

  # =========================================
  # 1. Define Hugging Face dataset + weights
  # =========================================

- HF_DATASET_REPO = "roll-ai/FloVD-weights" # your dataset repo on HF

  WEIGHT_FILES = {
-     "ckpt/FVSM/FloVD_FVSM_Controlnet.pt": "FVSM/FloVD_FVSM_Controlet.pt",
      "ckpt/OMSM/selected_blocks.safetensors": "OMSM/selected_blocks.safetensors",
      "ckpt/OMSM/pytorch_lora_weights.safetensors": "OMSM/pytorch_lora_weights.safetensors",
      "ckpt/others/depth_anything_v2_metric_hypersim_vitb.pth": "others/depth_anything_v2_metric_hypersim_vitb.pth"
  }
- print("")
- print("Downloading model...", flush=True)
  def download_weights():
      print("🔄 Downloading model weights via huggingface_hub...")
      for hf_path, local_rel_path in WEIGHT_FILES.items():
@@ -49,13 +54,14 @@ def print_ckpt_structure(base_path="ckpt"):
          for f in files:
              print(f"{sub_indent}📄 {f}", flush=True)

- # Call it
  print_ckpt_structure()
  # =========================================
- # 2. Import the FloVD generation pipeline
  # =========================================

  from inference.flovd_demo import generate_video
  def run_inference(prompt, image, pose_type, speed, use_flow_integration, cam_pose_name):
      log_buffer = io.StringIO()
      sys_stdout = sys.stdout
@@ -67,7 +73,6 @@ def run_inference(prompt, image, pose_type, speed, use_flow_integration, cam_pos
      os.makedirs("input_images", exist_ok=True)
      image_path = "input_images/input_image.png"

- # ✅ Convert NumPy to PIL if necessary
      if not isinstance(image, Image.Image):
          image = Image.fromarray(image.astype("uint8"))

@@ -110,11 +115,12 @@ def run_inference(prompt, image, pose_type, speed, use_flow_integration, cam_pos

      return (video_path if video_path and os.path.exists(video_path) else None), logs

  # =========================================
- # 3. Gradio App Interface
  # =========================================

- demo = gr.Interface(
      fn=run_inference,
      inputs=[
          gr.Textbox(label="Prompt", value="A girl riding a bicycle through a park."),
@@ -137,8 +143,48 @@ demo = gr.Interface(
      description="Upload an image and prompt to generate motion-controlled video using FloVD and CogVideoX."
  )

- # -----------------------------
- # Launch the App
- # -----------------------------
  if __name__ == "__main__":
-     demo.launch(server_name="0.0.0.0",debug=True, show_error=True, server_port=7860)
 
  import traceback
  from huggingface_hub import hf_hub_download

+ # For live system monitoring
+ import psutil
+ import GPUtil
+
  # =========================================
  # 1. Define Hugging Face dataset + weights
  # =========================================

+ HF_DATASET_REPO = "roll-ai/FloVD-weights"

  WEIGHT_FILES = {
+     "ckpt/FVSM/FloVD_FVSM_Controlnet.pt": "FVSM/FloVD_FVSM_Controlnet.pt",
      "ckpt/OMSM/selected_blocks.safetensors": "OMSM/selected_blocks.safetensors",
      "ckpt/OMSM/pytorch_lora_weights.safetensors": "OMSM/pytorch_lora_weights.safetensors",
      "ckpt/others/depth_anything_v2_metric_hypersim_vitb.pth": "others/depth_anything_v2_metric_hypersim_vitb.pth"
  }
+
+ print("\nDownloading model...", flush=True)
+
  def download_weights():
      print("🔄 Downloading model weights via huggingface_hub...")
      for hf_path, local_rel_path in WEIGHT_FILES.items():
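The hunk cuts off just inside the download loop. A minimal sketch of how each entry can be resolved with hf_hub_download, assuming the dict keys are paths inside the dataset repo and the values are destinations under a local ckpt/ directory (neither detail is visible in this diff):

    import os, shutil
    from huggingface_hub import hf_hub_download

    def download_weights_sketch():
        # Hypothetical stand-in for the truncated body of download_weights()
        for hf_path, local_rel_path in WEIGHT_FILES.items():
            cached_file = hf_hub_download(
                repo_id=HF_DATASET_REPO,
                filename=hf_path,
                repo_type="dataset",   # the weights are hosted in a dataset repo
            )
            target = os.path.join("ckpt", local_rel_path)
            os.makedirs(os.path.dirname(target), exist_ok=True)
            shutil.copy(cached_file, target)  # place the file where the pipeline expects it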
 
          for f in files:
              print(f"{sub_indent}📄 {f}", flush=True)

  print_ckpt_structure()
+
  # =========================================
+ # 2. Import FloVD generation pipeline
  # =========================================

  from inference.flovd_demo import generate_video
+
  def run_inference(prompt, image, pose_type, speed, use_flow_integration, cam_pose_name):
      log_buffer = io.StringIO()
      sys_stdout = sys.stdout
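Only the setup of the log capture is visible here; the rest of run_inference redirects stdout into the buffer and restores it before returning the collected text as logs. A generic sketch of that redirect-and-restore pattern, using a hypothetical capture_logs helper for illustration:

    import io, sys

    def capture_logs(fn, *args, **kwargs):
        # Run fn while collecting everything it prints, then return (result, printed_text)
        log_buffer = io.StringIO()
        sys_stdout = sys.stdout
        sys.stdout = log_buffer
        try:
            result = fn(*args, **kwargs)
        finally:
            sys.stdout = sys_stdout   # always restore the real stdout
        return result, log_buffer.getvalue()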
 
      os.makedirs("input_images", exist_ok=True)
      image_path = "input_images/input_image.png"

      if not isinstance(image, Image.Image):
          image = Image.fromarray(image.astype("uint8"))
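gr.Image passes the callback a NumPy array by default, while the rest of the app works with a file at image_path, hence the conversion to PIL before (presumably) saving. A standalone illustration, with a stand-in array:

    import os
    import numpy as np
    from PIL import Image

    arr = np.zeros((480, 720, 3), dtype=np.float32)   # stand-in for a Gradio image input
    if not isinstance(arr, Image.Image):
        arr = Image.fromarray(arr.astype("uint8"))    # same uint8 cast as in the app
    os.makedirs("input_images", exist_ok=True)
    arr.save("input_images/input_image.png")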

      return (video_path if video_path and os.path.exists(video_path) else None), logs

+
  # =========================================
+ # 3. Define FloVD Gradio Interface
  # =========================================

+ video_interface = gr.Interface(
      fn=run_inference,
      inputs=[
          gr.Textbox(label="Prompt", value="A girl riding a bicycle through a park."),
 
      description="Upload an image and prompt to generate motion-controlled video using FloVD and CogVideoX."
  )

+ # =========================================
+ # 4. Live System Monitor
+ # =========================================
+
+ def get_system_stats():
+     cpu = psutil.cpu_percent()
+     mem = psutil.virtual_memory()
+     disk = psutil.disk_usage('/')
+     try:
+         gpus = GPUtil.getGPUs()
+         gpu_info = "\n".join([
+             f"GPU {i}: {gpu.name}, {gpu.memoryUsed}MB / {gpu.memoryTotal}MB, Util: {gpu.load * 100:.1f}%"
+             for i, gpu in enumerate(gpus)
+         ]) if gpus else "No GPU detected"
+     except Exception as e:
+         gpu_info = f"GPU info error: {e}"
+
+     return (
+         f"🧠 CPU Usage: {cpu}%\n"
+         f"💾 RAM: {mem.used / 1e9:.2f} GB / {mem.total / 1e9:.2f} GB ({mem.percent}%)\n"
+         f"🗄️ Disk: {disk.used / 1e9:.2f} GB / {disk.total / 1e9:.2f} GB ({disk.percent}%)\n"
+         f"🎮 {gpu_info}"
+     )
+
+ with gr.Blocks() as monitor_tab:
+     gr.Markdown("## 📊 Live System Resource Monitor")
+     # A callable `value` plus `every=` makes Gradio re-run get_system_stats every 2 s
+     stats_box = gr.Textbox(value=get_system_stats, every=2.0, label="Live Stats", lines=10, interactive=False)
+
+ # =========================================
+ # 5. Combine Tabs: FloVD + Monitor
+ # =========================================
+
+ with gr.Blocks() as app:
+     with gr.Tab("🎥 Video Generator"):
+         video_interface.render()
+     with gr.Tab("📊 System Monitor"):
+         monitor_tab.render()
+
+ # =========================================
+ # 6. Launch App
+ # =========================================
+
  if __name__ == "__main__":
+     app.launch(server_name="0.0.0.0", server_port=7860, debug=True, show_error=True)
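psutil and GPUtil are new third-party dependencies introduced by this commit; both must be installed in the Space for the new imports at the top of app.py to resolve. If requirements.txt does not already list them, it would need roughly the following additions (file contents assumed, not shown in this diff):

    # requirements.txt (assumed additions)
    psutil
    GPUtil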